// SPDX-License-Identifier: GPL-2.0-only
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*

Goals and Theory of Operation

The primary goal of this feature is to reduce the kernel boot time, by
running the various independent hardware delay and discovery operations
decoupled from one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while these operations still
have their externally visible parts happen sequentially and in-order.
(not unlike how out-of-order CPUs retire their instructions in order)

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core will assign each scheduled event such a sequence cookie and
pass this to the called functions.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function will make sure that all asynchronous
operations that were scheduled prior to the operation corresponding with the
cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but shares global resources with other drivers/subsystems that do
not use the asynchronous call feature, needs to do a full synchronization
with the async_synchronize_full() function before returning from its init
function. This is to maintain strict ordering between the asynchronous and
synchronous parts of the kernel.

*/
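
/*
 * Purely illustrative sketch of the cookie pattern described above.  The
 * names (struct mydrv, mydrv_slow_discovery, mydrv_register) are
 * hypothetical and not part of this file or of <linux/async.h>:
 *
 *	static void mydrv_probe_one(void *data, async_cookie_t cookie)
 *	{
 *		struct mydrv *md = data;
 *
 *		mydrv_slow_discovery(md);	// free to run out of order
 *
 *		// Globally visible step: wait for everything scheduled
 *		// before this entry, so device numbering stays in
 *		// submission order.
 *		async_synchronize_cookie(cookie);
 *		mydrv_register(md);
 *	}
 *
 *	async_schedule(mydrv_probe_one, md);
 */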

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/ktime.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "workqueue_internal.h"

static async_cookie_t next_cookie = 1;

/* Cap on outstanding entries; above this, new work runs synchronously. */
#define MAX_WORK		32768
#define ASYNC_COOKIE_MAX	ULLONG_MAX	/* infinity cookie */

static LIST_HEAD(async_global_pending);	/* pending from all registered doms */
static ASYNC_DOMAIN(async_dfl_domain);
static DEFINE_SPINLOCK(async_lock);

struct async_entry {
	struct list_head	domain_list;
	struct list_head	global_list;
	struct work_struct	work;
	async_cookie_t		cookie;
	async_func_t		func;
	void			*data;
	struct async_domain	*domain;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);

static atomic_t entry_count;

static long long microseconds_since(ktime_t start)
{
	ktime_t now = ktime_get();

	/* ns >> 10 is a cheap approximation of ns / 1000 (about 2.4% low) */
	return ktime_to_ns(ktime_sub(now, start)) >> 10;
}

/*
 * Return the cookie of the oldest still-pending entry in @domain, or across
 * all registered domains when @domain is NULL.  Returns ASYNC_COOKIE_MAX
 * when nothing is pending.
 */
static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
						 struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
						 struct async_entry, global_list);
	}

	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
}

/*
 * Run one scheduled entry: call its function, then unlink and free it.
 */
static void async_run_entry_fn(struct work_struct *work)
{
	struct async_entry *entry =
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t calltime;

	/* 1) run (and print duration) */
	pr_debug("calling %lli_%pS @ %i\n", (long long)entry->cookie,
		 entry->func, task_pid_nr(current));
	calltime = ktime_get();

	entry->func(entry->data, entry->cookie);

	pr_debug("initcall %lli_%pS returned after %lld usecs\n",
		 (long long)entry->cookie, entry->func,
		 microseconds_since(calltime));

	/* 2) remove self from the pending queues */
	spin_lock_irqsave(&async_lock, flags);
	list_del_init(&entry->domain_list);
	list_del_init(&entry->global_list);

	/* 3) free the entry */
	kfree(entry);
	atomic_dec(&entry_count);

	spin_unlock_irqrestore(&async_lock, flags);

	/* 4) wake up any waiters */
	wake_up(&async_done);
}

static async_cookie_t __async_schedule_node_domain(async_func_t func,
						   void *data, int node,
						   struct async_domain *domain,
						   struct async_entry *entry)
{
	async_cookie_t newcookie;
	unsigned long flags;

	INIT_LIST_HEAD(&entry->domain_list);
	INIT_LIST_HEAD(&entry->global_list);
	INIT_WORK(&entry->work, async_run_entry_fn);
	entry->func = func;
	entry->data = data;
	entry->domain = domain;

	spin_lock_irqsave(&async_lock, flags);

	/* allocate cookie and queue */
	newcookie = entry->cookie = next_cookie++;

	list_add_tail(&entry->domain_list, &domain->pending);
	if (domain->registered)
		list_add_tail(&entry->global_list, &async_global_pending);

	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

	/* schedule for execution */
	queue_work_node(node, system_unbound_wq, &entry->work);

	return newcookie;
}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 * @domain: the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * @domain may be used in the async_synchronize_*_domain() functions to
 * wait within a certain synchronization domain rather than globally.
 *
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
					  int node, struct async_domain *domain)
{
	struct async_entry *entry;
	unsigned long flags;
	async_cookie_t newcookie;

	/* allow irq-off callers */
	entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

	/*
	 * If we're out of memory or if there's too much work
	 * pending already, we execute synchronously.
	 */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		spin_lock_irqsave(&async_lock, flags);
		newcookie = next_cookie++;
		spin_unlock_irqrestore(&async_lock, flags);

		/* low on memory or too much pending work: run synchronously */
		func(data, newcookie);
		return newcookie;
	}

	return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);
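
/*
 * A usage sketch (hypothetical names): a subsystem can confine its async
 * work to a private domain so it only ever waits on its own work:
 *
 *	static ASYNC_DOMAIN(mydrv_domain);
 *
 *	async_schedule_domain(mydrv_probe_one, md, &mydrv_domain);
 *	...
 *	// wait only for this subsystem's async work
 *	async_synchronize_full_domain(&mydrv_domain);
 */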

/**
 * async_schedule_node - NUMA specific version of async_schedule
 * @func: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @node: NUMA node that we want to schedule this on or close to
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 *
 * The node requested will be honored on a best effort basis. If the node
 * has no CPUs associated with it then the work is distributed among all
 * available CPUs.
 */
async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
{
	return async_schedule_node_domain(func, data, node, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_schedule_node);
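
/*
 * Note: the common async_schedule(func, data) helper in <linux/async.h> is a
 * thin wrapper that calls this function with NUMA_NO_NODE, and
 * async_schedule_dev() likewise passes dev_to_node(dev).  Callers holding a
 * device pointer can request placement directly, e.g. (sketch):
 *
 *	async_schedule_node(mydrv_probe_one, dev, dev_to_node(dev));
 */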

/**
 * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
 * @func: function to execute asynchronously
 * @dev: device argument to be passed to function
 *
 * @dev is used as both the argument for the function and to provide NUMA
 * context for where to run the function.
 *
 * If the asynchronous execution of @func is scheduled successfully, return
 * true.  Otherwise, do nothing and return false; unlike async_schedule_dev(),
 * this function does not fall back to running @func synchronously.
 */
bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
{
	struct async_entry *entry;

	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);

	/* Give up if there is no memory or too much work. */
	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
		kfree(entry);
		return false;
	}

	__async_schedule_node_domain(func, dev, dev_to_node(dev),
				     &async_dfl_domain, entry);
	return true;
}
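
/*
 * Callers must provide the synchronous fallback themselves, e.g. (sketch,
 * with a hypothetical mydrv_resume matching async_func_t):
 *
 *	if (!async_schedule_dev_nocall(mydrv_resume, dev))
 *		mydrv_resume(dev, 0);	// run in place; cookie is unused
 */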

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
	async_synchronize_full_domain(NULL);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @domain: the domain to synchronize
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain have been done.
 */
void async_synchronize_full_domain(struct async_domain *domain)
{
	async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @domain: the domain to synchronize (%NULL for all registered domains)
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by @domain submitted prior to @cookie
 * have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
{
	ktime_t starttime;

	pr_debug("async_waiting @ %i\n", task_pid_nr(current));
	starttime = ktime_get();

	wait_event(async_done, lowest_in_progress(domain) >= cookie);

	pr_debug("async_continuing @ %i after %lli usec\n", task_pid_nr(current),
		 microseconds_since(starttime));
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
	async_synchronize_cookie_domain(cookie, &async_dfl_domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);

/**
 * current_is_async - is %current an async worker task?
 *
 * Returns %true if %current is an async worker task.
 */
bool current_is_async(void)
{
	struct worker *worker = current_wq_worker();

	return worker && worker->current_func == async_run_entry_fn;
}
EXPORT_SYMBOL_GPL(current_is_async);
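
/*
 * Sketch: code that can be reached both from an async worker and from
 * ordinary process context can use this to avoid waiting on itself:
 *
 *	if (!current_is_async())
 *		async_synchronize_full();
 */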