smp: Add a smp_cond_func_t argument to smp_call_function_many()
on_each_cpu_cond_mask() allocates a new CPU mask. The newly allocated mask is
a subset of the provided mask based on the conditional function.

This memory allocation can be avoided by extending smp_call_function_many()
with the conditional function and performing the remote function call based
on the mask and the conditional function.

Rename smp_call_function_many() to smp_call_function_many_cond() and add the
smp_cond_func_t argument. If smp_cond_func_t is provided then it is used
before invoking the function. Provide smp_call_function_many() with cond_func
set to NULL. Let on_each_cpu_cond_mask() use smp_call_function_many_cond().

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20200117090137.1205765-3-bigeasy@linutronix.de
commit 67719ef25e
parent 5671d814db
kernel/smp.c (77 lines changed)
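Before the diff, a brief caller sketch may help show what the conditional
interface does. This is a hypothetical example, not part of this commit: the
work_pending flag, cpu_has_pending(), do_work() and kick_pending_cpus() are
made-up names. Only on_each_cpu_cond_mask(), the smp_cond_func_t predicate
type (bool (*)(int cpu, void *info)) and the gfp_flags argument (still
present in the signature at this point in the series) come from the kernel
tree.

/*
 * Hypothetical caller: run do_work() only on CPUs whose per-CPU
 * "work_pending" flag is set. After this patch the predicate is
 * evaluated inside smp_call_function_many_cond(), so no temporary
 * cpumask allocation is needed on this path.
 */
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

static DEFINE_PER_CPU(bool, work_pending);

/* Matches smp_cond_func_t: bool (*)(int cpu, void *info). */
static bool cpu_has_pending(int cpu, void *info)
{
        return per_cpu(work_pending, cpu);
}

/* Runs on each selected CPU in IPI context; clears its own flag. */
static void do_work(void *info)
{
        this_cpu_write(work_pending, false);
}

static void kick_pending_cpus(void)
{
        on_each_cpu_cond_mask(cpu_has_pending, do_work, NULL, true,
                              GFP_KERNEL, cpu_online_mask);
}

With the old implementation, on_each_cpu_cond_mask() would allocate a
cpumask, evaluate cpu_has_pending() for every CPU in the mask to fill it, and
then IPI the result; after this patch the predicate is checked inline while
queueing the calls, so the allocation disappears.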
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -395,22 +395,9 @@ call:
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
-/**
- * smp_call_function_many(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. Preemption
- * must be disabled when calling this function.
- */
-void smp_call_function_many(const struct cpumask *mask,
-                            smp_call_func_t func, void *info, bool wait)
+static void smp_call_function_many_cond(const struct cpumask *mask,
+                                        smp_call_func_t func, void *info,
+                                        bool wait, smp_cond_func_t cond_func)
 {
         struct call_function_data *cfd;
         int cpu, next_cpu, this_cpu = smp_processor_id();
@@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,
 
         /* Fastpath: do that cpu by itself. */
         if (next_cpu >= nr_cpu_ids) {
-                smp_call_function_single(cpu, func, info, wait);
+                if (!cond_func || (cond_func && cond_func(cpu, info)))
+                        smp_call_function_single(cpu, func, info, wait);
                 return;
         }
 
@@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
         for_each_cpu(cpu, cfd->cpumask) {
                 call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
+                if (cond_func && !cond_func(cpu, info))
+                        continue;
+
                 csd_lock(csd);
                 if (wait)
                         csd->flags |= CSD_FLAG_SYNCHRONOUS;
@@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
                 }
         }
 }
+
+/**
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. Preemption
+ * must be disabled when calling this function.
+ */
+void smp_call_function_many(const struct cpumask *mask,
+                            smp_call_func_t func, void *info, bool wait)
+{
+        smp_call_function_many_cond(mask, func, info, wait, NULL);
+}
 EXPORT_SYMBOL(smp_call_function_many);
 
 /**
@@ -684,33 +695,17 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
                            void *info, bool wait, gfp_t gfp_flags,
                            const struct cpumask *mask)
 {
-        cpumask_var_t cpus;
-        int cpu, ret;
+        int cpu = get_cpu();
 
-        might_sleep_if(gfpflags_allow_blocking(gfp_flags));
+        smp_call_function_many_cond(mask, func, info, wait, cond_func);
+        if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
+                unsigned long flags;
 
-        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
-                preempt_disable();
-                for_each_cpu(cpu, mask)
-                        if (cond_func(cpu, info))
-                                __cpumask_set_cpu(cpu, cpus);
-                on_each_cpu_mask(cpus, func, info, wait);
-                preempt_enable();
-                free_cpumask_var(cpus);
-        } else {
-                /*
-                 * No free cpumask, bother. No matter, we'll
-                 * just have to IPI them one by one.
-                 */
-                preempt_disable();
-                for_each_cpu(cpu, mask)
-                        if (cond_func(cpu, info)) {
-                                ret = smp_call_function_single(cpu, func,
-                                                               info, wait);
-                                WARN_ON_ONCE(ret);
-                        }
-                preempt_enable();
+                local_irq_save(flags);
+                func(info);
+                local_irq_restore(flags);
         }
+        put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);