
smp/cfd: Convert core to hotplug state machine

Install the callbacks via the state machine. They are installed at runtime, so
smpcfd_prepare_cpu() needs to be invoked by the boot CPU.

Signed-off-by: Richard Weinberger <richard@nod.at>
[ Added the dropped CPU dying case back in. ]
Signed-off-by: Richard Cochran <rcochran@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Davidlohr Bueso <dave@stgolabs>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153337.818376366@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Authored by Richard Weinberger on 2016-07-13 17:17:01 +00:00; committed by Ingo Molnar
parent 6b2c28471d
commit 31487f8328
4 changed files with 49 additions and 48 deletions
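
The two new states are compile-time entries in enum cpuhp_state and are wired into the static cpuhp_bp_states[] table in kernel/cpu.c; call_function_init() still calls smpcfd_prepare_cpu() directly because the boot CPU is already up before the state machine can run the PREPARE step for it. For comparison only, a minimal sketch of what dynamic registration of the same callbacks through cpuhp_setup_state() could look like follows; this is not what the commit does, and the "smpcfd:prepare"/"smpcfd:dying" state names and the example init function are invented for the sketch.

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/smp.h>

/*
 * Illustrative sketch only, not part of this commit: registering the new
 * smpcfd callbacks dynamically with the cpuhp core. cpuhp_setup_state()
 * also runs the startup callback on every CPU that is already online, so
 * the boot CPU would be covered here as well; the commit instead adds
 * static table entries and calls smpcfd_prepare_cpu() explicitly from
 * call_function_init().
 */
static int __init smpcfd_cpuhp_example_init(void)
{
	int ret;

	/* PREPARE/DEAD pair: allocate and free the per-CPU cfd data. */
	ret = cpuhp_setup_state(CPUHP_SMPCFD_PREPARE, "smpcfd:prepare",
				smpcfd_prepare_cpu, smpcfd_dead_cpu);
	if (ret)
		return ret;

	/* DYING step: flush pending call-function work on the outgoing CPU. */
	return cpuhp_setup_state_nocalls(CPUHP_AP_SMPCFD_DYING, "smpcfd:dying",
					 NULL, smpcfd_dying_cpu);
}

Static enum entries are used for core infrastructure like this largely so that the ordering relative to the other PREPARE and DYING steps is fixed at build time.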

include/linux/cpuhotplug.h

@@ -18,6 +18,7 @@ enum cpuhp_state {
 	CPUHP_HRTIMERS_PREPARE,
 	CPUHP_PROFILE_PREPARE,
 	CPUHP_X2APIC_PREPARE,
+	CPUHP_SMPCFD_PREPARE,
 	CPUHP_TIMERS_DEAD,
 	CPUHP_NOTIFY_PREPARE,
 	CPUHP_BRINGUP_CPU,
@@ -57,6 +58,7 @@ enum cpuhp_state {
 	CPUHP_AP_ARM_CORESIGHT4_STARTING,
 	CPUHP_AP_ARM64_ISNDEP_STARTING,
 	CPUHP_AP_LEDTRIG_STARTING,
+	CPUHP_AP_SMPCFD_DYING,
 	CPUHP_AP_X86_TBOOT_DYING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_ONLINE,

include/linux/smp.h

@@ -196,4 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void);
 
 void smp_setup_processor_id(void);
 
+/* SMP core functions */
+int smpcfd_prepare_cpu(unsigned int cpu);
+int smpcfd_dead_cpu(unsigned int cpu);
+int smpcfd_dying_cpu(unsigned int cpu);
+
 #endif /* __LINUX_SMP_H */

kernel/cpu.c

@@ -1195,6 +1195,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.startup = hrtimers_prepare_cpu,
 		.teardown = hrtimers_dead_cpu,
 	},
+	[CPUHP_SMPCFD_PREPARE] = {
+		.name = "SMPCFD prepare",
+		.startup = smpcfd_prepare_cpu,
+		.teardown = smpcfd_dead_cpu,
+	},
 	[CPUHP_TIMERS_DEAD] = {
 		.name = "timers dead",
 		.startup = NULL,
@@ -1218,6 +1223,10 @@ static struct cpuhp_step cpuhp_bp_states[] = {
 		.teardown = NULL,
 		.cant_stop = true,
 	},
+	[CPUHP_AP_SMPCFD_DYING] = {
+		.startup = NULL,
+		.teardown = smpcfd_dying_cpu,
+	},
 	/*
 	 * Handled on controll processor until the plugged processor manages
 	 * this itself.

kernel/smp.c

@@ -33,69 +33,54 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static void flush_smp_call_function_queue(bool warn_cpu_offline);
 
-static int
-hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+int smpcfd_prepare_cpu(unsigned int cpu)
 {
-	long cpu = (long)hcpu;
 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
-				cpu_to_node(cpu)))
-			return notifier_from_errno(-ENOMEM);
-		cfd->csd = alloc_percpu(struct call_single_data);
-		if (!cfd->csd) {
-			free_cpumask_var(cfd->cpumask);
-			return notifier_from_errno(-ENOMEM);
-		}
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		/* Fall-through to the CPU_DEAD[_FROZEN] case. */
-
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
+	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+				     cpu_to_node(cpu)))
+		return -ENOMEM;
+	cfd->csd = alloc_percpu(struct call_single_data);
+	if (!cfd->csd) {
 		free_cpumask_var(cfd->cpumask);
-		free_percpu(cfd->csd);
-		break;
+		return -ENOMEM;
+	}
 
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		/*
-		 * The IPIs for the smp-call-function callbacks queued by other
-		 * CPUs might arrive late, either due to hardware latencies or
-		 * because this CPU disabled interrupts (inside stop-machine)
-		 * before the IPIs were sent. So flush out any pending callbacks
-		 * explicitly (without waiting for the IPIs to arrive), to
-		 * ensure that the outgoing CPU doesn't go offline with work
-		 * still pending.
-		 */
-		flush_smp_call_function_queue(false);
-		break;
-#endif
-	};
-
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block hotplug_cfd_notifier = {
-	.notifier_call = hotplug_cfd,
-};
+int smpcfd_dead_cpu(unsigned int cpu)
+{
+	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
+
+	free_cpumask_var(cfd->cpumask);
+	free_percpu(cfd->csd);
+	return 0;
+}
+
+int smpcfd_dying_cpu(unsigned int cpu)
+{
+	/*
+	 * The IPIs for the smp-call-function callbacks queued by other
+	 * CPUs might arrive late, either due to hardware latencies or
+	 * because this CPU disabled interrupts (inside stop-machine)
+	 * before the IPIs were sent. So flush out any pending callbacks
+	 * explicitly (without waiting for the IPIs to arrive), to
+	 * ensure that the outgoing CPU doesn't go offline with work
+	 * still pending.
+	 */
+	flush_smp_call_function_queue(false);
+	return 0;
+}
 
 void __init call_function_init(void)
 {
-	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
 	for_each_possible_cpu(i)
 		init_llist_head(&per_cpu(call_single_queue, i));
 
-	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
-	register_cpu_notifier(&hotplug_cfd_notifier);
+	smpcfd_prepare_cpu(smp_processor_id());
 }
 
 /*