x86/smpboot: Enable split CPU startup
The x86 CPU bringup state currently does AP wake-up, wait for the AP to
respond and then release it for full bringup. It is safe to be split into
a wake-up and a separate wait+release state.

Provide the required functions and enable the split CPU bringup, which
prepares for parallel bringup, where the bringup of the non-boot CPUs takes
two iterations: one to prepare and wake all APs and the second to wait and
release them. Depending on timing this can eliminate the wait time
completely.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck
Link: https://lore.kernel.org/r/20230512205257.133453992@linutronix.de
parent a631be92b9
commit 8b5a0f957c
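To illustrate the two-phase shape of bringup that this split enables, here is a minimal, self-contained C sketch. It is not kernel code: NR_APS, prepare_and_kick_ap() and wait_and_release_ap() are hypothetical stand-ins for the kick and wait+release halves described in the changelog.

/*
 * Illustrative sketch only -- not kernel code. All helpers below are
 * hypothetical stand-ins for the two halves of the split bringup.
 */
#include <stdio.h>

#define NR_APS 7	/* non-boot CPUs 1..7 on an assumed 8-CPU system */

static void prepare_and_kick_ap(unsigned int cpu)
{
	/* Phase 1: prepare the AP (stack, idle task) and send the
	 * startup IPI; deliberately do not wait for the AP here. */
	printf("kick    CPU%u\n", cpu);
}

static void wait_and_release_ap(unsigned int cpu)
{
	/* Phase 2: wait for the AP to signal arrival, then release it
	 * for full bringup. */
	printf("release CPU%u\n", cpu);
}

int main(void)
{
	unsigned int cpu;

	/* First iteration: wake every AP; they start up concurrently. */
	for (cpu = 1; cpu <= NR_APS; cpu++)
		prepare_and_kick_ap(cpu);

	/* Second iteration: wait for and release each AP. Since all APs
	 * were already kicked above, their startup latencies overlap and
	 * the per-CPU wait can disappear entirely. */
	for (cpu = 1; cpu <= NR_APS; cpu++)
		wait_and_release_ap(cpu);

	return 0;
}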
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -274,8 +274,8 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_USER_RETURN_NOTIFIER
 	select HAVE_GENERIC_VDSO
-	select HOTPLUG_CORE_SYNC_FULL		if SMP
 	select HOTPLUG_SMT			if SMP
+	select HOTPLUG_SPLIT_STARTUP		if SMP
 	select IRQ_FORCED_THREADING
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
 	select NEED_PER_CPU_PAGE_FIRST_CHUNK
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -40,7 +40,7 @@ struct smp_ops {
 
 	void (*cleanup_dead_cpu)(unsigned cpu);
 	void (*poll_sync_state)(void);
-	int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
+	int (*kick_ap_alive)(unsigned cpu, struct task_struct *tidle);
 	int (*cpu_disable)(void);
 	void (*cpu_die)(unsigned int cpu);
 	void (*play_dead)(void);
@@ -80,11 +80,6 @@ static inline void smp_cpus_done(unsigned int max_cpus)
 	smp_ops.smp_cpus_done(max_cpus);
 }
 
-static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
-{
-	return smp_ops.cpu_up(cpu, tidle);
-}
-
 static inline int __cpu_disable(void)
 {
 	return smp_ops.cpu_disable();
@@ -124,7 +119,7 @@ void native_smp_prepare_cpus(unsigned int max_cpus);
 void calculate_max_logical_packages(void);
 void native_smp_cpus_done(unsigned int max_cpus);
 int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
-int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
+int native_kick_ap(unsigned int cpu, struct task_struct *tidle);
 int native_cpu_disable(void);
 void __noreturn hlt_play_dead(void);
 void native_play_dead(void);
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -268,7 +268,7 @@ struct smp_ops smp_ops = {
 #endif
 	.smp_send_reschedule	= native_smp_send_reschedule,
 
-	.cpu_up			= native_cpu_up,
+	.kick_ap_alive		= native_kick_ap,
 	.cpu_disable		= native_cpu_disable,
 	.play_dead		= native_play_dead,
 
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1052,7 +1052,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 	return ret;
 }
 
-static int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
+int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
 {
 	int apicid = apic->cpu_present_to_apicid(cpu);
 	int err;
@@ -1088,15 +1088,15 @@ static int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
 	return err;
 }
 
-int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle)
 {
-	return native_kick_ap(cpu, tidle);
+	return smp_ops.kick_ap_alive(cpu, tidle);
 }
 
 void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu)
 {
 	/* Cleanup possible dangling ends... */
-	if (smp_ops.cpu_up == native_cpu_up && x86_platform.legacy.warm_reset)
+	if (smp_ops.kick_ap_alive == native_kick_ap && x86_platform.legacy.warm_reset)
 		smpboot_restore_warm_reset_vector();
 }
 
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -314,7 +314,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	return 0;
 }
 
-static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
+static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle)
 {
 	int rc;
 
@@ -438,7 +438,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
 	.smp_prepare_cpus = xen_pv_smp_prepare_cpus,
 	.smp_cpus_done = xen_smp_cpus_done,
 
-	.cpu_up = xen_pv_cpu_up,
+	.kick_ap_alive = xen_pv_kick_ap,
 	.cpu_die = xen_pv_cpu_die,
 	.cleanup_dead_cpu = xen_pv_cleanup_dead_cpu,
 	.poll_sync_state = xen_pv_poll_sync_state,