mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-29 15:43:59 +08:00
747d5a1bf2
A reboot request sends an IPI via the reboot vector and waits for all other
CPUs to stop. If one or more CPUs are in critical regions with interrupts
disabled, then the IPI is not handled on those CPUs and the shutdown hangs
if native_stop_other_cpus() is called with the wait argument set.

Such a situation can happen when one CPU was stopped within a lock-held
section and another CPU is trying to acquire that lock with interrupts
disabled, for example as in the sketch below. There are other scenarios
which can cause such a lockup as well.
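For illustration, the lockup boils down to a pattern like this (a
hypothetical sketch, not code from this patch; some_lock and flags are
made-up names):

	/* CPU A: stopped by the reboot IPI while holding a spinlock */
	spin_lock(&some_lock);
	/* REBOOT_VECTOR arrives here; stop_this_cpu() never returns,
	 * so some_lock is never released */

	/* CPU B: spins on the same lock with interrupts disabled, so it
	 * can never service the REBOOT_VECTOR IPI -> shutdown hangs */
	spin_lock_irqsave(&some_lock, flags);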
In theory the shutdown should be attempted via an NMI IPI after the timeout
period has elapsed. However, the wait loop that follows the reboot vector IPI
prevents this: it checks the wait request argument and the timeout. If wait
is set, which is true for sys_reboot(), then it won't fall through to the
NMI shutdown method after the timeout period has finished.
This was an oversight when the NMI shutdown mechanism was added to handle
the 'reboot IPI is not working' situation. The mechanism was added to deal
with stuck panic shutdowns, which do not have the wait request set, so the
'wait request' case was probably not considered.
Remove the wait check from the post reboot vector IPI wait loop and enforce
that the wait loop in the NMI fallback path is invoked even if NMI IPIs are
disabled or the registration of the NMI handler fails. That second wait
loop will then hang if not all CPUs shut down and the wait argument is set.
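Concretely, the fix amounts to the first wait loop in
native_stop_other_cpus() no longer honoring the wait argument (a
simplified before/after sketch; the full function is in the file below):

	/* Before: with wait set (as from sys_reboot()), this loop could
	 * spin forever, so the NMI fallback below it was never reached */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && (wait || timeout--))
		udelay(1);

	/* After: bounded to one second; if CPUs are still online, the
	 * code falls through to the NMI fallback, whose own wait loop
	 * still honors wait */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);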
[ tglx: Avoid the hard to parse line break in the NMI fallback path,
add comments and massage the changelog ]
Fixes: 7d007d21e5 ("x86/reboot: Use NMI to assist in shutting down if IRQ fails")
Signed-off-by: Grzegorz Halat <ghalat@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Don Zickus <dzickus@redhat.com>
Link: https://lkml.kernel.org/r/20190628122813.15500-1-ghalat@redhat.com
335 lines
9.5 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/nmi.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
#include <asm/kexec.h>
#include <asm/virtext.h>

/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 * E1AP.  see PII A1AP
 * E2AP.  see PII A2AP
 * E3AP.  see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 * A1AP.  see PPro 1AP
 * A2AP.  see PPro 2AP
 * A3AP.  see PPro 7AP
 *
 * Pentium Pro
 *   None of 1AP-9AP errata are visible to the normal user,
 *   except occasional delivery of 'spurious interrupt' as trap #15.
 *   This is very rare and a non-problem.
 *
 * 1AP.   Linux maps APIC as non-cacheable
 * 2AP.   worked around in hardware
 * 3AP.   fixed in C0 and above steppings microcode update.
 *        Linux does not use excessive STARTUP_IPIs.
 * 4AP.   worked around in hardware
 * 5AP.   symmetric IO mode (normal Linux operation) not affected.
 *        'noapic' mode has vector 0xf filled out properly.
 * 6AP.   'noapic' mode might be affected - fixed in later steppings
 * 7AP.   We do not assume writes to the LVT deasserting IRQs
 * 8AP.   We do not enable low power mode (deep sleep) during MP bootup
 * 9AP.   We do not use mixed mode
 *
 * Pentium
 *   There is a marginal case where REP MOVS on 100MHz SMP
 *   machines with B stepping processors can fail. XXX should provide
 *   an L1cache=Writethrough or L1cache=off option.
 *
 *   B stepping CPUs may hang. There are hardware workarounds
 *   for this. We warn about it in case your board doesn't have the
 *   workarounds. Basically that's so I can tell anyone with a B stepping
 *   CPU and SMP problems "tough".
 *
 * Specific items [From Pentium Processor Specification Update]
 *
 * 1AP.   Linux doesn't use remote read
 * 2AP.   Linux doesn't trust APIC errors
 * 3AP.   We work around this
 * 4AP.   Linux never generates 3 interrupts of the same priority
 *        to cause a lost local interrupt.
 * 5AP.   Remote read is never used
 * 6AP.   not affected - worked around in hardware
 * 7AP.   not affected - worked around in hardware
 * 8AP.   worked around in hardware - we get explicit CS errors if not
 * 9AP.   only 'noapic' mode affected. Might generate spurious
 *        interrupts, we log only the first one and count the
 *        rest silently.
 * 10AP.  not affected - worked around in hardware
 * 11AP.  Linux reads the APIC between writes to avoid this, as per
 *        the documentation. Make sure you preserve this as it affects
 *        the C stepping chips too.
 * 12AP.  not affected - worked around in hardware
 * 13AP.  not affected - worked around in hardware
 * 14AP.  we always deassert INIT during bootup
 * 15AP.  not affected - worked around in hardware
 * 16AP.  not affected - worked around in hardware
 * 17AP.  not affected - worked around in hardware
 * 18AP.  not affected - worked around in hardware
 * 19AP.  not affected - worked around in BIOS
 *
 * If this sounds worrying, believe me these bugs are either ___RARE___,
 * or are signal timing bugs worked around in hardware, and there's
 * about nothing of note with C stepping upwards.
 */

static atomic_t stopping_cpu = ATOMIC_INIT(-1);
static bool smp_no_nmi_ipi = false;

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

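/*
 * Send a call-function IPI to every CPU in @mask. When the mask covers
 * all online CPUs except the sender (and every online CPU has completed
 * callout), the cheaper APIC allbutself shorthand is used instead.
 */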
void native_send_call_func_ipi(const struct cpumask *mask)
{
	cpumask_var_t allbutself;

	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
		return;
	}

	cpumask_copy(allbutself, cpu_online_mask);
	__cpumask_clear_cpu(smp_processor_id(), allbutself);

	if (cpumask_equal(mask, allbutself) &&
	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	free_cpumask_var(allbutself);
}

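/*
 * NMI handler used by the stop fallback: stop this CPU. An NMI is
 * delivered even when a CPU spins with interrupts disabled and thus
 * cannot service the REBOOT_VECTOR IPI.
 */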
static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* We are registered on stopping cpu too, avoid spurious NMI */
	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
		return NMI_HANDLED;

	cpu_emergency_vmxoff();
	stop_this_cpu(NULL);

	return NMI_HANDLED;
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */

asmlinkage __visible void smp_reboot_interrupt(void)
{
	ipi_entering_ack_irq();
	cpu_emergency_vmxoff();
	stop_this_cpu(NULL);
	irq_exit();
}

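/* Register smp_stop_nmi_callback() as a first-priority NMI handler */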
static int register_stop_handler(void)
{
	return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
				    NMI_FLAG_FIRST, "smp_stop");
}

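/*
 * Stop all other CPUs, first politely via the REBOOT_VECTOR IPI, then
 * forcibly via NMI. If @wait is set, the NMI path waits until all other
 * CPUs have actually reached shutdown state.
 */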
static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use an own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 */

	/*
	 * We start by using the REBOOT_VECTOR irq.
	 * The irq is treated as a sync point to allow critical
	 * regions of code on other cpus to release their spin locks
	 * and re-enable irqs. Jumping straight to an NMI might
	 * accidentally cause deadlocks with further shutdown/panic
	 * code. By syncing, we give the cpus up to one second to
	 * finish their work before we force them off with the NMI.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second for IPI completion. The
		 * wait request is not checked here because that would
		 * prevent an NMI shutdown attempt in case that not all
		 * CPUs reach shutdown state.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && timeout--)
			udelay(1);
	}

	/* if the REBOOT_VECTOR didn't work, try with the NMI */
	if (num_online_cpus() > 1) {
		/*
		 * If NMI IPI is enabled, try to register the stop handler
		 * and send the IPI. In any case try to wait for the other
		 * CPUs to stop.
		 */
		if (!smp_no_nmi_ipi && !register_stop_handler()) {
			/* Sync above data before sending IRQ */
			wmb();

			pr_emerg("Shutting down cpus with NMI\n");

			apic->send_IPI_allbutself(NMI_VECTOR);
		}
		/*
		 * Don't wait longer than 10 ms if the caller didn't
		 * request it. If wait is true, the machine hangs here if
		 * one or more CPUs do not reach shutdown state.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	local_irq_save(flags);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
	local_irq_restore(flags);
}

/*
 * Reschedule callback. KVM uses this interrupt to force a cpu out of
 * guest mode.
 */
__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	kvm_set_cpu_l1tf_flush_l1d();

	if (trace_resched_ipi_enabled()) {
		/*
		 * scheduler_ipi() might call irq_enter() as well, but
		 * nested calls are fine.
		 */
		irq_enter();
		trace_reschedule_entry(RESCHEDULE_VECTOR);
		scheduler_ipi();
		trace_reschedule_exit(RESCHEDULE_VECTOR);
		irq_exit();
		return;
	}
	scheduler_ipi();
}

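/* Handler for CALL_FUNCTION_VECTOR: run the queued SMP function calls */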
__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	trace_call_function_entry(CALL_FUNCTION_VECTOR);
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_interrupt();
	trace_call_function_exit(CALL_FUNCTION_VECTOR);
	exiting_irq();
}

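/* Handler for CALL_FUNCTION_SINGLE_VECTOR: run the single-CPU call queue */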
__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
{
	ipi_entering_ack_irq();
	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_single_interrupt();
	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
	exiting_irq();
}

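/* "nonmi_ipi" on the kernel command line disables the NMI stop fallback */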
static int __init nonmi_ipi_setup(char *str)
{
	smp_no_nmi_ipi = true;
	return 1;
}

__setup("nonmi_ipi", nonmi_ipi_setup);

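/* Native (bare metal) implementations of the SMP operations */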
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
	.smp_prepare_cpus = native_smp_prepare_cpus,
	.smp_cpus_done = native_smp_cpus_done,

	.stop_other_cpus = native_stop_other_cpus,
#if defined(CONFIG_KEXEC_CORE)
	.crash_stop_other_cpus = kdump_nmi_shootdown_cpus,
#endif
	.smp_send_reschedule = native_smp_send_reschedule,

	.cpu_up = native_cpu_up,
	.cpu_die = native_cpu_die,
	.cpu_disable = native_cpu_disable,
	.play_dead = native_play_dead,

	.send_call_func_ipi = native_send_call_func_ipi,
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);