powerpc: NMI IPI make NMI IPIs fully synchronous
There is an asynchronous aspect to smp_send_nmi_ipi. The caller waits
for all CPUs to call in to the handler, but it does not wait for
completion of the handler. This is a needless complication, so remove
it and always wait synchronously.

The synchronous wait allows the caller to easily time out and clear
the wait for completion (zero nmi_ipi_busy_count) in the case of badly
behaved handlers. This would have prevented the recent smp_send_stop
NMI IPI bug from causing the system to hang.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 9b81c0211c, commit 5b73151fff
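The shape of the change, before the diff: the caller now holds a
lock-protected busy count (one reference per target CPU plus one for
itself) and polls it with a decrementing microsecond budget,
force-clearing stuck references on time-out. The userspace sketch
below mirrors only that shape; pthread mutexes and usleep() stand in
for nmi_ipi_lock() and udelay(), and every name in it is invented for
illustration rather than taken from the kernel API.

/* sketch.c - userspace illustration of the synchronous wait this
 * commit gives NMI IPI callers; build with: cc -std=gnu11 -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* ~nmi_ipi_lock */
static unsigned int busy_count;           /* ~nmi_ipi_busy_count */

/* Target side: run the "handler", then drop our reference. The "> 1"
 * guard is the race fix from the patch: if the caller already timed
 * out and cleared the count, do not decrement it again. */
static void *handler(void *arg)
{
	(void)arg;
	usleep(100);                      /* the handler's actual work */
	pthread_mutex_lock(&lock);
	if (busy_count > 1)               /* can race with caller time-out */
		busy_count--;
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Caller side: returns true if every handler completed, false on
 * time-out, in which case stuck references are cleared so one bad
 * handler cannot wedge the next sender (the hang the message cites). */
static bool send_and_wait(unsigned int ntargets, unsigned long long delay_us)
{
	pthread_t t[ntargets];
	bool ok = true;

	busy_count = ntargets + 1;        /* one ref per target + caller's */
	for (unsigned int i = 0; i < ntargets; i++)
		pthread_create(&t[i], NULL, handler, NULL);

	pthread_mutex_lock(&lock);
	while (busy_count > 1) {          /* wait for handlers to finish */
		pthread_mutex_unlock(&lock);
		usleep(1);                /* udelay(1) in the kernel */
		pthread_mutex_lock(&lock);
		if (delay_us && !--delay_us)
			break;            /* budget exhausted */
	}
	if (busy_count > 1) {             /* timed out: clear stuck refs */
		ok = false;
		busy_count = 1;
	}
	busy_count--;                     /* drop the caller's reference */
	pthread_mutex_unlock(&lock);

	for (unsigned int i = 0; i < ntargets; i++)
		pthread_join(t[i], NULL);
	return ok;
}

int main(void)
{
	printf("handlers completed: %s\n",
	       send_and_wait(4, 1000000) ? "yes" : "timed out");
	return 0;
}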
arch/powerpc/include/asm/smp.h
@@ -56,7 +56,6 @@ struct smp_ops_t {
 	int   (*cpu_bootable)(unsigned int nr);
 };
 
-extern void smp_flush_nmi_ipi(u64 delay_us);
 extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
 extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
 extern void smp_send_debugger_break(void);
arch/powerpc/kernel/smp.c
@@ -423,7 +423,8 @@ int smp_handle_nmi_ipi(struct pt_regs *regs)
 	fn(regs);
 
 	nmi_ipi_lock();
-	nmi_ipi_busy_count--;
+	if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
+		nmi_ipi_busy_count--;
 out:
 	nmi_ipi_unlock_end(&flags);
 
@@ -448,29 +449,11 @@ static void do_smp_send_nmi_ipi(int cpu, bool safe)
 	}
 }
 
-void smp_flush_nmi_ipi(u64 delay_us)
-{
-	unsigned long flags;
-
-	nmi_ipi_lock_start(&flags);
-	while (nmi_ipi_busy_count) {
-		nmi_ipi_unlock_end(&flags);
-		udelay(1);
-		if (delay_us) {
-			delay_us--;
-			if (!delay_us)
-				return;
-		}
-		nmi_ipi_lock_start(&flags);
-	}
-	nmi_ipi_unlock_end(&flags);
-}
-
 /*
  * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
  * - fn is the target callback function.
  * - delay_us > 0 is the delay before giving up waiting for targets to
- *   enter the handler, == 0 specifies indefinite delay.
+ *   complete executing the handler, == 0 specifies indefinite delay.
  */
 int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
 {
@@ -507,8 +490,23 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
 
 	do_smp_send_nmi_ipi(cpu, safe);
 
+	nmi_ipi_lock();
+	/* nmi_ipi_busy_count is held here, so unlock/lock is okay */
 	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
+		nmi_ipi_unlock();
 		udelay(1);
+		nmi_ipi_lock();
+		if (delay_us) {
+			delay_us--;
+			if (!delay_us)
+				break;
+		}
+	}
+
+	while (nmi_ipi_busy_count > 1) {
+		nmi_ipi_unlock();
+		udelay(1);
+		nmi_ipi_lock();
 		if (delay_us) {
 			delay_us--;
 			if (!delay_us)
@@ -516,12 +514,17 @@ int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
 		}
 	}
 
-	nmi_ipi_lock();
 	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
-		/* Could not gather all CPUs */
+		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
 		ret = 0;
 		cpumask_clear(&nmi_ipi_pending_mask);
 	}
+	if (nmi_ipi_busy_count > 1) {
+		/* Timeout waiting for CPUs to execute fn */
+		ret = 0;
+		nmi_ipi_busy_count = 1;
+	}
+
 	nmi_ipi_busy_count--;
 	nmi_ipi_unlock_end(&flags);
 
@@ -597,7 +600,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
 	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
 	 */
 	nmi_ipi_lock();
-	nmi_ipi_busy_count--;
+	if (nmi_ipi_busy_count > 1)
+		nmi_ipi_busy_count--;
 	nmi_ipi_unlock();
 
 	spin_begin();
arch/powerpc/kernel/watchdog.c
@@ -174,7 +174,6 @@ static void watchdog_smp_panic(int cpu, u64 tb)
 			continue;
 		smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
 	}
-	smp_flush_nmi_ipi(1000000);
 }
 
 /* Take the stuck CPUs out of the watch group */
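The watchdog hunk shows the payoff: the separate flush pass disappears
because the send itself now waits for handler completion. A
before/after caller sketch (the framing is mine; the calls are the
ones in the hunk above):

/* before: each send returned once the target *entered* the handler,
 * so a flush after the cpu loop had to wait for handlers to finish */
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);	/* inside the cpu loop */
smp_flush_nmi_ipi(1000000);			/* after the loop */

/* after: each send returns only when wd_lockup_ipi has completed on
 * the target (or its 1000000us budget expires), so the flush is gone */
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);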