mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-26 05:34:13 +08:00
26d1982fd1
The commit 55d6af1d66
("lib/nmi_backtrace: explicitly serialize banner and regs") serialized backtraces from more CPUs using the re-entrant printk_cpu lock. It was a preparation step for removing the obsolete nmi_safe buffers. The single-line messages about idle CPUs were not serialized against other CPUs and might appear in the middle of backtrace from another CPU, for example: [56394.590068] NMI backtrace for cpu 2 [56394.590069] CPU: 2 PID: 444 Comm: systemd-journal Not tainted 5.14.0-rc1-default+ #268 [56394.590071] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.12.0-59-gc9ba527-rebuilt.opensuse.org 04/01/2014 [56394.590072] RIP: 0010:lock_is_held_type+0x0/0x120 [56394.590071] NMI backtrace for cpu 0 skipped: idling at native_safe_halt+0xb/0x10 [56394.590076] Code: a2 38 ff 0f 0b 8b 44 24 04 eb bd 48 8d ... [56394.590077] RSP: 0018:ffffab02c07c7e68 EFLAGS: 00000246 [56394.590079] RAX: 0000000000000000 RBX: ffff9a7bc0ec8a40 RCX: ffffffffaab8eb40 It might cause confusion what CPU the following lines belong to and whether the backtraces are really serialized. Prevent the confusion and serialize also the single line message against other CPUs. Fixes: 55d6af1d66
("lib/nmi_backtrace: explicitly serialize banner and regs") Reviewed-by: John Ogness <john.ogness@linutronix.de> Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org> Signed-off-by: Petr Mladek <pmladek@suse.com> Link: https://lore.kernel.org/r/20210727080939.27193-1-pmladek@suse.com
116 lines
3.2 KiB
C
116 lines
3.2 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/*
|
|
* NMI backtrace support
|
|
*
|
|
* Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
|
|
* with the following header:
|
|
*
|
|
* HW NMI watchdog support
|
|
*
|
|
* started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
|
|
*
|
|
* Arch specific calls to support NMI watchdog
|
|
*
|
|
* Bits copied from original nmi.c file
|
|
*/
|
|
#include <linux/cpumask.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/kprobes.h>
|
|
#include <linux/nmi.h>
|
|
#include <linux/cpu.h>
|
|
#include <linux/sched/debug.h>
|
|
|
|
#ifdef arch_trigger_cpumask_backtrace
/* For reliability, we're prepared to waste bits here. */
/*
 * Bitmap of CPUs that still owe a backtrace; set by the trigger path,
 * cleared per-CPU by nmi_cpu_backtrace() once that CPU has dumped.
 */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_cpumask_backtrace */
static unsigned long backtrace_flag;

/*
 * When raise() is called it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 */
|
|
/**
 * nmi_trigger_cpumask_backtrace - trigger NMI backtraces on a set of CPUs
 * @mask:	CPUs that should dump their stacks
 * @exclude_self:	if true, the calling CPU is removed from @mask and
 *		does not dump (callers typically already have context)
 * @raise:	arch callback that actually sends the NMI (or equivalent)
 *		to the CPUs remaining in the mask it is passed
 *
 * Only one backtrace request may be in flight at a time; concurrent
 * callers return silently without dumping anything.  Waits up to 10
 * seconds for all targeted CPUs to clear their bit in backtrace_mask.
 */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask))
{
	/* get_cpu() disables preemption so this_cpu stays valid throughout. */
	int i, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), mask);
	if (exclude_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	/*
	 * Don't try to send an NMI to this cpu; it may work on some
	 * architectures, but on others it may not, and we'll get
	 * information at least as useful just by doing a dump_stack() here.
	 * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
	 */
	if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
		nmi_cpu_backtrace(NULL);

	/* Only bother the arch raise() hook if remote CPUs remain. */
	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		/* Busy-waiting with IRQs on; keep the watchdog quiet. */
		touch_softlockup_watchdog();
	}

	/*
	 * Release ordering: all dump output above must be visible before
	 * another caller can claim backtrace_flag.
	 */
	clear_bit_unlock(0, &backtrace_flag);
	put_cpu();
}
|
|
|
|
// Dump stacks even for idle CPUs.
/* Writable at runtime via /sys/module/.../parameters/backtrace_idle. */
static bool backtrace_idle;
module_param(backtrace_idle, bool, 0644);
|
|
|
|
/**
 * nmi_cpu_backtrace - dump this CPU's stack if a backtrace was requested
 * @regs:	register state at the point of interruption, or NULL when
 *		called directly (not from an NMI handler)
 *
 * Runs on the target CPU, typically from NMI context.  Returns true if
 * this CPU was in backtrace_mask and output was produced (or deliberately
 * skipped for an idling CPU), false if no backtrace was pending here.
 *
 * All output - including the single-line "skipped: idling" message - is
 * emitted under the printk cpu lock so it cannot interleave with another
 * CPU's backtrace (see the commit message above).  The mask bit is
 * cleared only after unlocking, signalling completion to the waiter in
 * nmi_trigger_cpumask_backtrace().
 */
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		/*
		 * Allow nested NMI backtraces while serializing
		 * against other CPUs.
		 */
		printk_cpu_lock_irqsave(flags);
		if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
			/* Idle CPUs are uninteresting unless backtrace_idle is set. */
			pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
				cpu, (void *)instruction_pointer(regs));
		} else {
			pr_warn("NMI backtrace for cpu %d\n", cpu);
			if (regs)
				show_regs(regs);
			else
				dump_stack();
		}
		printk_cpu_unlock_irqrestore(flags);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return true;
	}

	return false;
}
/* May run from NMI context; must not be a kprobe target itself. */
NOKPROBE_SYMBOL(nmi_cpu_backtrace);
#endif
|