
Merge branch 'csd.2020.09.04a' into HEAD

csd.2020.09.04a: CPU smp_call_function() torture tests.
Paul E. McKenney, 2020-09-04 11:54:52 -07:00
commit 6fe208f63a
6 changed files with 160 additions and 1 deletion

Documentation/admin-guide/kernel-parameters.txt

@@ -3073,6 +3073,10 @@
			and gids from such clients. This is intended to ease
			migration from NFSv2/v3.

	nmi_backtrace.backtrace_idle [KNL]
			Dump stacks even of idle CPUs in response to an
			NMI stack-backtrace request.

	nmi_debug=	[KNL,SH] Specify one or more actions to take
			when a NMI is triggered.
			Format: [state][,regs][,debounce][,die]

include/linux/smp.h

@@ -26,6 +26,9 @@ struct __call_single_data {
		struct {
			struct llist_node llist;
			unsigned int flags;
#ifdef CONFIG_64BIT
			u16 src, dst;
#endif
		};
	};
	smp_call_func_t func;

include/linux/smp_types.h

@@ -61,6 +61,9 @@ struct __call_single_node {
		unsigned int u_flags;
		atomic_t a_flags;
	};
#ifdef CONFIG_64BIT
	u16 src, dst;
#endif
};

#endif /* __LINUX_SMP_TYPES_H */

kernel/smp.c

@@ -20,6 +20,9 @@
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>

#include "smpboot.h"
#include "sched/smp.h"
@@ -96,6 +99,103 @@ void __init call_function_init(void)
	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

#define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)

static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for current CPU, NULL to erase. */
static void csd_lock_record(call_single_data_t *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

/*
 * Complain if too much time spent waiting. Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->flags);

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (!trigger_single_cpu_backtrace(cpu))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();

	*ts1 = ts2;
	return false;
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
@@ -103,10 +203,30 @@ void __init call_function_init(void)
 * previous function call. For multi-cpu calls its even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

#else
static void csd_lock_record(call_single_data_t *csd)
{
}

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(call_single_data_t *csd)
{
@@ -166,9 +286,11 @@ static int generic_exec_single(int cpu, call_single_data_t *csd)
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}
@@ -268,8 +390,10 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
				entry = &csd_next->llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->llist;
		}
@@ -296,8 +420,10 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}
@@ -375,6 +501,10 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->src = smp_processor_id();
	csd->dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);
@@ -540,6 +670,10 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
			csd->flags |= CSD_TYPE_SYNC;
		csd->func = func;
		csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
		csd->src = smp_processor_id();
		csd->dst = cpu;
#endif
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

lib/Kconfig.debug

@@ -1377,6 +1377,17 @@ config SCF_TORTURE_TEST
	  module may be built after the fact on the running kernel to
	  be tested, if desired.

config CSD_LOCK_WAIT_DEBUG
	bool "Debugging for csd_lock_wait(), called from smp_call_function*()"
	depends on DEBUG_KERNEL
	depends on 64BIT
	default n
	help
	  This option enables debug prints when CPUs are slow to respond
	  to the smp_call_function*() IPI wrappers. These debug prints
	  include the IPI handler function currently executing (if any)
	  and relevant stack traces.

endmenu # lock debugging

config TRACE_IRQFLAGS
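With the new option enabled (CONFIG_CSD_LOCK_WAIT_DEBUG=y, which per the depends lines requires CONFIG_DEBUG_KERNEL=y and a 64BIT kernel), a csd_lock_wait() that exceeds CSD_LOCK_TIMEOUT produces the "non-responsive CSD lock" pr_alert() lines added to kernel/smp.c above, a dump_stack() of the waiting CPU, a backtrace of the destination CPU when it is known, and a re-sent IPI if that CPU does not appear to be handling any CSD request.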

lib/nmi_backtrace.c

@@ -85,12 +85,16 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
	put_cpu();
}

// Dump stacks even for idle CPUs.
static bool backtrace_idle;
module_param(backtrace_idle, bool, 0644);

bool nmi_cpu_backtrace(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
			pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
				cpu, (void *)instruction_pointer(regs));
		} else {