rcutorture: Allow rcutorture without RCU Tasks Trace
If a kernel builds rcutorture, whether built-in or as a module, that kernel is also built with CONFIG_TASKS_TRACE_RCU, whether anything else needs Tasks Trace RCU or not. This unnecessarily increases kernel size. This commit therefore decouples the presence of rcutorture from the presence of RCU Tasks Trace.

However, there is still a need to select CONFIG_TASKS_TRACE_RCU for testing purposes, and casual users must not be bothered with questions -- for them, this needs to be fully automated. There is thus a new CONFIG_FORCE_TASKS_TRACE_RCU option that selects CONFIG_TASKS_TRACE_RCU and is user-selectable, but which depends on CONFIG_RCU_EXPERT.

[ paulmck: Apply kernel test robot feedback. ]

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 835f14ed53
commit 40c1278aa7
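For readers skimming the diff below, here is a minimal userspace sketch of the TASKS_TRACING_OPS trick used in rcutorture.c: the optional ops-table entry is wrapped in a macro that expands to "&tasks_tracing_ops," when CONFIG_TASKS_TRACE_RCU is set and to nothing otherwise, so the torture_ops[] initializer compiles either way. This is not kernel code; the struct and main() scaffolding are purely illustrative.

/* Minimal sketch of the conditional-ops pattern; build with or without
 * -DCONFIG_TASKS_TRACE_RCU to mimic the two kernel configurations. */
#include <stdio.h>

struct rcu_torture_ops {
	const char *name;
};

static struct rcu_torture_ops rcu_ops = { .name = "rcu" };

#ifdef CONFIG_TASKS_TRACE_RCU
static struct rcu_torture_ops tasks_tracing_ops = { .name = "tasks-tracing" };
#define TASKS_TRACING_OPS &tasks_tracing_ops,
#else
#define TASKS_TRACING_OPS
#endif

/* The optional entry expands to nothing when the flavor is not configured. */
static struct rcu_torture_ops *torture_ops[] = {
	&rcu_ops, TASKS_TRACING_OPS
};

int main(void)
{
	unsigned long i;

	for (i = 0; i < sizeof(torture_ops) / sizeof(torture_ops[0]); i++)
		printf("%s\n", torture_ops[i]->name);
	return 0;
}

Because the array stays well formed whether or not the macro is empty, the kernel no longer needs the unconditional "select TASKS_TRACE_RCU" under RCU_TORTURE_TEST, which is exactly what the diff below removes.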
@@ -95,15 +95,23 @@ config TASKS_RUDE_RCU
 	  switches on all online CPUs, including idle ones, so use
 	  with caution.
 
-config TASKS_TRACE_RCU
-	def_bool 0
-	select IRQ_WORK
+config FORCE_TASKS_TRACE_RCU
+	bool "Force selection of Tasks Trace RCU"
+	depends on RCU_EXPERT
+	select TASKS_TRACE_RCU
+	default n
 	help
 	  This option enables a task-based RCU implementation that uses
 	  explicit rcu_read_lock_trace() read-side markers, and allows
-	  these readers to appear in the idle loop as well as on the CPU
-	  hotplug code paths.  It can force IPIs on online CPUs, including
-	  idle ones, so use with caution.
+	  these readers to appear in the idle loop as well as on the
+	  CPU hotplug code paths.  It can force IPIs on online CPUs,
+	  including idle ones, so use with caution.  Not for manual
+	  selection in most cases.
+
+config TASKS_TRACE_RCU
+	bool
+	default n
+	select IRQ_WORK
 
 config RCU_STALL_COMMON
 	def_bool TREE_RCU
@@ -227,7 +235,7 @@ config RCU_NOCB_CPU
 
 config TASKS_TRACE_RCU_READ_MB
 	bool "Tasks Trace RCU readers use memory barriers in user and idle"
-	depends on RCU_EXPERT
+	depends on RCU_EXPERT && TASKS_TRACE_RCU
 	default PREEMPT_RT || NR_CPUS < 8
 	help
 	  Use this option to further reduce the number of IPIs sent
@@ -49,7 +49,6 @@ config RCU_TORTURE_TEST
 	select SRCU
 	select TASKS_RCU
 	select TASKS_RUDE_RCU
-	select TASKS_TRACE_RCU
 	default n
 	help
 	  This option provides a kernel module that runs torture tests
@@ -737,6 +737,48 @@ static struct rcu_torture_ops busted_srcud_ops = {
 	.name		= "busted_srcud"
 };
 
+/*
+ * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
+ * This implementation does not necessarily work well with CPU hotplug.
+ */
+
+static void synchronize_rcu_trivial(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
+		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
+	}
+}
+
+static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
+{
+	preempt_disable();
+	return 0;
+}
+
+static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
+{
+	preempt_enable();
+}
+
+static struct rcu_torture_ops trivial_ops = {
+	.ttype		= RCU_TRIVIAL_FLAVOR,
+	.init		= rcu_sync_torture_init,
+	.readlock	= rcu_torture_read_lock_trivial,
+	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
+	.readunlock	= rcu_torture_read_unlock_trivial,
+	.readlock_held	= torture_readlock_not_held,
+	.get_gp_seq	= rcu_no_completed,
+	.sync		= synchronize_rcu_trivial,
+	.exp_sync	= synchronize_rcu_trivial,
+	.fqs		= NULL,
+	.stats		= NULL,
+	.irq_capable	= 1,
+	.name		= "trivial"
+};
+
 /*
  * Definitions for RCU-tasks torture testing.
  */
@@ -780,48 +822,6 @@ static struct rcu_torture_ops tasks_ops = {
 	.name		= "tasks"
 };
 
-/*
- * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
- * This implementation does not necessarily work well with CPU hotplug.
- */
-
-static void synchronize_rcu_trivial(void)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
-		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
-	}
-}
-
-static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
-{
-	preempt_disable();
-	return 0;
-}
-
-static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
-{
-	preempt_enable();
-}
-
-static struct rcu_torture_ops trivial_ops = {
-	.ttype		= RCU_TRIVIAL_FLAVOR,
-	.init		= rcu_sync_torture_init,
-	.readlock	= rcu_torture_read_lock_trivial,
-	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
-	.readunlock	= rcu_torture_read_unlock_trivial,
-	.readlock_held	= torture_readlock_not_held,
-	.get_gp_seq	= rcu_no_completed,
-	.sync		= synchronize_rcu_trivial,
-	.exp_sync	= synchronize_rcu_trivial,
-	.fqs		= NULL,
-	.stats		= NULL,
-	.irq_capable	= 1,
-	.name		= "trivial"
-};
-
 /*
  * Definitions for rude RCU-tasks torture testing.
  */
@@ -851,6 +851,8 @@ static struct rcu_torture_ops tasks_rude_ops = {
 	.name		= "tasks-rude"
 };
 
+#ifdef CONFIG_TASKS_TRACE_RCU
+
 /*
  * Definitions for tracing RCU-tasks torture testing.
  */
@@ -893,6 +895,15 @@ static struct rcu_torture_ops tasks_tracing_ops = {
 	.name		= "tasks-tracing"
 };
 
+#define TASKS_TRACING_OPS &tasks_tracing_ops,
+
+#else // #ifdef CONFIG_TASKS_TRACE_RCU
+
+#define TASKS_TRACING_OPS
+
+#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU
+
+
 static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
 {
 	if (!cur_ops->gp_diff)
@@ -3096,9 +3107,9 @@ rcu_torture_init(void)
 	int flags = 0;
 	unsigned long gp_seq = 0;
 	static struct rcu_torture_ops *torture_ops[] = {
-		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
-		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
-		&tasks_tracing_ops, &trivial_ops,
+		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
+		&tasks_ops, &tasks_rude_ops, TASKS_TRACING_OPS
+		&trivial_ops,
 	};
 
 	if (!torture_init_begin(torture_type, verbose))
@@ -7,5 +7,7 @@ CONFIG_PREEMPT=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_PROVE_LOCKING=n
 #CHECK#CONFIG_PROVE_RCU=n
+CONFIG_FORCE_TASKS_TRACE_RCU=y
+#CHECK#CONFIG_TASKS_TRACE_RCU=y
 CONFIG_TASKS_TRACE_RCU_READ_MB=y
 CONFIG_RCU_EXPERT=y
@@ -7,5 +7,7 @@ CONFIG_PREEMPT=y
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
 #CHECK#CONFIG_PROVE_RCU=y
+CONFIG_FORCE_TASKS_TRACE_RCU=y
+#CHECK#CONFIG_TASKS_TRACE_RCU=y
 CONFIG_TASKS_TRACE_RCU_READ_MB=n
 CONFIG_RCU_EXPERT=y