rcu: Distinguish "rcuo" kthreads by RCU flavor
Currently, the per-no-CBs-CPU kthreads are named "rcuo" followed by the CPU number, for example, "rcuo0". This is problematic given that there are either two or three RCU flavors, each of which gets a per-CPU kthread with exactly the same name. This commit therefore introduces a one-letter abbreviation for each RCU flavor, namely 'b' for RCU-bh, 'p' for RCU-preempt, and 's' for RCU-sched. This abbreviation is used to distinguish the "rcuo" kthreads, for example, for CPU 0 we would have "rcuob/0", "rcuop/0", and "rcuos/0".

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
commit a488985851
parent 09c7b89062
@@ -2461,9 +2461,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 In kernels built with CONFIG_RCU_NOCB_CPU=y, set
 the specified list of CPUs to be no-callback CPUs.
 Invocation of these CPUs' RCU callbacks will
-be offloaded to "rcuoN" kthreads created for
-that purpose. This reduces OS jitter on the
+be offloaded to "rcuox/N" kthreads created for
+that purpose, where "x" is "b" for RCU-bh, "p"
+for RCU-preempt, and "s" for RCU-sched, and "N"
+is the CPU number. This reduces OS jitter on the
 offloaded CPUs, which can be useful for HPC and
 real-time workloads. It can also improve energy
 efficiency for asymmetric multiprocessors.
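To make the new naming concrete (the CPU list below is chosen for illustration and is not taken from the patch): on a kernel built with CONFIG_RCU_NOCB_CPU=y and CONFIG_TREE_PREEMPT_RCU=y, booting with rcu_nocbs=1 would be expected to yield kthreads named "rcuob/1", "rcuop/1", and "rcuos/1", whereas the old scheme would have created three identically named "rcuo1" kthreads for that CPU.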
init/Kconfig | 13 +++++++------
@@ -666,12 +666,13 @@ config RCU_NOCB_CPU
 
 This option offloads callback invocation from the set of
 CPUs specified at boot time by the rcu_nocbs parameter.
-For each such CPU, a kthread ("rcuoN") will be created to
-invoke callbacks, where the "N" is the CPU being offloaded.
-Nothing prevents this kthread from running on the specified
-CPUs, but (1) the kthreads may be preempted between each
-callback, and (2) affinity or cgroups can be used to force
-the kthreads to run on whatever set of CPUs is desired.
+For each such CPU, a kthread ("rcuox/N") will be created to
+invoke callbacks, where the "N" is the CPU being offloaded,
+and where the "x" is "b" for RCU-bh, "p" for RCU-preempt, and
+"s" for RCU-sched. Nothing prevents this kthread from running
+on the specified CPUs, but (1) the kthreads may be preempted
+between each callback, and (2) affinity or cgroups can be used
+to force the kthreads to run on whatever set of CPUs is desired.
 
 Say Y here if you want to help to debug reduced OS jitter.
 Say N here if you are unsure.
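Since the help text notes that affinity or cgroups can place these kthreads, one user-space illustration (commands and CPU numbers are assumptions, not part of the patch) would be something like "taskset -cp 0 $(pgrep rcuos/3)" to pin the RCU-sched offload kthread for CPU 3 onto CPU 0, or a cpuset cgroup to confine all "rcuo*" kthreads to a housekeeping set of CPUs in one step.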
@@ -64,7 +64,7 @@
 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
 
-#define RCU_STATE_INITIALIZER(sname, cr) { \
+#define RCU_STATE_INITIALIZER(sname, sabbr, cr) { \
 	.level = { &sname##_state.node[0] }, \
 	.call = cr, \
 	.fqs_state = RCU_GP_IDLE, \
@@ -76,13 +76,14 @@ static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
 	.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
 	.name = #sname, \
+	.abbr = sabbr, \
 }
 
 struct rcu_state rcu_sched_state =
-	RCU_STATE_INITIALIZER(rcu_sched, call_rcu_sched);
+	RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 
-struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh);
+struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
@@ -443,6 +443,7 @@ struct rcu_state {
 	unsigned long gp_max;		/* Maximum GP duration in */
 					/* jiffies. */
 	char *name;			/* Name of structure. */
+	char abbr;			/* Abbreviated name. */
 	struct list_head flavors;	/* List of RCU flavors. */
 };
 
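The three hunks above plumb the new abbreviation through the code: RCU_STATE_INITIALIZER() gains an sabbr parameter, and struct rcu_state gains a matching abbr field. A minimal stand-alone sketch of the same initializer-macro pattern (plain user-space C with made-up names, not the kernel's definitions):

#include <stdio.h>

/* Cut-down stand-in for struct rcu_state: just the name and abbreviation. */
struct flavor_state {
	const char *name;	/* Name of structure. */
	char abbr;		/* Abbreviated name. */
};

/* Same shape as RCU_STATE_INITIALIZER(sname, sabbr, cr): the token is
 * stringified via #sname and the one-letter abbreviation stored beside it. */
#define FLAVOR_STATE_INITIALIZER(sname, sabbr) {	\
	.name = #sname,					\
	.abbr = sabbr,					\
}

struct flavor_state sched_demo = FLAVOR_STATE_INITIALIZER(rcu_sched, 's');
struct flavor_state bh_demo = FLAVOR_STATE_INITIALIZER(rcu_bh, 'b');

int main(void)
{
	printf("%s -> '%c'\n", sched_demo.name, sched_demo.abbr);	/* rcu_sched -> 's' */
	printf("%s -> '%c'\n", bh_demo.name, bh_demo.abbr);		/* rcu_bh -> 'b' */
	return 0;
}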
@@ -111,7 +111,7 @@ static void __init rcu_bootup_announce_oddness(void)
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
 struct rcu_state rcu_preempt_state =
-	RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
+	RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
@@ -2517,7 +2517,8 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
 		return;
 	for_each_cpu(cpu, rcu_nocb_mask) {
 		rdp = per_cpu_ptr(rsp->rda, cpu);
-		t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
+		t = kthread_run(rcu_nocb_kthread, rdp,
+				"rcuo%c/%d", rsp->abbr, cpu);
 		BUG_ON(IS_ERR(t));
 		ACCESS_ONCE(rdp->nocb_kthread) = t;
 	}
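Finally, the kthread name itself is built from the flavor abbreviation and the CPU number via the "rcuo%c/%d" format passed to kthread_run(). A minimal user-space sketch of just that name construction (snprintf() stands in for kthread_run()'s internal formatting; the CPU number is illustrative):

#include <stdio.h>

int main(void)
{
	const char abbrs[] = { 'b', 'p', 's' };	/* RCU-bh, RCU-preempt, RCU-sched */
	char comm[16];				/* TASK_COMM_LEN-sized name buffer */
	int cpu = 0;

	for (int i = 0; i < 3; i++) {
		/* Same format string the patch passes to kthread_run(). */
		snprintf(comm, sizeof(comm), "rcuo%c/%d", abbrs[i], cpu);
		printf("%s\n", comm);	/* prints rcuob/0, rcuop/0, rcuos/0 */
	}
	return 0;
}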