Urgent RCU pull request for v5.17
This pull request fixes a math error added in 7a30871b6a ("rcu-tasks:
Introduce ->percpu_enqueue_shift for dynamic queue selection") during the
v5.17 merge window. This commit works correctly only on systems with a
power-of-two number of CPUs, which just so happens to be the kind that
rcutorture always uses by default. This pull request fixes the math so that
things also work on systems that don't happen to have a power-of-two number
of CPUs.

Merge tag 'rcu-urgent.2022.01.26a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull RCU fix from Paul McKenney:

 "This fixes a brown-paper-bag bug in RCU tasks that causes things like
  BPF and ftrace to fail miserably on systems with non-power-of-two
  numbers of CPUs.

  It fixes a math error added in 7a30871b6a ("rcu-tasks: Introduce
  ->percpu_enqueue_shift for dynamic queue selection") during the v5.17
  merge window. This commit works correctly only on systems with a
  power-of-two number of CPUs, which just so happens to be the kind that
  rcutorture always uses by default. This pull request fixes the math so
  that things also work on systems that don't happen to have a
  power-of-two number of CPUs"

* tag 'rcu-urgent.2022.01.26a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  rcu-tasks: Fix computation of CPU-to-list shift counts
commit a773abf72e
@@ -123,7 +123,7 @@ static struct rcu_tasks rt_name = \
         .call_func = call, \
         .rtpcpu = &rt_name ## __percpu, \
         .name = n, \
-        .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS), \
+        .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS) + 1, \
         .percpu_enqueue_lim = 1, \
         .percpu_dequeue_lim = 1, \
         .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
@@ -216,6 +216,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
         int cpu;
         unsigned long flags;
         int lim;
+        int shift;
 
         raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
         if (rcu_task_enqueue_lim < 0) {
@@ -229,7 +230,10 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
 
         if (lim > nr_cpu_ids)
                 lim = nr_cpu_ids;
-        WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids / lim));
+        shift = ilog2(nr_cpu_ids / lim);
+        if (((nr_cpu_ids - 1) >> shift) >= lim)
+                shift++;
+        WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
         WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
         smp_store_release(&rtp->percpu_enqueue_lim, lim);
         for_each_possible_cpu(cpu) {
@@ -298,7 +302,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
         if (unlikely(needadjust)) {
                 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
                 if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
-                        WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
+                        WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
                         WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
                         smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
                         pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
@@ -413,7 +417,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
         if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
                 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
                 if (rtp->percpu_enqueue_lim > 1) {
-                        WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
+                        WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
                         smp_store_release(&rtp->percpu_enqueue_lim, 1);
                         rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
                         pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
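As a sanity check on the new computation in cblist_init_generic() above,
here is a standalone brute-force sketch. It is userspace-only and
hypothetical (ilog2_u, shift_old, shift_new and in_range are illustrative
names, not part of the patch): it exercises both the old and the new shift
formulas for every CPU count and list limit up to 64 and reports any
combination that maps a CPU past the last list.

/* Illustrative userspace sketch only; not from the kernel tree. */
#include <stdio.h>

/* Floor of log2(x) for x > 0, analogous to the kernel's ilog2(). */
static int ilog2_u(unsigned int x)
{
        int r = -1;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

/* Pre-fix shift computation. */
static int shift_old(unsigned int nr_cpu_ids, unsigned int lim)
{
        return ilog2_u(nr_cpu_ids / lim);
}

/* Post-fix shift computation from the hunk at line 230 above. */
static int shift_new(unsigned int nr_cpu_ids, unsigned int lim)
{
        int shift = ilog2_u(nr_cpu_ids / lim);

        if (((nr_cpu_ids - 1) >> shift) >= lim)
                shift++;
        return shift;
}

/* Return 1 if every CPU in [0, nr_cpu_ids) maps to a list index below lim. */
static int in_range(unsigned int nr_cpu_ids, unsigned int lim, int shift)
{
        return ((nr_cpu_ids - 1) >> shift) < lim;
}

int main(void)
{
        unsigned int lim, n;

        for (n = 1; n <= 64; n++)
                for (lim = 1; lim <= n; lim++) {
                        if (!in_range(n, lim, shift_new(n, lim)))
                                printf("new math BAD: nr_cpu_ids=%u lim=%u\n", n, lim);
                        if (!in_range(n, lim, shift_old(n, lim)))
                                printf("old math misroutes: nr_cpu_ids=%u lim=%u\n", n, lim);
                }
        return 0;
}

Run this way, the new formula reports no failures, while the old one
misroutes high-numbered CPUs in many configurations; with the default
single list, that happens for exactly the non-power-of-two CPU counts the
pull request describes.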