rcu/nocb: Disable bypass when CPU isn't completely offloaded
Currently, the bypass is flushed at the very last moment in the
deoffloading procedure. However, this approach leads to a larger state
space than would be preferred. This commit therefore disables the
bypass as soon as the deoffloading procedure begins, then flushes it.
This guarantees that the bypass remains empty and thus out of the way
of the deoffloading procedure. Symmetrically, this commit waits to
enable the bypass until the offloading procedure has completed.

Reported-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent b2fcf21020
commit 76d00b494d
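Before the diff itself, the ordering the commit message describes can be made concrete: bypass enqueueing is legal only while a CPU is completely offloaded; it is switched off before deoffloading starts, after which a single flush suffices; and it is switched back on only once offloading has fully completed. The following minimal C sketch illustrates that rule in isolation. Every name in it (offload_state, cpu_cbs, flush_bypass(), and so on) is invented for illustration and is not the kernel's API:

#include <stdbool.h>

/* Hypothetical stand-in for the rcu_segcblist offload flags. */
enum offload_state { OFFLOADED, DEOFFLOADING, DEOFFLOADED, OFFLOADING };

struct cpu_cbs {
	enum offload_state state;
	int nr_bypass;			/* callbacks parked on the bypass list */
};

/* Bypass enqueue is allowed only when completely offloaded. */
static bool bypass_enabled(const struct cpu_cbs *c)
{
	return c->state == OFFLOADED;
}

static void flush_bypass(struct cpu_cbs *c)
{
	c->nr_bypass = 0;		/* move bypass callbacks to the main list */
}

static void deoffload(struct cpu_cbs *c)
{
	c->state = DEOFFLOADING;	/* step 1: bypass enqueue now refused */
	flush_bypass(c);		/* step 2: flush once; it cannot refill */
	/* ... the rest of deoffloading never sees a non-empty bypass ... */
	c->state = DEOFFLOADED;
}

static void offload(struct cpu_cbs *c)
{
	c->state = OFFLOADING;		/* bypass stays disabled during the switch */
	/* ... hand callbacks over to the CB/GP kthreads ... */
	c->state = OFFLOADED;		/* only now may callers use the bypass */
}

int main(void)
{
	struct cpu_cbs c = { .state = OFFLOADED, .nr_bypass = 3 };

	deoffload(&c);			/* flushes and disables the bypass */
	offload(&c);			/* re-enables it only at the very end */
	return !bypass_enabled(&c);	/* 0: bypass usable again */
}

The point of the sketch is the ordering in deoffload(): the state change that disables the bypass strictly precedes the flush, so the flush cannot race with new bypass enqueues.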
@@ -109,7 +109,7 @@ struct rcu_cblist {
  * |                          SEGCBLIST_KTHREAD_GP                           |
  * |                                                                         |
  * | Kthreads handle callbacks holding nocb_lock, local rcu_core() stops    |
- * | handling callbacks.                                                     |
+ * | handling callbacks. Enable bypass queueing.                             |
  * ---------------------------------------------------------------------------
  */
 
@@ -125,7 +125,7 @@ struct rcu_cblist {
  * |                          SEGCBLIST_KTHREAD_GP                           |
  * |                                                                         |
  * | CB/GP kthreads handle callbacks holding nocb_lock, local rcu_core()    |
- * | ignores callbacks.                                                      |
+ * | ignores callbacks. Bypass enqueue is enabled.                           |
  * ---------------------------------------------------------------------------
  *                                      |
  *                                      v
@@ -134,7 +134,8 @@ struct rcu_cblist {
  * |                          SEGCBLIST_KTHREAD_GP                           |
  * |                                                                         |
  * | CB/GP kthreads and local rcu_core() handle callbacks concurrently      |
- * | holding nocb_lock. Wake up CB and GP kthreads if necessary.             |
+ * | holding nocb_lock. Wake up CB and GP kthreads if necessary. Disable    |
+ * | bypass enqueue.                                                         |
  * ---------------------------------------------------------------------------
  *                                      |
  *                                      v
@@ -1830,11 +1830,22 @@ static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
 	unsigned long j = jiffies;
 	long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
 
+	lockdep_assert_irqs_disabled();
+
+	// Pure softirq/rcuc based processing: no bypassing, no
+	// locking.
 	if (!rcu_rdp_is_offloaded(rdp)) {
 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
 		return false;
 	}
-	lockdep_assert_irqs_disabled();
+
+	// In the process of (de-)offloading: no bypassing, but
+	// locking.
+	if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
+		rcu_nocb_lock(rdp);
+		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
+		return false; /* Not offloaded, no bypassing. */
+	}
 
 	// Don't use ->nocb_bypass during early boot.
 	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
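After this hunk, rcu_nocb_try_bypass() distinguishes three regimes: not offloaded (no locking, no bypass), in the middle of (de-)offloading (nocb_lock taken, bypass still refused), and completely offloaded (bypass usable, handled further down). For orientation, here is a hedged sketch of how a call_rcu()-style caller consumes the return value and *was_alldone; it is not kernel code, and wake_nocb_kthreads() is a hypothetical placeholder for the actual wakeup path:

/*
 * Hedged sketch of the caller-side contract, in kernel context.
 * wake_nocb_kthreads() is a hypothetical placeholder.
 */
static void enqueue_callback(struct rcu_data *rdp, struct rcu_head *rhp,
			     unsigned long flags)
{
	bool was_alldone;

	if (rcu_nocb_try_bypass(rdp, rhp, &was_alldone, flags))
		return;		/* Queued on the bypass; nothing more to do. */

	/*
	 * false: enqueue on the main ->cblist instead. In the
	 * (de-)offloading case above, ->nocb_lock is already held at
	 * this point, and was_alldone says whether the main list was
	 * empty, i.e. whether a kthread wakeup may be needed.
	 */
	rcu_segcblist_enqueue(&rdp->cblist, rhp);
	if (was_alldone)
		wake_nocb_kthreads(rdp);
}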
@@ -2416,7 +2427,16 @@ static long rcu_nocb_rdp_deoffload(void *arg)
 	pr_info("De-offloading %d\n", rdp->cpu);
 
 	rcu_nocb_lock_irqsave(rdp, flags);
+	/*
+	 * Flush once and for all now. This suffices because we are
+	 * running on the target CPU holding ->nocb_lock (thus having
+	 * interrupts disabled), and because rdp_offload_toggle()
+	 * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
+	 * Thus future calls to rcu_segcblist_completely_offloaded() will
+	 * return false, which means that future calls to rcu_nocb_try_bypass()
+	 * will refuse to put anything into the bypass.
+	 */
+	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
 	ret = rdp_offload_toggle(rdp, false, flags);
 	swait_event_exclusive(rdp->nocb_state_wq,
 			      !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
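The comment above compresses to an invariant: once rdp_offload_toggle() clears SEGCBLIST_OFFLOADED, rcu_segcblist_completely_offloaded() returns false, the middle branch added to rcu_nocb_try_bypass() refuses all bypass enqueues, and interrupts being disabled on the target CPU means nothing can slip in between the flush and the flag change. A sketch of the resulting check, assuming only the accessors already shown in these hunks:

/* After the early flush, this must hold until deoffloading finishes. */
static void check_bypass_stays_empty(struct rcu_data *rdp)
{
	WARN_ON_ONCE(rcu_segcblist_completely_offloaded(&rdp->cblist));
	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
}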
@@ -2428,21 +2448,21 @@ static long rcu_nocb_rdp_deoffload(void *arg)
 	del_timer_sync(&rdp->nocb_timer);
 
 	/*
-	 * Flush bypass. While IRQs are disabled and once we set
-	 * SEGCBLIST_SOFTIRQ_ONLY, no callback is supposed to be
-	 * enqueued on bypass.
+	 * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY with CB unlocked
+	 * and IRQs disabled but let's be paranoid.
 	 */
 	rcu_nocb_lock_irqsave(rdp, flags);
-	rcu_nocb_flush_bypass(rdp, NULL, jiffies);
 	rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
 	/*
 	 * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
-	 * rcu_nocb_unlock_irqrestore() anymore. Theoretically we
-	 * could set SEGCBLIST_SOFTIRQ_ONLY with cb unlocked and IRQs
-	 * disabled now, but let's be paranoid.
+	 * rcu_nocb_unlock_irqrestore() anymore.
 	 */
 	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 
+	/* Sanity check */
+	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
+
 
 	return ret;
 }
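A note on the final raw unlock in this hunk: rcu_nocb_unlock_irqrestore() decides whether to drop ->nocb_lock based on the rdp's offload state, so once SEGCBLIST_SOFTIRQ_ONLY is set it would no longer release the lock, and the code therefore calls raw_spin_unlock_irqrestore() directly. A paraphrased sketch of that shape, not the verbatim kernel implementation:

/* Paraphrased shape of rcu_nocb_unlock_irqrestore(), for illustration. */
static void nocb_unlock_irqrestore_sketch(struct rcu_data *rdp,
					  unsigned long flags)
{
	if (rcu_rdp_is_offloaded(rdp))
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
	else
		local_irq_restore(flags);
}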