rcu: Use CONFIG_PREEMPTION where appropriate
The config option `CONFIG_PREEMPT' is used for the preemption model
"Low-Latency Desktop". The config option `CONFIG_PREEMPTION' is enabled
whenever kernel preemption is enabled, which is true for both the
`CONFIG_PREEMPT' and the `CONFIG_PREEMPT_RT' preemption models.

Use `CONFIG_PREEMPTION' if it applies to both preemption models and not
just to `CONFIG_PREEMPT'.

Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: rcu@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
commit 90326f0521
parent b3e627d3d5
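As a minimal sketch (not part of the patch; the helper names below are hypothetical), the distinction the commit message describes can be expressed with IS_ENABLED():

/*
 * Minimal illustration, assuming only what the commit message states:
 * CONFIG_PREEMPTION is enabled for every preemptible kernel, i.e. for
 * both CONFIG_PREEMPT and CONFIG_PREEMPT_RT, while CONFIG_PREEMPT alone
 * denotes the "Low-Latency Desktop" preemption model.
 */
#include <linux/kconfig.h>      /* IS_ENABLED() */
#include <linux/types.h>        /* bool */

/* True for any preemptible kernel (CONFIG_PREEMPT or CONFIG_PREEMPT_RT). */
static inline bool kernel_is_preemptible(void)
{
        return IS_ENABLED(CONFIG_PREEMPTION);
}

/* True only for the "Low-Latency Desktop" model. */
static inline bool low_latency_desktop_model(void)
{
        return IS_ENABLED(CONFIG_PREEMPT);
}

A check that must hold for every preemptible kernel, including PREEMPT_RT, therefore tests CONFIG_PREEMPTION; that is exactly the substitution made in each hunk below.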
@@ -154,7 +154,7 @@ static inline void exit_tasks_rcu_finish(void) { }
  *
  * This macro resembles cond_resched(), except that it is defined to
  * report potential quiescent states to RCU-tasks even if the cond_resched()
- * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ * machinery were to be shut off, as some advocate for PREEMPTION kernels.
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
@@ -598,7 +598,7 @@ do { \
  *
  * You can avoid reading and understanding the next paragraph by
  * following this rule: don't put anything in an rcu_read_lock() RCU
- * read-side critical section that would block in a !PREEMPT kernel.
+ * read-side critical section that would block in a !PREEMPTION kernel.
  * But if you want the full story, read on!
  *
  * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
@@ -201,8 +201,8 @@ config RCU_NOCB_CPU
           specified at boot time by the rcu_nocbs parameter. For each
           such CPU, a kthread ("rcuox/N") will be created to invoke
           callbacks, where the "N" is the CPU being offloaded, and where
-          the "p" for RCU-preempt (PREEMPT kernels) and "s" for RCU-sched
-          (!PREEMPT kernels). Nothing prevents this kthread from running
+          the "p" for RCU-preempt (PREEMPTION kernels) and "s" for RCU-sched
+          (!PREEMPTION kernels). Nothing prevents this kthread from running
           on the specified CPUs, but (1) the kthreads may be preempted
           between each callback, and (2) affinity or cgroups can be used
           to force the kthreads to run on whatever set of CPUs is desired.
@@ -1730,7 +1730,7 @@ static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
 // Give the scheduler a chance, even on nohz_full CPUs.
 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
 {
-        if (IS_ENABLED(CONFIG_PREEMPT) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
+        if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
                 // Real call_rcu() floods hit userspace, so emulate that.
                 if (need_resched() || (iter & 0xfff))
                         schedule();
@@ -103,7 +103,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
 /*
  * Workqueue handler to drive one grace period and invoke any callbacks
- * that become ready as a result. Single-CPU and !PREEMPT operation
+ * that become ready as a result. Single-CPU and !PREEMPTION operation
  * means that we get away with murder on synchronization. ;-)
  */
 void srcu_drive_gp(struct work_struct *wp)
@@ -2698,9 +2698,9 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
 /*
  * During early boot, any blocking grace-period wait automatically
- * implies a grace period. Later on, this is never the case for PREEMPT.
+ * implies a grace period. Later on, this is never the case for PREEMPTION.
  *
- * Howevr, because a context switch is a grace period for !PREEMPT, any
+ * Howevr, because a context switch is a grace period for !PREEMPTION, any
  * blocking grace-period wait automatically implies a grace period if
  * there is only one CPU online at any point time during execution of
  * either synchronize_rcu() or synchronize_rcu_expedited(). It is OK to
@@ -670,7 +670,7 @@ static void rcu_exp_handler(void *unused)
         }
 }
 
-/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
+/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
 static void sync_sched_exp_online_cleanup(int cpu)
 {
 }
@@ -789,7 +789,7 @@ static void __init rcu_bootup_announce(void)
 }
 
 /*
- * Note a quiescent state for PREEMPT=n. Because we do not need to know
+ * Note a quiescent state for PREEMPTION=n. Because we do not need to know
  * how many quiescent states passed, just if there was at least one since
  * the start of the grace period, this just sets a flag. The caller must
  * have disabled preemption.
@@ -839,7 +839,7 @@ void rcu_all_qs(void)
 EXPORT_SYMBOL_GPL(rcu_all_qs);
 
 /*
- * Note a PREEMPT=n context switch. The caller must have disabled interrupts.
+ * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
  */
 void rcu_note_context_switch(bool preempt)
 {