rcu: Clean up flavor-related definitions and comments in update.c
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
commit 06462efc80
parent 0ae86a2726
kernel/rcu/update.c
@@ -332,7 +332,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
 	int i;
 	int j;
 
-	/* Initialize and register callbacks for each flavor specified. */
+	/* Initialize and register callbacks for each crcu_array element. */
 	for (i = 0; i < n; i++) {
 		if (checktiny &&
 		    (crcu_array[i] == call_rcu ||
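The reworded comment above describes what the loop actually does: for each element of crcu_array[], __wait_rcu_gp() posts a callback and then waits for the corresponding grace period to elapse. A minimal, hypothetical sketch of that pattern follows; the sketch_* names and the fixed-size array are assumptions for illustration, not the kernel's actual implementation.

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_wait.h>

struct sketch_sync {
	struct rcu_head head;
	struct completion done;
};

/* Callback posted for each flavor; fires after that flavor's grace period. */
static void sketch_gp_done(struct rcu_head *head)
{
	struct sketch_sync *ss = container_of(head, struct sketch_sync, head);

	complete(&ss->done);
}

/* Post one callback per crcu_array[] element, then wait for all of them. */
static void sketch_wait_rcu_gp(call_rcu_func_t *crcu_array, int n)
{
	struct sketch_sync ss[4];	/* assumes n <= 4 for brevity */
	int i;

	for (i = 0; i < n; i++) {
		init_completion(&ss[i].done);
		(crcu_array[i])(&ss[i].head, sketch_gp_done);
	}
	for (i = 0; i < n; i++)
		wait_for_completion(&ss[i].done);
}

Callers normally reach __wait_rcu_gp() through the synchronize_rcu_mult() macro, passing the call_rcu()-style functions for the grace periods to be waited on.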
@@ -697,19 +697,19 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 
 		/*
 		 * Wait for all pre-existing t->on_rq and t->nvcsw
-		 * transitions to complete. Invoking synchronize_sched()
+		 * transitions to complete. Invoking synchronize_rcu()
 		 * suffices because all these transitions occur with
-		 * interrupts disabled. Without this synchronize_sched(),
+		 * interrupts disabled. Without this synchronize_rcu(),
 		 * a read-side critical section that started before the
 		 * grace period might be incorrectly seen as having started
 		 * after the grace period.
 		 *
-		 * This synchronize_sched() also dispenses with the
+		 * This synchronize_rcu() also dispenses with the
 		 * need for a memory barrier on the first store to
 		 * ->rcu_tasks_holdout, as it forces the store to happen
 		 * after the beginning of the grace period.
 		 */
-		synchronize_sched();
+		synchronize_rcu();
 
 		/*
 		 * There were callbacks, so we need to wait for an
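The argument in the comment above is that the ->on_rq and ->nvcsw updates happen with interrupts disabled, and with the consolidated RCU flavor an interrupts-disabled region is also an RCU read-side critical section, so a single synchronize_rcu() both waits those updates out and orders the subsequent holdout marking. A hedged sketch of that updater-side ordering; sketch_mark_holdout() is hypothetical, and the ->rcu_tasks_holdout access assumes CONFIG_TASKS_RCU.

#include <linux/rcupdate.h>
#include <linux/sched.h>

static void sketch_mark_holdout(struct task_struct *t)
{
	/*
	 * Waits for pre-existing interrupts-disabled regions, and thus for
	 * any in-flight ->on_rq / ->nvcsw updates, to complete.
	 */
	synchronize_rcu();

	/*
	 * Because of the grace period above, this store cannot be observed
	 * as preceding the start of the grace period, so no explicit
	 * smp_mb() is needed here.
	 */
	WRITE_ONCE(t->rcu_tasks_holdout, true);
}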
@@ -736,7 +736,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		 * This does only part of the job, ensuring that all
 		 * tasks that were previously exiting reach the point
 		 * where they have disabled preemption, allowing the
-		 * later synchronize_sched() to finish the job.
+		 * later synchronize_rcu() to finish the job.
 		 */
 		synchronize_srcu(&tasks_rcu_exit_srcu);
 
@@ -786,20 +786,20 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 		 * cause their RCU-tasks read-side critical sections to
 		 * extend past the end of the grace period. However,
 		 * because these ->nvcsw updates are carried out with
-		 * interrupts disabled, we can use synchronize_sched()
+		 * interrupts disabled, we can use synchronize_rcu()
 		 * to force the needed ordering on all such CPUs.
 		 *
-		 * This synchronize_sched() also confines all
+		 * This synchronize_rcu() also confines all
 		 * ->rcu_tasks_holdout accesses to be within the grace
 		 * period, avoiding the need for memory barriers for
 		 * ->rcu_tasks_holdout accesses.
 		 *
-		 * In addition, this synchronize_sched() waits for exiting
+		 * In addition, this synchronize_rcu() waits for exiting
 		 * tasks to complete their final preempt_disable() region
 		 * of execution, cleaning up after the synchronize_srcu()
 		 * above.
 		 */
-		synchronize_sched();
+		synchronize_rcu();
 
 		/* Invoke the callbacks. */
 		while (list) {
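Taken together, the synchronize_srcu(&tasks_rcu_exit_srcu) in the earlier hunk and the final synchronize_rcu() above form a two-step handshake with exiting tasks: SRCU waits for tasks that were already exiting to reach their final preempt_disable() region, and the consolidated-flavor grace period then waits that region out. A self-contained SRCU sketch of the shape of that handshake; sketch_exit_srcu and the sketch_* functions are stand-ins, not the kernel's tasks_rcu_exit_srcu machinery.

#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(sketch_exit_srcu);

/* Stand-in for an exiting task's read side. */
static void sketch_task_exit_path(void)
{
	int idx;

	idx = srcu_read_lock(&sketch_exit_srcu);
	/* ... late exit processing ... */
	srcu_read_unlock(&sketch_exit_srcu, idx);

	preempt_disable();
	/* ... final stretch of execution before the task disappears ... */
	preempt_enable();
}

/* Stand-in for the grace-period kthread's two-step wait. */
static void sketch_wait_for_exiting_tasks(void)
{
	/* Step 1: pre-existing exiting tasks leave the SRCU read side. */
	synchronize_srcu(&sketch_exit_srcu);

	/*
	 * Step 2: the consolidated flavor treats a preempt_disable()
	 * region as a read-side critical section, so this also waits out
	 * the final preempt-disabled stretch of execution.
	 */
	synchronize_rcu();
}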