rcu: apply RCU protection to wake_affine()
The task_group() function returns a pointer that must be protected by either RCU, the ->alloc_lock, or the cgroup lock (see the rcu_dereference_check() in task_subsys_state(), which is invoked by task_group()). The wake_affine() function currently does none of these, which means that a concurrent update would be within its rights to free the structure returned by task_group(). Because wake_affine() uses this structure only to compute load-balancing heuristics, there is no reason to acquire either of the two locks.

Therefore, this commit introduces an RCU read-side critical section that starts before the first call to task_group() and ends after the last use of the "tg" pointer returned from task_group(). Thanks to Li Zefan for pointing out the need to extend the RCU read-side critical section from that proposed by the original patch.

Signed-off-by: Daniel J Blueman <daniel.blueman@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 7e27d6e778
commit f3b577dec1
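For context, the rcu_dereference_check() the message refers to lives in task_subsys_state(), which task_group() calls. Below is a minimal sketch of that check, simplified from kernels of that era rather than copied verbatim; the diff that follows adds the read-side critical section that satisfies it.

/*
 * Sketch (not verbatim) of the lockdep-checked dereference behind
 * task_group(): under CONFIG_PROVE_RCU, rcu_dereference_check()
 * complains unless the caller is inside an RCU read-side critical
 * section, holds the task's ->alloc_lock, or holds the cgroup lock.
 */
static inline struct cgroup_subsys_state *
task_subsys_state(struct task_struct *task, int subsys_id)
{
	return rcu_dereference_check(task->cgroups->subsys[subsys_id],
				     rcu_read_lock_held() ||
				     lockdep_is_held(&task->alloc_lock) ||
				     cgroup_lock_is_held());
}

wake_affine() held none of these, so its task_group() result could be freed out from under it; wrapping the whole span of use in rcu_read_lock()/rcu_read_unlock() is the cheapest of the three options, since the data is only used for load-balancing heuristics.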
@@ -1240,6 +1240,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 	 * effect of the currently running task from the load
 	 * of the current CPU:
 	 */
+	rcu_read_lock();
 	if (sync) {
 		tg = task_group(current);
 		weight = current->se.load.weight;
@@ -1275,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 		balanced = this_eff_load <= prev_eff_load;
 	} else
 		balanced = true;
+	rcu_read_unlock();
 
 	/*
 	 * If the currently running task will sleep within