Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: Fix RCU lockdep splat on freezer_fork path
  rcu: Fix RCU lockdep splat in set_task_cpu on fork path
  mutex: Don't spin when the owner CPU is offline or other weird cases
commit f2809d61d6
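Both RCU fixes in this merge address the same CONFIG_PROVE_RCU pattern: task_freezer() and task_group() resolve a task's cgroup subsystem state through task_subsys_state(), which is an rcu_dereference() user, so lockdep splats when they are called outside an RCU read-side critical section even though the cgroup is pinned by other means on these paths. Roughly (a hedged sketch with an invented name, not the exact kernel source), the accessor has this shape:

/* Sketch of the accessor shape behind task_freezer()/task_group();
 * the name example_task_state is invented for illustration. */
static inline struct cgroup_subsys_state *
example_task_state(struct task_struct *task, int subsys_id)
{
	/* Under CONFIG_PROVE_RCU this complains unless one of the
	 * listed conditions holds, e.g. the caller sits inside an
	 * rcu_read_lock()/rcu_read_unlock() pair. */
	return rcu_dereference_check(task->cgroups->subsys[subsys_id],
				     rcu_read_lock_held());
}

Hence the hunks below simply bracket the lookups with rcu_read_lock()/rcu_read_unlock() rather than relaxing the check itself.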
@@ -205,9 +205,12 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
 	 * No lock is needed, since the task isn't on tasklist yet,
 	 * so it can't be moved to another cgroup, which means the
 	 * freezer won't be removed and will be valid during this
-	 * function call.
+	 * function call. Nevertheless, apply RCU read-side critical
+	 * section to suppress RCU lockdep false positives.
 	 */
+	rcu_read_lock();
 	freezer = task_freezer(task);
+	rcu_read_unlock();
 
 	/*
 	 * The root cgroup is non-freezable, so we can skip the
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
+	/*
+	 * Strictly speaking this rcu_read_lock() is not needed since the
+	 * task_group is tied to the cgroup, which in turn can never go away
+	 * as long as there are tasks attached to it.
+	 *
+	 * However since task_group() uses task_subsys_state() which is an
+	 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+	 */
+	rcu_read_lock();
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
 	p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 	p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
 	p->rt.parent = task_group(p)->rt_se[cpu];
 #endif
+	rcu_read_unlock();
 }
 
 #else
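As the added comment says, the read-side critical section here is about documentation and lockdep rather than correctness. The other way to quiet the checker would be to skip validation at the dereference site; a hedged aside (invented struct and function names, not part of this commit):

/* Aside: rcu_dereference_raw() does the same fetch as
 * rcu_dereference() but tells CONFIG_PROVE_RCU not to validate the
 * calling context at all. */
struct foo {
	int val;
};

static struct foo *peek_foo(struct foo **slot)
{
	/* no lockdep splat, but also no documented protection */
	return rcu_dereference_raw(*slot);
}

The fix above prefers the explicit rcu_read_lock()/rcu_read_unlock() pair, which keeps the lockdep coverage intact for every other caller of task_group()/task_subsys_state().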
@@ -3780,7 +3790,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the mutex owner just released it and exited.
 	 */
 	if (probe_kernel_address(&owner->cpu, cpu))
-		goto out;
+		return 0;
 #else
 	cpu = owner->cpu;
 #endif
@@ -3790,14 +3800,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 	 * the cpu field may no longer be valid.
 	 */
 	if (cpu >= nr_cpumask_bits)
-		goto out;
+		return 0;
 
 	/*
 	 * We need to validate that we can do a
 	 * get_cpu() and that we have the percpu area.
 	 */
 	if (!cpu_online(cpu))
-		goto out;
+		return 0;
 
 	rq = cpu_rq(cpu);
 
@@ -3816,7 +3826,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 
 		cpu_relax();
 	}
-out:
+
 	return 1;
 }
 #endif
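Before this change, the three bail-out checks jumped to the out: label at the bottom of the function and so returned 1, which the caller treats as "spinning is still worthwhile"; an offline or garbage owner CPU could therefore keep the task busy-spinning instead of sleeping. Returning 0 sends the caller to the blocking slow path. A rough, paraphrased sketch (not verbatim kernel source) of how the optimistic-spin loop in __mutex_lock_common() consumes the return value:

	for (;;) {
		struct thread_info *owner = ACCESS_ONCE(lock->owner);

		/* 0 = give up the optimistic spin, fall back to sleeping */
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* 1 = owner released or changed: try to take the lock,
		 * otherwise keep spinning */
		if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
			return 0;	/* lock acquired */

		cpu_relax();
	}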