sched: Fix unregister_fair_sched_group()
In the flipping and flopping between calling unregister_fair_sched_group()
on a per-cpu versus per-group basis we ended up in a bad state. Remove from
the list for the passed cpu as opposed to some arbitrary index.

( This fixes explosions w/ autogroup as well as a group creation/destruction
  stress test. )

Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Turner <pjt@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <20101130005740.080828123@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 822bc180a7
parent b7a2b39d9b
@@ -8085,7 +8085,6 @@ static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
-	int i;
 
 	/*
 	 * Only empty task groups can be destroyed; so we can speculatively
@@ -8095,7 +8094,7 @@ static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 		return;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	list_del_leaf_cfs_rq(tg->cfs_rq[i]);
+	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 #else /* !CONFG_FAIR_GROUP_SCHED */
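For illustration only (not kernel code): the sketch below is a self-contained
user-space analog of the bug class this commit fixes. Once the teardown is
called per cpu, the passed cpu argument is the only valid index, and the
leftover loop variable `i` named nothing meaningful. All names here
(cfs_rq_stub, task_group_stub, unregister_group_on_cpu) are hypothetical
stand-ins for the scheduler's structures, assumed for the example.

#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical stand-ins for the scheduler's structures. */
struct cfs_rq_stub {
	int on_list;                    /* nonzero while linked on a per-cpu list */
};

struct task_group_stub {
	struct cfs_rq_stub *cfs_rq[NR_CPUS];
};

/*
 * Analog of the fixed unregister_fair_sched_group(): unlink the entry
 * belonging to the cpu that was passed in.  The bug was indexing with a
 * leftover loop variable `i` instead of `cpu`, so an arbitrary (likely
 * uninitialized, once the per-group loop was gone) slot was unlinked.
 */
static void unregister_group_on_cpu(struct task_group_stub *tg, int cpu)
{
	if (!tg->cfs_rq[cpu]->on_list)
		return;

	/* (the kernel takes rq->lock around this) */
	tg->cfs_rq[cpu]->on_list = 0;   /* was: tg->cfs_rq[i] */
	printf("unlinked cfs_rq for cpu %d\n", cpu);
}

int main(void)
{
	struct cfs_rq_stub rqs[NR_CPUS];
	struct task_group_stub tg;

	for (int c = 0; c < NR_CPUS; c++) {
		rqs[c].on_list = 1;
		tg.cfs_rq[c] = &rqs[c];
	}

	/* Per-cpu teardown, matching the calling convention the fix restores. */
	for (int c = 0; c < NR_CPUS; c++)
		unregister_group_on_cpu(&tg, c);

	return 0;
}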