From 822bc180a7f7a7bc5fcaaea195f41b487cc8cae8 Mon Sep 17 00:00:00 2001
From: Paul Turner
Date: Mon, 29 Nov 2010 16:55:40 -0800
Subject: [PATCH] sched: Fix unregister_fair_sched_group()

In the flipping and flopping between calling
unregister_fair_sched_group() on a per-cpu versus per-group basis
we ended up in a bad state.

Remove from the list for the passed cpu as opposed to some
arbitrary index.

( This fixes explosions w/ autogroup as well as a group
  creation/destruction stress test. )

Reported-by: Stephen Rothwell
Signed-off-by: Paul Turner
Cc: Peter Zijlstra
Cc: Mike Galbraith
LKML-Reference: <20101130005740.080828123@google.com>
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 35a6373f1265..66ef5790d932 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8085,7 +8085,6 @@ static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
-	int i;
 
 	/*
 	 * Only empty task groups can be destroyed; so we can speculatively
@@ -8095,7 +8094,7 @@ static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 		return;
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
-	list_del_leaf_cfs_rq(tg->cfs_rq[i]);
+	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 #else /* !CONFG_FAIR_GROUP_SCHED */
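
For reference, a sketch of how unregister_fair_sched_group() reads with this
fix applied, reconstructed from the hunk context above. The early-return check
is cut off by the hunk boundary, so its exact form here is an assumption, not
part of the diff:

    static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
    {
    	struct rq *rq = cpu_rq(cpu);
    	unsigned long flags;
    
    	/*
    	 * Only empty task groups can be destroyed; so we can speculatively
    	 * check whether this group's cfs_rq is still on the leaf list.
    	 * (The exact condition below is assumed; the hunk above elides it.)
    	 */
    	if (!tg->cfs_rq[cpu]->on_list)
    		return;
    
    	raw_spin_lock_irqsave(&rq->lock, flags);
    	/* Remove the cfs_rq for the passed cpu, not some arbitrary index. */
    	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
    	raw_spin_unlock_irqrestore(&rq->lock, flags);
    }

The bug was that the old per-cpu loop variable `i` survived as a stale local
after the function moved to taking an explicit cpu argument, so the deletion
indexed tg->cfs_rq[] with an uninitialized value instead of the passed cpu.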