sched: Fix race on toggling cfs_bandwidth_used
When we transition cfs_bandwidth_used to false, any currently throttled
groups will incorrectly return false from cfs_rq_throttled. While
tg_set_cfs_bandwidth will unthrottle them eventually, currently running
code (including at least dequeue_task_fair and distribute_cfs_runtime)
will cause errors. Fix this by turning off cfs_bandwidth_used only after
unthrottling all cfs_rqs.

Tested: toggle bandwidth back and forth on a loaded cgroup. Caused
crashes in minutes without the patch, hasn't crashed with it.

Signed-off-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: pjt@google.com
Link: http://lkml.kernel.org/r/20131016181611.22647.80365.stgit@sword-of-the-dawn.mtv.corp.google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 1ee14e6c8c (parent ac9ff7997b)
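The race is easiest to see in miniature. The following is a userspace sketch, not kernel code: a plain int stands in for the __cfs_bandwidth_used jump label, and cfs_rq is reduced to its throttle flag. It models how cfs_rq_throttled() short-circuits on the static key, so a group that is still throttled reads as unthrottled the instant the key is turned off:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the __cfs_bandwidth_used jump label. */
static int cfs_bandwidth_used_key;

/* Reduced model of a cfs_rq: only the throttle state matters here. */
struct cfs_rq { int throttled; };

static bool cfs_bandwidth_used(void)
{
    return cfs_bandwidth_used_key > 0;
}

/* Mirrors the kernel's short-circuit: with the key off, the per-group
 * throttle state is never consulted at all. */
static bool cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
    return cfs_bandwidth_used() && cfs_rq->throttled;
}

int main(void)
{
    struct cfs_rq rq = { .throttled = 1 };  /* group still throttled */

    cfs_bandwidth_used_key = 1;
    printf("key on : throttled=%d\n", cfs_rq_throttled(&rq));  /* 1 */

    /* Buggy ordering: the key goes off before the group is unthrottled,
     * so concurrently running code (dequeue_task_fair,
     * distribute_cfs_runtime, ...) sees "not throttled". */
    cfs_bandwidth_used_key = 0;
    printf("key off: throttled=%d\n", cfs_rq_throttled(&rq));  /* 0: wrong */
    return 0;
}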
kernel/sched/core.c
@@ -7436,7 +7436,12 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 
     runtime_enabled = quota != RUNTIME_INF;
     runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
-    account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
+    /*
+     * If we need to toggle cfs_bandwidth_used, off->on must occur
+     * before making related changes, and on->off must occur afterwards
+     */
+    if (runtime_enabled && !runtime_was_enabled)
+        cfs_bandwidth_usage_inc();
     raw_spin_lock_irq(&cfs_b->lock);
     cfs_b->period = ns_to_ktime(period);
     cfs_b->quota = quota;
@@ -7462,6 +7467,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
             unthrottle_cfs_rq(cfs_rq);
         raw_spin_unlock_irq(&rq->lock);
     }
+    if (runtime_was_enabled && !runtime_enabled)
+        cfs_bandwidth_usage_dec();
 out_unlock:
     mutex_unlock(&cfs_constraints_mutex);
 
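For reference, the "Tested:" scenario above can be approximated with a toggle loop like the one below. This is a hedged sketch, not part of the commit: the cgroup path is a placeholder for wherever the v1 cpu controller is mounted and the test group lives. Writing -1 to cpu.cfs_quota_us disables bandwidth (quota becomes RUNTIME_INF); any positive value enables it.

#include <stdio.h>
#include <unistd.h>

/* Placeholder path: adjust for your cgroup v1 "cpu" mount and group name. */
#define QUOTA_FILE "/sys/fs/cgroup/cpu/test/cpu.cfs_quota_us"

static int write_quota(const char *val)
{
    FILE *f = fopen(QUOTA_FILE, "w");

    if (!f)
        return -1;
    fputs(val, f);
    fclose(f);
    return 0;
}

int main(void)
{
    /* Flip bandwidth on and off while the group runs a CPU-bound load. */
    for (;;) {
        if (write_quota("50000"))   /* enable: 50ms quota per period */
            return 1;
        usleep(100 * 1000);
        if (write_quota("-1"))      /* disable: RUNTIME_INF */
            return 1;
        usleep(100 * 1000);
    }
}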
kernel/sched/fair.c
@@ -2845,13 +2845,14 @@ static inline bool cfs_bandwidth_used(void)
     return static_key_false(&__cfs_bandwidth_used);
 }
 
-void account_cfs_bandwidth_used(int enabled, int was_enabled)
+void cfs_bandwidth_usage_inc(void)
 {
-    /* only need to count groups transitioning between enabled/!enabled */
-    if (enabled && !was_enabled)
-        static_key_slow_inc(&__cfs_bandwidth_used);
-    else if (!enabled && was_enabled)
-        static_key_slow_dec(&__cfs_bandwidth_used);
+    static_key_slow_inc(&__cfs_bandwidth_used);
+}
+
+void cfs_bandwidth_usage_dec(void)
+{
+    static_key_slow_dec(&__cfs_bandwidth_used);
 }
 #else /* HAVE_JUMP_LABEL */
 static bool cfs_bandwidth_used(void)
@@ -2859,7 +2860,8 @@ static bool cfs_bandwidth_used(void)
     return true;
 }
 
-void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
+void cfs_bandwidth_usage_inc(void) {}
+void cfs_bandwidth_usage_dec(void) {}
 #endif /* HAVE_JUMP_LABEL */
 
 /*
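The refactor in fair.c is what makes the fix possible: the old account_cfs_bandwidth_used() both decided the transition direction and updated the key, so callers could not sequence the key update around their own changes. Splitting it into inc/dec hands that ordering back to the caller. Below is a userspace analogue of the design, with C11 atomics standing in for the jump-label refcount; set_bandwidth() is a hypothetical caller illustrating the ordering, not kernel code:

#include <stdatomic.h>

/* Stand-in for __cfs_bandwidth_used; static_key_slow_inc/dec behave like a
 * refcount that keeps the key enabled while it is nonzero. */
static atomic_int key;

static void key_inc(void) { atomic_fetch_add(&key, 1); }  /* cfs_bandwidth_usage_inc() */
static void key_dec(void) { atomic_fetch_sub(&key, 1); }  /* cfs_bandwidth_usage_dec() */

/* Caller-side ordering, as tg_set_cfs_bandwidth() now arranges it: */
void set_bandwidth(int enabled, int was_enabled)
{
    if (enabled && !was_enabled)
        key_inc();              /* off->on before related changes */

    /* ... update quota/period, unthrottle still-throttled groups ... */

    if (was_enabled && !enabled)
        key_dec();              /* on->off only afterwards */
}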
kernel/sched/sched.h
@@ -1352,7 +1352,8 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
 
-extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
+extern void cfs_bandwidth_usage_inc(void);
+extern void cfs_bandwidth_usage_dec(void);
 
 #ifdef CONFIG_NO_HZ_COMMON
 enum rq_nohz_flag_bits {