sched/fair: Don't push cfs_bandwidth slack timers forward

When a cfs_rq sleeps and returns its quota, we delay for 5ms before
waking any throttled cfs_rqs to coalesce with other cfs_rqs going to
sleep, as this has to be done outside of the rq lock we hold.

The current code restarts the timer on every sleep, so it effectively
waits for a 5ms window with no sleeps at all, instead of waiting for 5ms
from the first sleep, which can delay the unthrottle more than we want.
Switch this around so that we can't push this forward forever.

This requires an extra flag rather than using hrtimer_active, since we
need to start a new timer if the current one is in the process of
finishing.
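
For illustration, a minimal userspace C sketch (not kernel code; the
helper names arm_pushing/arm_once and the 4ms event stream are
hypothetical, only the 5ms slack period and the slack_started guard come
from the patch) contrasting the old re-arming policy with the new
start-once policy. With sleeps arriving faster than the slack period,
only the start-once policy ever fires:

#include <stdbool.h>
#include <stdio.h>

#define SLACK_PERIOD_MS 5	/* the 5ms slack period, in ms */

struct slack_timer {
	bool started;		/* mirrors the new cfs_b->slack_started */
	long expires_ms;	/* absolute expiry, -1 when never armed */
};

/* Old behaviour: every quota return re-arms the timer, pushing the
 * expiry (and the deferred unthrottle) forward. */
static void arm_pushing(struct slack_timer *t, long now_ms)
{
	t->expires_ms = now_ms + SLACK_PERIOD_MS;
}

/* New behaviour: only the first quota return arms the timer; later
 * ones coalesce into the pending expiry instead of deferring it. */
static void arm_once(struct slack_timer *t, long now_ms)
{
	if (t->started)
		return;
	t->started = true;
	t->expires_ms = now_ms + SLACK_PERIOD_MS;
}

int main(void)
{
	/* cfs_rqs keep going to sleep every 4ms, faster than the 5ms
	 * slack period, so the pushing policy never gets to fire. */
	long events[] = { 0, 4, 8, 12, 16 };
	struct slack_timer push = { false, -1 }, once = { false, -1 };

	for (unsigned int i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		arm_pushing(&push, events[i]);
		arm_once(&once, events[i]);
	}
	printf("pushing policy would unthrottle at %ldms\n", push.expires_ms);	/* 21 */
	printf("start-once policy unthrottles at %ldms\n", once.expires_ms);	/* 5 */
	return 0;
}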

Signed-off-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Xunlei Pang <xlpang@linux.alibaba.com>
Acked-by: Phil Auld <pauld@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/xm26a7euy6iq.fsf_-_@bsegall-linux.svl.corp.google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: bsegall@google.com, 2019-06-06 10:21:01 -07:00; committed by Ingo Molnar
parent aacedf26fb
commit 66567fcbae
2 changed files with 11 additions and 4 deletions

kernel/sched/fair.c

@@ -4729,6 +4729,11 @@ static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
 	if (runtime_refresh_within(cfs_b, min_left))
 		return;
 
+	/* don't push forwards an existing deferred unthrottle */
+	if (cfs_b->slack_started)
+		return;
+	cfs_b->slack_started = true;
+
 	hrtimer_start(&cfs_b->slack_timer,
 			ns_to_ktime(cfs_bandwidth_slack_period),
 			HRTIMER_MODE_REL);
@@ -4782,6 +4787,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
 	/* confirm we're still not at a refresh boundary */
 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
+	cfs_b->slack_started = false;
 	if (cfs_b->distribute_running) {
 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		return;
@@ -4945,6 +4951,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
 	cfs_b->distribute_running = 0;
+	cfs_b->slack_started = false;
 }
 
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)

kernel/sched/sched.h

@@ -338,8 +338,10 @@ struct cfs_bandwidth {
 	u64			runtime_expires;
 	int			expires_seq;
 
-	short			idle;
-	short			period_active;
+	u8			idle;
+	u8			period_active;
+	u8			distribute_running;
+	u8			slack_started;
 	struct hrtimer		period_timer;
 	struct hrtimer		slack_timer;
 	struct list_head	throttled_cfs_rq;
@@ -348,8 +350,6 @@ struct cfs_bandwidth {
 	int			nr_periods;
 	int			nr_throttled;
 	u64			throttled_time;
-	bool			distribute_running;
-
 #endif
 };