
sched/fair: More accurate reweight_entity()

When a (group) entity changes its weight we should instantly change
its load_avg and propagate that change into the sums it is part of,
because we use these values to predict future behaviour and are not
interested in their historical value.
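
As a worked illustration (not kernel code): since load_sum is
weight-free at this point in the series, a weight change reduces to
rescaling load_avg with the formula the patch adds below. A minimal
self-contained sketch, with hypothetical input values and
LOAD_AVG_MAX = 47742 taken from the PELT code of that era:

  #include <stdio.h>
  #include <stdint.h>

  #define LOAD_AVG_MAX 47742 /* maximum of the PELT geometric series */

  static uint64_t rescale_load_avg(uint64_t weight, uint64_t load_sum,
                                   uint32_t period_contrib)
  {
          /* divider is the largest value load_sum can currently have */
          return weight * load_sum / (LOAD_AVG_MAX - 1024 + period_contrib);
  }

  int main(void)
  {
          uint64_t load_sum = 20000; /* hypothetical; unchanged by a reweight */
          uint32_t contrib  = 512;   /* hypothetical partial-period progress */

          /* doubling the weight doubles load_avg (up to integer rounding) */
          printf("%llu\n", (unsigned long long)rescale_load_avg(1024, load_sum, contrib));
          printf("%llu\n", (unsigned long long)rescale_load_avg(2048, load_sum, contrib));
          return 0;
  }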

Without this change, the change in load would need to propagate
through the average, by which time it could have changed again, etc.,
always chasing itself.

With this change, the cfs_rq load_avg sum will more accurately reflect
the currently runnable load and the expected return of blocked load.
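
The divider in the new computation appears to be the largest value
se->avg.load_sum can have at this instant:

  divider  = LOAD_AVG_MAX - 1024 + period_contrib
  load_avg = se_weight(se) * load_sum / divider

that is, the full geometric series (LOAD_AVG_MAX) minus the current
1024us period's maximum contribution, plus the fraction of it already
accrued (period_contrib); this keeps load_avg bounded by the weight.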

Reported-by: Paul Turner <pjt@google.com>
[josef: compile fix !SMP || !FAIR_GROUP]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Peter Zijlstra, 2017-05-06 16:11:34 +02:00, committed by Ingo Molnar
commit 840c5abca4
parent 8d5b9025f9

@@ -2886,12 +2886,22 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 		if (cfs_rq->curr == se)
 			update_curr(cfs_rq);
 		account_entity_dequeue(cfs_rq, se);
+		dequeue_runnable_load_avg(cfs_rq, se);
 	}
+	dequeue_load_avg(cfs_rq, se);
 
 	update_load_set(&se->load, weight);
 
-	if (se->on_rq)
+#ifdef CONFIG_SMP
+	se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum,
+				   LOAD_AVG_MAX - 1024 + se->avg.period_contrib);
+#endif
+
+	enqueue_load_avg(cfs_rq, se);
+	if (se->on_rq) {
 		account_entity_enqueue(cfs_rq, se);
+		enqueue_runnable_load_avg(cfs_rq, se);
+	}
 }
 
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);