sched: fix calc_delta_asym()
calc_delta_asym() is supposed to do the same as calc_delta_fair(), except linearly shrink the result for negative-nice processes - this causes them to have a smaller preemption threshold so that they are more easily preempted.

The problem is that for task groups se->load.weight is the per-CPU share of the actual task group weight; take that into account.

Also provide a debug switch to disable the asymmetry (which I still don't like - but it does greatly benefit some workloads).

This would explain the interactivity issues reported against group scheduling.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent a7be37ac8e
commit c9c294a630
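To make the group problem concrete (illustrative numbers, not part of the commit): suppose a group has tg->shares = 1024 and its load is spread evenly across 4 CPUs, so each per-CPU group entity carries se->load.weight of roughly 1024 / 4 = 256. The unpatched check se->load.weight < NICE_0_LOAD then treats every such entity as if it were a strongly positive-nice task, even though the group as a whole has the default weight.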
@@ -430,6 +430,29 @@ calc_delta_asym(unsigned long delta, struct sched_entity *se)
 
 	for_each_sched_entity(se) {
 		struct load_weight *se_lw = &se->load;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+		struct cfs_rq *cfs_rq = se->my_q;
+		struct task_group *tg = NULL;
+
+		if (cfs_rq)
+			tg = cfs_rq->tg;
+
+		if (tg && tg->shares < NICE_0_LOAD) {
+			/*
+			 * scale shares to what it would have been had
+			 * tg->weight been NICE_0_LOAD:
+			 *
+			 *   weight = 1024 * shares / tg->weight
+			 */
+			lw.weight *= se->load.weight;
+			lw.weight /= tg->shares;
+
+			lw.inv_weight = 0;
+
+			se_lw = &lw;
+		} else
+#endif
 
 		if (se->load.weight < NICE_0_LOAD)
 			se_lw = &lw;
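The rescaling added above can be sanity-checked in isolation. Below is a minimal user-space sketch of the arithmetic, assuming the kernel's NICE_0_LOAD scale of 1024; the share values are invented, and lw starts at NICE_0_LOAD just as it does in calc_delta_asym():

/* Illustrative only: models the reference-weight rescaling in the hunk. */
#include <stdio.h>

#define NICE_0_LOAD 1024UL

int main(void)
{
	unsigned long tg_shares = 512;		/* hypothetical tg->shares */
	unsigned long se_weight = 256;		/* hypothetical per-CPU se->load.weight */
	unsigned long lw_weight = NICE_0_LOAD;	/* lw.weight starts at NICE_0_LOAD */

	/*
	 * Scale the reference weight as the patch does:
	 *   lw.weight = lw.weight * se->load.weight / tg->shares
	 */
	lw_weight *= se_weight;
	lw_weight /= tg_shares;

	printf("reference weight: %lu (was %lu)\n", lw_weight, NICE_0_LOAD);
	return 0;
}

With these numbers the reference weight drops from 1024 to 512, so the entity is subsequently compared against its group's scaled weight rather than against a full NICE_0_LOAD.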
@@ -1154,7 +1177,10 @@ static unsigned long wakeup_gran(struct sched_entity *se)
 	 * More easily preempt - nice tasks, while not making it harder for
 	 * + nice tasks.
 	 */
-	gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
+	if (sched_feat(ASYM_GRAN))
+		gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se);
+	else
+		gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
 
 	return gran;
 }
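The hunk above gates the asymmetry behind the new ASYM_GRAN feature bit. As a rough user-space model of the two paths it selects between, assuming calc_delta_fair() effectively computes delta * NICE_0_LOAD / weight and that the asymmetric variant behaves like the same scaling with the weight clamped below at NICE_0_LOAD (consistent with the comment in the hunk; the weights are invented examples for +nice, nice-0 and -nice tasks):

/* Illustrative model of the two granularity curves the switch selects. */
#include <stdio.h>

#define NICE_0_LOAD 1024UL

/* Roughly what calc_delta_fair() computes: delta * NICE_0_LOAD / weight. */
static unsigned long delta_fair(unsigned long delta, unsigned long weight)
{
	return delta * NICE_0_LOAD / weight;
}

/* Assumed asymmetric variant: same scaling, weight clamped at NICE_0_LOAD. */
static unsigned long delta_asym(unsigned long delta, unsigned long weight)
{
	if (weight < NICE_0_LOAD)
		weight = NICE_0_LOAD;	/* leave + nice tasks untouched */
	return delta * NICE_0_LOAD / weight;
}

int main(void)
{
	unsigned long gran = 10000UL;	/* 10 ms, expressed in microseconds */
	unsigned long weights[] = { 512, 1024, 2048 };	/* +nice, 0, -nice */
	int i;

	for (i = 0; i < 3; i++)
		printf("weight %4lu: fair %6lu us, asym %6lu us\n",
		       weights[i],
		       delta_fair(gran, weights[i]),
		       delta_asym(gran, weights[i]));
	return 0;
}

In this model a heavier (negative-nice) task gets a smaller granularity under both curves and is thus more easily preempted, but only the asymmetric variant avoids enlarging the threshold for positive-nice tasks; disabling ASYM_GRAN falls back to the plain fair scaling.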
@@ -7,3 +7,4 @@ SCHED_FEAT(CACHE_HOT_BUDDY, 1)
 SCHED_FEAT(SYNC_WAKEUPS, 1)
 SCHED_FEAT(HRTICK, 1)
 SCHED_FEAT(DOUBLE_TICK, 0)
+SCHED_FEAT(ASYM_GRAN, 1)
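For context on what the new line does: in kernels of this vintage, sched_features.h is an x-macro list that the scheduler consumes twice, once to build an enum of bit indices and once to build the default feature mask; sched_feat() then tests the corresponding bit. A simplified, self-contained sketch of that pattern (the real code re-includes the header rather than repeating the entries):

/* Simplified sketch of the SCHED_FEAT() x-macro pattern. */
#include <stdio.h>

/* First expansion: an enum of bit indices, one per feature. */
#define SCHED_FEAT(name, enabled) __SCHED_FEAT_##name,
enum {
	SCHED_FEAT(SYNC_WAKEUPS, 1)
	SCHED_FEAT(HRTICK, 1)
	SCHED_FEAT(DOUBLE_TICK, 0)
	SCHED_FEAT(ASYM_GRAN, 1)
};
#undef SCHED_FEAT

/* Second expansion: the default feature bitmask. */
#define SCHED_FEAT(name, enabled) ((enabled) ? (1UL << __SCHED_FEAT_##name) : 0) |
static const unsigned long sysctl_sched_features =
	SCHED_FEAT(SYNC_WAKEUPS, 1)
	SCHED_FEAT(HRTICK, 1)
	SCHED_FEAT(DOUBLE_TICK, 0)
	SCHED_FEAT(ASYM_GRAN, 1)
	0;
#undef SCHED_FEAT

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

int main(void)
{
	printf("ASYM_GRAN default: %s\n", sched_feat(ASYM_GRAN) ? "on" : "off");
	return 0;
}

SCHED_FEAT(ASYM_GRAN, 1) therefore makes the asymmetric wakeup granularity the default, while still allowing the bit to be cleared at runtime through the scheduler's feature-flags debug file, where that interface is available.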