sched/fair: Add per-CPU min capacity to sched_group_capacity
struct sched_group_capacity currently represents the compute capacity sum of all CPUs in the sched_group.

Unless it is divided by the group_weight to get the average capacity per CPU, it hides differences in CPU capacity for mixed capacity systems (e.g. high RT/IRQ utilization or ARM big.LITTLE). But even the average may not be sufficient if the group covers CPUs of different capacities.

Instead, by extending struct sched_group_capacity to indicate min per-CPU capacity in the group, a suitable group for a given task utilization can more easily be found such that CPUs with reduced capacity can be avoided for tasks with high utilization (not implemented by this patch).

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dietmar.eggemann@arm.com
Cc: freedom.tan@mediatek.com
Cc: keita.kobayashi.ym@renesas.com
Cc: mgalbraith@suse.de
Cc: sgurrappadi@nvidia.com
Cc: vincent.guittot@linaro.org
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1476452472-24740-4-git-send-email-morten.rasmussen@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit bf475ce0a3
parent 6a0b19c0f3
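For illustration of the intended use (a sketch, not part of this patch): in a group spanning one big CPU (capacity 1024) and one LITTLE CPU (capacity 512), sgc->capacity is 1536 and the per-CPU average is 768, yet the largest task the group can always accommodate is bounded by min_capacity = 512. A placement check built on the new field could look roughly like the helper below; the name task_fits_group() and the ~20% headroom margin are assumptions made for this example only.

/*
 * Hypothetical user of sgc->min_capacity: reject groups whose
 * smallest CPU cannot accommodate a task of the given utilization.
 * The 1280/1024 factor leaves ~20% headroom; both the helper and
 * the margin are illustrative only, this patch merely adds the field.
 */
static inline bool task_fits_group(unsigned long task_util,
				   struct sched_group *group)
{
	return task_util * 1280 < group->sgc->min_capacity * 1024;
}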
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5708,7 +5708,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		printk(KERN_CONT " %*pbl",
 		       cpumask_pr_args(sched_group_cpus(group)));
 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
-			printk(KERN_CONT " (cpu_capacity = %d)",
+			printk(KERN_CONT " (cpu_capacity = %lu)",
 				group->sgc->capacity);
 		}
 
@@ -6185,6 +6185,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
 		 * die on a /0 trap.
 		 */
 		sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+		sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
 
 		/*
 		 * Make sure the first group of this domain contains the
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6909,13 +6909,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 
 	cpu_rq(cpu)->cpu_capacity = capacity;
 	sdg->sgc->capacity = capacity;
+	sdg->sgc->min_capacity = capacity;
 }
 
 void update_group_capacity(struct sched_domain *sd, int cpu)
 {
 	struct sched_domain *child = sd->child;
 	struct sched_group *group, *sdg = sd->groups;
-	unsigned long capacity;
+	unsigned long capacity, min_capacity;
 	unsigned long interval;
 
 	interval = msecs_to_jiffies(sd->balance_interval);
@@ -6928,6 +6929,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 	}
 
 	capacity = 0;
+	min_capacity = ULONG_MAX;
 
 	if (child->flags & SD_OVERLAP) {
 		/*
@@ -6952,11 +6954,12 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 			 */
 			if (unlikely(!rq->sd)) {
 				capacity += capacity_of(cpu);
-				continue;
+			} else {
+				sgc = rq->sd->groups->sgc;
+				capacity += sgc->capacity;
 			}
 
-			sgc = rq->sd->groups->sgc;
-			capacity += sgc->capacity;
+			min_capacity = min(capacity, min_capacity);
 		}
 	} else {
 		/*
@@ -6966,12 +6969,16 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 
 		group = child->groups;
 		do {
-			capacity += group->sgc->capacity;
+			struct sched_group_capacity *sgc = group->sgc;
+
+			capacity += sgc->capacity;
+			min_capacity = min(sgc->min_capacity, min_capacity);
 			group = group->next;
 		} while (group != child->groups);
 	}
 
 	sdg->sgc->capacity = capacity;
+	sdg->sgc->min_capacity = min_capacity;
 }
 
 /*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -892,7 +892,8 @@ struct sched_group_capacity {
 	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
 	 * for a single CPU.
 	 */
-	unsigned int capacity;
+	unsigned long capacity;
+	unsigned long min_capacity; /* Min per-CPU capacity in group */
 	unsigned long next_update;
 	int imbalance; /* XXX unrelated to capacity but shared group state */
 