sched/deadline: Improve admission control for asymmetric CPU capacities
The current SCHED_DEADLINE (DL) admission control ensures that

    sum of reserved CPU bandwidth < x * M

where

    x = the ratio /proc/sys/kernel/sched_rt_runtime_us /
        /proc/sys/kernel/sched_rt_period_us
    M = # CPUs in the root domain.

DL admission control works well for homogeneous systems, where the
capacity of every CPU is equal (1024); i.e. bounded tardiness for DL
tasks and non-starvation of non-DL tasks are guaranteed.

But on heterogeneous systems, where the capacities of the CPUs differ,
it can fail by over-allocating CPU time on the smaller-capacity CPUs.

On an Arm big.LITTLE/DynamIQ system, DL tasks can easily starve other
tasks, making the system unusable.

Fix this by explicitly considering the CPU capacity in the DL
admission test, replacing M with the root domain CPU capacity sum.

Signed-off-by: Luca Abeni <luca.abeni@santannapisa.it>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lkml.kernel.org/r/20200520134243.19352-4-dietmar.eggemann@arm.com
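The effect of the change can be reproduced outside the kernel. Below is a
minimal, self-contained user-space sketch (not kernel code): the 4 big +
4 LITTLE topology, the 95% limit and the eight 80% reservations are
illustrative assumptions, while BW_SHIFT, BW_UNIT and cap_scale() mirror
the kernel's fixed-point conventions.

#include <stdio.h>
#include <stdint.h>

#define BW_SHIFT                20                      /* bandwidth fixed point */
#define BW_UNIT                 (1ULL << BW_SHIFT)
#define SCHED_CAPACITY_SHIFT    10
#define cap_scale(v, cap)       (((v) * (cap)) >> SCHED_CAPACITY_SHIFT)

int main(void)
{
        /* Per-CPU bandwidth limit x, e.g. the 95% default derived from
         * /proc/sys/kernel/sched_rt_{runtime,period}_us. */
        uint64_t dl_bw = 95 * BW_UNIT / 100;

        /* Hypothetical big.LITTLE root domain: 4x1024 + 4x512. */
        int cpus = 8;
        unsigned long cap = 4 * 1024 + 4 * 512;         /* capacity sum = 6144 */

        /* Hypothetical reserved DL bandwidth: eight 80% reservations. */
        uint64_t total_bw = 8 * (80 * BW_UNIT / 100);

        /* Old test: x * M, every CPU implicitly worth full capacity. */
        int old_admit = dl_bw * cpus >= total_bw;

        /* New test: scale x by the root domain capacity sum instead. */
        int new_admit = cap_scale(dl_bw, cap) >= total_bw;

        printf("old admission control (M = %d CPUs): %s\n",
               cpus, old_admit ? "admit" : "reject");
        printf("new admission control (cap = %lu):   %s\n",
               cap, new_admit ? "admit" : "reject");
        return 0;
}

With these numbers the old test admits (0.95 * 8 = 7.6 > 6.4 CPUs of
demand) even though the LITTLE cores each supply only half a CPU's worth
of time, while the capacity-aware test rejects (0.95 * 6144/1024 = 5.7 <
6.4).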
parent fc9dc69847
commit 60ffd5edc5
kernel/sched/deadline.c
@@ -2590,11 +2590,12 @@ void sched_dl_do_global(void)
 int sched_dl_overflow(struct task_struct *p, int policy,
                       const struct sched_attr *attr)
 {
-        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
         u64 period = attr->sched_period ?: attr->sched_deadline;
         u64 runtime = attr->sched_runtime;
         u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
-        int cpus, err = -1;
+        int cpus, err = -1, cpu = task_cpu(p);
+        struct dl_bw *dl_b = dl_bw_of(cpu);
+        unsigned long cap;
 
         if (attr->sched_flags & SCHED_FLAG_SUGOV)
                 return 0;
@@ -2609,15 +2610,17 @@ int sched_dl_overflow(struct task_struct *p, int policy,
          * allocated bandwidth of the container.
          */
         raw_spin_lock(&dl_b->lock);
-        cpus = dl_bw_cpus(task_cpu(p));
+        cpus = dl_bw_cpus(cpu);
+        cap = dl_bw_capacity(cpu);
+
         if (dl_policy(policy) && !task_has_dl_policy(p) &&
-            !__dl_overflow(dl_b, cpus, 0, new_bw)) {
+            !__dl_overflow(dl_b, cap, 0, new_bw)) {
                 if (hrtimer_active(&p->dl.inactive_timer))
                         __dl_sub(dl_b, p->dl.dl_bw, cpus);
                 __dl_add(dl_b, new_bw, cpus);
                 err = 0;
         } else if (dl_policy(policy) && task_has_dl_policy(p) &&
-                   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
+                   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
                 /*
                  * XXX this is slightly incorrect: when the task
                  * utilization decreases, we should delay the total
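dl_bw_capacity(), used above, is introduced by an earlier patch in this
series and is not shown in this diff; it returns the
SCHED_CAPACITY_SCALE-based capacity sum of the CPU's root domain. A
minimal user-space model of that idea follows, with a hypothetical
2 big + 2 LITTLE span (the capacities are illustrative, not from the
patch):

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)

/* Hypothetical root domain: per-CPU capacities, not from the patch. */
static const unsigned long rd_capacity[] = { 1024, 1024, 512, 512 };
#define RD_CPUS (sizeof(rd_capacity) / sizeof(rd_capacity[0]))

/* Model of dl_bw_capacity(): capacity sum over the root domain span. */
static unsigned long dl_bw_capacity_model(void)
{
        unsigned long cap = 0;

        for (unsigned int i = 0; i < RD_CPUS; i++)
                cap += rd_capacity[i];
        return cap;
}

int main(void)
{
        unsigned long cap = dl_bw_capacity_model();

        /* On a symmetric system this equals cpus << SCHED_CAPACITY_SHIFT. */
        printf("cap sum = %lu (symmetric equivalent: %lu)\n",
               cap, (unsigned long)RD_CPUS * SCHED_CAPACITY_SCALE);
        return 0;
}

On a symmetric root domain the sum degenerates to
dl_bw_cpus(cpu) << SCHED_CAPACITY_SHIFT, which is why the old and new
tests agree there.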
@@ -2772,19 +2775,19 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
 #ifdef CONFIG_SMP
 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
 {
+        unsigned long flags, cap;
         unsigned int dest_cpu;
         struct dl_bw *dl_b;
         bool overflow;
-        int cpus, ret;
-        unsigned long flags;
+        int ret;
 
         dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
 
         rcu_read_lock_sched();
         dl_b = dl_bw_of(dest_cpu);
         raw_spin_lock_irqsave(&dl_b->lock, flags);
-        cpus = dl_bw_cpus(dest_cpu);
-        overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+        cap = dl_bw_capacity(dest_cpu);
+        overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
         if (overflow) {
                 ret = -EBUSY;
         } else {
@@ -2794,6 +2797,8 @@ int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allo
                  * We will free resources in the source root_domain
                  * later on (see set_cpus_allowed_dl()).
                  */
+                int cpus = dl_bw_cpus(dest_cpu);
+
                 __dl_add(dl_b, p->dl.dl_bw, cpus);
                 ret = 0;
         }
@@ -2826,16 +2831,15 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
 
 bool dl_cpu_busy(unsigned int cpu)
 {
-        unsigned long flags;
+        unsigned long flags, cap;
         struct dl_bw *dl_b;
         bool overflow;
-        int cpus;
 
         rcu_read_lock_sched();
         dl_b = dl_bw_of(cpu);
         raw_spin_lock_irqsave(&dl_b->lock, flags);
-        cpus = dl_bw_cpus(cpu);
-        overflow = __dl_overflow(dl_b, cpus, 0, 0);
+        cap = dl_bw_capacity(cpu);
+        overflow = __dl_overflow(dl_b, cap, 0, 0);
         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
         rcu_read_unlock_sched();
 
kernel/sched/sched.h
@@ -310,11 +310,11 @@ void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
         __dl_update(dl_b, -((s32)tsk_bw / cpus));
 }
 
-static inline
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
+static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
+                                 u64 old_bw, u64 new_bw)
 {
         return dl_b->bw != -1 &&
-               dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
+               cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
 }
 
 extern void init_dl_bw(struct dl_bw *dl_b);
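cap_scale() itself is not changed by this patch; it is the scheduler's
fixed-point capacity multiply, (v * cap) >> SCHED_CAPACITY_SHIFT. A
quick user-space check (a sketch, not kernel code) that the new
condition is a strict generalization of the old one:

#include <assert.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)
#define cap_scale(v, cap)       (((v) * (cap)) >> SCHED_CAPACITY_SHIFT)

int main(void)
{
        uint64_t bw = (95ULL << 20) / 100;      /* any bandwidth value */
        int cpus = 8;

        /*
         * Symmetric case: every CPU contributes SCHED_CAPACITY_SCALE
         * (1024), so the capacity sum is cpus << SCHED_CAPACITY_SHIFT
         * and the new test reduces exactly to the old "bw * cpus".
         */
        assert(cap_scale(bw, cpus * SCHED_CAPACITY_SCALE) == bw * cpus);
        return 0;
}

On asymmetric systems the capacity sum is smaller than
cpus * SCHED_CAPACITY_SCALE, so the capacity-aware condition admits
strictly less bandwidth, which is exactly the fix this patch makes.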