2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-23 20:53:53 +08:00

sched/numa: Use similar logic to the load balancer for moving between domains with spare capacity

The standard load balancer generally tries to keep the number of running
tasks or idle CPUs balanced between NUMA domains. The NUMA balancer allows
tasks to move if there is spare capacity but this causes a conflict and
utilisation between NUMA nodes gets badly skewed. This patch uses similar
logic between the NUMA balancer and load balancer when deciding if a task
migrating to its preferred node can use an idle CPU.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Hillf Danton <hdanton@sina.com>
Link: https://lore.kernel.org/r/20200224095223.13361-7-mgorman@techsingularity.net
This commit is contained in:
Mel Gorman 2020-02-24 09:52:16 +00:00 committed by Ingo Molnar
parent 6499b1b2dd
commit fb86f5b211

View File

@@ -1520,6 +1520,7 @@ struct task_numa_env {
static unsigned long cpu_load(struct rq *rq); static unsigned long cpu_load(struct rq *rq);
static unsigned long cpu_util(int cpu); static unsigned long cpu_util(int cpu);
static inline long adjust_numa_imbalance(int imbalance, int src_nr_running);
static inline enum static inline enum
numa_type numa_classify(unsigned int imbalance_pct, numa_type numa_classify(unsigned int imbalance_pct,
@@ -1594,11 +1595,6 @@ static bool load_too_imbalanced(long src_load, long dst_load,
long orig_src_load, orig_dst_load; long orig_src_load, orig_dst_load;
long src_capacity, dst_capacity; long src_capacity, dst_capacity;
/* If dst node has spare capacity, there is no real load imbalance */
if (env->dst_stats.node_type == node_has_spare)
return false;
/* /*
* The load is corrected for the CPU capacity available on each node. * The load is corrected for the CPU capacity available on each node.
* *
@@ -1757,19 +1753,42 @@ unlock:
static void task_numa_find_cpu(struct task_numa_env *env, static void task_numa_find_cpu(struct task_numa_env *env,
long taskimp, long groupimp) long taskimp, long groupimp)
{ {
long src_load, dst_load, load;
bool maymove = false; bool maymove = false;
int cpu; int cpu;
load = task_h_load(env->p);
dst_load = env->dst_stats.load + load;
src_load = env->src_stats.load - load;
/* /*
* If the improvement from just moving env->p direction is better * If dst node has spare capacity, then check if there is an
* than swapping tasks around, check if a move is possible. * imbalance that would be overruled by the load balancer.
*/ */
maymove = !load_too_imbalanced(src_load, dst_load, env); if (env->dst_stats.node_type == node_has_spare) {
unsigned int imbalance;
int src_running, dst_running;
/*
 * Would movement cause an imbalance? Note that if src has
 * more running tasks then the imbalance is ignored as the
 * move improves the imbalance from the perspective of the
 * CPU load balancer.
 */
src_running = env->src_stats.nr_running - 1;
dst_running = env->dst_stats.nr_running + 1;
imbalance = max(0, dst_running - src_running);
imbalance = adjust_numa_imbalance(imbalance, src_running);
/* Use idle CPU if there is no imbalance */
if (!imbalance)
maymove = true;
} else {
long src_load, dst_load, load;
/*
* If the improvement from just moving env->p direction is better
* than swapping tasks around, check if a move is possible.
*/
load = task_h_load(env->p);
dst_load = env->dst_stats.load + load;
src_load = env->src_stats.load - load;
maymove = !load_too_imbalanced(src_load, dst_load, env);
}
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */ /* Skip this CPU if the source task cannot migrate */
@@ -8694,6 +8713,21 @@ next_group:
} }
} }
/*
 * adjust_numa_imbalance - decide whether a small inter-node imbalance
 * should be tolerated rather than corrected.
 *
 * @imbalance:      the raw imbalance (extra running tasks on the busier side)
 * @src_nr_running: number of running tasks on the source node/group
 *
 * Returns 0 (treat as balanced) when the source is almost idle, so a
 * simple pair of communicating tasks is allowed to remain local instead
 * of being split across nodes; otherwise returns @imbalance unchanged.
 */
static inline long adjust_numa_imbalance(int imbalance, int src_nr_running)
{
	/*
	 * Allow a small imbalance based on a simple pair of communicating
	 * tasks that remain local when the source domain is almost idle.
	 *
	 * Keep the threshold signed: comparing a signed @src_nr_running
	 * against an unsigned threshold would convert a negative count
	 * (the caller passes nr_running - 1) to a huge unsigned value and
	 * skip the early return.
	 */
	const int imbalance_min = 2;

	if (src_nr_running <= imbalance_min)
		return 0;

	return imbalance;
}
/** /**
* calculate_imbalance - Calculate the amount of imbalance present within the * calculate_imbalance - Calculate the amount of imbalance present within the
* groups of a given sched_domain during load balance. * groups of a given sched_domain during load balance.
@@ -8790,24 +8824,9 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
} }
/* Consider allowing a small imbalance between NUMA groups */ /* Consider allowing a small imbalance between NUMA groups */
if (env->sd->flags & SD_NUMA) { if (env->sd->flags & SD_NUMA)
unsigned int imbalance_min; env->imbalance = adjust_numa_imbalance(env->imbalance,
busiest->sum_nr_running);
/*
* Compute an allowed imbalance based on a simple
* pair of communicating tasks that should remain
* local and ignore them.
*
* NOTE: Generally this would have been based on
* the domain size and this was evaluated. However,
* the benefit is similar across a range of workloads
* and machines but scaling by the domain size adds
* the risk that lower domains have to be rebalanced.
*/
imbalance_min = 2;
if (busiest->sum_nr_running <= imbalance_min)
env->imbalance = 0;
}
return; return;
} }