sched/numa: Reduce conflict between fbq_classify_rq() and migration
It is possible for fbq_classify_rq() to indicate that a CPU has tasks that
should be moved to another NUMA node, but for migrate_improves_locality
and migrate_degrades_locality to not identify those tasks.

This patch always gives preference to preferred node evaluations, and
only checks the number of faults when evaluating moves between two
non-preferred nodes on a larger NUMA system.

On a two node system, the number of faults is never evaluated. Either
a task is about to be pulled off its preferred node, or migrated onto
it.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: mgorman@suse.de
Link: http://lkml.kernel.org/r/20150514225936.35b91717@annuminas.surriel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit c1ceac6276
parent 80ed87c8a9
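Before the diff itself, here is a minimal, self-contained C sketch of the
decision order the patch introduces. This is an illustration, not the kernel
code: nr_faults() is a hypothetical stand-in for task_faults()/group_faults(),
and the sched_feat(), RCU, and p->numa_faults checks of the real functions
are omitted.

#include <stdbool.h>

/* Hypothetical stand-in for task_faults()/group_faults(). */
extern unsigned long nr_faults(int nid);

/*
 * Decision order after the patch: preferred-node checks always win,
 * matching fbq_classify_rq(); raw fault counts only decide moves
 * between two non-preferred nodes.
 */
bool improves_locality(int src_nid, int dst_nid, int preferred_nid)
{
	if (src_nid == dst_nid)
		return false;

	/* Moving onto the preferred node is always an improvement. */
	if (dst_nid == preferred_nid)
		return true;

	/* Moving off the preferred node is never an improvement. */
	if (src_nid == preferred_nid)
		return false;

	/*
	 * Both nodes are non-preferred, which requires at least a
	 * three node system; on a two node system this comparison
	 * is never reached.
	 */
	return nr_faults(dst_nid) > nr_faults(src_nid);
}

migrate_degrades_locality() in the diff below mirrors this logic with the
comparisons inverted.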
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5663,10 +5663,15 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-/* Returns true if the destination node has incurred more faults */
+/*
+ * Returns true if the destination node is the preferred node.
+ * Needs to match fbq_classify_rq(): if there is a runnable task
+ * that is not on its preferred node, we should identify it.
+ */
 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
 {
 	struct numa_group *numa_group = rcu_dereference(p->numa_group);
+	unsigned long src_faults, dst_faults;
 	int src_nid, dst_nid;
 
 	if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
@@ -5680,29 +5685,30 @@ static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
 	if (src_nid == dst_nid)
 		return false;
 
-	if (numa_group) {
-		/* Task is already in the group's interleave set. */
-		if (node_isset(src_nid, numa_group->active_nodes))
-			return false;
-
-		/* Task is moving into the group's interleave set. */
-		if (node_isset(dst_nid, numa_group->active_nodes))
-			return true;
-
-		return group_faults(p, dst_nid) > group_faults(p, src_nid);
-	}
-
 	/* Encourage migration to the preferred node. */
 	if (dst_nid == p->numa_preferred_nid)
 		return true;
 
-	return task_faults(p, dst_nid) > task_faults(p, src_nid);
+	/* Migrating away from the preferred node is bad. */
+	if (src_nid == p->numa_preferred_nid)
+		return false;
+
+	if (numa_group) {
+		src_faults = group_faults(p, src_nid);
+		dst_faults = group_faults(p, dst_nid);
+	} else {
+		src_faults = task_faults(p, src_nid);
+		dst_faults = task_faults(p, dst_nid);
+	}
+
+	return dst_faults > src_faults;
 }
 
 
 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 {
 	struct numa_group *numa_group = rcu_dereference(p->numa_group);
+	unsigned long src_faults, dst_faults;
 	int src_nid, dst_nid;
 
 	if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
@@ -5717,23 +5723,23 @@ static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 	if (src_nid == dst_nid)
 		return false;
 
-	if (numa_group) {
-		/* Task is moving within/into the group's interleave set. */
-		if (node_isset(dst_nid, numa_group->active_nodes))
-			return false;
-
-		/* Task is moving out of the group's interleave set. */
-		if (node_isset(src_nid, numa_group->active_nodes))
-			return true;
-
-		return group_faults(p, dst_nid) < group_faults(p, src_nid);
-	}
-
-	/* Migrating away from the preferred node is always bad. */
+	/* Migrating away from the preferred node is bad. */
 	if (src_nid == p->numa_preferred_nid)
 		return true;
 
-	return task_faults(p, dst_nid) < task_faults(p, src_nid);
+	/* Encourage migration to the preferred node. */
+	if (dst_nid == p->numa_preferred_nid)
+		return false;
+
+	if (numa_group) {
+		src_faults = group_faults(p, src_nid);
+		dst_faults = group_faults(p, dst_nid);
+	} else {
+		src_faults = task_faults(p, src_nid);
+		dst_faults = task_faults(p, dst_nid);
+	}
+
+	return dst_faults < src_faults;
 }
 
 #else