From 3bd65a80affb9768b91f03c56dba46ee79525f9b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 7 Apr 2011 14:09:54 +0200
Subject: [PATCH] sched: Simplify NODE/ALLNODES domain creation

Don't treat ALLNODES/NODE differently for difference's sake. Simply
always create the ALLNODES domain and let the sd_degenerate() checks
kill it when it's redundant. This simplifies the code flow.

Signed-off-by: Peter Zijlstra
Cc: Mike Galbraith
Cc: Nick Piggin
Cc: Linus Torvalds
Cc: Andrew Morton
Link: http://lkml.kernel.org/r/20110407122942.455464579@chello.nl
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 44 ++++++++++++++++++++++++--------------------
 1 file changed, 24 insertions(+), 20 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 72c194c55c31..d395fe5493c9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6838,9 +6838,6 @@ struct sd_data {
 };
 
 struct s_data {
-#ifdef CONFIG_NUMA
-	int			sd_allnodes;
-#endif
 	cpumask_var_t		nodemask;
 	cpumask_var_t		send_covered;
 	struct sched_domain ** __percpu sd;
@@ -7112,30 +7109,35 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 	}
 }
 
-static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
-	const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
+static struct sched_domain *__build_allnodes_sched_domain(struct s_data *d,
+	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+	struct sched_domain *parent, int i)
 {
 	struct sched_domain *sd = NULL;
 #ifdef CONFIG_NUMA
-	struct sched_domain *parent;
-
-	d->sd_allnodes = 0;
-	if (cpumask_weight(cpu_map) >
-	    SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
-		sd = sd_init_ALLNODES(d, i);
-		set_domain_attribute(sd, attr);
-		cpumask_copy(sched_domain_span(sd), cpu_map);
-		d->sd_allnodes = 1;
-	}
-	parent = sd;
-
-	sd = sd_init_NODE(d, i);
+	sd = sd_init_ALLNODES(d, i);
 	set_domain_attribute(sd, attr);
-	sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
+	cpumask_copy(sched_domain_span(sd), cpu_map);
 	sd->parent = parent;
 	if (parent)
 		parent->child = sd;
+#endif
+	return sd;
+}
+
+static struct sched_domain *__build_node_sched_domain(struct s_data *d,
+	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+	struct sched_domain *parent, int i)
+{
+	struct sched_domain *sd = NULL;
+#ifdef CONFIG_NUMA
+	sd = sd_init_NODE(d, i);
+	set_domain_attribute(sd, attr);
+	sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
 	cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
+	sd->parent = parent;
+	if (parent)
+		parent->child = sd;
 #endif
 	return sd;
 }
@@ -7220,7 +7222,9 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
 			    cpu_map);
 
-		sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
+		sd = NULL;
+		sd = __build_allnodes_sched_domain(&d, cpu_map, attr, sd, i);
+		sd = __build_node_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
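
The shape of the resulting builder chain is worth spelling out: each
__build_*_sched_domain() helper now unconditionally creates its level
(under its config option), hooks it below whatever parent it is handed,
and returns it as the parent for the next level; levels that cover
nothing new are later stripped by the sd_degenerate() /
sd_parent_degenerate() checks instead of being skipped up front. Below
is a minimal, self-contained sketch of that pattern, not kernel code:
struct domain, build_level(), degenerate() and collapse() are invented
stand-ins for the kernel's types and checks.

#include <stdio.h>
#include <stdlib.h>

struct domain {
	const char	*name;
	unsigned int	span;		/* stand-in for a cpumask: bit i = CPU i */
	struct domain	*parent;
	struct domain	*child;
};

/* Always create the level and hook it below @parent, as the patch does. */
static struct domain *build_level(const char *name, unsigned int span,
				  struct domain *parent)
{
	struct domain *d = calloc(1, sizeof(*d));

	d->name = name;
	d->span = span;
	d->parent = parent;
	if (parent)
		parent->child = d;
	return d;
}

/*
 * Simplified stand-in for the sd_degenerate()/sd_parent_degenerate()
 * tests: a level is redundant when it spans exactly what its child
 * already spans.
 */
static int degenerate(struct domain *d)
{
	return d->child && d->child->span == d->span;
}

/* Unlink redundant levels, walking from the topmost domain downwards. */
static struct domain *collapse(struct domain *top)
{
	struct domain *d = top;

	while (d) {
		struct domain *next = d->child;

		if (degenerate(d)) {
			if (d->parent)
				d->parent->child = d->child;
			if (d->child)
				d->child->parent = d->parent;
			if (d == top)
				top = d->child;
			free(d);
		}
		d = next;
	}
	return top;
}

int main(void)
{
	/* Single-node box: ALLNODES and NODE end up spanning the same 4 CPUs. */
	struct domain *allnodes = build_level("ALLNODES", 0xf, NULL);
	struct domain *node     = build_level("NODE",     0xf, allnodes);
	struct domain *d;

	build_level("CPU", 0x3, node);

	for (d = collapse(allnodes); d; d = d->child)
		printf("%s spans %#x\n", d->name, d->span);

	return 0;
}

Compiled standalone, the sketch prints only the NODE and CPU levels:
the ALLNODES level spans the same CPUs as NODE in this single-node
example and is collapsed, which is the fate the always-created ALLNODES
domain meets on machines small enough that it adds nothing.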