mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-17 07:54:54 +08:00
powerpc/smp: Dynamically build Powerpc topology
Currently there are four Powerpc-specific sched topologies. These are all statically defined. However, not all of these topologies are used by all Powerpc systems. To avoid unnecessary degenerations by the scheduler, masks and flags are compared. However, if the sched topologies are built dynamically then the code is simpler and there are greater chances of avoiding degenerations. Note: Even X86 builds its sched topologies dynamically, and the proposed changes are very similar to the way X86 is building its topologies. Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://msgid.link/20231214180720.310852-6-srikar@linux.vnet.ibm.com
This commit is contained in:
parent
0e93f1c780
commit
c46975715f
@ -93,15 +93,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
|
||||
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
|
||||
EXPORT_SYMBOL_GPL(has_big_cores);
|
||||
|
||||
/*
 * Index of each scheduler topology level in the powerpc_topology[] table
 * below.  smt_idx exists only when SMT scheduling support is compiled in,
 * so the remaining indices shift down by one in !CONFIG_SCHED_SMT builds.
 */
enum {
#ifdef CONFIG_SCHED_SMT
	smt_idx,
#endif
	cache_idx,
	mc_idx,
	die_idx,
};
/* Upper bound on the number of threads tracked per thread group. */
#define MAX_THREAD_LIST_SIZE 8
/* Thread-group property: threads share an L1 cache (NOTE(review): meaning
 * inferred from the name — confirm against the device-tree parsing code). */
#define THREAD_GROUP_SHARE_L1 1
/* Thread-group property: threads share an L2/L3 cache (same caveat). */
#define THREAD_GROUP_SHARE_L2_L3 2
@ -1067,16 +1058,6 @@ static const struct cpumask *cpu_mc_mask(int cpu)
|
||||
return cpu_coregroup_mask(cpu);
|
||||
}
|
||||
|
||||
static struct sched_domain_topology_level powerpc_topology[] = {
|
||||
#ifdef CONFIG_SCHED_SMT
|
||||
{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
|
||||
#endif
|
||||
{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
|
||||
{ cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC) },
|
||||
{ cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG) },
|
||||
{ NULL, },
|
||||
};
|
||||
|
||||
static int __init init_big_cores(void)
|
||||
{
|
||||
int cpu;
|
||||
@ -1704,9 +1685,11 @@ void start_secondary(void *unused)
|
||||
BUG();
|
||||
}
|
||||
|
||||
static void __init fixup_topology(void)
|
||||
static struct sched_domain_topology_level powerpc_topology[6];
|
||||
|
||||
static void __init build_sched_topology(void)
|
||||
{
|
||||
int i;
|
||||
int i = 0;
|
||||
|
||||
if (is_shared_processor() && has_big_cores)
|
||||
static_branch_enable(&splpar_asym_pack);
|
||||
@ -1714,36 +1697,33 @@ static void __init fixup_topology(void)
|
||||
#ifdef CONFIG_SCHED_SMT
|
||||
if (has_big_cores) {
|
||||
pr_info("Big cores detected but using small core scheduling\n");
|
||||
powerpc_topology[smt_idx].mask = smallcore_smt_mask;
|
||||
powerpc_topology[i++] = (struct sched_domain_topology_level){
|
||||
smallcore_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
|
||||
};
|
||||
} else {
|
||||
powerpc_topology[i++] = (struct sched_domain_topology_level){
|
||||
cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
|
||||
};
|
||||
}
|
||||
#endif
|
||||
|
||||
if (!has_coregroup_support())
|
||||
powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
|
||||
|
||||
/*
|
||||
* Try to consolidate topology levels here instead of
|
||||
* allowing scheduler to degenerate.
|
||||
* - Dont consolidate if masks are different.
|
||||
* - Dont consolidate if sd_flags exists and are different.
|
||||
*/
|
||||
for (i = 1; i <= die_idx; i++) {
|
||||
if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
|
||||
continue;
|
||||
|
||||
if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
|
||||
powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
|
||||
continue;
|
||||
|
||||
if (!powerpc_topology[i - 1].sd_flags)
|
||||
powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
|
||||
|
||||
powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
|
||||
powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
powerpc_topology[i].name = powerpc_topology[i + 1].name;
|
||||
#endif
|
||||
if (shared_caches) {
|
||||
powerpc_topology[i++] = (struct sched_domain_topology_level){
|
||||
shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE)
|
||||
};
|
||||
}
|
||||
if (has_coregroup_support()) {
|
||||
powerpc_topology[i++] = (struct sched_domain_topology_level){
|
||||
cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC)
|
||||
};
|
||||
}
|
||||
powerpc_topology[i++] = (struct sched_domain_topology_level){
|
||||
cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG)
|
||||
};
|
||||
|
||||
/* There must be one trailing NULL entry left. */
|
||||
BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);
|
||||
|
||||
set_sched_topology(powerpc_topology);
|
||||
}
|
||||
|
||||
void __init smp_cpus_done(unsigned int max_cpus)
|
||||
@ -1758,9 +1738,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
|
||||
smp_ops->bringup_done();
|
||||
|
||||
dump_numa_cpu_topology();
|
||||
|
||||
fixup_topology();
|
||||
set_sched_topology(powerpc_topology);
|
||||
build_sched_topology();
|
||||
}
|
||||
|
||||
/*
|
||||
|
Loading…
Reference in New Issue
Block a user