Skip to content

Commit c469757

Browse files
srikardmpe
authored and committed
powerpc/smp: Dynamically build Powerpc topology
Currently there are four Powerpc specific sched topologies. These are all statically defined. However, not all of these topologies are used by all Powerpc systems. To avoid unnecessary degenerations by the scheduler, masks and flags are compared. However, if the sched topologies are built dynamically, then the code is simpler and there are greater chances of avoiding degenerations. Note: Even X86 builds its sched topologies dynamically, and the proposed changes are very similar to the way X86 is building its topologies. Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://msgid.link/20231214180720.310852-6-srikar@linux.vnet.ibm.com
1 parent 0e93f1c commit c469757

1 file changed

Lines changed: 28 additions & 50 deletions

File tree

arch/powerpc/kernel/smp.c

Lines changed: 28 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -93,15 +93,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
9393
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
9494
EXPORT_SYMBOL_GPL(has_big_cores);
9595

96-
enum {
97-
#ifdef CONFIG_SCHED_SMT
98-
smt_idx,
99-
#endif
100-
cache_idx,
101-
mc_idx,
102-
die_idx,
103-
};
104-
10596
#define MAX_THREAD_LIST_SIZE 8
10697
#define THREAD_GROUP_SHARE_L1 1
10798
#define THREAD_GROUP_SHARE_L2_L3 2
@@ -1067,16 +1058,6 @@ static const struct cpumask *cpu_mc_mask(int cpu)
10671058
return cpu_coregroup_mask(cpu);
10681059
}
10691060

1070-
static struct sched_domain_topology_level powerpc_topology[] = {
1071-
#ifdef CONFIG_SCHED_SMT
1072-
{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
1073-
#endif
1074-
{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
1075-
{ cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC) },
1076-
{ cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG) },
1077-
{ NULL, },
1078-
};
1079-
10801061
static int __init init_big_cores(void)
10811062
{
10821063
int cpu;
@@ -1704,46 +1685,45 @@ void start_secondary(void *unused)
17041685
BUG();
17051686
}
17061687

1707-
static void __init fixup_topology(void)
1688+
static struct sched_domain_topology_level powerpc_topology[6];
1689+
1690+
static void __init build_sched_topology(void)
17081691
{
1709-
int i;
1692+
int i = 0;
17101693

17111694
if (is_shared_processor() && has_big_cores)
17121695
static_branch_enable(&splpar_asym_pack);
17131696

17141697
#ifdef CONFIG_SCHED_SMT
17151698
if (has_big_cores) {
17161699
pr_info("Big cores detected but using small core scheduling\n");
1717-
powerpc_topology[smt_idx].mask = smallcore_smt_mask;
1700+
powerpc_topology[i++] = (struct sched_domain_topology_level){
1701+
smallcore_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
1702+
};
1703+
} else {
1704+
powerpc_topology[i++] = (struct sched_domain_topology_level){
1705+
cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT)
1706+
};
17181707
}
17191708
#endif
1709+
if (shared_caches) {
1710+
powerpc_topology[i++] = (struct sched_domain_topology_level){
1711+
shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE)
1712+
};
1713+
}
1714+
if (has_coregroup_support()) {
1715+
powerpc_topology[i++] = (struct sched_domain_topology_level){
1716+
cpu_mc_mask, powerpc_shared_proc_flags, SD_INIT_NAME(MC)
1717+
};
1718+
}
1719+
powerpc_topology[i++] = (struct sched_domain_topology_level){
1720+
cpu_cpu_mask, powerpc_shared_proc_flags, SD_INIT_NAME(PKG)
1721+
};
17201722

1721-
if (!has_coregroup_support())
1722-
powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
1723-
1724-
/*
1725-
* Try to consolidate topology levels here instead of
1726-
* allowing scheduler to degenerate.
1727-
* - Dont consolidate if masks are different.
1728-
* - Dont consolidate if sd_flags exists and are different.
1729-
*/
1730-
for (i = 1; i <= die_idx; i++) {
1731-
if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
1732-
continue;
1733-
1734-
if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
1735-
powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
1736-
continue;
1737-
1738-
if (!powerpc_topology[i - 1].sd_flags)
1739-
powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
1723+
/* There must be one trailing NULL entry left. */
1724+
BUG_ON(i >= ARRAY_SIZE(powerpc_topology) - 1);
17401725

1741-
powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
1742-
powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
1743-
#ifdef CONFIG_SCHED_DEBUG
1744-
powerpc_topology[i].name = powerpc_topology[i + 1].name;
1745-
#endif
1746-
}
1726+
set_sched_topology(powerpc_topology);
17471727
}
17481728

17491729
void __init smp_cpus_done(unsigned int max_cpus)
@@ -1758,9 +1738,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
17581738
smp_ops->bringup_done();
17591739

17601740
dump_numa_cpu_topology();
1761-
1762-
fixup_topology();
1763-
set_sched_topology(powerpc_topology);
1741+
build_sched_topology();
17641742
}
17651743

17661744
/*

0 commit comments

Comments
 (0)