s390/topology: move cpumasks away from stack

Make cpumasks static variables to avoid potentially large stack
frames. There shouldn't be any concurrent callers since all current
callers are serialized with the cpu hotplug lock.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
This commit is contained in:
Heiko Carstens 2021-02-17 08:15:00 +01:00 committed by Vasily Gorbik
parent f213e5502d
commit da6d2c289d

View File

@ -62,16 +62,16 @@ static struct mask_info drawer_info;
struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu)
{ {
cpumask_t mask; static cpumask_t mask;
cpumask_copy(&mask, cpumask_of(cpu)); cpumask_copy(&mask, cpumask_of(cpu));
switch (topology_mode) { switch (topology_mode) {
case TOPOLOGY_MODE_HW: case TOPOLOGY_MODE_HW:
while (info) { while (info) {
if (cpumask_test_cpu(cpu, &info->mask)) { if (cpumask_test_cpu(cpu, &info->mask)) {
mask = info->mask; cpumask_copy(&mask, &info->mask);
break; break;
} }
info = info->next; info = info->next;
@ -89,23 +89,24 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
break; break;
} }
cpumask_and(&mask, &mask, cpu_online_mask); cpumask_and(&mask, &mask, cpu_online_mask);
return mask; cpumask_copy(dst, &mask);
} }
/*
 * Compute the mask of online sibling threads of @cpu and store it in @dst.
 *
 * The result is built in a function-local static cpumask rather than on the
 * stack (cpumask_t can be large with big NR_CPUS) and rather than being
 * returned by value. NOTE(review): the static buffer makes this function
 * non-reentrant; per the commit message, all callers are assumed to be
 * serialized by the cpu hotplug lock — confirm for any new caller.
 */
static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
{
	static cpumask_t mask;
	int i;

	/* Start with just @cpu itself. */
	cpumask_copy(&mask, cpumask_of(cpu));
	/* Only hardware topology mode exposes real sibling threads. */
	if (topology_mode != TOPOLOGY_MODE_HW)
		goto out;
	/* Round down to the first thread of the core (mtid = max thread id). */
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	/* Restrict the result to CPUs that are currently online. */
	cpumask_and(&mask, &mask, cpu_online_mask);
out:
	cpumask_copy(dst, &mask);
}
#define TOPOLOGY_CORE_BITS 64
@ -250,10 +251,10 @@ void update_cpu_masks(void)
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
topo = &cpu_topology[cpu]; topo = &cpu_topology[cpu];
topo->thread_mask = cpu_thread_map(cpu); cpu_thread_map(&topo->thread_mask, cpu);
topo->core_mask = cpu_group_map(&socket_info, cpu); cpu_group_map(&topo->core_mask, &socket_info, cpu);
topo->book_mask = cpu_group_map(&book_info, cpu); cpu_group_map(&topo->book_mask, &book_info, cpu);
topo->drawer_mask = cpu_group_map(&drawer_info, cpu); cpu_group_map(&topo->drawer_mask, &drawer_info, cpu);
topo->booted_cores = 0; topo->booted_cores = 0;
if (topology_mode != TOPOLOGY_MODE_HW) { if (topology_mode != TOPOLOGY_MODE_HW) {
id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu; id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;