powerpc/numa: stub out numa_update_cpu_topology()

Previous changes have removed the code which sets bits in
cpu_associativity_changes_mask and thus it is never modified at
runtime. From this we can reason that numa_update_cpu_topology()
always returns 0 without doing anything. Remove the body of
numa_update_cpu_topology() and remove all code which becomes
unreachable as a result.

Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200612051238.1007764-10-nathanl@linux.ibm.com
commit 893ec6461f (parent 9fb8b5fd1b)
Author: Nathan Lynch <nathanl@linux.ibm.com>
Date:   2020-06-12 00:12:29 -05:00
Committer: Michael Ellerman <mpe@ellerman.id.au>

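Since everything in numa_update_cpu_topology() except the trivial return is deleted, the function after this commit reduces to the following stub (reconstructed from the second hunk below):

	int numa_update_cpu_topology(bool cpus_locked)
	{
		return 0;
	}
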
@@ -1122,14 +1122,6 @@ u64 memory_hotplug_max(void)
 
 /* Virtual Processor Home Node (VPHN) support */
 #ifdef CONFIG_PPC_SPLPAR
-struct topology_update_data {
-	struct topology_update_data *next;
-	unsigned int cpu;
-	int old_nid;
-	int new_nid;
-};
-
-static cpumask_t cpu_associativity_changes_mask;
 static int topology_inited;
 
 /*
@@ -1219,192 +1211,9 @@ int find_and_online_cpu_nid(int cpu)
 	return new_nid;
 }
 
-/*
- * Update the CPU maps and sysfs entries for a single CPU when its NUMA
- * characteristics change. This function doesn't perform any locking and is
- * only safe to call from stop_machine().
- */
-static int update_cpu_topology(void *data)
-{
-	struct topology_update_data *update;
-	unsigned long cpu;
-
-	if (!data)
-		return -EINVAL;
-
-	cpu = smp_processor_id();
-
-	for (update = data; update; update = update->next) {
-		int new_nid = update->new_nid;
-
-		if (cpu != update->cpu)
-			continue;
-
-		unmap_cpu_from_node(cpu);
-		map_cpu_to_node(cpu, new_nid);
-		set_cpu_numa_node(cpu, new_nid);
-		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
-		vdso_getcpu_init();
-	}
-
-	return 0;
-}
-
-static int update_lookup_table(void *data)
-{
-	struct topology_update_data *update;
-
-	if (!data)
-		return -EINVAL;
-
-	/*
-	 * Upon topology update, the numa-cpu lookup table needs to be updated
-	 * for all threads in the core, including offline CPUs, to ensure that
-	 * future hotplug operations respect the cpu-to-node associativity
-	 * properly.
-	 */
-	for (update = data; update; update = update->next) {
-		int nid, base, j;
-
-		nid = update->new_nid;
-		base = cpu_first_thread_sibling(update->cpu);
-
-		for (j = 0; j < threads_per_core; j++) {
-			update_numa_cpu_lookup_table(base + j, nid);
-		}
-	}
-
-	return 0;
-}
-
-/*
- * Update the node maps and sysfs entries for each cpu whose home node
- * has changed. Returns 1 when the topology has changed, and 0 otherwise.
- *
- * cpus_locked says whether we already hold cpu_hotplug_lock.
- */
 int numa_update_cpu_topology(bool cpus_locked)
 {
-	unsigned int cpu, sibling, changed = 0;
-	struct topology_update_data *updates, *ud;
-	cpumask_t updated_cpus;
-	struct device *dev;
-	int weight, new_nid, i = 0;
-
-	if (topology_inited)
-		return 0;
-
-	weight = cpumask_weight(&cpu_associativity_changes_mask);
-	if (!weight)
-		return 0;
-
-	updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
-	if (!updates)
-		return 0;
-
-	cpumask_clear(&updated_cpus);
-
-	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
-		/*
-		 * If siblings aren't flagged for changes, updates list
-		 * will be too short. Skip on this update and set for next
-		 * update.
-		 */
-		if (!cpumask_subset(cpu_sibling_mask(cpu),
-					&cpu_associativity_changes_mask)) {
-			pr_info("Sibling bits not set for associativity "
-					"change, cpu%d\n", cpu);
-			cpumask_or(&cpu_associativity_changes_mask,
-					&cpu_associativity_changes_mask,
-					cpu_sibling_mask(cpu));
-			cpu = cpu_last_thread_sibling(cpu);
-			continue;
-		}
-
-		new_nid = find_and_online_cpu_nid(cpu);
-
-		if (new_nid == numa_cpu_lookup_table[cpu]) {
-			cpumask_andnot(&cpu_associativity_changes_mask,
-					&cpu_associativity_changes_mask,
-					cpu_sibling_mask(cpu));
-			dbg("Assoc chg gives same node %d for cpu%d\n",
-					new_nid, cpu);
-			cpu = cpu_last_thread_sibling(cpu);
-			continue;
-		}
-
-		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
-			ud = &updates[i++];
-			ud->next = &updates[i];
-			ud->cpu = sibling;
-			ud->new_nid = new_nid;
-			ud->old_nid = numa_cpu_lookup_table[sibling];
-			cpumask_set_cpu(sibling, &updated_cpus);
-		}
-		cpu = cpu_last_thread_sibling(cpu);
-	}
-
-	/*
-	 * Prevent processing of 'updates' from overflowing array
-	 * where last entry filled in a 'next' pointer.
-	 */
-	if (i)
-		updates[i-1].next = NULL;
-
-	pr_debug("Topology update for the following CPUs:\n");
-	if (cpumask_weight(&updated_cpus)) {
-		for (ud = &updates[0]; ud; ud = ud->next) {
-			pr_debug("cpu %d moving from node %d "
-					  "to %d\n", ud->cpu,
-					  ud->old_nid, ud->new_nid);
-		}
-	}
-
-	/*
-	 * In cases where we have nothing to update (because the updates list
-	 * is too short or because the new topology is same as the old one),
-	 * skip invoking update_cpu_topology() via stop-machine(). This is
-	 * necessary (and not just a fast-path optimization) since stop-machine
-	 * can end up electing a random CPU to run update_cpu_topology(), and
-	 * thus trick us into setting up incorrect cpu-node mappings (since
-	 * 'updates' is kzalloc()'ed).
-	 *
-	 * And for the similar reason, we will skip all the following updating.
-	 */
-	if (!cpumask_weight(&updated_cpus))
-		goto out;
-
-	if (cpus_locked)
-		stop_machine_cpuslocked(update_cpu_topology, &updates[0],
-					&updated_cpus);
-	else
-		stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
-
-	/*
-	 * Update the numa-cpu lookup table with the new mappings, even for
-	 * offline CPUs. It is best to perform this update from the stop-
-	 * machine context.
-	 */
-	if (cpus_locked)
-		stop_machine_cpuslocked(update_lookup_table, &updates[0],
-					cpumask_of(raw_smp_processor_id()));
-	else
-		stop_machine(update_lookup_table, &updates[0],
-			     cpumask_of(raw_smp_processor_id()));
-
-	for (ud = &updates[0]; ud; ud = ud->next) {
-		unregister_cpu_under_node(ud->cpu, ud->old_nid);
-		register_cpu_under_node(ud->cpu, ud->new_nid);
-
-		dev = get_cpu_device(ud->cpu);
-		if (dev)
-			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
-		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
-		changed = 1;
-	}
-
-out:
-	kfree(updates);
-	return changed;
+	return 0;
 }
 
 int arch_update_cpu_topology(void)
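
For context on the machinery being deleted: update_cpu_topology() could forgo locking because it ran under stop_machine(), which runs a callback on the CPUs named in a mask while every other CPU spins with interrupts disabled. A minimal sketch of that pattern follows; the demo_* names are hypothetical illustrations, not the removed code:

	#include <linux/cpumask.h>
	#include <linux/smp.h>
	#include <linux/stop_machine.h>
	#include <linux/topology.h>

	/* Hypothetical per-CPU update record, echoing struct topology_update_data. */
	struct demo_update {
		unsigned int cpu;
		int new_nid;
	};

	/*
	 * stop_machine() callback: runs with the rest of the machine quiesced,
	 * so per-CPU NUMA state can be rewritten without any extra locking.
	 */
	static int demo_apply_update(void *data)
	{
		struct demo_update *u = data;

		/* Each CPU acts only on its own entry, as update_cpu_topology() did. */
		if (smp_processor_id() == u->cpu)
			set_cpu_numa_node(u->cpu, u->new_nid);

		return 0;
	}

	/* Hypothetical caller: rehome one CPU to a new NUMA node. */
	static int demo_move_cpu(unsigned int cpu, int new_nid)
	{
		struct demo_update u = { .cpu = cpu, .new_nid = new_nid };

		/* Run the callback on 'cpu' while all other CPUs spin. */
		return stop_machine(demo_apply_update, &u, cpumask_of(cpu));
	}

The removed numa_update_cpu_topology() used the same pattern in two phases: first update_cpu_topology() across all affected CPUs, then update_lookup_table() on a single CPU to refresh the numa-cpu lookup table for offline siblings as well.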