mobility/numa: Ensure that numa_update_cpu_topology() cannot be entered multiple times concurrently. The function may be reached through many different code paths. Without protection against concurrent entry, the code may acquire and update associativity information from the PHYP in multiple threads, and apply the resulting changes separately in each thread. Applying the changes concurrently not only performs considerable work multiple times but, more importantly, may acquire locks in other modules from several threads at once and end up blocking each of the competing threads. This patch allows only the first entrant to perform the update and recognize any CPU topology changes; subsequent concurrent entrants return immediately without doing the work.
Signed-off-by: Michael Bringmann <m...@linux.vnet.ibm.com> --- Changes in patch: -- Added information to description. --- arch/powerpc/mm/numa.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index a789d57..b22e27a 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -1079,6 +1079,7 @@ struct topology_update_data { static int topology_timer_secs = 1; static int topology_inited; static int topology_update_needed; +static struct mutex topology_update_lock; /* * Change polling interval for associativity changes. @@ -1320,6 +1321,11 @@ int numa_update_cpu_topology(bool cpus_locked) if (!updates) return 0; + if (!mutex_trylock(&topology_update_lock)) { + kfree(updates); + return 0; + } + cpumask_clear(&updated_cpus); for_each_cpu(cpu, &cpu_associativity_changes_mask) { @@ -1424,6 +1430,7 @@ int numa_update_cpu_topology(bool cpus_locked) out: kfree(updates); topology_update_needed = 0; + mutex_unlock(&topology_update_lock); return changed; } @@ -1598,6 +1605,8 @@ static ssize_t topology_write(struct file *file, const char __user *buf, static int topology_update_init(void) { + mutex_init(&topology_update_lock); + /* Do not poll for changes if disabled at boot */ if (topology_updates_enabled) start_topology_update();