mobility/numa: Ensure that numa_update_cpu_topology() cannot be
entered by more than one caller at a time.  It is reachable through
several different paths and concurrent work functions, and a
consistent lock ordering among them would otherwise be hard to
guarantee.
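
The guard is a simple trylock-and-bail: the caller that wins the
lock performs the update, and any concurrent caller backs off
instead of blocking.  Below is a minimal user-space sketch of the
same pattern, using POSIX threads in place of the kernel mutex API;
update_topology() and update_lock are hypothetical stand-ins for
numa_update_cpu_topology() and topology_update_lock.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for numa_update_cpu_topology(): many callers
 * may race to get here, but only the one that wins the trylock does
 * the non-reentrant work; every other caller backs off immediately. */
static int update_topology(void)
{
        if (pthread_mutex_trylock(&update_lock) != 0)
                return 0;       /* an update is already in progress */

        printf("performing topology update\n"); /* real work goes here */

        pthread_mutex_unlock(&update_lock);
        return 1;
}

int main(void)
{
        return !update_topology();
}

Using a trylock rather than mutex_lock() means a racing caller never
sleeps waiting for an update that another thread is already carrying
out on its behalf.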

Signed-off-by: Michael Bringmann <m...@linux.vnet.ibm.com>
---
 arch/powerpc/mm/numa.c |    7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index a789d57..b22e27a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1079,6 +1079,7 @@ struct topology_update_data {
 static int topology_timer_secs = 1;
 static int topology_inited;
 static int topology_update_needed;
+static DEFINE_MUTEX(topology_update_lock);
 
 /*
  * Change polling interval for associativity changes.
@@ -1320,6 +1321,11 @@ int numa_update_cpu_topology(bool cpus_locked)
        if (!updates)
                return 0;
 
+       if (!mutex_trylock(&topology_update_lock)) {
+               kfree(updates);
+               return 0;
+       }
+
        cpumask_clear(&updated_cpus);
 
        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
@@ -1424,6 +1430,7 @@ int numa_update_cpu_topology(bool cpus_locked)
 out:
        kfree(updates);
        topology_update_needed = 0;
+       mutex_unlock(&topology_update_lock);
        return changed;
 }
 