In start_secondary(), even when shared_caches has already been set, the
kernel still performs a redundant cpumask comparison. Avoid the
comparison by first checking whether shared_caches is already set.

While here, localize the sibling_mask variable to the if block where it
is used (see the sketch below).
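
To illustrate the intent, here is a minimal userspace sketch of the
resulting control flow (l2_weight() and sibling_weight() are
hypothetical stand-ins, not the kernel's cpumask API): once any
secondary CPU has set shared_caches, later CPUs skip the comparison
entirely. Note the diff also replaces cpumask_equal() with a
cpumask_weight() comparison; since the sibling mask is expected to be a
subset of the L2 cache mask, a strictly larger L2 weight is enough to
show the masks differ.

	/*
	 * Userspace sketch only; l2_weight() and sibling_weight() are
	 * hypothetical stand-ins for cpumask_weight() applied to the
	 * kernel's cpu_l2_cache_mask(cpu) and sibling_mask(cpu).
	 */
	#include <stdbool.h>
	#include <stdio.h>

	static bool shared_caches;

	static int l2_weight(int cpu)      { (void)cpu; return 8; }
	static int sibling_weight(int cpu) { (void)cpu; return 4; }

	static void secondary_cache_check(int cpu)
	{
		/*
		 * Early bail-out: the weight comparison runs at most
		 * until the first CPU whose L2 domain is wider than
		 * its (small-)core sibling mask flips the flag.
		 */
		if (!shared_caches) {
			if (l2_weight(cpu) > sibling_weight(cpu))
				shared_caches = true;
		}
	}

	int main(void)
	{
		for (int cpu = 1; cpu < 8; cpu++)
			secondary_cache_check(cpu);
		printf("shared_caches = %d\n", shared_caches);
		return 0;
	}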

Cc: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>
Cc: LKML <linux-ker...@vger.kernel.org>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Nicholas Piggin <npig...@gmail.com>
Cc: Anton Blanchard <an...@ozlabs.org>
Cc: Oliver O'Halloran <ooh...@gmail.com>
Cc: Nathan Lynch <nath...@linux.ibm.com>
Cc: Michael Neuling <mi...@neuling.org>
Cc: Gautham R Shenoy <e...@linux.vnet.ibm.com>
Cc: Ingo Molnar <mi...@kernel.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Valentin Schneider <valentin.schnei...@arm.com>
Cc: Jordan Niethe <jniet...@gmail.com>
Cc: Vaidyanathan Srinivasan <sva...@linux.ibm.com>
Signed-off-by: Srikar Dronamraju <sri...@linux.vnet.ibm.com>
---
Changelog v4 -> v5:
        Retain cache domain, no need for generalization
                 (Michael Ellerman, Peter Zijlstra,
                 Valentin Schneider, Gautham R. Shenoy)

Changelog v1 -> v2:
        Moved shared_cache topology fixup to fixup_topology (Gautham)

 arch/powerpc/kernel/smp.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0c960ce3be42..91cf5d05e7ec 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -851,7 +851,7 @@ static int powerpc_shared_cache_flags(void)
  */
 static const struct cpumask *shared_cache_mask(int cpu)
 {
-       return cpu_l2_cache_mask(cpu);
+       return per_cpu(cpu_l2_cache_map, cpu);
 }
 
 #ifdef CONFIG_SCHED_SMT
@@ -1305,7 +1305,6 @@ static void add_cpu_to_masks(int cpu)
 void start_secondary(void *unused)
 {
        unsigned int cpu = smp_processor_id();
-       struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
 
        mmgrab(&init_mm);
        current->active_mm = &init_mm;
@@ -1331,14 +1330,20 @@ void start_secondary(void *unused)
        /* Update topology CPU masks */
        add_cpu_to_masks(cpu);
 
-       if (has_big_cores)
-               sibling_mask = cpu_smallcore_mask;
        /*
         * Check for any shared caches. Note that this must be done on a
         * per-core basis because one core in the pair might be disabled.
         */
-       if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
-               shared_caches = true;
+       if (!shared_caches) {
+               struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
+               struct cpumask *mask = cpu_l2_cache_mask(cpu);
+
+               if (has_big_cores)
+                       sibling_mask = cpu_smallcore_mask;
+
+               if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
+                       shared_caches = true;
+       }
 
        set_numa_node(numa_cpu_lookup_table[cpu]);
        set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
-- 
2.18.2
