All implementations of apic->cpu_mask_to_apicid_and() mask out the offline
CPUs. The callsite already has a mask available which has the offline CPUs
removed. Use that and remove the now redundant online masking from the
implementations.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
---
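For reference, the callsite guarantee relied on here comes from
__assign_irq_vector(), which strips the offline CPUs before it searches for
a vector. Sketch of that part, quoted from memory (see the current vector.c
for the exact code):

	/*
	 * Offline CPUs are removed up front, so every CPU which ends up
	 * in vector_searchmask is online by construction.
	 */
	cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
	if (!cpumask_intersects(vector_searchmask, mask))
		goto next_cpu;
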
 arch/x86/kernel/apic/apic.c           |   27 +++++++++------------------
 arch/x86/kernel/apic/vector.c         |    5 ++++-
 arch/x86/kernel/apic/x2apic_cluster.c |   25 +++++++++----------------
 3 files changed, 22 insertions(+), 35 deletions(-)

--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2205,19 +2205,12 @@ int default_cpu_mask_to_apicid_and(const
                                   const struct cpumask *andmask,
                                   unsigned int *apicid)
 {
-       unsigned int cpu;
+       unsigned int cpu = cpumask_first_and(cpumask, andmask);
 
-       for_each_cpu_and(cpu, cpumask, andmask) {
-               if (cpumask_test_cpu(cpu, cpu_online_mask))
-                       break;
-       }
-
-       if (likely(cpu < nr_cpu_ids)) {
-               *apicid = per_cpu(x86_cpu_to_apicid, cpu);
-               return 0;
-       }
-
-       return -EINVAL;
+       if (cpu >= nr_cpu_ids)
+               return -EINVAL;
+       *apicid = per_cpu(x86_cpu_to_apicid, cpu);
+       return 0;
 }
 
 int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -2226,14 +2219,12 @@ int flat_cpu_mask_to_apicid_and(const st
 {
        unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
                                 cpumask_bits(andmask)[0] &
-                                cpumask_bits(cpu_online_mask)[0] &
                                 APIC_ALL_CPUS;
 
-       if (likely(cpu_mask)) {
-               *apicid = (unsigned int)cpu_mask;
-               return 0;
-       }
-       return -EINVAL;
+       if (!cpu_mask)
+               return -EINVAL;
+       *apicid = (unsigned int)cpu_mask;
+       return 0;
 }
 
 /*
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -221,8 +221,11 @@ static int __assign_irq_vector(int irq,
         * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
         * as we already established, that mask & d->domain & cpu_online_mask
         * is not empty.
+        *
+        * vector_searchmask is a subset of d->domain and has the offline
+        * cpus masked out.
         */
-       BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
+       BUG_ON(apic->cpu_mask_to_apicid_and(mask, vector_searchmask,
                                            &d->cfg.dest_apicid));
        return 0;
 }
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -108,31 +108,24 @@ x2apic_cpu_mask_to_apicid_and(const stru
                              const struct cpumask *andmask,
                              unsigned int *apicid)
 {
+       unsigned int cpu;
        u32 dest = 0;
        u16 cluster;
-       int i;
 
-       for_each_cpu_and(i, cpumask, andmask) {
-               if (!cpumask_test_cpu(i, cpu_online_mask))
-                       continue;
-               dest = per_cpu(x86_cpu_to_logical_apicid, i);
-               cluster = x2apic_cluster(i);
-               break;
-       }
-
-       if (!dest)
+       cpu = cpumask_first_and(cpumask, andmask);
+       if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       for_each_cpu_and(i, cpumask, andmask) {
-               if (!cpumask_test_cpu(i, cpu_online_mask))
-                       continue;
-               if (cluster != x2apic_cluster(i))
+       dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
+       cluster = x2apic_cluster(cpu);
+
+       for_each_cpu_and(cpu, cpumask, andmask) {
+               if (cluster != x2apic_cluster(cpu))
                        continue;
-               dest |= per_cpu(x86_cpu_to_logical_apicid, i);
+               dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
        }
 
        *apicid = dest;
-
        return 0;
 }
 


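For reference, once @andmask is guaranteed to contain only online CPUs, the
open-coded first-online-CPU search removed above collapses into the plain
cpumask helper. Illustrative only:

	/* Removed variant: first online CPU in cpumask & andmask */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	/* Equivalent when andmask already has the offline CPUs masked out */
	cpu = cpumask_first_and(cpumask, andmask);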