From: Thomas Gleixner <t...@linutronix.de>

Now that the core code drops sparse_irq_lock only after the idle thread has
synchronized, it is pointless to wait for the AP to mark itself online in the
architecture code.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Tested-by: Michael Kelley <mikel...@microsoft.com>
---
 arch/x86/kernel/smpboot.c |   26 ++------------------------
 1 file changed, 2 insertions(+), 24 deletions(-)

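For reference, with both hunks applied native_cpu_up() reduces to roughly the
sketch below. This is reconstructed from the hunks in this patch, not the
literal resulting file; in particular the trailing "return ret;" is assumed
from the surrounding context, which the diff does not show.

int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret;

	ret = native_kick_ap(cpu, tidle);
	if (!ret)
		ret = wait_cpu_initialized(cpu);

	/*
	 * No arch-side wait for cpu_online() anymore: the core hotplug
	 * code synchronizes with the AP and drops sparse_irq_lock only
	 * after the idle thread has synchronized.
	 */

	/* Cleanup possible dangling ends... */
	if (x86_platform.legacy.warm_reset)
		smpboot_restore_warm_reset_vector();

	return ret;
}

The net effect is that the AP-online wait moves out of the architecture path;
the error handling in native_cpu_up() is otherwise unchanged.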
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -275,7 +275,6 @@ static void notrace start_secondary(void
         * half valid vector space.
         */
        lock_vector_lock();
-       /* Sync point with do_wait_cpu_online() */
        set_cpu_online(smp_processor_id(), true);
        lapic_online();
        unlock_vector_lock();
@@ -1104,20 +1103,6 @@ static int wait_cpu_initialized(unsigned
        return 0;
 }
 
-/*
- * Bringup step three: Wait for the target AP to reach set_cpu_online() in
- * start_secondary().
- */
-static void wait_cpu_online(unsigned int cpu)
-{
-       /*
-        * Wait for the AP to mark itself online, so the core caller
-        * can drop sparse_irq_lock.
-        */
-       while (!cpu_online(cpu))
-               schedule();
-}
-
 static int native_kick_ap(unsigned int cpu, struct task_struct *tidle)
 {
        int apicid = apic->cpu_present_to_apicid(cpu);
@@ -1164,16 +1149,9 @@ int native_cpu_up(unsigned int cpu, stru
        int ret;
 
        ret = native_kick_ap(cpu, tidle);
-       if (ret)
-               goto out;
-
-       ret = wait_cpu_initialized(cpu);
-       if (ret)
-               goto out;
-
-       wait_cpu_online(cpu);
+       if (!ret)
+               ret = wait_cpu_initialized(cpu);
 
-out:
        /* Cleanup possible dangling ends... */
        if (x86_platform.legacy.warm_reset)
                smpboot_restore_warm_reset_vector();

