On Mon 27-07-15 10:58:50, Hidehiro Kawai wrote:
[...]
> diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
> index d05bd2e..5b32d81 100644
> --- a/arch/x86/kernel/nmi.c
> +++ b/arch/x86/kernel/nmi.c
> @@ -230,7 +230,8 @@ void unregister_nmi_handler(unsigned int type, const char *name)
>       }
>  #endif
>  
> -     if (panic_on_unrecovered_nmi)
> +     if (panic_on_unrecovered_nmi &&
> +         atomic_cmpxchg(&panicking_cpu, -1, raw_smp_processor_id()) == -1)
>               panic("NMI: Not continuing");

Spreading the check to all NMI callers is quite ugly. Wouldn't it be
better to introduce nmi_panic(), which, unlike the regular panic(),
wouldn't be __noreturn? The check could also be relaxed a bit:
nmi_panic() would return only when the ongoing panic is running on the
current CPU, because then we really have to return and allow the
preempted panic to finish.
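
At the call sites the whole check then collapses to a one-liner. For the
hunk quoted above it would look something like this (just a sketch, not
even compile-tested):

	if (panic_on_unrecovered_nmi)
		nmi_panic("NMI: Not continuing");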

Something like
---
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5582410727cb..409091c48e6c 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -253,6 +253,8 @@ static inline void might_fault(void) { }
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(int state);
 __printf(1, 2)
+void nmi_panic(const char *fmt, ...) __cold;
+__printf(1, 2)
 void panic(const char *fmt, ...)
        __noreturn __cold;
 extern void oops_enter(void);
diff --git a/kernel/panic.c b/kernel/panic.c
index 04e91ff7560b..4c1ff7e19cdc 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -60,6 +60,8 @@ void __weak panic_smp_self_stop(void)
                cpu_relax();
 }
 
+static atomic_t panic_cpu = ATOMIC_INIT(-1);	/* the panicking CPU, or -1 */
+
 /**
  *     panic - halt the system
  *     @fmt: The text string to print
@@ -70,11 +72,11 @@ void __weak panic_smp_self_stop(void)
  */
 void panic(const char *fmt, ...)
 {
-       static DEFINE_SPINLOCK(panic_lock);
        static char buf[1024];
        va_list args;
        long i, i_next = 0;
        int state = 0;
+       int this_cpu, old_cpu;
 
        /*
         * Disable local interrupts. This will prevent panic_smp_self_stop
@@ -94,7 +96,9 @@ void panic(const char *fmt, ...)
         * stop themself or will wait until they are stopped by the 1st CPU
         * with smp_send_stop().
         */
-       if (!spin_trylock(&panic_lock))
+       this_cpu = raw_smp_processor_id();
+       old_cpu = atomic_cmpxchg(&panic_cpu, -1, this_cpu);
+       if (old_cpu != -1 && old_cpu != this_cpu)
                panic_smp_self_stop();
 
        console_verbose();
@@ -201,9 +205,28 @@ void panic(const char *fmt, ...)
                mdelay(PANIC_TIMER_STEP);
        }
 }
-
 EXPORT_SYMBOL(panic);
 
+void nmi_panic(const char *fmt, ...)
+{
+       static char buf[1024];
+       va_list args;
+
+       /*
+        * We have to back off if the NMI has preempted an ongoing panic and
+        * allow it to finish.
+        */
+       if (atomic_read(&panic_cpu) == raw_smp_processor_id())
+               return;
+
+       /* panic() takes a format string, so format the message up front. */
+       va_start(args, fmt);
+       vsnprintf(buf, sizeof(buf), fmt, args);
+       va_end(args);
+
+       panic("%s", buf);
+}
+EXPORT_SYMBOL(nmi_panic);
 
 struct tnt {
        u8      bit;
-- 
Michal Hocko
SUSE Labs