On (04/20/17 15:11), Petr Mladek wrote:
[..]
>  void printk_nmi_enter(void)
>  {
> -     this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
> +     /*
> +      * The size of the extra per-CPU buffer is limited. Use it
> +      * only when really needed.
> +      */
> +     if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK ||
> +         raw_spin_is_locked(&logbuf_lock)) {

can we please have && here?


[..]
> diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
> index 4e8a30d1c22f..0bc0a3535a8a 100644
> --- a/lib/nmi_backtrace.c
> +++ b/lib/nmi_backtrace.c
> @@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
>  
>  bool nmi_cpu_backtrace(struct pt_regs *regs)
>  {
> +     static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
>       int cpu = smp_processor_id();
>  
>       if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
> +             arch_spin_lock(&lock);
>               if (regs && cpu_in_idle(instruction_pointer(regs))) {
>               pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
>                               cpu, instruction_pointer(regs));
> @@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
>                       else
>                               dump_stack();
>               }
> +             arch_spin_unlock(&lock);
>               cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
>               return true;
>       }

can the nmi_backtrace part be a patch on its own?

        -ss

Reply via email to