On Thu, Apr 25, 2019 at 11:45:14AM +0200, Thomas Gleixner wrote:
> @@ -2788,29 +2798,32 @@ static void __ftrace_trace_stack(struct
>        */
>       preempt_disable_notrace();
>  
> -     use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
> +     stackidx = __this_cpu_inc_return(ftrace_stack_reserve);
> +
> +     /* This should never happen. If it does, yell once and skip */
> +     if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
> +             goto out;
> +
>       /*
> -      * We don't need any atomic variables, just a barrier.
> -      * If an interrupt comes in, we don't care, because it would
> -      * have exited and put the counter back to what we want.
> -      * We just need a barrier to keep gcc from moving things
> -      * around.
> +      * The above __this_cpu_inc_return() is 'atomic' cpu local. An
> +      * interrupt will either see the value pre increment or post
> +      * increment. If the interrupt happens pre increment it will have
> +      * restored the counter when it returns.  We just need a barrier to
> +      * keep gcc from moving things around.
>        */
>       barrier();
> -     if (use_stack == 1) {
> -             trace.entries           = this_cpu_ptr(ftrace_stack.calls);
> -             trace.max_entries       = FTRACE_STACK_MAX_ENTRIES;
> -
> -             if (regs)
> -                     save_stack_trace_regs(regs, &trace);
> -             else
> -                     save_stack_trace(&trace);
> -
> -             if (trace.nr_entries > size)
> -                     size = trace.nr_entries;
> -     } else
> -             /* From now on, use_stack is a boolean */
> -             use_stack = 0;
> +
> +     fstack = this_cpu_ptr(ftrace_stacks.stacks) + (stackidx - 1);
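
For anyone skimming the hunk above, here is a rough userspace sketch of
the reservation scheme it implements: one stack slot per nesting level,
reserved by bumping a counter on entry and released again on return. The
names (NESTING, ENTRIES, slot_reserve, record_stack) are made up for
illustration and recursion stands in for interrupt nesting; this is not
kernel code, just the shape of the idea:

#include <stdio.h>

#define NESTING 4                       /* like FTRACE_KSTACK_NESTING */
#define ENTRIES 16                      /* like FTRACE_KSTACK_ENTRIES */

struct stack_slot {
        unsigned long calls[ENTRIES];
};

static struct stack_slot stacks[NESTING];       /* one slot per nesting level */
static int slot_reserve;                        /* per-CPU counter in the kernel */

static void record_stack(int depth)
{
        int idx = ++slot_reserve;               /* 1-based, as in the patch */
        struct stack_slot *slot;

        if (idx > NESTING)                      /* nested deeper than we have slots */
                goto out;

        slot = &stacks[idx - 1];
        slot->calls[0] = depth;                 /* stand-in for save_stack_trace() */
        printf("depth %d reserved slot %d\n", depth, idx - 1);

        if (depth < 2)
                record_stack(depth + 1);        /* "interrupt" arriving mid-trace */
out:
        slot_reserve--;                         /* release the slot on return */
}

int main(void)
{
        record_stack(0);
        return 0;
}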

nit: it would be slightly less surprising if stackidx were 0-based:

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d3f6ec7eb729..4fc93004feab 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2798,10 +2798,10 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
         */
        preempt_disable_notrace();
 
-       stackidx = __this_cpu_inc_return(ftrace_stack_reserve);
+       stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
 
        /* This should never happen. If it does, yell once and skip */
        if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
                goto out;
 
        /*
@@ -2813,7 +2813,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
         */
        barrier();
 
-       fstack = this_cpu_ptr(ftrace_stacks.stacks) + (stackidx - 1);
+       fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
        trace.entries           = fstack->calls;
        trace.max_entries       = FTRACE_KSTACK_ENTRIES;
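
One more data point on the bounds check with a 0-based stackidx: the
usable slots are 0 .. FTRACE_KSTACK_NESTING - 1, which is why the ">="
comparison stays as-is above. A tiny standalone snippet, plain C, with
N and slot_ok() as made-up stand-ins for illustration:

#include <assert.h>

#define N 4                             /* stands in for FTRACE_KSTACK_NESTING */

static int slot_ok(int stackidx)
{
        return stackidx >= 0 && stackidx < N;   /* usable slots are 0 .. N-1 */
}

int main(void)
{
        assert(slot_ok(0));
        assert(slot_ok(N - 1));         /* deepest valid nesting level */
        assert(!slot_ok(N));            /* one past the array, must be skipped */
        return 0;
}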
 