On Mon, 24 Jul 2017 14:44:39 -0700
"Paul E. McKenney" <paul...@linux.vnet.ibm.com> wrote:

> The _rcu_barrier_trace() function is a wrapper for trace_rcu_barrier(),
> which needs TPS() protection for strings passed through its second
> argument.  However, it has escaped prior TPS()-ification efforts because
> _rcu_barrier_trace() does not start with "trace_".  This commit
> therefore adds the needed TPS() protection.
> 
> Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
> Cc: Steven Rostedt <rost...@goodmis.org>

Acked-by: Steven Rostedt (VMware) <rost...@goodmis.org>

-- Steve

> ---
>  kernel/rcu/tree.c | 27 +++++++++++++++------------
>  1 file changed, 15 insertions(+), 12 deletions(-)
> 
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 9a90d9b1dc04..7e018696fd82 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3553,10 +3553,11 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
>       struct rcu_state *rsp = rdp->rsp;
>  
>       if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
> -             _rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence);
> +             _rcu_barrier_trace(rsp, TPS("LastCB"), -1,
> +                                rsp->barrier_sequence);
>               complete(&rsp->barrier_completion);
>       } else {
> -             _rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence);
> +             _rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
>       }
>  }
>  
> @@ -3568,14 +3569,15 @@ static void rcu_barrier_func(void *type)
>       struct rcu_state *rsp = type;
>       struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
>  
> -     _rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
> +     _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
>       rdp->barrier_head.func = rcu_barrier_callback;
>       debug_rcu_head_queue(&rdp->barrier_head);
>       if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
>               atomic_inc(&rsp->barrier_cpu_count);
>       } else {
>               debug_rcu_head_unqueue(&rdp->barrier_head);
> -             _rcu_barrier_trace(rsp, "IRQNQ", -1, rsp->barrier_sequence);
> +             _rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
> +                                rsp->barrier_sequence);
>       }
>  }
>  
> @@ -3589,14 +3591,15 @@ static void _rcu_barrier(struct rcu_state *rsp)
>       struct rcu_data *rdp;
>       unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
>  
> -     _rcu_barrier_trace(rsp, "Begin", -1, s);
> +     _rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
>  
>       /* Take mutex to serialize concurrent rcu_barrier() requests. */
>       mutex_lock(&rsp->barrier_mutex);
>  
>       /* Did someone else do our work for us? */
>       if (rcu_seq_done(&rsp->barrier_sequence, s)) {
> -             _rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence);
> +             _rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
> +                                rsp->barrier_sequence);
>               smp_mb(); /* caller's subsequent code after above check. */
>               mutex_unlock(&rsp->barrier_mutex);
>               return;
> @@ -3604,7 +3607,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
>  
>       /* Mark the start of the barrier operation. */
>       rcu_seq_start(&rsp->barrier_sequence);
> -     _rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence);
> +     _rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
>  
>       /*
>        * Initialize the count to one rather than to zero in order to
> @@ -3627,10 +3630,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
>               rdp = per_cpu_ptr(rsp->rda, cpu);
>               if (rcu_is_nocb_cpu(cpu)) {
>                       if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
> -                             _rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
> +                             _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
>                                                  rsp->barrier_sequence);
>                       } else {
> -                             _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
> +                             _rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
>                                                  rsp->barrier_sequence);
>                               smp_mb__before_atomic();
>                               atomic_inc(&rsp->barrier_cpu_count);
> @@ -3638,11 +3641,11 @@ static void _rcu_barrier(struct rcu_state *rsp)
>                                          rcu_barrier_callback, rsp, cpu, 0);
>                       }
>               } else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
> -                     _rcu_barrier_trace(rsp, "OnlineQ", cpu,
> +                     _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
>                                          rsp->barrier_sequence);
>                       smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
>               } else {
> -                     _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
> +                     _rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
>                                          rsp->barrier_sequence);
>               }
>       }
> @@ -3659,7 +3662,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
>       wait_for_completion(&rsp->barrier_completion);
>  
>       /* Mark the end of the barrier operation. */
> -     _rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence);
> +     _rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
>       rcu_seq_end(&rsp->barrier_sequence);
>  
>       /* Other rcu_barrier() invocations can now safely proceed. */
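
For readers who don't have the tracing internals paged in: TPS() is
defined near the top of kernel/rcu/tree.c as shorthand for
tracepoint_string(), which emits the string into the __tracepoint_str
section so that tools such as perf can map the pointer recorded in the
ring buffer back to the text.  A condensed sketch of the mechanism
(simplified from include/linux/trace_events.h for the CONFIG_TRACING=y
case; the call at the end is taken from the patch above):

	/* Place the string where trace tooling can find it. */
	#define __tracepoint_string \
		__attribute__((section("__tracepoint_str"), used))

	/*
	 * Evaluate to the address of a static pointer to the literal.
	 * The tracepoint records only this address, not the bytes of
	 * the string itself.
	 */
	#define tracepoint_string(str)					\
	({								\
		static const char *___tp_str __tracepoint_string = str;\
		___tp_str;						\
	})

	/* In kernel/rcu/tree.c: */
	#define TPS(x) tracepoint_string(x)

	/* With TPS(), userspace can resolve the pointer to "LastCB". */
	_rcu_barrier_trace(rsp, TPS("LastCB"), -1, rsp->barrier_sequence);

Without TPS(), the address of a bare literal gets recorded with no
corresponding entry in __tracepoint_str, so postprocessing tools may be
unable to translate it back into a string.  (When CONFIG_TRACING is off,
tracepoint_string() simply evaluates to its argument, so the wrapped and
unwrapped forms behave identically.)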
