Re: [PATCH v9 36/36] fgraph: Skip recording calltime/rettime if it is not needed

2024-04-29 Thread Masami Hiramatsu (Google)
On Thu, 25 Apr 2024 13:15:08 -0700
Andrii Nakryiko  wrote:

> On Mon, Apr 15, 2024 at 6:25 AM Masami Hiramatsu (Google) wrote:
> >
> > From: Masami Hiramatsu (Google) 
> >
> > Skip recording calltime and rettime if the fgraph_ops does not need them.
> > This is a performance optimization for fprobe: since fprobe users (e.g.
> > eBPF, ftrace) do not use these fields, recording timestamps in fgraph is
> > pure overhead. So introduce a skip_timestamp flag, and when all
> > fgraph_ops set this flag, skip recording calltime and rettime.
> >
> > Suggested-by: Jiri Olsa 
> > Signed-off-by: Masami Hiramatsu (Google) 
> > ---
> >  Changes in v9:
> >   - Newly added.
> > ---
> >  include/linux/ftrace.h |2 ++
> >  kernel/trace/fgraph.c  |   46 +++---
> >  kernel/trace/fprobe.c  |1 +
> >  3 files changed, 42 insertions(+), 7 deletions(-)
> >
> > diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> > index d845a80a3d56..06fc7cbef897 100644
> > --- a/include/linux/ftrace.h
> > +++ b/include/linux/ftrace.h
> > @@ -1156,6 +1156,8 @@ struct fgraph_ops {
> > struct ftrace_ops   ops; /* for the hash lists */
> > void*private;
> > int idx;
> > +   /* If skip_timestamp is true, this does not record timestamps. */
> > +   boolskip_timestamp;
> >  };
> >
> >  void *fgraph_reserve_data(int idx, int size_bytes);
> > diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
> > index 7556fbbae323..a5722537bb79 100644
> > --- a/kernel/trace/fgraph.c
> > +++ b/kernel/trace/fgraph.c
> > @@ -131,6 +131,7 @@ DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
> >  int ftrace_graph_active;
> >
> >  static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
> > +static bool fgraph_skip_timestamp;
> >
> >  /* LRU index table for fgraph_array */
> >  static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
> > @@ -475,7 +476,7 @@ void ftrace_graph_stop(void)
> >  static int
> >  ftrace_push_return_trace(unsigned long ret, unsigned long func,
> >  unsigned long frame_pointer, unsigned long *retp,
> > -int fgraph_idx)
> > +int fgraph_idx, bool skip_ts)
> >  {
> > struct ftrace_ret_stack *ret_stack;
> > unsigned long long calltime;
> > @@ -498,8 +499,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
> > ret_stack = get_ret_stack(current, current->curr_ret_stack, &index);
> > if (ret_stack && ret_stack->func == func &&
> > get_fgraph_type(current, index + FGRAPH_RET_INDEX) == FGRAPH_TYPE_BITMAP &&
> > -   !is_fgraph_index_set(current, index + FGRAPH_RET_INDEX, fgraph_idx))
> > +   !is_fgraph_index_set(current, index + FGRAPH_RET_INDEX, fgraph_idx)) {
> > +   /* If the previous entry skipped calltime, fill it in now. */
> > +   if (!skip_ts && !ret_stack->calltime)
> > +   ret_stack->calltime = trace_clock_local();
> > return index + FGRAPH_RET_INDEX;
> > +   }
> >
> > val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_RET_INDEX;
> >
> > @@ -517,7 +522,10 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
> > return -EBUSY;
> > }
> >
> > -   calltime = trace_clock_local();
> > +   if (skip_ts)
> 
> would it be ok to add likely() here to keep the least-overhead code path linear?

It's not "likely", but hmm, yes as you said. We can keep the least overhead.
OK, let me add likely. 
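Something like this (an untested sketch of the revised hunk):

	if (likely(skip_ts))
		calltime = 0LL;
	else
		calltime = trace_clock_local();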

Thank you,

> 
> > +   calltime = 0LL;
> > +   else
> > +   calltime = trace_clock_local();
> >
> > index = READ_ONCE(current->curr_ret_stack);
> > ret_stack = RET_STACK(current, index);
> > @@ -601,7 +609,8 @@ int function_graph_enter_regs(unsigned long ret, unsigned long func,
> > trace.func = func;
> > trace.depth = ++current->curr_ret_depth;
> >
> > -   index = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
> > +   index = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0,
> > +fgraph_skip_timestamp);
> > if (index < 0)
> > goto out;
> >
> > @@ -654,7 +663,8 @@ int function_graph_enter_ops(unsigned long ret, unsigned long func,
> > return -ENODEV;
> >
> > /* Use start for the distance to ret_stack (skipping over reserve) */
> > -   index = ftrace_push_return_trace(ret, func, frame_pointer, retp, gops->idx);
> > +   index = ftrace_push_return_trace(ret, func, frame_pointer, retp, gops->idx,
> > +gops->skip_timestamp);
> > if (index < 0)
> > return index;
> > type = get_fgraph_type(current, index);
> > @@ -732,6 
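
The kernel/trace/fprobe.c hunk from the diffstat is cut off above; it is
presumably the one-line change that opts fprobe out of timestamping, since
fprobe never reads calltime or rettime. A hypothetical sketch (the
initializer and callback names here are assumptions, not the actual patch):

	/* Hypothetical sketch: fprobe ignores calltime/rettime, so skip them. */
	static struct fgraph_ops fprobe_graph_ops = {
		.entryfunc	= fprobe_entry,
		.retfunc	= fprobe_return,
		.skip_timestamp	= true,
	};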

Re: [PATCH v9 36/36] fgraph: Skip recording calltime/rettime if it is not needed

2024-04-25 Thread Andrii Nakryiko
On Mon, Apr 15, 2024 at 6:25 AM Masami Hiramatsu (Google) wrote:
>
> From: Masami Hiramatsu (Google) 
>
> Skip recording calltime and rettime if the fgraph_ops does not need them.
> This is a performance optimization for fprobe: since fprobe users (e.g.
> eBPF, ftrace) do not use these fields, recording timestamps in fgraph is
> pure overhead. So introduce a skip_timestamp flag, and when all
> fgraph_ops set this flag, skip recording calltime and rettime.
>
> Suggested-by: Jiri Olsa 
> Signed-off-by: Masami Hiramatsu (Google) 
> ---
>  Changes in v9:
>   - Newly added.
> ---
>  include/linux/ftrace.h |2 ++
>  kernel/trace/fgraph.c  |   46 +++---
>  kernel/trace/fprobe.c  |1 +
>  3 files changed, 42 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index d845a80a3d56..06fc7cbef897 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -1156,6 +1156,8 @@ struct fgraph_ops {
> struct ftrace_ops   ops; /* for the hash lists */
> void*private;
> int idx;
> +   /* If skip_timestamp is true, this does not record timestamps. */
> +   boolskip_timestamp;
>  };
>
>  void *fgraph_reserve_data(int idx, int size_bytes);
> diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
> index 7556fbbae323..a5722537bb79 100644
> --- a/kernel/trace/fgraph.c
> +++ b/kernel/trace/fgraph.c
> @@ -131,6 +131,7 @@ DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
>  int ftrace_graph_active;
>
>  static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
> +static bool fgraph_skip_timestamp;
>
>  /* LRU index table for fgraph_array */
>  static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
> @@ -475,7 +476,7 @@ void ftrace_graph_stop(void)
>  static int
>  ftrace_push_return_trace(unsigned long ret, unsigned long func,
>  unsigned long frame_pointer, unsigned long *retp,
> -int fgraph_idx)
> +int fgraph_idx, bool skip_ts)
>  {
> struct ftrace_ret_stack *ret_stack;
> unsigned long long calltime;
> @@ -498,8 +499,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
> ret_stack = get_ret_stack(current, current->curr_ret_stack, &index);
> if (ret_stack && ret_stack->func == func &&
> get_fgraph_type(current, index + FGRAPH_RET_INDEX) == FGRAPH_TYPE_BITMAP &&
> -   !is_fgraph_index_set(current, index + FGRAPH_RET_INDEX, fgraph_idx))
> +   !is_fgraph_index_set(current, index + FGRAPH_RET_INDEX, fgraph_idx)) {
> +   /* If the previous entry skipped calltime, fill it in now. */
> +   if (!skip_ts && !ret_stack->calltime)
> +   ret_stack->calltime = trace_clock_local();
> return index + FGRAPH_RET_INDEX;
> +   }
>
> val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_RET_INDEX;
>
> @@ -517,7 +522,10 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
> return -EBUSY;
> }
>
> -   calltime = trace_clock_local();
> +   if (skip_ts)

would it be ok to add likely() here to keep the least-overhead code path linear?

> +   calltime = 0LL;
> +   else
> +   calltime = trace_clock_local();
>
> index = READ_ONCE(current->curr_ret_stack);
> ret_stack = RET_STACK(current, index);
> @@ -601,7 +609,8 @@ int function_graph_enter_regs(unsigned long ret, unsigned long func,
> trace.func = func;
> trace.depth = ++current->curr_ret_depth;
>
> -   index = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
> +   index = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0,
> +fgraph_skip_timestamp);
> if (index < 0)
> goto out;
>
> @@ -654,7 +663,8 @@ int function_graph_enter_ops(unsigned long ret, unsigned long func,
> return -ENODEV;
>
> /* Use start for the distance to ret_stack (skipping over reserve) */
> -   index = ftrace_push_return_trace(ret, func, frame_pointer, retp, gops->idx);
> +   index = ftrace_push_return_trace(ret, func, frame_pointer, retp, gops->idx,
> +gops->skip_timestamp);
> if (index < 0)
> return index;
> type = get_fgraph_type(current, index);
> @@ -732,6 +742,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
> *ret = ret_stack->ret;
> trace->func = ret_stack->func;
> trace->calltime = ret_stack->calltime;
> +   trace->rettime = 0;
> trace->overrun = atomic_read(&current->trace_overrun);
> trace->depth = current->curr_ret_depth;
> /*
> @@ -792,7 +803,6 @@ __ftrace_return_to_handler(struct
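
The diff is truncated here. The remainder of the patch presumably also has
to recompute the global fgraph_skip_timestamp whenever a graph ops is
registered or unregistered, since a single ops that still wants timestamps
must keep them enabled for everyone. A minimal sketch of that invariant,
assuming a hypothetical helper name and that unused fgraph_array slots read
as NULL (the real code may use a stub entry instead):

	/* Hypothetical sketch: timestamps may be skipped globally only if
	 * every registered fgraph_ops has opted out of them.
	 */
	static void update_fgraph_skip_timestamp(void)
	{
		int i;

		for (i = 0; i < FGRAPH_ARRAY_SIZE; i++) {
			struct fgraph_ops *gops = fgraph_array[i];

			if (gops && !gops->skip_timestamp) {
				fgraph_skip_timestamp = false;
				return;
			}
		}
		fgraph_skip_timestamp = true;
	}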