On Tue, 13 Aug 2024 13:34:09 -0700
Andrii Nakryiko <and...@kernel.org> wrote:

> trace_uprobe->nhit counter is not incremented atomically, so its value
> is unreliable when the uprobe is hit on multiple CPUs simultaneously.
> 
> Also, doing this shared counter increment across many CPUs causes heavy
> cache line bouncing, limiting uprobe/uretprobe performance scaling with
> number of CPUs.
> 
> Solve both problems by making this a per-CPU counter.
> 

Looks good to me. Let me pick it up.
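
For readers less familiar with the per-CPU counter pattern this patch
adopts, the idea reduces to three pieces: allocate one slot per possible
CPU, increment only the local CPU's slot on the hot path, and sum all
slots on the (rare) read side. Below is a minimal sketch of that pattern,
using a hypothetical struct my_probe rather than the actual trace_uprobe
context:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* hypothetical container, standing in for struct trace_uprobe */
struct my_probe {
	unsigned long __percpu	*nhits;
};

static int my_probe_init(struct my_probe *p)
{
	/* one counter per possible CPU; each CPU only ever writes its
	 * own slot, so the hot path never bounces a cache line */
	p->nhits = alloc_percpu(unsigned long);
	return p->nhits ? 0 : -ENOMEM;
}

static void my_probe_hit(struct my_probe *p)
{
	/* non-atomic increment of this CPU's slot; safe because no
	 * other CPU writes it */
	this_cpu_inc(*p->nhits);
}

static unsigned long my_probe_total(struct my_probe *p)
{
	unsigned long sum = 0;
	int cpu;

	/* the read side sums all slots; slightly racy against
	 * concurrent increments, which is fine for a stats counter */
	for_each_possible_cpu(cpu)
		sum += per_cpu(*p->nhits, cpu);

	return sum;
}

static void my_probe_free(struct my_probe *p)
{
	free_percpu(p->nhits);
}

The same trade-off shows up in the patch below: uprobe_dispatcher() gets
the cheap this_cpu_inc(), while probes_profile_seq_show() pays the
O(nr_cpus) summation, which only runs when someone reads the
uprobe_profile file.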

> Reviewed-by: Oleg Nesterov <o...@redhat.com>
> Signed-off-by: Andrii Nakryiko <and...@kernel.org>
> ---
>  kernel/trace/trace_uprobe.c | 24 +++++++++++++++++++++---
>  1 file changed, 21 insertions(+), 3 deletions(-)
> 
> diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
> index c98e3b3386ba..c3df411a2684 100644
> --- a/kernel/trace/trace_uprobe.c
> +++ b/kernel/trace/trace_uprobe.c
> @@ -17,6 +17,7 @@
>  #include <linux/string.h>
>  #include <linux/rculist.h>
>  #include <linux/filter.h>
> +#include <linux/percpu.h>
>  
>  #include "trace_dynevent.h"
>  #include "trace_probe.h"
> @@ -62,7 +63,7 @@ struct trace_uprobe {
>       char                            *filename;
>       unsigned long                   offset;
>       unsigned long                   ref_ctr_offset;
> -     unsigned long                   nhit;
> +     unsigned long __percpu          *nhits;
>       struct trace_probe              tp;
>  };
>  
> @@ -337,6 +338,12 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
>       if (!tu)
>               return ERR_PTR(-ENOMEM);
>  
> +     tu->nhits = alloc_percpu(unsigned long);
> +     if (!tu->nhits) {
> +             ret = -ENOMEM;
> +             goto error;
> +     }
> +
>       ret = trace_probe_init(&tu->tp, event, group, true, nargs);
>       if (ret < 0)
>               goto error;
> @@ -349,6 +356,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
>       return tu;
>  
>  error:
> +     free_percpu(tu->nhits);
>       kfree(tu);
>  
>       return ERR_PTR(ret);
> @@ -362,6 +370,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
>       path_put(&tu->path);
>       trace_probe_cleanup(&tu->tp);
>       kfree(tu->filename);
> +     free_percpu(tu->nhits);
>       kfree(tu);
>  }
>  
> @@ -815,13 +824,21 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
>  {
>       struct dyn_event *ev = v;
>       struct trace_uprobe *tu;
> +     unsigned long nhits;
> +     int cpu;
>  
>       if (!is_trace_uprobe(ev))
>               return 0;
>  
>       tu = to_trace_uprobe(ev);
> +
> +     nhits = 0;
> +     for_each_possible_cpu(cpu) {
> +             nhits += per_cpu(*tu->nhits, cpu);
> +     }
> +
>       seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
> -                     trace_probe_name(&tu->tp), tu->nhit);
> +                trace_probe_name(&tu->tp), nhits);
>       return 0;
>  }
>  
> @@ -1512,7 +1529,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
>       int ret = 0;
>  
>       tu = container_of(con, struct trace_uprobe, consumer);
> -     tu->nhit++;
> +
> +     this_cpu_inc(*tu->nhits);
>  
>       udd.tu = tu;
>       udd.bp_addr = instruction_pointer(regs);
> -- 
> 2.43.5
> 


-- 
Masami Hiramatsu (Google) <mhira...@kernel.org>
