Hi Song,

On Sat, Apr 17, 2021 at 7:13 AM Song Liu <s...@kernel.org> wrote:
>
> Currently, to use BPF to aggregate perf event counters, the user uses
> --bpf-counters option. Enable "use bpf by default" events with a config
> option, stat.bpf-counter-events. Events with name in the option will use
> BPF.
>
> This also enables mixing BPF events and regular events in the same session.
> For example:
>
>    perf config stat.bpf-counter-events=instructions
>    perf stat -e instructions,cs
>
> The second command will use BPF for "instructions" but not "cs".
>
> Signed-off-by: Song Liu <s...@kernel.org>
> ---
> @@ -535,12 +549,13 @@ static int enable_counters(void)
>         struct evsel *evsel;
>         int err;
>
> -       if (target__has_bpf(&target)) {
> -               evlist__for_each_entry(evsel_list, evsel) {
> -                       err = bpf_counter__enable(evsel);
> -                       if (err)
> -                               return err;
> -               }
> +       evlist__for_each_entry(evsel_list, evsel) {
> +               if (!evsel__is_bpf(evsel))
> +                       continue;
> +
> +               err = bpf_counter__enable(evsel);
> +               if (err)
> +                       return err;

I just realized it doesn't have a disable counterpart.

>         }
>
>         if (stat_config.initial_delay < 0) {
> @@ -784,11 +799,9 @@ static int __run_perf_stat(int argc, const char **argv, 
> int run_idx)
>         if (affinity__setup(&affinity) < 0)
>                 return -1;
>
> -       if (target__has_bpf(&target)) {
> -               evlist__for_each_entry(evsel_list, counter) {
> -                       if (bpf_counter__load(counter, &target))
> -                               return -1;
> -               }
> +       evlist__for_each_entry(evsel_list, counter) {
> +               if (bpf_counter__load(counter, &target))
> +                       return -1;
>         }
>
>         evlist__for_each_cpu (evsel_list, i, cpu) {

[SNIP]
> diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
> index 2d2614eeaa20e..080ddcfefbcd2 100644
> --- a/tools/perf/util/evsel.c
> +++ b/tools/perf/util/evsel.c
> @@ -492,6 +492,28 @@ const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
>         "ref-cycles",
>  };
>
> +char *evsel__bpf_counter_events;
> +
> +bool evsel__match_bpf_counter_events(const char *name)
> +{
> +       int name_len;
> +       bool match;
> +       char *ptr;
> +
> +       if (!evsel__bpf_counter_events)
> +               return false;
> +
> +       ptr = strstr(evsel__bpf_counter_events, name);
> +       name_len = strlen(name);
> +
> +       /* check name matches a full token in evsel__bpf_counter_events */
> +       match = (ptr != NULL) &&
> +               ((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
> +               ((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));

I'm not sure whether any event name is a substring of another event name.
If that can happen, maybe it should retry the search (continue scanning past
a partial match) instead of giving up after the first failed token check.

Thanks,
Namhyung

> +
> +       return match;
> +}
> +
>  static const char *__evsel__hw_name(u64 config)
>  {
>         if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])

Reply via email to