From: Arnaldo Carvalho de Melo <[email protected]>

So that we don't need to know about the evlist all the time and can cope
with cases where evsel->cpus were set because it was for an event on a
PMU with a cpumask.

Reported-by: Stephane Eranian <[email protected]>
Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
Signed-off-by: Kan Liang <[email protected]>
---
 tools/perf/builtin-stat.c | 36 ++++++++++-----------------
 tools/perf/util/evlist.c  | 63 +++++++++++++++++++++++++++++++++++++++++------
 2 files changed, 68 insertions(+), 31 deletions(-)

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index f7b8218..2c1456e 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -163,16 +163,6 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a,
        }
 }
 
-static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
-{
-       return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
-}
-
-static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
-{
-       return perf_evsel__cpus(evsel)->nr;
-}
-
 static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
 {
        int i;
@@ -202,7 +192,7 @@ static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
        size_t sz;
 
        sz = sizeof(*evsel->counts) +
-            (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values));
+            (cpu_map__nr(evsel->cpus) * sizeof(struct perf_counts_values));
 
        addr = zalloc(sz);
        if (!addr)
@@ -235,7 +225,7 @@ static int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
 
        evlist__for_each(evlist, evsel) {
                if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
-                   perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 ||
+                   perf_evsel__alloc_counts(evsel, cpu_map__nr(evsel->cpus)) < 0 ||
                    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0))
                        goto out_free;
        }
@@ -269,7 +259,7 @@ static void perf_stat__reset_stats(struct perf_evlist *evlist)
 
        evlist__for_each(evlist, evsel) {
                perf_evsel__reset_stat_priv(evsel);
-               perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel));
+               perf_evsel__reset_counts(evsel, cpu_map__nr(evsel->cpus));
        }
 
        memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
@@ -302,7 +292,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
        attr->inherit = !no_inherit;
 
        if (target__has_cpu(&target))
-               return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
+               return perf_evsel__open_per_cpu(evsel, evsel->cpus);
 
        if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) {
                attr->disabled = 1;
@@ -398,7 +388,7 @@ static void zero_per_pkg(struct perf_evsel *counter)
 static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
 {
        unsigned long *mask = counter->per_pkg_mask;
-       struct cpu_map *cpus = perf_evsel__cpus(counter);
+       struct cpu_map *cpus = counter->cpus;
        int s;
 
        *skip = false;
@@ -509,7 +499,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
 static int read_counter(struct perf_evsel *counter)
 {
        int nthreads = thread_map__nr(evsel_list->threads);
-       int ncpus = perf_evsel__nr_cpus(counter);
+       int ncpus = cpu_map__nr(counter->cpus);
        int cpu, thread;
 
        if (!counter->supported)
@@ -733,13 +723,13 @@ static int __run_perf_stat(int argc, const char **argv)
        if (aggr_mode == AGGR_GLOBAL) {
                evlist__for_each(evsel_list, counter) {
                        read_counter_aggr(counter);
-                       perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
+                       perf_evsel__close_fd(counter, cpu_map__nr(counter->cpus),
                                             thread_map__nr(evsel_list->threads));
                }
        } else {
                evlist__for_each(evsel_list, counter) {
                        read_counter(counter);
-                       perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1);
+                       perf_evsel__close_fd(counter, cpu_map__nr(counter->cpus), 1);
                }
        }
 
@@ -831,7 +821,7 @@ static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
        case AGGR_NONE:
                fprintf(output, "CPU%*d%s",
                        csv_output ? 0 : -4,
-                       perf_evsel__cpus(evsel)->map[id], csv_sep);
+                       evsel->cpus->map[id], csv_sep);
                break;
        case AGGR_GLOBAL:
        default:
@@ -1239,8 +1229,8 @@ static void print_aggr(char *prefix)
                evlist__for_each(evsel_list, counter) {
                        val = ena = run = 0;
                        nr = 0;
-                       for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
-                               cpu2 = perf_evsel__cpus(counter)->map[cpu];
+                       for (cpu = 0; cpu < cpu_map__nr(counter->cpus); cpu++) {
+                               cpu2 = counter->cpus->map[cpu];
                                s2 = aggr_get_id(evsel_list->cpus, cpu2);
                                if (s2 != id)
                                        continue;
@@ -1353,7 +1343,7 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
        double uval;
        int cpu;
 
-       for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
+       for (cpu = 0; cpu < cpu_map__nr(counter->cpus); cpu++) {
                val = counter->counts->cpu[cpu].val;
                ena = counter->counts->cpu[cpu].ena;
                run = counter->counts->cpu[cpu].run;
@@ -1364,7 +1354,7 @@ static void print_counter(struct perf_evsel *counter, char *prefix)
                if (run == 0 || ena == 0) {
                        fprintf(output, "CPU%*d%s%*s%s",
                                csv_output ? 0 : -4,
-                               perf_evsel__cpus(counter)->map[cpu], csv_sep,
+                               counter->cpus->map[cpu], csv_sep,
                                csv_output ? 0 : 18,
                                counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
                                csv_sep);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 080be93..a065150 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -110,13 +110,46 @@ void perf_evlist__exit(struct perf_evlist *evlist)
        fdarray__exit(&evlist->pollfd);
 }
 
+static void perf_evlist__reset_cpus(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each(evlist, evsel) {
+               if (evsel->cpus == evlist->cpus)
+                       evsel->cpus = NULL;
+       }
+
+       evlist->cpus = NULL;
+}
+
+static void perf_evlist__set_cpus(struct perf_evlist *evlist,
+                                 struct cpu_map *cpus)
+{
+       struct perf_evsel *evsel;
+
+       if (evlist->cpus != NULL)
+               perf_evlist__reset_cpus(evlist);
+       /*
+        * If, when parsing events, the evsel->cpus wasn't constrained to a
+        * cpulist, say, because it is on a PMU that has a cpumask, then set it
+        * to the evlist cpu_map, so that we can access evsel->cpus and get the
+        * cpu_map this evsel works with.
+        */
+       evlist__for_each(evlist, evsel) {
+               if (evsel->cpus == NULL)
+                       evsel->cpus = cpus;
+       }
+
+       evlist->cpus = cpus;
+}
+
 void perf_evlist__delete(struct perf_evlist *evlist)
 {
        perf_evlist__munmap(evlist);
        perf_evlist__close(evlist);
        cpu_map__delete(evlist->cpus);
+       perf_evlist__reset_cpus(evlist);
        thread_map__delete(evlist->threads);
-       evlist->cpus = NULL;
        evlist->threads = NULL;
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
@@ -129,6 +162,15 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
        entry->idx = evlist->nr_entries;
        entry->tracking = !entry->idx;
 
+       /*
+        * If, when parsing events, the evsel->cpus wasn't constrained to a
+        * cpulist, say, because it is on a PMU that has a cpumask, then set it
+        * to the evlist cpu_map, so that we can access evsel->cpus and get the
+        * cpu_map this evsel works with.
+        */
+       if (entry->cpus == NULL)
+               entry->cpus = evlist->cpus;
+
        if (!evlist->nr_entries++)
                perf_evlist__set_id_pos(evlist);
 }
@@ -1028,6 +1070,8 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
 
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 {
+       struct cpu_map *cpus;
+
        evlist->threads = thread_map__new_str(target->pid, target->tid,
                                              target->uid);
 
@@ -1035,13 +1079,15 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
                return -1;
 
        if (target__uses_dummy_map(target))
-               evlist->cpus = cpu_map__dummy_new();
+               cpus = cpu_map__dummy_new();
        else
-               evlist->cpus = cpu_map__new(target->cpu_list);
+               cpus = cpu_map__new(target->cpu_list);
 
-       if (evlist->cpus == NULL)
+       if (cpus == NULL)
                goto out_delete_threads;
 
+       perf_evlist__set_cpus(evlist, cpus);
+
        return 0;
 
 out_delete_threads:
@@ -1255,6 +1301,7 @@ void perf_evlist__close(struct perf_evlist *evlist)
 
 static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
 {
+       struct cpu_map *cpus;
        int err = -ENOMEM;
 
        /*
@@ -1266,8 +1313,8 @@ static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
         * error, and we may not want to do that fallback to a
         * default cpu identity map :-\
         */
-       evlist->cpus = cpu_map__new(NULL);
-       if (evlist->cpus == NULL)
+       cpus = cpu_map__new(NULL);
+       if (cpus == NULL)
                goto out;
 
        evlist->threads = thread_map__new_dummy();
@@ -1275,11 +1322,11 @@ static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
                goto out_free_cpus;
 
        err = 0;
+       perf_evlist__set_cpus(evlist, cpus);
 out:
        return err;
 out_free_cpus:
-       cpu_map__delete(evlist->cpus);
-       evlist->cpus = NULL;
+       cpu_map__delete(cpus);
        goto out;
 }
 
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to