When ctx_groups_sched_in iterates events, the CPU and cgroup of those
events are already known to match the current task. Avoid double
checking this in event_filter_match by passing in an additional
argument.

Signed-off-by: Ian Rogers <[email protected]>
---
 kernel/events/core.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)
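
As a side note for review, the snippet below is a minimal user-space sketch
of the pattern used here: a filter predicate takes a flag so callers that
have already iterated only matching events can skip re-checking the CPU and
cgroup. It is not the kernel code; all names in it are illustrative.

/*
 * Minimal user-space sketch of the pattern, not the kernel code.
 * The boolean keeps a single definition of the filtering logic while
 * letting the hot scheduling path drop the redundant comparisons.
 */
#include <stdbool.h>
#include <stdio.h>

struct ev {
	int cpu;		/* -1 means "any CPU" */
	int cgroup_id;		/* stands in for perf_cgroup_match() */
	bool pmu_ok;		/* stands in for pmu_filter_match() */
};

static int cur_cpu = 2;		/* stands in for smp_processor_id() */
static int cur_cgroup = 7;	/* stands in for the task's cgroup */

static bool ev_filter_match(const struct ev *e, bool check_cpu_and_cgroup)
{
	return (!check_cpu_and_cgroup ||
		((e->cpu == -1 || e->cpu == cur_cpu) &&
		 e->cgroup_id == cur_cgroup)) &&
	       e->pmu_ok;
}

int main(void)
{
	struct ev e = { .cpu = 2, .cgroup_id = 7, .pmu_ok = true };

	/* Generic path: nothing is known about the event, check everything. */
	printf("full check: %d\n", ev_filter_match(&e, true));

	/*
	 * Sched-in path: the caller only handed us events whose CPU and
	 * cgroup already match, so only the PMU filter remains.
	 */
	printf("pmu only:   %d\n", ev_filter_match(&e, false));
	return 0;
}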

diff --git a/kernel/events/core.c b/kernel/events/core.c
index c8b9c8611533..fb1027387e8e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2077,10 +2077,12 @@ static inline int pmu_filter_match(struct perf_event *event)
 }
 
 static inline int
-event_filter_match(struct perf_event *event)
+event_filter_match(struct perf_event *event, bool check_cpu_and_cgroup)
 {
-       return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
-              perf_cgroup_match(event) && pmu_filter_match(event);
+       return (!check_cpu_and_cgroup ||
+               ((event->cpu == -1 || event->cpu == smp_processor_id()) &&
+                perf_cgroup_match(event))) &&
+                       pmu_filter_match(event);
 }
 
 static void
@@ -2801,7 +2803,7 @@ static void __perf_event_enable(struct perf_event *event,
        if (!ctx->is_active)
                return;
 
-       if (!event_filter_match(event)) {
+       if (!event_filter_match(event, /*check_cpu_and_cgroup=*/true)) {
                ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
                return;
        }
@@ -3578,7 +3580,10 @@ static int pinned_sched_in(struct perf_event_context *ctx,
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
 
-       if (!event_filter_match(event))
+       /*
+        * The CPU and cgroup were checked before calling pinned_sched_in().
+        */
+       if (!event_filter_match(event, /*check_cpu_and_cgroup=*/false))
                return 0;
 
        if (group_can_go_on(event, cpuctx, 1)) {
@@ -3604,7 +3609,10 @@ static int flexible_sched_in(struct perf_event_context *ctx,
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
 
-       if (!event_filter_match(event))
+       /*
+        * The CPU and cgroup were checked before calling flexible_sched_in().
+        */
+       if (!event_filter_match(event, /*check_cpu_and_cgroup=*/false))
                return 0;
 
        if (group_can_go_on(event, cpuctx, *can_add_hw)) {
@@ -3904,7 +3912,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
                if (event->state != PERF_EVENT_STATE_ACTIVE)
                        continue;
 
-               if (!event_filter_match(event))
+               if (!event_filter_match(event, /*check_cpu_and_cgroup=*/true))
                        continue;
 
                perf_pmu_disable(event->pmu);
@@ -6952,7 +6960,8 @@ perf_iterate_ctx(struct perf_event_context *ctx,
                if (!all) {
                        if (event->state < PERF_EVENT_STATE_INACTIVE)
                                continue;
-                       if (!event_filter_match(event))
+                       if (!event_filter_match(event,
+                                               /*check_cpu_and_cgroup=*/true))
                                continue;
                }
 
@@ -6976,7 +6985,7 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
 
                if (event->state < PERF_EVENT_STATE_INACTIVE)
                        continue;
-               if (!event_filter_match(event))
+               if (!event_filter_match(event, /*check_cpu_and_cgroup=*/true))
                        continue;
                output(event, data);
        }
-- 
2.22.0.709.g102302147b-goog
