When ctx_groups_sched_in iterates events, their CPU and cgroup are already known to match the current task. Avoid double checking this in event_filter_match by passing in an additional argument.
Signed-off-by: Ian Rogers <[email protected]>
---
 kernel/events/core.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7608bd562dac..a66477ee196a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2079,10 +2079,12 @@ static inline int pmu_filter_match(struct perf_event *event)
 }
 
 static inline int
-event_filter_match(struct perf_event *event)
+event_filter_match(struct perf_event *event, bool check_cgroup_and_cpu)
 {
-	return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
-	       perf_cgroup_match(event) && pmu_filter_match(event);
+	return (!check_cgroup_and_cpu ||
+		((event->cpu == -1 || event->cpu == smp_processor_id()) &&
+		 perf_cgroup_match(event))) &&
+	       pmu_filter_match(event);
 }
 
 static void
@@ -2797,7 +2799,7 @@ static void __perf_event_enable(struct perf_event *event,
 	if (!ctx->is_active)
 		return;
 
-	if (!event_filter_match(event)) {
+	if (!event_filter_match(event, /*check_cgroup_and_cpu=*/true)) {
 		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
 		return;
 	}
@@ -3573,7 +3575,10 @@ static int pinned_sched_in(struct perf_event_context *ctx,
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
-	if (!event_filter_match(event))
+	/* The caller already checked the CPU and cgroup before calling
+	 * pinned_sched_in.
+	 */
+	if (!event_filter_match(event, /*check_cgroup_and_cpu=*/false))
 		return 0;
 
 	if (group_can_go_on(event, cpuctx, 1)) {
@@ -3599,7 +3604,10 @@ static int flexible_sched_in(struct perf_event_context *ctx,
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
-	if (!event_filter_match(event))
+	/* The caller already checked the CPU and cgroup before calling
+	 * flexible_sched_in.
+	 */
+	if (!event_filter_match(event, /*check_cgroup_and_cpu=*/false))
 		return 0;
 
 	if (group_can_go_on(event, cpuctx, *can_add_hw)) {
@@ -3899,7 +3907,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
-		if (!event_filter_match(event))
+		if (!event_filter_match(event, /*check_cgroup_and_cpu=*/true))
 			continue;
 
 		perf_pmu_disable(event->pmu);
@@ -6929,7 +6937,8 @@ perf_iterate_ctx(struct perf_event_context *ctx,
 		if (!all) {
 			if (event->state < PERF_EVENT_STATE_INACTIVE)
 				continue;
-			if (!event_filter_match(event))
+			if (!event_filter_match(event,
+						/*check_cgroup_and_cpu=*/true))
 				continue;
 		}
 
@@ -6953,7 +6962,7 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
 
 		if (event->state < PERF_EVENT_STATE_INACTIVE)
 			continue;
-		if (!event_filter_match(event))
+		if (!event_filter_match(event, /*check_cgroup_and_cpu=*/true))
 			continue;
 		output(event, data);
 	}
--
2.22.0.410.gd8fdbe21b5-goog
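[Editor's note: for readers who want to see the resulting calling convention
outside the kernel, below is a minimal userspace sketch of the pattern the
patch introduces. It is illustrative only, not kernel code: struct event,
cpu_matches() and the *_ok fields are hypothetical stand-ins for the real
perf helpers (perf_cgroup_match(), pmu_filter_match()).]

#include <stdbool.h>
#include <stdio.h>

struct event {
	int cpu;		/* -1 means "any CPU" */
	bool cgroup_ok;		/* stand-in for perf_cgroup_match() */
	bool pmu_ok;		/* stand-in for pmu_filter_match() */
};

static bool cpu_matches(const struct event *e, int this_cpu)
{
	return e->cpu == -1 || e->cpu == this_cpu;
}

/* Mirrors the new event_filter_match(): the flag lets callers that have
 * already established the CPU/cgroup match skip that part of the check,
 * so only the PMU filter runs on the hot scheduling-in path. */
static bool filter_match(const struct event *e, int this_cpu,
			 bool check_cgroup_and_cpu)
{
	return (!check_cgroup_and_cpu ||
		(cpu_matches(e, this_cpu) && e->cgroup_ok)) &&
	       e->pmu_ok;
}

int main(void)
{
	struct event e = { .cpu = 2, .cgroup_ok = true, .pmu_ok = true };

	/* Generic iteration path: everything must be checked. */
	printf("%d\n", filter_match(&e, 0, true));	/* 0: CPU 2 != CPU 0 */
	/* Scheduling-in path: caller vouches for CPU/cgroup. */
	printf("%d\n", filter_match(&e, 0, false));	/* 1: only the PMU check runs */
	return 0;
}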

