On Mon, Jul 01, 2019 at 11:59:50PM -0700, Ian Rogers wrote:

> +static int visit_groups_merge(struct perf_event_context *ctx,
> +			       struct perf_cpu_context *cpuctx,
> +			       struct perf_event_groups *groups,
> +			       int (*func)(struct perf_event_context *,
> +					   struct perf_cpu_context *,
> +					   struct perf_event *,
> +					   int *),
> +			       int *data)
> -struct sched_in_data {
> -	struct perf_event_context *ctx;
> -	struct perf_cpu_context *cpuctx;
> -	int can_add_hw;
> -};
> -
> -static int pinned_sched_in(struct perf_event *event, void *data)
> +static int pinned_sched_in(struct perf_event_context *ctx,
> +			   struct perf_cpu_context *cpuctx,
> +			   struct perf_event *event,
> +			   int *unused)
>  {
> -	struct sched_in_data *sid = data;
> -
>  	if (event->state <= PERF_EVENT_STATE_OFF)
>  		return 0;
>
>  	if (!event_filter_match(event))
>  		return 0;
>
> -	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
> -		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
> -			list_add_tail(&event->active_list, &sid->ctx->pinned_active);
> +	if (group_can_go_on(event, cpuctx, 1)) {
> +		if (!group_sched_in(event, cpuctx, ctx))
> +			list_add_tail(&event->active_list, &ctx->pinned_active);
>  	}
>
>  	/*
> @@ -3317,24 +3444,25 @@ static int pinned_sched_in(struct perf_event *event, void *data)
>  	return 0;
>  }
>
> -static int flexible_sched_in(struct perf_event *event, void *data)
> +static int flexible_sched_in(struct perf_event_context *ctx,
> +			     struct perf_cpu_context *cpuctx,
> +			     struct perf_event *event,
> +			     int *can_add_hw)
>  {
> -	struct sched_in_data *sid = data;
> -
>  	if (event->state <= PERF_EVENT_STATE_OFF)
>  		return 0;
>
>  	if (!event_filter_match(event))
>  		return 0;
>
> -	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
> -		int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
> +	if (group_can_go_on(event, cpuctx, *can_add_hw)) {
> +		int ret = group_sched_in(event, cpuctx, ctx);
>  		if (ret) {
> -			sid->can_add_hw = 0;
> -			sid->ctx->rotate_necessary = 1;
> +			*can_add_hw = 0;
> +			ctx->rotate_necessary = 1;
>  			return 0;
>  		}
> -		list_add_tail(&event->active_list, &sid->ctx->flexible_active);
> +		list_add_tail(&event->active_list, &ctx->flexible_active);
>  	}
>
>  	return 0;
> @@ -3344,30 +3472,24 @@ static void
>  ctx_pinned_sched_in(struct perf_event_context *ctx,
>  		    struct perf_cpu_context *cpuctx)
>  {
> -	struct sched_in_data sid = {
> -		.ctx = ctx,
> -		.cpuctx = cpuctx,
> -		.can_add_hw = 1,
> -	};
> -
> -	visit_groups_merge(&ctx->pinned_groups,
> -			   smp_processor_id(),
> -			   pinned_sched_in, &sid);
> +	visit_groups_merge(ctx,
> +			   cpuctx,
> +			   &ctx->pinned_groups,
> +			   pinned_sched_in,
> +			   NULL);
>  }
>
>  static void
>  ctx_flexible_sched_in(struct perf_event_context *ctx,
>  		      struct perf_cpu_context *cpuctx)
>  {
> -	struct sched_in_data sid = {
> -		.ctx = ctx,
> -		.cpuctx = cpuctx,
> -		.can_add_hw = 1,
> -	};
> +	int can_add_hw = 1;
>
> -	visit_groups_merge(&ctx->flexible_groups,
> -			   smp_processor_id(),
> -			   flexible_sched_in, &sid);
> +	visit_groups_merge(ctx,
> +			   cpuctx,
> +			   &ctx->flexible_groups,
> +			   flexible_sched_in,
> +			   &can_add_hw);
>  }

It is not clear to me why you're doing away with that sched_in_data abstraction. AFAICT that has no material impact on this patch.
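Something like the below is what I mean (a sketch, completely untested, and
the prototype here is adjusted by hand rather than taken from your patch):
visit_groups_merge() can grow its new ctx/cpuctx arguments while the callback
keeps the old (event, void *) shape, so sched_in_data stays as-is:

	/* Callback type unchanged; per-call state still rides in the cookie. */
	static int visit_groups_merge(struct perf_event_context *ctx,
				      struct perf_cpu_context *cpuctx,
				      struct perf_event_groups *groups,
				      int (*func)(struct perf_event *, void *),
				      void *data);

	static void
	ctx_flexible_sched_in(struct perf_event_context *ctx,
			      struct perf_cpu_context *cpuctx)
	{
		/* Same cookie as before: ctx, cpuctx and can_add_hw together. */
		struct sched_in_data sid = {
			.ctx		= ctx,
			.cpuctx		= cpuctx,
			.can_add_hw	= 1,
		};

		visit_groups_merge(ctx, cpuctx, &ctx->flexible_groups,
				   flexible_sched_in, &sid);
	}

That also keeps pinned_sched_in() and flexible_sched_in() symmetric, instead
of passing an int * that one of them ignores and the other dereferences.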