On 03.08.2017 18:00, Peter Zijlstra wrote:
> On Wed, Aug 02, 2017 at 11:15:39AM +0300, Alexey Budankov wrote:
>> @@ -772,6 +780,10 @@ struct perf_event_context {
>>       */
>>      u64                             time;
>>      u64                             timestamp;
>> +    /*
>> +     * Context cache for filtered out events;
>> +     */
>> +    struct perf_event_tstamp        tstamp_data;
>>  
>>      /*
>>       * These fields let us detect when two contexts have both
> 
> 
>> @@ -1379,6 +1379,9 @@ static void update_context_time(struct 
>> perf_event_context *ctx)
>>  
>>      ctx->time += now - ctx->timestamp;
>>      ctx->timestamp = now;
>> +
>> +    ctx->tstamp_data.running += ctx->time - ctx->tstamp_data.stopped;
>> +    ctx->tstamp_data.stopped = ctx->time;
>>  }
>>  
>>  static u64 perf_event_time(struct perf_event *event)
> 
> It appears to me we have some redundancy here.
> 
> 
>> @@ -1968,9 +1971,13 @@ event_sched_out(struct perf_event *event,
>>       */
>>      if (event->state == PERF_EVENT_STATE_INACTIVE &&
>>          !event_filter_match(event)) {
>> +            delta = tstamp - event->tstamp->stopped;
>> +            event->tstamp->running += delta;
>> +            event->tstamp->stopped = tstamp;
>> +            if (event->tstamp != &event->tstamp_data) {
>> +                    event->tstamp_data = *event->tstamp;
> 
> This,
> 
>> +                    event->tstamp = &event->tstamp_data;
>> +            }
>>      }
>>  
>>      if (event->state != PERF_EVENT_STATE_ACTIVE)
> 
> 
>> @@ -3239,8 +3246,11 @@ ctx_pinned_sched_in(struct perf_event *event, void 
>> *data)
>>  
>>      if (event->state <= PERF_EVENT_STATE_OFF)
>>              return 0;
>> -    if (!event_filter_match(event))
>> +    if (!event_filter_match(event)) {
>> +            if (event->tstamp != &params->ctx->tstamp_data)
>> +                    event->tstamp = &params->ctx->tstamp_data;
> 
> this and
> 
>>              return 0;
>> +    }
>>  
>>      /* may need to reset tstamp_enabled */
>>      if (is_cgroup_event(event))
>> @@ -3273,8 +3283,11 @@ ctx_flexible_sched_in(struct perf_event *event, void 
>> *data)
>>       * Listen to the 'cpu' scheduling filter constraint
>>       * of events:
>>       */
>> -    if (!event_filter_match(event))
>> +    if (!event_filter_match(event)) {
>> +            if (event->tstamp != &params->ctx->tstamp_data)
>> +                    event->tstamp = &params->ctx->tstamp_data;
> 
> this..
> 
>>              return 0;
>> +    }
>>  
>>      /* may need to reset tstamp_enabled */
>>      if (is_cgroup_event(event))
> 
> 
> Are the magic spots, right? And I'm not convinced its right.
> 
> Suppose I have two events in my context, and I created them 1 minute
> apart. Then their respective tstamp_enabled are 1 minute apart as well.
> But the above doesn't seem to preserve that difference.
> 
> A similar argument can be made for running I think. That is a per event
> value and cannot be passed along to the ctx and back.

Aww, I see your point and it challenges my initial assumptions. 
Let me think through the case more. There must be some solution. Thanks!

> 
> 
>

Reply via email to