On 1 February 2018 at 17:57, Peter Zijlstra <[email protected]> wrote:
> On Wed, Jan 24, 2018 at 09:25:36AM +0100, Vincent Guittot wrote:
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 898785d..ed90303 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -7356,6 +7356,17 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
>>       return true;
>>  }
>>
>> +static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
>> +{
>> +     if (cfs_rq->avg.load_avg)
>> +             return true;
>> +
>> +     if (cfs_rq->avg.util_avg)
>> +             return true;
>> +
>> +     return false;
>> +}
>> +
>>  #ifdef CONFIG_FAIR_GROUP_SCHED
>>
>>  static void update_blocked_averages(int cpu)
>> @@ -7393,7 +7404,9 @@ static void update_blocked_averages(int cpu)
>>                */
>>               if (cfs_rq_is_decayed(cfs_rq))
>>                       list_del_leaf_cfs_rq(cfs_rq);
>> -             else
>> +
>> +             /* Don't need periodic decay once load/util_avg are null */
>> +             if (cfs_rq_has_blocked(cfs_rq))
>>                       done = false;
>>       }
>>
>> @@ -7463,7 +7476,7 @@ static inline void update_blocked_averages(int cpu)
>>       update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
>>  #ifdef CONFIG_NO_HZ_COMMON
>>       rq->last_blocked_load_update_tick = jiffies;
>> -     if (cfs_rq_is_decayed(cfs_rq))
>> +     if (!cfs_rq_has_blocked(cfs_rq))
>>               rq->has_blocked_load = 0;
>>  #endif
>>       rq_unlock_irqrestore(rq, &rf);
>
> OK makes sense; would've been even better as a separate patch :-)

Yes, I will make a separate patch for that.
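
For reference, the split keeps the two predicates deliberately
distinct: cfs_rq_is_decayed() tests whether every contribution
(load.weight and the avg *_sum fields) has fully decayed, which
decides when the cfs_rq can be removed from the leaf list, whereas
cfs_rq_has_blocked() tests the avg *_avg fields, which decides
whether blocked load still needs periodic decay. Since the *_avg
values are the *_sum values scaled down, they can reach zero while
the sums are still non-zero, so a cfs_rq can stay on the leaf list
yet no longer need the periodic update. A minimal sketch of the
standalone helper (same body as in the patch above; kernel-internal
types assumed, not a standalone program):

    /*
     * True while some blocked load/utilization remains to be decayed;
     * used to decide whether the nohz blocked-load update must keep
     * running for this cfs_rq.
     */
    static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
    {
            if (cfs_rq->avg.load_avg)
                    return true;

            if (cfs_rq->avg.util_avg)
                    return true;

            return false;
    }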
