On Mon, Jul 22, 2019 at 01:33:37PM -0400, Rik van Riel wrote:

> @@ -263,8 +258,8 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
>
>  int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
>  {
> -	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
> -		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
> +	if (___update_load_sum(now, &se->avg, 0, 0)) {
> +		___update_load_avg(&se->avg, se_weight(se));
>  		return 1;
>  	}
>
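For reference, with the runnable argument gone, ___update_load_avg()
should end up looking roughly like this (my sketch of the result, not
the exact tree state):

static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load)
{
	u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}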
The comment above that needs adjustment too, I think.

--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -234,28 +234,13 @@ ___update_load_avg(struct sched_avg *sa,
 /*
  * sched_entity:
  *
- *   task:
- *     se_runnable() == se_weight()
- *
- *   group: [ see update_cfs_group() ]
- *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
- *     se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
- *
  *   load_sum := runnable_sum
  *   load_avg = se_weight(se) * runnable_avg
  *
- *   runnable_load_sum := runnable_sum
- *   runnable_load_avg = se_runnable(se) * runnable_avg
- *
- *   XXX collapse load_sum and runnable_load_sum
- *
  * cfq_rq:
  *
  *   load_sum = \Sum se_weight(se) * se->avg.load_sum
  *   load_avg = \Sum se->avg.load_avg
- *
- *   runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
- *   runnable_load_avg = \Sum se->avg.runable_load_avg
  */

 int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
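To illustrate the invariant that comment is left documenting -- load_sum
tracks unweighted runnable time and se_weight() only enters when
producing load_avg -- here is a standalone userspace toy (not kernel
code; the constants only approximate the real PELT parameters, y^32 ~=
1/2 over 1ms periods, and "weight" stands in for se_weight(se)):

#include <stdio.h>
#include <stdint.h>

#define PELT_Y_NUM	1002	/* ~0.97857 * 1024, so y^32 ~= 1/2 */
#define PELT_SCALE	1024

/* one 1ms PELT period: sum' = sum * y + (running ? 1024 : 0) */
static uint64_t decay_step(uint64_t sum, int running)
{
	return sum * PELT_Y_NUM / PELT_SCALE + (running ? PELT_SCALE : 0);
}

int main(void)
{
	uint64_t runnable_sum = 0, max_sum = 0;
	unsigned long weight = 2048;	/* stand-in for se_weight(se) */
	int i;

	/* divider: what the series converges to when always runnable */
	for (i = 0; i < 1000; i++)
		max_sum = decay_step(max_sum, 1);

	/* runnable for 100 periods, then idle for one 32-period half-life */
	for (i = 0; i < 100; i++)
		runnable_sum = decay_step(runnable_sum, 1);
	for (i = 0; i < 32; i++)
		runnable_sum = decay_step(runnable_sum, 0);

	/* load_sum := runnable_sum; load_avg = se_weight(se) * runnable_avg */
	printf("load_avg = %llu of max %lu\n",
	       (unsigned long long)(weight * runnable_sum / max_sum), weight);
	return 0;
}

The weight shows up exactly once, in the last step, mirroring the
load_sum := runnable_sum / load_avg = se_weight(se) * runnable_avg pair
in the comment.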