_task_util_est() is mainly used to add/remove a task's contribution
to/from the rq's estimated utilization at task enqueue/dequeue time.
In both cases we ensure the UTIL_AVG_UNCHANGED flag is set, to keep
the accounting consistent between enqueue and dequeue while remaining
transparent to update_load_avg() calls, which will eventually reset
the flag.
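
For reference, that reset happens on the PELT update path; a
simplified sketch of the logic (modeled on cfs_se_util_change(), with
the sched_feat(UTIL_EST) check omitted, so not the verbatim kernel
code):

	static inline void cfs_se_util_change(struct sched_avg *avg)
	{
		unsigned int enqueued = avg->util_est.enqueued;

		/* Avoid the store if the flag has already been cleared */
		if (!(enqueued & UTIL_AVG_UNCHANGED))
			return;

		/* Clear the flag to report that util_avg has been updated */
		enqueued &= ~UTIL_AVG_UNCHANGED;
		WRITE_ONCE(avg->util_est.enqueued, enqueued);
	}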

Let's move the flag forcing into _task_util_est() itself, so that we
can simplify the calling code by hiding this estimated utilization
implementation detail inside one of its internal functions.

This will also affect the "public" API task_util_est(), but we know
that the flag (eventually) impacts just the LSB of the estimated
utilization, thus it's certainly acceptable.
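
To make the LSB claim concrete, a minimal sketch (assuming
UTIL_AVG_UNCHANGED is defined as 0x1, i.e. bit 0, as it was at the
time of this patch):

	#define UTIL_AVG_UNCHANGED 0x1

	/* ORing in bit 0 bumps the value by at most 1: */
	static inline unsigned long force_flag(unsigned long est)
	{
		return est | UTIL_AVG_UNCHANGED; /* 420 -> 421, 421 -> 421 */
	}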

Signed-off-by: Patrick Bellasi <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: [email protected]
---
 kernel/sched/fair.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 473a9cc559e8..aeb37fe4dbb1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3604,7 +3604,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
 {
        struct util_est ue = READ_ONCE(p->se.avg.util_est);
 
-       return max(ue.ewma, ue.enqueued);
+       return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
 }
 
 static inline unsigned long task_util_est(struct task_struct *p)
@@ -3622,7 +3622,7 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
 
        /* Update root cfs_rq's estimated utilization */
        enqueued  = cfs_rq->avg.util_est.enqueued;
-       enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
+       enqueued += _task_util_est(p);
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
 }
 
@@ -3650,8 +3650,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
 
        /* Update root cfs_rq's estimated utilization */
        ue.enqueued  = cfs_rq->avg.util_est.enqueued;
-       ue.enqueued -= min_t(unsigned int, ue.enqueued,
-                            (_task_util_est(p) | UTIL_AVG_UNCHANGED));
+       ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
        /*
@@ -6292,7 +6291,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
                 */
                if (unlikely(task_on_rq_queued(p) || current == p)) {
                        estimated -= min_t(unsigned int, estimated,
-                                          (_task_util_est(p) | UTIL_AVG_UNCHANGED));
+                                          _task_util_est(p));
                }
                util = max(util, estimated);
        }
-- 
2.18.0
