When using PELT (per-entity load tracking) utilization to place tasks
at wake-up, the utilization has decayed while the task was sleeping,
which leads to under-estimation of the task's true utilization. This
could mean putting the task on a CPU with less available capacity than
it actually needs. The issue can be mitigated by using 'peak'
utilization instead of the decayed utilization for placement decisions,
e.g. at task wake-up.
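
For reference, PELT utilization decays geometrically with a half-life
of roughly 32ms (LOAD_AVG_PERIOD periods of ~1ms), so even a short
sleep can halve the reported value. A rough model of the effect
(illustrative only; the kernel's decay_load() in kernel/sched/fair.c
uses fixed-point lookup tables):

	/* Rough model: util_avg halves for every ~32ms slept. */
	static unsigned long sleep_decayed_util(unsigned long util,
						unsigned int sleep_ms)
	{
		for (; sleep_ms >= 32; sleep_ms -= 32)
			util >>= 1;	/* one half-life */
		return util;
	}

E.g. a task with util_avg == 600 that sleeps for 64ms wakes up
reporting ~150, a quarter of what it will consume once it runs again.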

The 'peak' utilization metric, util_peak, tracks util_avg when the task
is running and retains its previous value while the task is
blocked/waiting on the rq. It is instantly updated to track util_avg
again as soon as the task runs.
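
As a concrete illustration, consider the wake_cap() test changed below.
A minimal sketch of the underlying fitness check, assuming
capacity_margin == 1280 (i.e. utilization must stay below ~80% of
capacity), as in current kernel/sched/fair.c:

	/* Illustrative only: a CPU fits the task if util < ~80% of cap. */
	static bool task_fits(unsigned long util, unsigned long cap)
	{
		return cap * 1024 >= util * 1280;	/* capacity_margin */
	}

With cap == 430 (e.g. a little CPU), the largest utilization that fits
is 430 * 1024 / 1280 == 344. A task with true utilization ~400 whose
util_avg decayed to 300 during sleep would wrongly pass the test; its
latched util_peak would correctly fail it.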

Cc: Ingo Molnar <mi...@redhat.com>
Cc: Peter Zijlstra <pet...@infradead.org>

Signed-off-by: Morten Rasmussen <morten.rasmus...@arm.com>
---
 include/linux/sched.h |  2 +-
 kernel/sched/fair.c   | 23 +++++++++++++++++------
 2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index ad51978ff15e..988d7f48604e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1294,7 +1294,7 @@ struct load_weight {
 struct sched_avg {
        u64 last_update_time, load_sum;
        u32 util_sum, period_contrib;
-       unsigned long load_avg, util_avg;
+       unsigned long load_avg, util_avg, util_peak;
 };
 
 #ifdef CONFIG_SCHEDSTATS
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a5efafda23ef..e0abff77764f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -696,6 +696,7 @@ void init_entity_runnable_average(struct sched_entity *se)
         * At this point, util_avg won't be used in select_task_rq_fair anyway
         */
        sa->util_avg = 0;
+       sa->util_peak = 0;
        sa->util_sum = 0;
        /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
@@ -747,6 +748,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
                } else {
                        sa->util_avg = cap;
                }
+               sa->util_peak = sa->util_avg;
                sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
        }
 
@@ -3515,6 +3517,10 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         */
        if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
                update_min_vruntime(cfs_rq);
+
+       /* Save peak PELT utilization for task to help wake-up decisions */
+       if (flags & DEQUEUE_SLEEP && entity_is_task(se))
+               se->avg.util_peak = se->avg.util_avg;
 }
 
 /*
@@ -5203,7 +5209,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
        return 1;
 }
 
-static inline int task_util(struct task_struct *p);
+static inline int task_util_peak(struct task_struct *p);
 static int cpu_util_wake(int cpu, struct task_struct *p);
 
 static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
@@ -5286,14 +5292,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
        /*
         * The cross-over point between using spare capacity or least load
         * is too conservative for high utilization tasks on partially
-        * utilized systems if we require spare_capacity > task_util(p),
+        * utilized systems if we require spare_capacity > task_util_peak(p),
         * so we allow for some task stuffing by using
-        * spare_capacity > task_util(p)/2.
+        * spare_capacity > task_util_peak(p)/2.
         */
-       if (this_spare > task_util(p) / 2 &&
+       if (this_spare > task_util_peak(p) / 2 &&
            imbalance*this_spare > 100*most_spare)
                return NULL;
-       else if (most_spare > task_util(p) / 2)
+       else if (most_spare > task_util_peak(p) / 2)
                return most_spare_sg;
 
        if (!idlest || 100*this_load < imbalance*min_load)
@@ -5628,6 +5634,11 @@ static inline int task_util(struct task_struct *p)
        return p->se.avg.util_avg;
 }
 
+static inline int task_util_peak(struct task_struct *p)
+{
+       return p->se.avg.util_peak;
+}
+
 /*
  * cpu_util_wake: Compute cpu utilization with any contributions from
  * the waking task p removed.
@@ -5667,7 +5678,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
        /* Bring task utilization in sync with prev_cpu */
        sync_entity_load_avg(&p->se);
 
-       return min_cap * 1024 < task_util(p) * capacity_margin;
+       return min_cap * 1024 < task_util_peak(p) * capacity_margin;
 }
 
 /*
-- 
2.7.4
