Add an explicit "cfs_rq of the parent sched_entity" helper, and use it in
the few strategic places where cfs_rq_of(se) may no longer point at the
right runqueue once the hierarchical cgroup runqueues are flattened.

No functional change.
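
For illustration only, a minimal userspace sketch of the lookup the new
helper performs. The struct definitions below are simplified stand-ins
for the kernel's sched_entity and cfs_rq, not the real types; only the
fields needed for the lookup are modelled.

#include <stdio.h>

/* Stand-ins for the kernel types; only the fields used below. */
struct cfs_rq {
	const char *name;
};

struct sched_entity {
	struct sched_entity *parent;	/* NULL for a top level entity */
	struct cfs_rq *cfs_rq;		/* runqueue this entity is queued on */
	struct cfs_rq *my_q;		/* runqueue owned by a group entity */
};

/* Models cfs_rq_of(): the runqueue the entity is queued on. */
static struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* Models group_cfs_rq(): the runqueue a group entity owns. */
static struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/*
 * Models the new helper: the parent's owned runqueue, falling back to
 * the runqueue the entity is queued on (the root cfs_rq) when there is
 * no parent.
 */
static struct cfs_rq *group_cfs_rq_of_parent(struct sched_entity *se)
{
	if (se->parent)
		return group_cfs_rq(se->parent);

	return cfs_rq_of(se);
}

int main(void)
{
	struct cfs_rq root = { "root cfs_rq" };
	struct cfs_rq group_q = { "group cfs_rq" };
	struct sched_entity group_se = { NULL, &root, &group_q };
	struct sched_entity task_se = { &group_se, &group_q, NULL };

	/* A task inside the group resolves to the group's own runqueue. */
	printf("%s\n", group_cfs_rq_of_parent(&task_se)->name);
	/* A top level entity falls back to the runqueue it is queued on. */
	printf("%s\n", group_cfs_rq_of_parent(&group_se)->name);
	return 0;
}

While the hierarchy is still intact, both lookups return the same
runqueue for a queued entity, which is why this patch on its own is not
a functional change.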

Signed-off-by: Rik van Riel <r...@surriel.com>
---
 kernel/sched/fair.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d6a8aa78a948..39f7a2d810e1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -276,6 +276,15 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
        return grp->my_q;
 }
 
+/* runqueue owned by the parent entity; the root cfs_rq for a top level se */
+static inline struct cfs_rq *group_cfs_rq_of_parent(struct sched_entity *se)
+{
+       if (se->parent)
+               return group_cfs_rq(se->parent);
+
+       return cfs_rq_of(se);
+}
+
 static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
        struct rq *rq = rq_of(cfs_rq);
@@ -3317,7 +3326,7 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
 
        gcfs_rq->propagate = 0;
 
-       cfs_rq = cfs_rq_of(se);
+       cfs_rq = group_cfs_rq_of_parent(se);
 
        add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
 
@@ -7794,7 +7803,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 
        WRITE_ONCE(cfs_rq->h_load_next, NULL);
        for_each_sched_entity(se) {
-               cfs_rq = cfs_rq_of(se);
+               cfs_rq = group_cfs_rq_of_parent(se);
                WRITE_ONCE(cfs_rq->h_load_next, se);
                if (cfs_rq->last_h_load_update == now)
                        break;
@@ -7817,7 +7826,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 
 static unsigned long task_se_h_load(struct sched_entity *se)
 {
-       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       struct cfs_rq *cfs_rq = group_cfs_rq_of_parent(se);
 
        update_cfs_rq_h_load(cfs_rq);
        return div64_ul(se->avg.load_avg * cfs_rq->h_load,
@@ -10164,7 +10173,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
        struct sched_entity *se = &curr->se;
 
        for_each_sched_entity(se) {
-               cfs_rq = cfs_rq_of(se);
+               cfs_rq = group_cfs_rq_of_parent(se);
                entity_tick(cfs_rq, se, queued);
        }
 
-- 
2.20.1
