Factor out the logic to place a SE into a CFS runqueue into its own
function.

This consolidates various sprinkled updates of se->cfs_rq, se->parent,
and se->depth at the cost of updating se->depth unnecessarily on
same-group movements between CPUs.

Signed-off-by: Jan H. Schönherr <jscho...@amazon.de>
---
 kernel/sched/fair.c  | 26 ++++----------------------
 kernel/sched/sched.h | 12 +++++++++---
 2 files changed, 13 insertions(+), 25 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 638fd14bb6c4..3de0158729a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9683,14 +9683,6 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-       /*
-        * Since the real-depth could have been changed (only FAIR
-        * class maintain depth value), reset depth properly.
-        */
-       se->depth = se->parent ? se->parent->depth + 1 : 0;
-#endif
-
        /* Synchronize entity with its cfs_rq */
        update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
        attach_entity_load_avg(cfs_rq, se, 0);
@@ -9781,10 +9773,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_set_group_fair(struct task_struct *p)
 {
-       struct sched_entity *se = &p->se;
-
        set_task_rq(p, task_cpu(p));
-       se->depth = se->parent ? se->parent->depth + 1 : 0;
 }
 
 static void task_move_group_fair(struct task_struct *p)
@@ -9855,7 +9844,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
                        goto err_free_rq;
 
                init_cfs_rq(cfs_rq);
-               init_tg_cfs_entry(tg, cfs_rq, se, i, parent->cfs_rq[i]->my_se);
+               init_tg_cfs_entry(tg, cfs_rq, se, i, parent->cfs_rq[i]);
                init_entity_runnable_average(se);
        }
 
@@ -9912,7 +9901,7 @@ void unregister_fair_sched_group(struct task_group *tg)
 
 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                        struct sched_entity *se, int cpu,
-                       struct sched_entity *parent)
+                       struct cfs_rq *parent)
 {
        struct rq *rq = cpu_rq(cpu);
 
@@ -9927,18 +9916,11 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
        if (!se)
                return;
 
-       if (!parent) {
-               se->cfs_rq = &rq->cfs;
-               se->depth = 0;
-       } else {
-               se->cfs_rq = parent->my_q;
-               se->depth = parent->depth + 1;
-       }
-
+       set_entity_cfs(se, parent);
        se->my_q = cfs_rq;
+
        /* guarantee group entities always have weight */
        update_load_set(&se->load, NICE_0_LOAD);
-       se->parent = parent;
 }
 
 static DEFINE_MUTEX(shares_mutex);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8435bf70a701..b4d0e8a68697 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -428,7 +428,7 @@ extern void online_fair_sched_group(struct task_group *tg);
 extern void unregister_fair_sched_group(struct task_group *tg);
 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                        struct sched_entity *se, int cpu,
-                       struct sched_entity *parent);
+                       struct cfs_rq *parent);
 extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 
 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
@@ -1290,6 +1290,13 @@ static inline struct task_group *task_group(struct task_struct *p)
        return p->sched_task_group;
 }
 
+static inline void set_entity_cfs(struct sched_entity *se, struct cfs_rq *cfs_rq)
+{
+       se->cfs_rq = cfs_rq;
+       se->parent = cfs_rq->my_se;
+       se->depth = se->parent ? se->parent->depth + 1 : 0;
+}
+
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 {
@@ -1299,8 +1306,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
-       p->se.cfs_rq = tg->cfs_rq[cpu];
-       p->se.parent = tg->cfs_rq[cpu]->my_se;
+       set_entity_cfs(&p->se, tg->cfs_rq[cpu]);
 #endif
 
 #ifdef CONFIG_RT_GROUP_SCHED
-- 
2.9.3.1.gcba166c.dirty

Reply via email to