Remove se->depth, along with the code that maintains and uses it, since
it is no longer needed.

Suggested-by: Dietmar Eggemann <dietmar.eggem...@arm.com>
Signed-off-by: Rik van Riel <r...@surriel.com>
---
 include/linux/sched.h |  1 -
 kernel/sched/fair.c   | 50 ++-----------------------------------------
 2 files changed, 2 insertions(+), 49 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index bdca15b3afe7..94aec4065fd1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -451,7 +451,6 @@ struct sched_entity {
        struct sched_statistics         statistics;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-       int                             depth;
        unsigned long                   enqueued_h_load;
        unsigned long                   enqueued_h_weight;
        u64                             propagated_exec_runtime;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3c25ab666799..0fb3de853d45 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -438,38 +438,6 @@ static inline bool task_se_in_cgroup(struct sched_entity *se)
        return parent_entity(se);
 }
 
-static void
-find_matching_se(struct sched_entity **se, struct sched_entity **pse)
-{
-       int se_depth, pse_depth;
-
-       /*
-        * preemption test can be made between sibling entities who are in the
-        * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
-        * both tasks until we find their ancestors who are siblings of common
-        * parent.
-        */
-
-       /* First walk up until both entities are at same depth */
-       se_depth = (*se)->depth;
-       pse_depth = (*pse)->depth;
-
-       while (se_depth > pse_depth) {
-               se_depth--;
-               *se = parent_entity(*se);
-       }
-
-       while (pse_depth > se_depth) {
-               pse_depth--;
-               *pse = parent_entity(*pse);
-       }
-
-       while (!is_same_group(*se, *pse)) {
-               *se = parent_entity(*se);
-               *pse = parent_entity(*pse);
-       }
-}
-
 /* Add the cgroup cfs_rqs to the list, for update_blocked_averages */
 static void enqueue_entity_cfs_rqs(struct sched_entity *se)
 {
@@ -10221,14 +10189,6 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
        int flags = sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD;
        struct cfs_rq *cfs_rq = group_cfs_rq_of_parent(se);
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-       /*
-        * Since the real-depth could have been changed (only FAIR
-        * class maintain depth value), reset depth properly.
-        */
-       se->depth = se->parent ? se->parent->depth + 1 : 0;
-#endif
-
        /* Synchronize entity with its cfs_rq */
        update_load_avg(cfs_rq, se, flags);
        attach_entity_load_avg(cfs_rq, se, 0);
@@ -10316,10 +10276,7 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_set_group_fair(struct task_struct *p)
 {
-       struct sched_entity *se = &p->se;
-
        set_task_rq(p, task_cpu(p));
-       se->depth = se->parent ? se->parent->depth + 1 : 0;
 }
 
 static void task_move_group_fair(struct task_struct *p)
@@ -10465,13 +10422,10 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
        if (!se)
                return;
 
-       if (!parent) {
+       if (!parent)
                se->cfs_rq = &rq->cfs;
-               se->depth = 0;
-       } else {
+       else
                se->cfs_rq = parent->my_q;
-               se->depth = parent->depth + 1;
-       }
 
        se->my_q = cfs_rq;
        /* guarantee group entities always have weight */
-- 
2.20.1
