Factor out the logic to retrieve the parent CFS runqueue of another
CFS runqueue into its own function and replace open-coded variants.
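For illustration, the open-coded pattern being consolidated looks roughly
like this at a call site (sketch only, not part of the patch):

	struct cfs_rq *pcfs_rq = NULL;

	/* the root task group has no parent, so guard the dereference */
	if (cfs_rq->tg->parent)
		pcfs_rq = cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))];

With the helper, the per-CPU lookup and the NULL case for the root task
group collapse into a single call:

	struct cfs_rq *pcfs_rq = parent_cfs_rq(cfs_rq);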

Signed-off-by: Jan H. Schönherr <jscho...@amazon.de>
---
 kernel/sched/fair.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9f0ce4555c26..82cdd75e88b9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -282,11 +282,18 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
        return grp->my_q;
 }
 
+static inline struct cfs_rq *parent_cfs_rq(struct cfs_rq *cfs_rq)
+{
+       if (!cfs_rq->tg->parent)
+               return NULL;
+       return cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))];
+}
+
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
        if (!cfs_rq->on_list) {
                struct rq *rq = rq_of(cfs_rq);
-               int cpu = cpu_of(rq);
+               struct cfs_rq *pcfs_rq = parent_cfs_rq(cfs_rq);
                /*
                 * Ensure we either appear before our parent (if already
                 * enqueued) or force our parent to appear after us when it is
@@ -296,8 +303,7 @@ static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
                 * tmp_alone_branch either when the branch is connected
                 * to a tree or when we reach the beg of the tree
                 */
-               if (cfs_rq->tg->parent &&
-                   cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
+               if (pcfs_rq && pcfs_rq->on_list) {
                        /*
                         * If parent is already on the list, we add the child
                         * just before. Thanks to circular linked property of
@@ -305,14 +311,14 @@ static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
                         * of the list that starts by parent.
                         */
                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
-                               &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
+                                         &pcfs_rq->leaf_cfs_rq_list);
                        /*
                         * The branch is now connected to its tree so we can
                         * reset tmp_alone_branch to the beginning of the
                         * list.
                         */
                        rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
-               } else if (!cfs_rq->tg->parent) {
+               } else if (!pcfs_rq) {
                        /*
                         * cfs rq without parent should be put
                         * at the tail of the list.
@@ -4716,7 +4722,7 @@ static void sync_throttle(struct cfs_rq *cfs_rq)
        if (!cfs_bandwidth_used())
                return;
 
-       pcfs_rq = cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))];
+       pcfs_rq = parent_cfs_rq(cfs_rq);
 
        cfs_rq->throttle_count = pcfs_rq->throttle_count;
        cfs_rq->throttled_clock_task = rq_clock_task(rq_of(cfs_rq));
-- 
2.9.3.1.gcba166c.dirty
