From: Vincent Guittot <vincent.guit...@linaro.org>

[ Upstream commit 6d4d22468dae3d8757af9f8b81b848a76ef4409d ]

The walk through the cgroup hierarchy during the enqueue/dequeue of a task
is split into two distinct parts for a throttled cfs_rq, which adds no value
and makes the code less readable.

Change the code ordering so that everything related to a cfs_rq
(throttled or not) is done in the same loop.

In addition, the same ordering of steps is used when updating a cfs_rq:

 - update_load_avg
 - update_cfs_group
 - update *h_nr_running

This reordering enables the use of h_nr_running in the PELT algorithm.
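
For illustration, both the enqueue and dequeue paths now update each cfs_rq
with the same shape; a condensed sketch of the resulting second enqueue loop
(taken from the hunk below, simplified, not a separate implementation):

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
			goto enqueue_throttle;

		/* PELT and group weight updates come first ... */
		update_load_avg(cfs_rq, se, UPDATE_TG);
		update_cfs_group(se);

		/* ... then the *h_nr_running accounting */
		cfs_rq->h_nr_running++;
		cfs_rq->idle_h_nr_running += idle_h_nr_running;
	}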

No functional or performance changes are expected, and none were noticed
during tests.

Signed-off-by: Vincent Guittot <vincent.guit...@linaro.org>
Signed-off-by: Mel Gorman <mgor...@techsingularity.net>
Signed-off-by: Ingo Molnar <mi...@kernel.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggem...@arm.com>
Acked-by: Peter Zijlstra <a.p.zijls...@chello.nl>
Cc: Juri Lelli <juri.le...@redhat.com>
Cc: Valentin Schneider <valentin.schnei...@arm.com>
Cc: Phil Auld <pa...@redhat.com>
Cc: Hillf Danton <hdan...@sina.com>
Link: https://lore.kernel.org/r/20200224095223.13361-5-mgor...@techsingularity.net
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 kernel/sched/fair.c | 42 ++++++++++++++++++++----------------------
 1 file changed, 20 insertions(+), 22 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c76a20648b72..a486bf3d5078 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5276,32 +5276,31 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, flags);
 
-               /*
-                * end evaluation on encountering a throttled cfs_rq
-                *
-                * note: in the case of encountering a throttled cfs_rq we will
-                * post the final h_nr_running increment below.
-                */
-               if (cfs_rq_throttled(cfs_rq))
-                       break;
                cfs_rq->h_nr_running++;
                cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
+               /* end evaluation on encountering a throttled cfs_rq */
+               if (cfs_rq_throttled(cfs_rq))
+                       goto enqueue_throttle;
+
                flags = ENQUEUE_WAKEUP;
        }
 
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
-               cfs_rq->h_nr_running++;
-               cfs_rq->idle_h_nr_running += idle_h_nr_running;
 
+               /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
-                       break;
+                       goto enqueue_throttle;
 
                update_load_avg(cfs_rq, se, UPDATE_TG);
                update_cfs_group(se);
+
+               cfs_rq->h_nr_running++;
+               cfs_rq->idle_h_nr_running += idle_h_nr_running;
        }
 
+enqueue_throttle:
        if (!se) {
                add_nr_running(rq, 1);
                /*
@@ -5362,17 +5361,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, flags);
 
-               /*
-                * end evaluation on encountering a throttled cfs_rq
-                *
-                * note: in the case of encountering a throttled cfs_rq we will
-                * post the final h_nr_running decrement below.
-               */
-               if (cfs_rq_throttled(cfs_rq))
-                       break;
                cfs_rq->h_nr_running--;
                cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
+               /* end evaluation on encountering a throttled cfs_rq */
+               if (cfs_rq_throttled(cfs_rq))
+                       goto dequeue_throttle;
+
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight) {
                        /* Avoid re-evaluating load for this entity: */
@@ -5390,16 +5385,19 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
-               cfs_rq->h_nr_running--;
-               cfs_rq->idle_h_nr_running -= idle_h_nr_running;
 
+               /* end evaluation on encountering a throttled cfs_rq */
                if (cfs_rq_throttled(cfs_rq))
-                       break;
+                       goto dequeue_throttle;
 
                update_load_avg(cfs_rq, se, UPDATE_TG);
                update_cfs_group(se);
+
+               cfs_rq->h_nr_running--;
+               cfs_rq->idle_h_nr_running -= idle_h_nr_running;
        }
 
+dequeue_throttle:
        if (!se)
                sub_nr_running(rq, 1);
 
-- 
2.25.1
