Factorize post_init_entity_util_avg() and part of attach_task_cfs_rq()
into one function, attach_entity_cfs_rq().
Create a symmetric detach_entity_cfs_rq() function.

Signed-off-by: Vincent Guittot <vincent.guit...@linaro.org>
---
 kernel/sched/fair.c | 53 +++++++++++++++++++++++++++++++----------------------
 1 file changed, 31 insertions(+), 22 deletions(-)
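
Not part of the patch: a minimal, self-contained user-space sketch of the
resulting call structure, for reviewers. All types and helper bodies below
are stand-ins (printf placeholders instead of the real PELT code); only the
function names and the order of calls mirror the patch.

/*
 * Simplified model of the refactoring: the common load-tracking
 * attach/detach sequence now lives in entity-level helpers, while the
 * task-level wrappers only add the vruntime handling on top.
 */
#include <stdio.h>

struct sched_entity { unsigned long vruntime; };
struct cfs_rq { unsigned long min_vruntime; };
struct task_struct { struct sched_entity se; };

static struct cfs_rq dummy_cfs_rq = { .min_vruntime = 1000 };

static struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	(void)se;
	return &dummy_cfs_rq;
}

/* stand-ins for the real load-tracking helpers */
static void update_cfs_rq_load_avg(struct cfs_rq *cfs_rq) { (void)cfs_rq; puts("  update_cfs_rq_load_avg"); }
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { (void)cfs_rq; (void)se; puts("  attach_entity_load_avg"); }
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { (void)cfs_rq; (void)se; puts("  detach_entity_load_avg"); }
static void update_tg_load_avg(struct cfs_rq *cfs_rq) { (void)cfs_rq; puts("  update_tg_load_avg"); }

/* entity-level helpers: the factorized attach/detach sequence */
static void attach_entity_cfs_rq(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	update_cfs_rq_load_avg(cfs_rq);
	attach_entity_load_avg(cfs_rq, se);
	update_tg_load_avg(cfs_rq);
}

static void detach_entity_cfs_rq(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	update_cfs_rq_load_avg(cfs_rq);
	detach_entity_load_avg(cfs_rq, se);
	update_tg_load_avg(cfs_rq);
}

/* task-level wrappers: vruntime fixup plus the entity helper */
static void detach_task_cfs_rq(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	se->vruntime -= cfs_rq->min_vruntime;	/* only when !vruntime_normalized(p) */
	detach_entity_cfs_rq(se);
}

static void attach_task_cfs_rq(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	attach_entity_cfs_rq(se);
	se->vruntime += cfs_rq->min_vruntime;	/* only when !vruntime_normalized(p) */
}

int main(void)
{
	struct task_struct p = { .se = { .vruntime = 2000 } };

	puts("detach_task_cfs_rq:");
	detach_task_cfs_rq(&p);
	puts("attach_task_cfs_rq:");
	attach_task_cfs_rq(&p);
	return 0;
}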

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c242944..b27cac0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -708,9 +708,7 @@ void init_entity_runnable_average(struct sched_entity *se)
 }
 
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
-static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
+static void attach_entity_cfs_rq(struct sched_entity *se);
 
 /*
  * With new tasks being created, their initial util_avgs are extrapolated
@@ -742,7 +740,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        struct sched_avg *sa = &se->avg;
        long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
-       u64 now = cfs_rq_clock_task(cfs_rq);
 
        if (cap > 0) {
                if (cfs_rq->avg.util_avg != 0) {
@@ -770,14 +767,12 @@ void post_init_entity_util_avg(struct sched_entity *se)
                         * such that the next switched_to_fair() has the
                         * expected state.
                         */
-                       se->avg.last_update_time = now;
+                       se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
                        return;
                }
        }
 
-       update_cfs_rq_load_avg(now, cfs_rq, false);
-       attach_entity_load_avg(cfs_rq, se);
-       update_tg_load_avg(cfs_rq, false);
+       attach_entity_cfs_rq(se);
 }
 
 #else /* !CONFIG_SMP */
@@ -8687,30 +8682,19 @@ static inline bool vruntime_normalized(struct task_struct *p)
        return false;
 }
 
-static void detach_task_cfs_rq(struct task_struct *p)
+static void detach_entity_cfs_rq(struct sched_entity *se)
 {
-       struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        u64 now = cfs_rq_clock_task(cfs_rq);
 
-       if (!vruntime_normalized(p)) {
-               /*
-                * Fix up our vruntime so that the current sleep doesn't
-                * cause 'unlimited' sleep bonus.
-                */
-               place_entity(cfs_rq, se, 0);
-               se->vruntime -= cfs_rq->min_vruntime;
-       }
-
        /* Catch up with the cfs_rq and remove our load when we leave */
        update_cfs_rq_load_avg(now, cfs_rq, false);
        detach_entity_load_avg(cfs_rq, se);
        update_tg_load_avg(cfs_rq, false);
 }
 
-static void attach_task_cfs_rq(struct task_struct *p)
+static void attach_entity_cfs_rq(struct sched_entity *se)
 {
-       struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        u64 now = cfs_rq_clock_task(cfs_rq);
 
@@ -8722,10 +8706,35 @@ static void attach_task_cfs_rq(struct task_struct *p)
        se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
 
-       /* Synchronize task with its cfs_rq */
+       /* Synchronize entity with its cfs_rq */
        update_cfs_rq_load_avg(now, cfs_rq, false);
        attach_entity_load_avg(cfs_rq, se);
        update_tg_load_avg(cfs_rq, false);
+}
+
+static void detach_task_cfs_rq(struct task_struct *p)
+{
+       struct sched_entity *se = &p->se;
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+       if (!vruntime_normalized(p)) {
+               /*
+                * Fix up our vruntime so that the current sleep doesn't
+                * cause 'unlimited' sleep bonus.
+                */
+               place_entity(cfs_rq, se, 0);
+               se->vruntime -= cfs_rq->min_vruntime;
+       }
+
+       detach_entity_cfs_rq(se);
+}
+
+static void attach_task_cfs_rq(struct task_struct *p)
+{
+       struct sched_entity *se = &p->se;
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+       attach_entity_cfs_rq(se);
 
        if (!vruntime_normalized(p))
                se->vruntime += cfs_rq->min_vruntime;
-- 
2.7.4
