From: Byungchul Park <byungchul.p...@lge.com>

Introduce need_vruntime_adjust() to check whether the vruntime needs to
be adjusted when attaching/detaching a se to/from its cfs_rq, and use
it.

Reuse switched_from_fair() and switched_to_fair() for the detach and
attach operations in task_move_group_fair().

Signed-off-by: Byungchul Park <byungchul.p...@lge.com>
---
 kernel/sched/fair.c |   80 +++++++++++++++++++--------------------------------
 1 file changed, 29 insertions(+), 51 deletions(-)
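
For context, the adjustment this helper guards is a plain re-basing of
vruntime between two runqueues. Below is a minimal userspace sketch of
the arithmetic (toy values only, not kernel code; the struct and
variable names are made up) showing why a task whose vruntime is
already normalized must skip it, or it would get an unearned boost:

	/* Toy model of vruntime re-basing across two cfs_rqs. */
	#include <stdio.h>

	struct toy_cfs_rq { unsigned long long min_vruntime; };

	int main(void)
	{
		struct toy_cfs_rq old_rq = { .min_vruntime = 1000 };
		struct toy_cfs_rq new_rq = { .min_vruntime = 5000 };

		/* A sleeping task keeps an absolute vruntime: 200 ahead. */
		unsigned long long vruntime = 1200;

		/* Detach: make vruntime relative to the old cfs_rq... */
		vruntime -= old_rq.min_vruntime;	/* 200 */
		/* ...attach: re-base it on the new cfs_rq. */
		vruntime += new_rq.min_vruntime;	/* 5200 */

		/* The task is still exactly 200 ahead of min_vruntime. */
		printf("lag: %llu\n", vruntime - new_rq.min_vruntime);

		/*
		 * Had vruntime already been normalized (e.g. a freshly
		 * forked child, or a TASK_WAKING task), subtracting
		 * old_rq.min_vruntime again would leave it 1000 too small:
		 * the boost/penalty need_vruntime_adjust() is there to
		 * prevent.
		 */
		return 0;
	}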

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d91e4da..5ec846f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7904,6 +7904,31 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
                check_preempt_curr(rq, p, 0);
 }
 
+static inline int need_vruntime_adjust(struct task_struct *p, int queued)
+{
+       struct sched_entity *se = &p->se;
+
+       /*
+        * When !queued, the task's vruntime has usually NOT been normalized.
+        * But there are cases where it has already been normalized:
+        *
+        * - Moving a forked child that is waiting to be woken up by
+        *   wake_up_new_task().
+        * - Moving a task that has been woken up by try_to_wake_up() but is
+        *   still waiting to actually be woken up by sched_ttwu_pending().
+        */
+       if (queued)
+               return 0;
+
+       if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+               return 0;
+
+       if (p->state != TASK_RUNNING)
+               return 1;
+
+       return 0;
+}
+
 static void switched_from_fair(struct rq *rq, struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
@@ -7918,7 +7943,7 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
         * have normalized the vruntime, if it's !queued, then only when
         * the task is sleeping will it still have non-normalized vruntime.
         */
-       if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
+       if (need_vruntime_adjust(p, task_on_rq_queued(p))) {
                /*
                 * Fix up our vruntime so that the current sleep doesn't
                 * cause 'unlimited' sleep bonus.
@@ -7939,7 +7964,6 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
        /*
         * Since the real-depth could have been changed (only FAIR
@@ -7964,7 +7988,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
                 * has non-normalized vruntime, if it's !queued, then it still has
                 * normalized vruntime.
                 */
-               if (p->state != TASK_RUNNING)
+               if (need_vruntime_adjust(p, task_on_rq_queued(p)))
                        se->vruntime += cfs_rq_of(se)->min_vruntime;
                return;
        }
@@ -8014,55 +8038,9 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_move_group_fair(struct task_struct *p, int queued)
 {
-       struct sched_entity *se = &p->se;
-       struct cfs_rq *cfs_rq;
-
-       /*
-        * If the task was not on the rq at the time of this cgroup movement
-        * it must have been asleep, sleeping tasks keep their ->vruntime
-        * absolute on their old rq until wakeup (needed for the fair sleeper
-        * bonus in place_entity()).
-        *
-        * If it was on the rq, we've just 'preempted' it, which does convert
-        * ->vruntime to a relative base.
-        *
-        * Make sure both cases convert their relative position when migrating
-        * to another cgroup's rq. This does somewhat interfere with the
-        * fair sleeper stuff for the first placement, but who cares.
-        */
-       /*
-        * When !queued, vruntime of the task has usually NOT been normalized.
-        * But there are some cases where it has already been normalized:
-        *
-        * - Moving a forked child which is waiting for being woken up by
-        *   wake_up_new_task().
-        * - Moving a task which has been woken up by try_to_wake_up() and
-        *   waiting for actually being woken up by sched_ttwu_pending().
-        *
-        * To prevent boost or penalty in the new cfs_rq caused by delta
-        * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
-        */
-       if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
-               queued = 1;
-
-       cfs_rq = cfs_rq_of(se);
-       if (!queued)
-               se->vruntime -= cfs_rq->min_vruntime;
-
-#ifdef CONFIG_SMP
-       /* synchronize task with its prev cfs_rq */
-       detach_entity_load_avg(cfs_rq, se);
-#endif
+       switched_from_fair(task_rq(p), p);
        set_task_rq(p, task_cpu(p));
-       se->depth = se->parent ? se->parent->depth + 1 : 0;
-       cfs_rq = cfs_rq_of(se);
-       if (!queued)
-               se->vruntime += cfs_rq->min_vruntime;
-
-#ifdef CONFIG_SMP
-       /* Virtually synchronize task with its new cfs_rq */
-       attach_entity_load_avg(cfs_rq, se);
-#endif
+       switched_to_fair(task_rq(p), p);
 }
 
 void free_fair_sched_group(struct task_group *tg)
-- 
1.7.9.5
