From: Byungchul Park <byungchul.p...@lge.com>

Two functions are introduced for attaching (detaching) a task to (from)
a cfs_rq. They mainly adjust the task's vruntime and load avg with
respect to that cfs_rq. switched_from_fair(), switched_to_fair() and
task_move_group_fair() can now use these functions.

Signed-off-by: Byungchul Park <byungchul.p...@lge.com>
---
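Notes (not for the changelog): the snippet below is only an illustrative
sketch of how the two new helpers are meant to pair up around a cfs_rq
change, mirroring what task_move_group_fair() does after this patch; the
function name move_task_between_cfs_rqs() is made up for illustration
and does not exist in the tree.

	/*
	 * Illustrative sketch only: detach the task from its old cfs_rq,
	 * point it at the new one, then attach it there, so its vruntime
	 * stays relative across the move and its load avg follows it.
	 */
	static void move_task_between_cfs_rqs(struct task_struct *p, int queued)
	{
		detach_task_cfs_rq(p, queued);	/* normalize vruntime if needed, detach load avg */
		set_task_rq(p, task_cpu(p));	/* point the task at its new cfs_rq */
		attach_task_cfs_rq(p, queued);	/* de-normalize vruntime if needed, attach load avg */
	}

For example, a sleeping (!queued) task with an absolute vruntime of
105ms on a cfs_rq whose min_vruntime is 100ms is detached with a
relative vruntime of 5ms and attached to a cfs_rq whose min_vruntime is
200ms with an absolute vruntime of 205ms, so the min_vruntime delta
between the two cfs_rqs gives it neither a boost nor a penalty.
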
 kernel/sched/fair.c |  143 +++++++++++++++++++++++----------------------------
 1 file changed, 63 insertions(+), 80 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fc6b39c..346f2a6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7904,21 +7904,29 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
                check_preempt_curr(rq, p, 0);
 }
 
-static void switched_from_fair(struct rq *rq, struct task_struct *p)
+static void detach_task_cfs_rq(struct task_struct *p, int queued)
 {
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
        /*
-        * Ensure the task's vruntime is normalized, so that when it's
-        * switched back to the fair class the enqueue_entity(.flags=0) will
-        * do the right thing.
+        * Ensure the task's vruntime is normalized, so that when attaching
+        * it to a cfs_rq the enqueue_entity(.flags=0) will do the right thing.
         *
         * If it's queued, then the dequeue_entity(.flags=0) will already
         * have normalized the vruntime, if it's !queued, then only when
-        * the task is sleeping will it still have non-normalized vruntime.
+        * the task is sleeping does it still have a non-normalized vruntime.
+        *
+        * When !queued, vruntime of the task has usually NOT been normalized.
+        * But there are some cases where it has already been normalized:
+        *
+        * - Moving a forked child which is waiting for being woken up by
+        *   wake_up_new_task().
+        * - Moving a task which has been woken up by try_to_wake_up() and
+        *   waiting for actually being woken up by sched_ttwu_pending().
         */
-       if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
+       if (!queued && p->state != TASK_RUNNING &&
+           se->sum_exec_runtime && p->state != TASK_WAKING) {
                /*
                 * Fix up our vruntime so that the current sleep doesn't
                 * cause 'unlimited' sleep bonus.
@@ -7933,12 +7941,10 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 #endif
 }
 
-/*
- * We switched to the sched_fair class.
- */
-static void switched_to_fair(struct rq *rq, struct task_struct *p)
+static void attach_task_cfs_rq(struct task_struct *p, int queued)
 {
        struct sched_entity *se = &p->se;
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        /*
@@ -7950,34 +7956,57 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
 
 #ifdef CONFIG_SMP
        /* synchronize task with its cfs_rq */
-       attach_entity_load_avg(cfs_rq_of(&p->se), &p->se);
+       attach_entity_load_avg(cfs_rq, se);
 #endif
 
-       if (!task_on_rq_queued(p)) {
+       /*
+        * Ensure the task has a non-normalized vruntime when attaching it
+        * to a cfs_rq with !queued, so that enqueue_entity() at wake-up time
+        * will do the right thing.
+        *
+        * If it's queued, then enqueue_entity(.flags=0) will make the task's
+        * vruntime non-normalized; if it's !queued, then it still has
+        * a normalized vruntime.
+        *
+        * When !queued, a task should usually have a non-normalized vruntime
+        * after being attached to a cfs_rq. But there are some cases where
+        * we should keep it normalized:
+        *
+        * - Moving a forked child which is waiting for being woken up by
+        *   wake_up_new_task().
+        * - Moving a task which has been woken up by try_to_wake_up() and
+        *   waiting for actually being woken up by sched_ttwu_pending().
+        */
+       if (!queued && p->state != TASK_RUNNING &&
+           se->sum_exec_runtime && p->state != TASK_WAKING)
+               se->vruntime += cfs_rq->min_vruntime;
+}
+
+static void switched_from_fair(struct rq *rq, struct task_struct *p)
+{
+       detach_task_cfs_rq(p, task_on_rq_queued(p));
+}
 
+/*
+ * We switched to the sched_fair class.
+ */
+static void switched_to_fair(struct rq *rq, struct task_struct *p)
+{
+       int queued = task_on_rq_queued(p);
+
+       attach_task_cfs_rq(p, queued);
+
+       if (queued) {
                /*
-                * Ensure the task has a non-normalized vruntime when it is switched
-                * back to the fair class with !queued, so that enqueue_entity() at
-                * wake-up time will do the right thing.
-                *
-                * If it's queued, then the enqueue_entity(.flags=0) makes the task
-                * has non-normalized vruntime, if it's !queued, then it still has
-                * normalized vruntime.
+                * We were most likely switched from sched_rt, so
+                * kick off the schedule if running, otherwise just see
+                * if we can still preempt the current task.
                 */
-               if (p->state != TASK_RUNNING)
-                       se->vruntime += cfs_rq_of(se)->min_vruntime;
-               return;
+               if (rq->curr == p)
+                       resched_curr(rq);
+               else
+                       check_preempt_curr(rq, p, 0);
        }
-
-       /*
-        * We were most likely switched from sched_rt, so
-        * kick off the schedule if running, otherwise just see
-        * if we can still preempt the current task.
-        */
-       if (rq->curr == p)
-               resched_curr(rq);
-       else
-               check_preempt_curr(rq, p, 0);
 }
 
 /* Account for a task changing its policy or group.
@@ -8014,55 +8043,9 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_move_group_fair(struct task_struct *p, int queued)
 {
-       struct sched_entity *se = &p->se;
-       struct cfs_rq *cfs_rq;
-
-       /*
-        * If the task was not on the rq at the time of this cgroup movement
-        * it must have been asleep, sleeping tasks keep their ->vruntime
-        * absolute on their old rq until wakeup (needed for the fair sleeper
-        * bonus in place_entity()).
-        *
-        * If it was on the rq, we've just 'preempted' it, which does convert
-        * ->vruntime to a relative base.
-        *
-        * Make sure both cases convert their relative position when migrating
-        * to another cgroup's rq. This does somewhat interfere with the
-        * fair sleeper stuff for the first placement, but who cares.
-        */
-       /*
-        * When !queued, vruntime of the task has usually NOT been normalized.
-        * But there are some cases where it has already been normalized:
-        *
-        * - Moving a forked child which is waiting for being woken up by
-        *   wake_up_new_task().
-        * - Moving a task which has been woken up by try_to_wake_up() and
-        *   waiting for actually being woken up by sched_ttwu_pending().
-        *
-        * To prevent boost or penalty in the new cfs_rq caused by delta
-        * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
-        */
-       if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
-               queued = 1;
-
-       cfs_rq = cfs_rq_of(se);
-       if (!queued)
-               se->vruntime -= cfs_rq->min_vruntime;
-
-#ifdef CONFIG_SMP
-       /* synchronize task with its prev cfs_rq */
-       detach_entity_load_avg(cfs_rq, se);
-#endif
+       detach_task_cfs_rq(p, queued);
        set_task_rq(p, task_cpu(p));
-       se->depth = se->parent ? se->parent->depth + 1 : 0;
-       cfs_rq = cfs_rq_of(se);
-       if (!queued)
-               se->vruntime += cfs_rq->min_vruntime;
-
-#ifdef CONFIG_SMP
-       /* Virtually synchronize task with its new cfs_rq */
-       attach_entity_load_avg(cfs_rq, se);
-#endif
+       attach_task_cfs_rq(p, queued);
 }
 
 void free_fair_sched_group(struct task_group *tg)
-- 
1.7.9.5
