Add a wrapper function cfs_rq_min_vruntime(cfs_rq) to
return cfs_rq->min_vruntime.

It will be used in the following patch; no functional
change.

Signed-off-by: Aaron Lu <ziqian....@antfin.com>
---
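Note (illustration only, not part of the change): the point of the
wrapper is that a later patch can redefine what "min_vruntime" means
in exactly one place instead of at every call site.  A rough sketch of
how a follow-up could redirect it is below; the sched_core_enabled()
check and the core_min_vruntime field are made-up assumptions for this
example, not anything introduced here:

static inline u64 cfs_rq_min_vruntime(struct cfs_rq *cfs_rq)
{
	/*
	 * Hypothetical follow-up: return a core-wide minimum when core
	 * scheduling is active, otherwise fall back to the per-cfs_rq
	 * value that all existing callers see today.
	 */
	if (sched_core_enabled(rq_of(cfs_rq)))
		return cfs_rq->core_min_vruntime;

	return cfs_rq->min_vruntime;
}

Callers such as place_entity() and enqueue_entity() would then need no
further changes, which is what this preparatory patch buys.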
 kernel/sched/fair.c | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 26d29126d6a5..a7b26c96f46b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -431,6 +431,11 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+static inline u64 cfs_rq_min_vruntime(struct cfs_rq *cfs_rq)
+{
+       return cfs_rq->min_vruntime;
+}
+
 static __always_inline
 void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
 
@@ -467,7 +472,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
        struct sched_entity *curr = cfs_rq->curr;
        struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);
 
-       u64 vruntime = cfs_rq->min_vruntime;
+       u64 vruntime = cfs_rq_min_vruntime(cfs_rq);
 
        if (curr) {
                if (curr->on_rq)
@@ -487,7 +492,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
        }
 
        /* ensure we never gain time by being placed backwards. */
-       cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+       cfs_rq->min_vruntime = max_vruntime(cfs_rq_min_vruntime(cfs_rq), vruntime);
 #ifndef CONFIG_64BIT
        smp_wmb();
        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
@@ -3742,7 +3747,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHED_DEBUG
-       s64 d = se->vruntime - cfs_rq->min_vruntime;
+       s64 d = se->vruntime - cfs_rq_min_vruntime(cfs_rq);
 
        if (d < 0)
                d = -d;
@@ -3755,7 +3760,7 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
-       u64 vruntime = cfs_rq->min_vruntime;
+       u64 vruntime = cfs_rq_min_vruntime(cfs_rq);
 
        /*
         * The 'current' period is already promised to the current tasks,
@@ -3848,7 +3853,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         * update_curr().
         */
        if (renorm && curr)
-               se->vruntime += cfs_rq->min_vruntime;
+               se->vruntime += cfs_rq_min_vruntime(cfs_rq);
 
        update_curr(cfs_rq);
 
@@ -3859,7 +3864,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         * fairness detriment of existing tasks.
         */
        if (renorm && !curr)
-               se->vruntime += cfs_rq->min_vruntime;
+               se->vruntime += cfs_rq_min_vruntime(cfs_rq);
 
        /*
         * When enqueuing a sched_entity, we must:
@@ -3972,7 +3977,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
         * can move min_vruntime forward still more.
         */
        if (!(flags & DEQUEUE_SLEEP))
-               se->vruntime -= cfs_rq->min_vruntime;
+               se->vruntime -= cfs_rq_min_vruntime(cfs_rq);
 
        /* return excess runtime on last dequeue */
        return_cfs_rq_runtime(cfs_rq);
@@ -6722,7 +6727,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
                        min_vruntime = cfs_rq->min_vruntime;
                } while (min_vruntime != min_vruntime_copy);
 #else
-               min_vruntime = cfs_rq->min_vruntime;
+               min_vruntime = cfs_rq_min_vruntime(cfs_rq);
 #endif
 
                se->vruntime -= min_vruntime;
@@ -10215,7 +10220,7 @@ static void task_fork_fair(struct task_struct *p)
                resched_curr(rq);
        }
 
-       se->vruntime -= cfs_rq->min_vruntime;
+       se->vruntime -= cfs_rq_min_vruntime(cfs_rq);
        rq_unlock(rq, &rf);
 }
 
@@ -10335,7 +10340,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
                 * cause 'unlimited' sleep bonus.
                 */
                place_entity(cfs_rq, se, 0);
-               se->vruntime -= cfs_rq->min_vruntime;
+               se->vruntime -= cfs_rq_min_vruntime(cfs_rq);
        }
 
        detach_entity_cfs_rq(se);
@@ -10349,7 +10354,7 @@ static void attach_task_cfs_rq(struct task_struct *p)
        attach_entity_cfs_rq(se);
 
        if (!vruntime_normalized(p))
-               se->vruntime += cfs_rq->min_vruntime;
+               se->vruntime += cfs_rq_min_vruntime(cfs_rq);
 }
 
 static void switched_from_fair(struct rq *rq, struct task_struct *p)
-- 
2.19.1.3.ge56e4f7