rq->clock_task may be updated between the two calls to
rq_clock_task() in update_curr_dl(). Calling rq_clock_task() only
once and reusing the value makes the accounting more accurate and
slightly more efficient, following the pattern already used in
update_curr().

Signed-off-by: Wen Yang <[email protected]>
Reviewed-by: Jiang Biao <[email protected]>
Suggested-by: Peter Zijlstra <[email protected]>
---
 kernel/sched/deadline.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
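
Side note (not part of the patch): the effect of caching the clock
value is easy to demonstrate with a minimal user-space sketch. The
names below (fake_clock, struct entity, update_twice, update_once)
are invented purely for illustration and do not exist in the kernel;
the fixed "+= 3" stands in for rq->clock_task advancing between the
two reads.

  #include <stdint.h>
  #include <stdio.h>

  static uint64_t fake_clock;            /* stands in for rq->clock_task */

  struct entity {
          uint64_t exec_start;
          uint64_t sum_exec_runtime;
  };

  /* Two clock reads: time elapsing between them is never charged. */
  static void update_twice(struct entity *e)
  {
          uint64_t delta = fake_clock - e->exec_start;

          fake_clock += 3;               /* clock advances between the reads */
          e->sum_exec_runtime += delta;
          e->exec_start = fake_clock;    /* jumps past the measured interval */
  }

  /* One cached read: every clock unit lands in exactly one delta. */
  static void update_once(struct entity *e)
  {
          uint64_t now = fake_clock;
          uint64_t delta = now - e->exec_start;

          fake_clock += 3;               /* same interleaving */
          e->sum_exec_runtime += delta;
          e->exec_start = now;           /* next delta starts where this ended */
  }

  int main(void)
  {
          struct entity a = { 0, 0 }, b = { 0, 0 };

          fake_clock = 10;
          update_twice(&a);              /* runtime = 10, exec_start = 13 */
          fake_clock = 10;
          update_once(&b);               /* runtime = 10, exec_start = 10 */

          printf("twice: runtime=%llu exec_start=%llu\n",
                 (unsigned long long)a.sum_exec_runtime,
                 (unsigned long long)a.exec_start);
          printf("once:  runtime=%llu exec_start=%llu\n",
                 (unsigned long long)b.sum_exec_runtime,
                 (unsigned long long)b.exec_start);
          return 0;
  }

With update_twice() the three units that pass between the two reads
are dropped from accounting; with update_once() they show up in the
next delta, which is what caching rq_clock_task() in 'now' achieves
for update_curr_dl().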

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2473736..a32209c 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1115,6 +1115,7 @@ static void update_curr_dl(struct rq *rq)
        struct task_struct *curr = rq->curr;
        struct sched_dl_entity *dl_se = &curr->dl;
        u64 delta_exec;
+       u64 now;
 
        if (!dl_task(curr) || !on_dl_rq(dl_se))
                return;
@@ -1127,7 +1128,8 @@ static void update_curr_dl(struct rq *rq)
         * natural solution, but the full ramifications of this
         * approach need further study.
         */
-       delta_exec = rq_clock_task(rq) - curr->se.exec_start;
+       now = rq_clock_task(rq);
+       delta_exec = now - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0)) {
                if (unlikely(dl_se->dl_yielded))
                        goto throttle;
@@ -1143,7 +1145,7 @@ static void update_curr_dl(struct rq *rq)
        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);
 
-       curr->se.exec_start = rq_clock_task(rq);
+       curr->se.exec_start = now;
        cgroup_account_cputime(curr, delta_exec);
 
        sched_rt_avg_update(rq, delta_exec);
-- 
1.8.3.1
