Since sched_slice() is called at high frequency, even a small
simplification like this should be worthwhile.

Signed-off-by: Peng Liu <iwtba...@gmail.com>
---
 kernel/sched/fair.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)
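
Note (illustration only, not part of the commit message): the change relies
on taking a local copy of cfs_rq->load up front and adding the entity's
weight only when it is not on the runqueue, which feeds exactly the same
load value to __calc_delta() as the old pointer-based path. Below is a
minimal user-space sketch of that equivalence; struct load_weight and
update_load_add() here are simplified stand-ins, not the real kernel
definitions.

#include <assert.h>
#include <stdbool.h>

/* Simplified stand-in for the kernel's struct load_weight. */
struct load_weight {
	unsigned long weight;
	unsigned int inv_weight;
};

static void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;	/* force inv_weight recomputation */
}

/* Old scheme: point at cfs_rq->load, or at a patched local copy. */
static unsigned long old_scheme(const struct load_weight *cfs_load,
				unsigned long se_weight, bool on_rq)
{
	const struct load_weight *load = cfs_load;
	struct load_weight lw;

	if (!on_rq) {
		lw = *cfs_load;
		update_load_add(&lw, se_weight);
		load = &lw;
	}
	return load->weight;	/* value handed to __calc_delta() */
}

/* New scheme: always take a local copy, add the weight only if !on_rq. */
static unsigned long new_scheme(const struct load_weight *cfs_load,
				unsigned long se_weight, bool on_rq)
{
	struct load_weight lw = *cfs_load;

	if (!on_rq)
		update_load_add(&lw, se_weight);
	return lw.weight;
}

int main(void)
{
	struct load_weight rq_load = { .weight = 3072, .inv_weight = 0 };

	assert(old_scheme(&rq_load, 1024, true)  == new_scheme(&rq_load, 1024, true));
	assert(old_scheme(&rq_load, 1024, false) == new_scheme(&rq_load, 1024, false));
	return 0;
}

The unconditional struct copy is cheap (a weight plus an inv_weight), which
is why the simplification looks acceptable even on this hot path.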

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1054d2cf6aaa..6ae2a507aac0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -694,19 +694,15 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
        for_each_sched_entity(se) {
-               struct load_weight *load;
                struct load_weight lw;
 
                cfs_rq = cfs_rq_of(se);
-               load = &cfs_rq->load;
+               lw = cfs_rq->load;
 
-               if (unlikely(!se->on_rq)) {
-                       lw = cfs_rq->load;
+               if (unlikely(!se->on_rq))
+                       update_load_add(&lw, se->load.weight);
 
-                       update_load_add(&lw, se->load.weight);
-                       load = &lw;
-               }
-               slice = __calc_delta(slice, se->load.weight, load);
+               slice = __calc_delta(slice, se->load.weight, &lw);
        }
        return slice;
 }
-- 
2.17.1
