Most call sites of update_load_avg() already have cfs_rq_of(se)
available; pass it down instead of recomputing it.

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
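[Not for the changelog] For reviewers skimming the new calling convention, here is a
self-contained sketch of the pattern this patch applies. The *_demo types and the
cfs_rq_of_demo()/update_load_avg_old()/update_load_avg_new() helpers are made-up
stand-ins for illustration only, not the real fair.c definitions:

/*
 * Hypothetical, simplified sketch of the refactor: instead of the helper
 * re-deriving the runqueue from the entity on every call, callers that
 * already hold the runqueue pointer pass it down.
 */
#include <stdio.h>

struct cfs_rq_demo {
	long nr_updates;
};

struct sched_entity_demo {
	struct cfs_rq_demo *my_q;	/* stand-in for the entity's cfs_rq backlink */
};

/* stand-in for cfs_rq_of(se): derive the runqueue from the entity */
static struct cfs_rq_demo *cfs_rq_of_demo(struct sched_entity_demo *se)
{
	return se->my_q;
}

/* old convention: the helper recomputes the cfs_rq on every call */
static void update_load_avg_old(struct sched_entity_demo *se, int flags)
{
	struct cfs_rq_demo *cfs_rq = cfs_rq_of_demo(se);

	(void)flags;
	cfs_rq->nr_updates++;
}

/* new convention: the caller, which already holds cfs_rq, passes it in */
static void update_load_avg_new(struct cfs_rq_demo *cfs_rq,
				struct sched_entity_demo *se, int flags)
{
	(void)se;
	(void)flags;
	cfs_rq->nr_updates++;
}

int main(void)
{
	struct cfs_rq_demo q = { .nr_updates = 0 };
	struct sched_entity_demo se = { .my_q = &q };

	update_load_avg_old(&se, 0);
	update_load_avg_new(&q, &se, 0);	/* no extra cfs_rq_of() lookup */
	printf("updates: %ld\n", q.nr_updates);	/* prints: updates: 2 */
	return 0;
}

Most converted sites (enqueue_entity(), dequeue_entity(), set_next_entity(), etc.)
already receive cfs_rq as a parameter, so the redundant lookup simply disappears;
the two sites that do not have it at hand, update_blocked_averages() and
sched_group_set_shares(), keep computing cfs_rq_of(se) at the call site.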
 kernel/sched/fair.c |   31 +++++++++++++++----------------
 1 file changed, 15 insertions(+), 16 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3498,9 +3498,8 @@ update_cfs_rq_load_avg(u64 now, struct c
 #define SKIP_AGE_LOAD  0x2
 
 /* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int flags)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-       struct cfs_rq *cfs_rq = cfs_rq_of(se);
        u64 now = cfs_rq_clock_task(cfs_rq);
        struct rq *rq = rq_of(cfs_rq);
        int cpu = cpu_of(rq);
@@ -3661,9 +3660,9 @@ update_cfs_rq_load_avg(u64 now, struct c
 #define UPDATE_TG      0x0
 #define SKIP_AGE_LOAD  0x0
 
-static inline void update_load_avg(struct sched_entity *se, int not_used1)
+static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
 {
-       cfs_rq_util_change(cfs_rq_of(se));
+       cfs_rq_util_change(cfs_rq);
 }
 
 static inline void
@@ -3814,7 +3813,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
         *     its group cfs_rq
         *   - Add its new weight to cfs_rq->load.weight
         */
-       update_load_avg(se, UPDATE_TG);
+       update_load_avg(cfs_rq, se, UPDATE_TG);
        enqueue_entity_load_avg(cfs_rq, se);
        update_cfs_shares(se);
        account_entity_enqueue(cfs_rq, se);
@@ -3898,7 +3897,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
         *   - For group entity, update its weight to reflect the new share
         *     of its group cfs_rq.
         */
-       update_load_avg(se, UPDATE_TG);
+       update_load_avg(cfs_rq, se, UPDATE_TG);
        dequeue_entity_load_avg(cfs_rq, se);
 
        update_stats_dequeue(cfs_rq, se, flags);
@@ -3986,7 +3985,7 @@ set_next_entity(struct cfs_rq *cfs_rq, s
                 */
                update_stats_wait_end(cfs_rq, se);
                __dequeue_entity(cfs_rq, se);
-               update_load_avg(se, UPDATE_TG);
+               update_load_avg(cfs_rq, se, UPDATE_TG);
        }
 
        update_stats_curr_start(cfs_rq, se);
@@ -4088,7 +4087,7 @@ static void put_prev_entity(struct cfs_r
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
                /* in !on_rq case, update occurred at dequeue */
-               update_load_avg(prev, 0);
+               update_load_avg(cfs_rq, prev, 0);
        }
        cfs_rq->curr = NULL;
 }
@@ -4104,7 +4103,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
        /*
         * Ensure that runnable average is periodically updated.
         */
-       update_load_avg(curr, UPDATE_TG);
+       update_load_avg(cfs_rq, curr, UPDATE_TG);
        update_cfs_shares(curr);
 
 #ifdef CONFIG_SCHED_HRTICK
@@ -5022,7 +5021,7 @@ enqueue_task_fair(struct rq *rq, struct
                if (cfs_rq_throttled(cfs_rq))
                        break;
 
-               update_load_avg(se, UPDATE_TG);
+               update_load_avg(cfs_rq, se, UPDATE_TG);
                update_cfs_shares(se);
        }
 
@@ -5081,7 +5080,7 @@ static void dequeue_task_fair(struct rq
                if (cfs_rq_throttled(cfs_rq))
                        break;
 
-               update_load_avg(se, UPDATE_TG);
+               update_load_avg(cfs_rq, se, UPDATE_TG);
                update_cfs_shares(se);
        }
 
@@ -7044,7 +7043,7 @@ static void update_blocked_averages(int
                /* Propagate pending load changes to the parent, if any: */
                se = cfs_rq->tg->se[cpu];
                if (se && !skip_blocked_update(se))
-                       update_load_avg(se, 0);
+                       update_load_avg(cfs_rq_of(se), se, 0);
 
                /*
                 * There can be a lot of idle CPU cgroups.  Don't let fully
@@ -9185,7 +9184,7 @@ static void propagate_entity_cfs_rq(stru
                if (cfs_rq_throttled(cfs_rq))
                        break;
 
-               update_load_avg(se, UPDATE_TG);
+               update_load_avg(cfs_rq, se, UPDATE_TG);
        }
 }
 #else
@@ -9197,7 +9196,7 @@ static void detach_entity_cfs_rq(struct
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
        /* Catch up with the cfs_rq and remove our load when we leave */
-       update_load_avg(se, 0);
+       update_load_avg(cfs_rq, se, 0);
        detach_entity_load_avg(cfs_rq, se);
        update_tg_load_avg(cfs_rq, false);
        propagate_entity_cfs_rq(se);
@@ -9216,7 +9215,7 @@ static void attach_entity_cfs_rq(struct
 #endif
 
        /* Synchronize entity with its cfs_rq */
-       update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
+       update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
        attach_entity_load_avg(cfs_rq, se);
        update_tg_load_avg(cfs_rq, false);
        propagate_entity_cfs_rq(se);
@@ -9500,7 +9499,7 @@ int sched_group_set_shares(struct task_g
                rq_lock_irqsave(rq, &rf);
                update_rq_clock(rq);
                for_each_sched_entity(se) {
-                       update_load_avg(se, UPDATE_TG);
+                       update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
                        update_cfs_shares(se);
                }
                rq_unlock_irqrestore(rq, &rf);

