This reverts commit 3a123bbbb10d54dbdde6ccbbd519c74c91ba2f52.

It's needed by the series for controlling whether cpufreq is notified about
a frequency update while the utilization is being updated.
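For illustration only (not part of this patch), below is a minimal standalone
sketch of the gating this parameter restores: cfs_rq_util_change() is invoked
only when the averages decayed and the caller asked for the notification. All
types and helpers in the sketch are stand-ins, not the kernel's.

/*
 * Standalone sketch (stub types, not kernel code) of how update_freq
 * gates the cpufreq notification in update_cfs_rq_load_avg().
 */
#include <stdbool.h>
#include <stdio.h>

struct cfs_rq_stub {
	unsigned long util_avg;
};

/* Stand-in for the kernel's cpufreq/schedutil hook. */
static void cfs_rq_util_change(struct cfs_rq_stub *cfs_rq)
{
	printf("cpufreq notified, util_avg=%lu\n", cfs_rq->util_avg);
}

static int update_cfs_rq_load_avg(unsigned long long now,
				  struct cfs_rq_stub *cfs_rq, bool update_freq)
{
	bool decayed = true;	/* pretend the PELT averages decayed */

	(void)now;		/* unused in this sketch */

	/* Only notify cpufreq when the caller asked for it. */
	if (update_freq && decayed)
		cfs_rq_util_change(cfs_rq);

	return decayed;
}

int main(void)
{
	struct cfs_rq_stub cfs_rq = { .util_avg = 42 };

	update_cfs_rq_load_avg(0, &cfs_rq, true);	/* notifies cpufreq */
	update_cfs_rq_load_avg(0, &cfs_rq, false);	/* caller notifies later */
	return 0;
}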

Cc: Srinivas Pandruvada <srinivas.pandruv...@linux.intel.com>
Cc: Len Brown <l...@kernel.org>
Cc: Rafael J. Wysocki <r...@rjwysocki.net>
Cc: Viresh Kumar <viresh.ku...@linaro.org>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Juri Lelli <juri.le...@arm.com>
Cc: Patrick Bellasi <patrick.bell...@arm.com>
Cc: kernel-t...@android.com
Signed-off-by: Joel Fernandes <joe...@google.com>
---
 kernel/sched/fair.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40a89f415db0..a8915649d833 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -797,7 +797,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
                        /*
                         * For !fair tasks do:
                         *
-                       update_cfs_rq_load_avg(now, cfs_rq);
+                       update_cfs_rq_load_avg(now, cfs_rq, false);
                        attach_entity_load_avg(cfs_rq, se);
                        switched_from_fair(rq, p);
                         *
@@ -3571,6 +3571,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
  * @now: current time, as per cfs_rq_clock_task()
  * @cfs_rq: cfs_rq to update
+ * @update_freq: should we call cfs_rq_util_change() or will the call do so
  *
  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
  * avg. The immediate corollary is that all (fair) tasks must be attached, see
@@ -3584,7 +3585,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
  * call update_tg_load_avg() when this function returns true.
  */
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
        unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0;
        struct sched_avg *sa = &cfs_rq->avg;
@@ -3621,7 +3622,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
        cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-       if (decayed)
+       if (update_freq && decayed)
                cfs_rq_util_change(cfs_rq);
 
        return decayed;
@@ -3715,7 +3716,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
        if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
                __update_load_avg_se(now, cpu, cfs_rq, se);
 
-       decayed  = update_cfs_rq_load_avg(now, cfs_rq);
+       decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
        decayed |= propagate_entity_load_avg(se);
 
        if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
@@ -3805,7 +3806,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
 #else /* CONFIG_SMP */
 
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
        return 0;
 }
@@ -7299,7 +7300,7 @@ static void update_blocked_averages(int cpu)
                if (throttled_hierarchy(cfs_rq))
                        continue;
 
-               if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+               if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
                        update_tg_load_avg(cfs_rq, 0);
 
                /* Propagate pending load changes to the parent, if any: */
@@ -7372,7 +7373,7 @@ static inline void update_blocked_averages(int cpu)
 
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
-       update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+       update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
        rq_unlock_irqrestore(rq, &rf);
 }
 
-- 
2.14.1.480.gb18f417b89-goog
