This is to avoid cpufreq update callbacks when we go idle on UP
platforms.

Signed-off-by: Jisheng Zhang <jszh...@marvell.com>
---
 kernel/sched/fair.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bdcbeea..236ec5b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3121,7 +3121,8 @@ static inline void update_load_avg(struct sched_entity *se, int not_used)
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        struct rq *rq = rq_of(cfs_rq);
 
-       cpufreq_trigger_update(rq_clock(rq));
+       if (&rq->cfs == cfs_rq)
+               cpufreq_trigger_update(rq_clock(rq));
 }
 
 static inline void
-- 
2.8.1

Reply via email to