In fair.c, update_tg_load_avg() is sometimes called as
update_tg_load_avg(cfs_rq, 0) and sometimes as
update_tg_load_avg(cfs_rq, false). Change the 'force' parameter to bool
so all call sites use the same form.
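
For illustration, these are the two call forms currently mixed in the
file (both taken from the existing code, no new callers are added):

	update_tg_load_avg(cfs_rq, 0);		/* passes an int literal */
	update_tg_load_avg(cfs_rq, false);	/* passes a bool literal */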

Signed-off-by: Xianting Tian <tian.xiant...@h3c.com>
---
 kernel/sched/fair.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1a68a0536..61dac1c58 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -831,7 +831,7 @@ void init_entity_runnable_average(struct sched_entity *se)
 void post_init_entity_util_avg(struct task_struct *p)
 {
 }
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+static void update_tg_load_avg(struct cfs_rq *cfs_rq, bool force)
 {
 }
 #endif /* CONFIG_SMP */
@@ -3300,7 +3300,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
  *
  * Updating tg's load_avg is necessary before update_cfs_share().
  */
-static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, bool force)
 {
        long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
 
@@ -3612,7 +3612,7 @@ static inline bool skip_blocked_update(struct sched_entity *se)
 
 #else /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
+static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, bool force) {}
 
 static inline int propagate_entity_load_avg(struct sched_entity *se)
 {
@@ -3800,13 +3800,13 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
                 * IOW we're enqueueing a task on a new CPU.
                 */
                attach_entity_load_avg(cfs_rq, se);
-               update_tg_load_avg(cfs_rq, 0);
+               update_tg_load_avg(cfs_rq, false);
 
        } else if (decayed) {
                cfs_rq_util_change(cfs_rq, 0);
 
                if (flags & UPDATE_TG)
-                       update_tg_load_avg(cfs_rq, 0);
+                       update_tg_load_avg(cfs_rq, false);
        }
 }
 
@@ -7887,7 +7887,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
                struct sched_entity *se;
 
                if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
-                       update_tg_load_avg(cfs_rq, 0);
+                       update_tg_load_avg(cfs_rq, false);
 
                        if (cfs_rq == &rq->cfs)
                                decayed = true;
-- 
2.17.1
