In preparation for the introduction of a new root domain flag which can
be set during load balance (the 'overutilized' flag), clean up the set
of parameters passed to update_sg_lb_stats(). More specifically, the
'local_group' and 'load_idx' parameters can be removed since they can
easily be reconstructed from within the function.
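
Both values are cheap to derive from the 'env' and 'group' arguments
the function already receives, as done in the fair.c hunk below:

    int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
    int load_idx = get_sd_load_idx(env->sd, env->idle);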

While at it, transform the 'overload' parameter into a flag stored in
the 'sg_status' parameter, thereby facilitating the definition of new
flags when needed.
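
A minimal sketch of the resulting pattern (SG_OVERUTILIZED below is
only an illustration of such a future flag, it is not defined by this
patch):

    int sg_status = 0;

    if (nr_running > 1)
            sg_status |= SG_OVERLOAD;       /* was: *overload = true */

    /*
     * A hypothetical new flag, e.g. SG_OVERUTILIZED, can be OR-ed in
     * the same way without touching the function signature.
     */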

Cc: Ingo Molnar <mi...@redhat.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Suggested-by: Peter Zijlstra <pet...@infradead.org>
Suggested-by: Valentin Schneider <valentin.schnei...@arm.com>
Signed-off-by: Quentin Perret <quentin.per...@arm.com>
---
 kernel/sched/fair.c  | 27 +++++++++++----------------
 kernel/sched/sched.h |  3 +++
 2 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6c42f3e075b9..e21f37129395 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7905,16 +7905,16 @@ static bool update_nohz_stats(struct rq *rq, bool force)
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
  * @group: sched_group whose statistics are to be updated.
- * @load_idx: Load index of sched_domain of this_cpu for load calc.
- * @local_group: Does group contain this_cpu.
  * @sgs: variable to hold the statistics for this group.
- * @overload: Indicate pullable load (e.g. >1 runnable task).
+ * @sg_status: Holds flag indicating the status of the sched_group
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
-                       struct sched_group *group, int load_idx,
-                       int local_group, struct sg_lb_stats *sgs,
-                       bool *overload)
+                                     struct sched_group *group,
+                                     struct sg_lb_stats *sgs,
+                                     int *sg_status)
 {
+       int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
+       int load_idx = get_sd_load_idx(env->sd, env->idle);
        unsigned long load;
        int i, nr_running;
 
@@ -7938,7 +7938,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
                nr_running = rq->nr_running;
                if (nr_running > 1)
-                       *overload = true;
+                       *sg_status |= SG_OVERLOAD;
 
 #ifdef CONFIG_NUMA_BALANCING
                sgs->nr_numa_running += rq->nr_numa_running;
@@ -7954,7 +7954,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
                if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
                    sgs->group_misfit_task_load < rq->misfit_task_load) {
                        sgs->group_misfit_task_load = rq->misfit_task_load;
-                       *overload = 1;
+                       *sg_status |= SG_OVERLOAD;
                }
        }
 
@@ -8099,17 +8099,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats *local = &sds->local_stat;
        struct sg_lb_stats tmp_sgs;
-       int load_idx;
-       bool overload = false;
        bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
+       int sg_status = 0;
 
 #ifdef CONFIG_NO_HZ_COMMON
        if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
                env->flags |= LBF_NOHZ_STATS;
 #endif
 
-       load_idx = get_sd_load_idx(env->sd, env->idle);
-
        do {
                struct sg_lb_stats *sgs = &tmp_sgs;
                int local_group;
@@ -8124,8 +8121,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
                                update_group_capacity(env->sd, env->dst_cpu);
                }
 
-               update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
-                                               &overload);
+               update_sg_lb_stats(env, sg, sgs, &sg_status);
 
                if (local_group)
                        goto next_group;
@@ -8175,8 +8171,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 
        if (!env->sd->parent) {
                /* update overload indicator if we are at root domain */
-               if (READ_ONCE(env->dst_rq->rd->overload) != overload)
-                       WRITE_ONCE(env->dst_rq->rd->overload, overload);
+               WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
        }
 }
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8decb3bc0e3f..4c1e4b73f40d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -715,6 +715,9 @@ struct perf_domain {
        struct rcu_head rcu;
 };
 
+/* Scheduling group status flags */
+#define SG_OVERLOAD            0x1 /* More than one runnable task on a CPU. */
+
 /*
  * We add the notion of a root-domain which will be used to define per-domain
  * variables. Each exclusive cpuset essentially defines an island domain by
-- 
2.19.2
