After the sched metrics cleanup, the SCHED_LOAD_SHIFT and SCHED_LOAD_SCALE
definitions only cause ambiguity and are no longer needed. Remove them and
use NICE_0_LOAD_SHIFT and NICE_0_LOAD instead (the names clearly suggest
what they stand for).
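
As a quick sanity check of how the new names line up (assuming the default
SCHED_FIXEDPOINT_SHIFT of 10, and with the 64-bit increased-resolution case
still disabled so that scale_load() is a no-op):

    NICE_0_LOAD_SHIFT == SCHED_FIXEDPOINT_SHIFT == 10
    NICE_0_LOAD       == 1L << NICE_0_LOAD_SHIFT == 1024
    scale_load(sched_prio_to_weight[20]) == scale_load(1024) == NICE_0_LOAD

i.e. the calibration invariant documented in the comment below still holds.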

Suggested-by: Ben Segall <bseg...@google.com>
Signed-off-by: Yuyang Du <yuyang...@intel.com>
---
 kernel/sched/fair.c  |  4 ++--
 kernel/sched/sched.h | 22 +++++++++++-----------
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1d3fc01..bf835b5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -682,7 +682,7 @@ void init_entity_runnable_average(struct sched_entity *se)
        sa->period_contrib = 1023;
        sa->load_avg = scale_load_down(se->load.weight);
        sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
-       sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
+       sa->util_avg = SCHED_CAPACITY_SCALE;
        sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
        /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
@@ -6877,7 +6877,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
        if (busiest->group_type == group_overloaded &&
            local->group_type   == group_overloaded) {
                load_above_capacity = busiest->sum_nr_running *
-                                       SCHED_LOAD_SCALE;
+                                     scale_load_down(NICE_0_LOAD);
                if (load_above_capacity > busiest->group_capacity)
                        load_above_capacity -= busiest->group_capacity;
                else
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 15a89ee..94ba652 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -54,25 +54,25 @@ static inline void update_cpu_load_active(struct rq *this_rq) { }
  * increased costs.
  */
 #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
-# define SCHED_LOAD_SHIFT      (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
+# define NICE_0_LOAD_SHIFT     (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
 # define scale_load(w)         ((w) << SCHED_FIXEDPOINT_SHIFT)
 # define scale_load_down(w)    ((w) >> SCHED_FIXEDPOINT_SHIFT)
 #else
-# define SCHED_LOAD_SHIFT      (SCHED_FIXEDPOINT_SHIFT)
+# define NICE_0_LOAD_SHIFT     (SCHED_FIXEDPOINT_SHIFT)
 # define scale_load(w)         (w)
 # define scale_load_down(w)    (w)
 #endif
 
-#define SCHED_LOAD_SCALE       (1L << SCHED_LOAD_SHIFT)
-
 /*
- * NICE_0's weight (visible to user) and its load (invisible to user) have
- * independent ranges, but they should be well calibrated. We use scale_load()
- * and scale_load_down(w) to convert between them, the following must be true:
- * scale_load(sched_prio_to_weight[20]) == NICE_0_LOAD
+ * Task weight (visible to user) and its load (invisible to user) have
+ * independent resolution, but they should be well calibrated. We use
+ * scale_load() and scale_load_down(w) to convert between them. The
+ * following must be true:
+ *
+ *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
+ *
  */
-#define NICE_0_LOAD            SCHED_LOAD_SCALE
-#define NICE_0_SHIFT           SCHED_LOAD_SHIFT
+#define NICE_0_LOAD            (1L << NICE_0_LOAD_SHIFT)
 
 /*
  * Single value that decides SCHED_DEADLINE internal math precision.
@@ -859,7 +859,7 @@ DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 struct sched_group_capacity {
        atomic_t ref;
        /*
-        * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
+        * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
         * for a single CPU.
         */
        unsigned int capacity;
-- 
2.1.4
