Rename scale_load() and scale_load_down() to user_to_kernel_load()
and kernel_to_user_load() respectively. The new names state the
conversion direction explicitly and avoid confusion about which
scale a weight value is in.
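
A minimal sketch of the intended semantics, assuming the existing
SCHED_FIXEDPOINT_SHIFT and NICE_0_LOAD definitions in kernel/sched/sched.h
(illustrative only, shown for the increased-resolution case that is
currently compiled out):

    /* user-visible weight scale (NICE_0 == 1024) -> kernel load scale */
    #define user_to_kernel_load(w)  ((w) << SCHED_FIXEDPOINT_SHIFT)
    /* kernel load scale (NICE_0 == NICE_0_LOAD) -> user-visible weight scale */
    #define kernel_to_user_load(w)  ((w) >> SCHED_FIXEDPOINT_SHIFT)

    /*
     * The two scales must stay calibrated, i.e.:
     * user_to_kernel_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
     * kernel_to_user_load(NICE_0_LOAD) == sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]
     */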

[update calculate_imbalance]
Signed-off-by: Vincent Guittot <vincent.guit...@linaro.org>
Signed-off-by: Yuyang Du <yuyang...@intel.com>
---
 kernel/sched/core.c  |    8 ++++----
 kernel/sched/fair.c  |   18 ++++++++++++------
 kernel/sched/sched.h |   16 ++++++++--------
 3 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c82ca6e..349d776 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -698,12 +698,12 @@ static void set_load_weight(struct task_struct *p)
         * SCHED_IDLE tasks get minimal weight:
         */
        if (idle_policy(p->policy)) {
-               load->weight = scale_load(WEIGHT_IDLEPRIO);
+               load->weight = user_to_kernel_load(WEIGHT_IDLEPRIO);
                load->inv_weight = WMULT_IDLEPRIO;
                return;
        }
 
-       load->weight = scale_load(sched_prio_to_weight[prio]);
+       load->weight = user_to_kernel_load(sched_prio_to_weight[prio]);
        load->inv_weight = sched_prio_to_wmult[prio];
 }
 
@@ -8184,7 +8184,7 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
                                struct cftype *cftype, u64 shareval)
 {
-       return sched_group_set_shares(css_tg(css), scale_load(shareval));
+       return sched_group_set_shares(css_tg(css), user_to_kernel_load(shareval));
 }
 
 static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
@@ -8192,7 +8192,7 @@ static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
 {
        struct task_group *tg = css_tg(css);
 
-       return (u64) scale_load_down(tg->shares);
+       return (u64) kernel_to_user_load(tg->shares);
 }
 
 #ifdef CONFIG_CFS_BANDWIDTH
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4487c2a..200f752 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -189,7 +189,7 @@ static void __update_inv_weight(struct load_weight *lw)
        if (likely(lw->inv_weight))
                return;
 
-       w = scale_load_down(lw->weight);
+       w = kernel_to_user_load(lw->weight);
 
        if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
                lw->inv_weight = 1;
@@ -210,10 +210,14 @@ static void __update_inv_weight(struct load_weight *lw)
  *
  * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
  * weight/lw.weight <= 1, and therefore our shift will also be positive.
+ *
+ * Note: weight is scaled back to the user load resolution (NICE_0's load
+ * is 1024) rather than the possibly increased kernel load resolution
+ * (NICE_0's load is NICE_0_LOAD), for multiplication/division efficiency.
  */
 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
 {
-       u64 fact = scale_load_down(weight);
+       u64 fact = kernel_to_user_load(weight);
        int shift = WMULT_SHIFT;
 
        __update_inv_weight(lw);
@@ -7017,10 +7021,11 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
         */
        if (busiest->group_type == group_overloaded &&
            local->group_type   == group_overloaded) {
+               unsigned long min_cpu_load =
+                       busiest->group_capacity * NICE_0_LOAD / SCHED_CAPACITY_SCALE;
                load_above_capacity = busiest->sum_nr_running * NICE_0_LOAD;
-               if (load_above_capacity > scale_load(busiest->group_capacity))
-                       load_above_capacity -=
-                               scale_load(busiest->group_capacity);
+               if (load_above_capacity > min_cpu_load)
+                       load_above_capacity -= min_cpu_load;
                else
                        load_above_capacity = ~0UL;
        }
@@ -8574,7 +8579,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
        if (!tg->se[0])
                return -EINVAL;
 
-       shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
+       shares = clamp(shares, user_to_kernel_load(MIN_SHARES),
+                      user_to_kernel_load(MAX_SHARES));
 
        mutex_lock(&shares_mutex);
        if (tg->shares == shares)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1a3be6f..871da67 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -55,22 +55,22 @@ static inline void cpu_load_update_active(struct rq *this_rq) { }
  */
 #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
 # define NICE_0_LOAD_SHIFT     (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
-# define scale_load(w)         ((w) << SCHED_FIXEDPOINT_SHIFT)
-# define scale_load_down(w)    ((w) >> SCHED_FIXEDPOINT_SHIFT)
+# define user_to_kernel_load(w)        ((w) << SCHED_FIXEDPOINT_SHIFT)
+# define kernel_to_user_load(w)        ((w) >> SCHED_FIXEDPOINT_SHIFT)
 #else
 # define NICE_0_LOAD_SHIFT     (SCHED_FIXEDPOINT_SHIFT)
-# define scale_load(w)         (w)
-# define scale_load_down(w)    (w)
+# define user_to_kernel_load(w)        (w)
+# define kernel_to_user_load(w)        (w)
 #endif
 
 /*
  * Task weight (visible to user) and its load (invisible to user) have
  * independent resolution, but they should be well calibrated. We use
- * scale_load() and scale_load_down(w) to convert between them. The
- * following must be true:
- *
- *  scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
+ * user_to_kernel_load() and kernel_to_user_load(w) to convert between
+ * them. The following must be true:
  *
+ * user_to_kernel_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
+ * kernel_to_user_load(NICE_0_LOAD) == sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]
  */
 #define NICE_0_LOAD            (1L << NICE_0_LOAD_SHIFT)
 
-- 
1.7.9.5
