Actually, all of the expressions below could be replaced by scaled_busy_load_per_task, i.e. (sds->busiest_load_per_task * SCHED_POWER_SCALE) / sds->busiest->sgp->power;
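For illustration only, here is a tiny user-space sketch of the same refactoring pattern (the names, types and values below are made up for the example and are not the scheduler code): compute the power-scaled load once and reuse the named result at every comparison, instead of repeating the multiply/divide at each site.

    #include <stdio.h>

    #define SCHED_POWER_SCALE 1024UL

    int main(void)
    {
            /* Stand-ins for the sd_lb_stats fields used by the patch. */
            unsigned long busiest_load_per_task = 512;
            unsigned long busiest_power = 2048;
            unsigned long max_load = 900;

            /* Compute the scaled value once ... */
            unsigned long scaled_busy_load_per_task =
                    (busiest_load_per_task * SCHED_POWER_SCALE) / busiest_power;

            /* ... then reuse it wherever the expression used to be repeated. */
            if (max_load > scaled_busy_load_per_task)
                    printf("max_load exceeds scaled per-task load by %lu\n",
                           max_load - scaled_busy_load_per_task);
            else
                    printf("no excess load over the scaled per-task load\n");

            return 0;
    }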
Signed-off-by: Lei Wen <lei...@marvell.com>
---
 kernel/sched/fair.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c61a614..28052fa 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4727,20 +4727,17 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 	pwr_now /= SCHED_POWER_SCALE;
 
 	/* Amount of load we'd subtract */
-	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
-		sds->busiest->sgp->power;
-	if (sds->max_load > tmp)
+	if (sds->max_load > scaled_busy_load_per_task) {
 		pwr_move += sds->busiest->sgp->power *
-			min(sds->busiest_load_per_task, sds->max_load - tmp);
-
-	/* Amount of load we'd add */
-	if (sds->max_load * sds->busiest->sgp->power <
-		sds->busiest_load_per_task * SCHED_POWER_SCALE)
-		tmp = (sds->max_load * sds->busiest->sgp->power) /
-			sds->this->sgp->power;
-	else
+			min(sds->busiest_load_per_task,
+				sds->max_load - scaled_busy_load_per_task);
 		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
 			sds->this->sgp->power;
+	} else
+		tmp = (sds->max_load * sds->busiest->sgp->power) /
+			sds->this->sgp->power;
+
+	/* Amount of load we'd add */
 	pwr_move += sds->this->sgp->power *
 			min(sds->this_load_per_task, sds->this_load + tmp);
 	pwr_move /= SCHED_POWER_SCALE;
-- 
1.7.10.4