In the case of active balance, we increase the balance interval to cover pinned-task cases not covered by the all_pinned logic. Nevertheless, an active migration triggered by asym packing should be treated as the normal unbalanced case and reset the interval to its default value; otherwise, active migration for asym_packing can easily be delayed for hundreds of ms because of this all_pinned detection mechanism.
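For reference, the back-off path in load_balance() that this refers to looks roughly like the sketch below (simplified from kernel/sched/fair.c before this patch; exact surrounding code may differ between kernel versions):

	if (likely(!active_balance)) {
		/* We were unbalanced, so reset the balancing interval */
		sd->balance_interval = sd->min_interval;
	} else {
		/*
		 * Active balancing is treated like the pinned-tasks
		 * case: back off by doubling balance_interval, capped
		 * at sd->max_interval, pushing the next balance
		 * attempt on this domain further out.
		 */
		if (sd->balance_interval < sd->max_interval)
			sd->balance_interval *= 2;
	}

Without the asym_active_balance() escape added below, an active migration triggered by asym_packing also takes the doubling branch, which is what delays subsequent balancing.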
Signed-off-by: Vincent Guittot <[email protected]>
---
 kernel/sched/fair.c | 27 +++++++++++++++------------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9591e7a..487287e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8857,21 +8857,24 @@ static struct rq *find_busiest_queue(struct lb_env *env,
  */
 #define MAX_PINNED_INTERVAL	512
 
+static inline bool
+asym_active_balance(struct lb_env *env)
+{
+	/*
+	 * ASYM_PACKING needs to force migrate tasks from busy but
+	 * lower priority CPUs in order to pack all tasks in the
+	 * highest priority CPUs.
+	 */
+	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
+	       sched_asym_prefer(env->dst_cpu, env->src_cpu);
+}
+
 static int need_active_balance(struct lb_env *env)
 {
 	struct sched_domain *sd = env->sd;
 
-	if (env->idle != CPU_NOT_IDLE) {
-
-		/*
-		 * ASYM_PACKING needs to force migrate tasks from busy but
-		 * lower priority CPUs in order to pack all tasks in the
-		 * highest priority CPUs.
-		 */
-		if ((sd->flags & SD_ASYM_PACKING) &&
-		    sched_asym_prefer(env->dst_cpu, env->src_cpu))
-			return 1;
-	}
+	if (asym_active_balance(env))
+		return 1;
 
 	/*
 	 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
@@ -9150,7 +9153,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	} else
 		sd->nr_balance_failed = 0;
 
-	if (likely(!active_balance)) {
+	if (likely(!active_balance) || asym_active_balance(&env)) {
 		/* We were unbalanced, so reset the balancing interval */
 		sd->balance_interval = sd->min_interval;
 	} else {
-- 
2.7.4

