On 18/05/17 20:36, Jeffrey Hugo wrote: [...]
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index d711093..a5d41b1 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -8220,7 +8220,24 @@ static int load_balance(int this_cpu, struct rq *this_rq,
>  		/* All tasks on this runqueue were pinned by CPU affinity */
>  		if (unlikely(env.flags & LBF_ALL_PINNED)) {
>  			cpumask_clear_cpu(cpu_of(busiest), cpus);
> -			if (!cpumask_empty(cpus)) {
> +			/*
> +			 * dst_cpu is not a valid busiest cpu in the following
> +			 * check since load cannot be pulled from dst_cpu to be
> +			 * put on dst_cpu.
> +			 */
> +			cpumask_clear_cpu(env.dst_cpu, cpus);
> +			/*
> +			 * Go back to "redo" iff the load-balance cpumask
> +			 * contains other potential busiest cpus for the
> +			 * current sched domain.
> +			 */
> +			if (cpumask_intersects(cpus, sched_domain_span(env.sd))) {
> +				/*
> +				 * Now that the check has passed, reenable
> +				 * dst_cpu so that load can be calculated on
> +				 * it in the redo path.
> +				 */
> +				cpumask_set_cpu(env.dst_cpu, cpus);

IMHO, this will work nicely and it's way easier.

Another idea might be to check whether LBF_ALL_PINNED is set when we decide whether to clear the imbalance flag:

@@ -8307,14 +8307,13 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	 * We reach balance although we may have faced some affinity
 	 * constraints. Clear the imbalance flag if it was set.
 	 */
-	if (sd_parent) {
+	if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
 		int *group_imbalance = &sd_parent->groups->sgc->imbalance;

 		if (*group_imbalance)
 			*group_imbalance = 0;
 	}

But I think preventing a needless redo loop is even better ...
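
To make the reasoning behind the redo check concrete, here is a minimal userspace sketch (plain C, not kernel code; the 4-CPU domain, the bitmask representation and should_redo() are made-up for illustration only). It shows why a redo is only worth taking when some CPU other than the pinned busiest one and dst_cpu is still left in the candidate mask:

/*
 * Userspace sketch of the quoted check, NOT kernel code. CPU numbers,
 * the 4-CPU domain and should_redo() are invented for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

static bool should_redo(unsigned int cpus, unsigned int sd_span,
			unsigned int busiest_cpu, unsigned int dst_cpu)
{
	cpus &= ~(1u << busiest_cpu);	/* all tasks there were pinned */
	cpus &= ~(1u << dst_cpu);	/* can't pull from dst_cpu onto itself */
	return (cpus & sd_span) != 0;	/* any other candidate busiest left? */
}

int main(void)
{
	unsigned int sd_span = 0xf;	/* CPUs 0-3 in this sched domain */

	/* Only busiest (1) and dst (0) were candidates: redo is pointless. */
	printf("%d\n", should_redo(0x3, sd_span, 1, 0));	/* prints 0 */

	/* CPU 2 is still a candidate, so a redo may find a new busiest. */
	printf("%d\n", should_redo(0x7, sd_span, 1, 0));	/* prints 1 */

	return 0;
}

In the quoted patch dst_cpu is put back into the mask before the goto redo so that load can still be computed on it; the sketch above only covers the decision of whether the redo is worth taking at all.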