On 24-04-18, 11:02, Valentin Schneider wrote: > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c > index cacee15..ad09b67 100644 > --- a/kernel/sched/fair.c > +++ b/kernel/sched/fair.c > @@ -6613,7 +6613,7 @@ static int wake_cap(struct task_struct *p, int cpu, int > prev_cpu) > static int > select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int > wake_flags) > { > - struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; > + struct sched_domain *tmp, *sd = NULL; > int cpu = smp_processor_id(); > int new_cpu = prev_cpu; > int want_affine = 0; > @@ -6636,8 +6636,10 @@ select_task_rq_fair(struct task_struct *p, int > prev_cpu, int sd_flag, int wake_f > */ > if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && > cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { > + if (cpu != prev_cpu) > + new_cpu = wake_affine(tmp, p, cpu, prev_cpu, > sync); > + > sd = NULL; /* Prefer wake_affine over balance flags */ > - affine_sd = tmp; > break; > } > > @@ -6657,16 +6659,11 @@ select_task_rq_fair(struct task_struct *p, int > prev_cpu, int sd_flag, int wake_f > sync_entity_load_avg(&p->se); > > new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); > - } else { > - if (affine_sd && cpu != prev_cpu) > - new_cpu = wake_affine(affine_sd, p, cpu, prev_cpu, > sync); > + } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */ > + new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); > > - if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */ > - new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); > - > - if (want_affine) > - current->recent_used_cpu = cpu; > - } > + if (want_affine) > + current->recent_used_cpu = cpu; > } > rcu_read_unlock();
LGTM. I will merge it as part of the current patch, but may wait a few days before sending V2. -- viresh