Once a non-idle thread is found for a core, there is no point in traversing the rest of the threads of that core. We currently continue the traversal only to clear those threads from the "cpus" mask. Instead, clear all of the core's threads with a single call to cpumask_andnot(), which also lets us exit the loop earlier.
Signed-off-by: Viresh Kumar <viresh.ku...@linaro.org>
---
 kernel/sched/fair.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8d5c82342a36..ccd0ae9878a2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6068,6 +6068,7 @@ void __update_idle_core(struct rq *rq)
 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
+	const struct cpumask *smt;
 	int core, cpu;
 
 	if (!static_branch_likely(&sched_smt_present))
@@ -6081,10 +6082,14 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
 	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
 
-		for_each_cpu(cpu, cpu_smt_mask(core)) {
-			cpumask_clear_cpu(cpu, cpus);
-			if (!available_idle_cpu(cpu))
+		smt = cpu_smt_mask(core);
+		cpumask_andnot(cpus, cpus, smt);
+
+		for_each_cpu(cpu, smt) {
+			if (!available_idle_cpu(cpu)) {
 				idle = false;
+				break;
+			}
 		}
 
 		if (idle)
-- 
2.20.1.321.g9e740568ce00