In preparation for the next patch, move the actual scanning of the LLC
out of the whole proportional/cost metric stuff, so we can replace it
in a subsequent patch.

No functional change intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/sched/fair.c |   28 ++++++++++++++++++----------
 1 file changed, 18 insertions(+), 10 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6364,6 +6364,23 @@ static inline int select_idle_smt(struct
 
 #endif /* CONFIG_SCHED_SMT */
 
+static int __select_idle_cpu(struct task_struct *p, struct sched_domain *sd,
+                            int target, int nr, int *ploops)
+{
+       int cpu;
+
+       for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
+               if ((*ploops)++ >= nr)
+                       return -1;
+               if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+                       continue;
+               if (available_idle_cpu(cpu))
+                       break;
+       }
+
+       return cpu;
+}
+
 /*
  * Scan the LLC domain for idle CPUs; this is dynamically regulated by
  * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
@@ -6420,16 +6437,7 @@ static int select_idle_cpu(struct task_s
 
        time = local_clock();
 
-       for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
-               if (loops++ >= nr) {
-                       cpu = -1;
-                       break;
-               }
-               if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
-                       continue;
-               if (available_idle_cpu(cpu))
-                       break;
-       }
+       cpu = __select_idle_cpu(p, sd, target, nr, &loops);
 
        time = local_clock() - time;
 


Reply via email to