On Mon, May 09, 2016 at 12:48:12PM +0200, Peter Zijlstra wrote:
> +/*
> + * Scan the LLC domain for idle CPUs; this is dynamically regulated by
> + * comparing the average scan cost (tracked in sd->avg_scan_cost) against the

                                       tracked in this_sd->avg_scan_cost, to match the code below

> + * average idle time for this rq (as found in rq->avg_idle).
> + */
> +static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
> +{
> +     struct sched_domain *this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
> +     u64 avg_idle = this_rq()->avg_idle;
> +     u64 avg_cost = this_sd->avg_scan_cost;
> +     u64 time, cost;
> +     s64 delta;
> +     int cpu, wrap;
> +
> +     /*
> +      * Due to large variance we need a large fuzz factor; hackbench in
> +      * particular is sensitive here.
> +      */
> +     if ((avg_idle / 512) < avg_cost)
> +             return -1;
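
(To put numbers on the fuzz factor: with, say, avg_idle = 512us and
avg_scan_cost = 2us, this check computes 512us / 512 = 1us < 2us and
skips the scan, since the rq is not expected to stay idle long enough
to amortize it. Made-up numbers, just for illustration.)
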
> +
> +     time = local_clock();
> +
> +     for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
> +             if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
> +                     continue;
> +             if (idle_cpu(cpu))
> +                     break;
> +     }
> +
> +     time = local_clock() - time;
> +     cost = this_sd->avg_scan_cost;
> +     delta = (s64)(time - cost) / 8;
> +     this_sd->avg_scan_cost += delta;
> +
> +     return cpu;
> +}
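
(Aside: the avg_scan_cost update above is an exponential moving average
with weight 1/8; a minimal sketch of the same arithmetic, helper name
mine and kernel u64/s64 types assumed:

	/* new_avg = old_avg + (sample - old_avg) / 8 */
	static inline u64 scan_cost_ewma(u64 old_avg, u64 sample)
	{
		s64 delta = (s64)(sample - old_avg) / 8;

		return old_avg + delta;
	}

i.e. one scan only moves the average 1/8th of the way toward the new
sample, which smooths out the large variance mentioned in the comment.)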

[snip]

> +
> +     i = select_idle_core(p, sd, target);
> +     if ((unsigned)i < nr_cpumask_bits)
> +             return i;
> +
> +     i = select_idle_cpu(p, sd, target);
> +     if ((unsigned)i < nr_cpumask_bits)
> +             return i;
> +
> +     i = select_idle_smt(p, sd, target);
> +     if ((unsigned)i < nr_cpumask_bits)
> +             return i;

First, on SMT these three scans overlap a lot, though the overlap also has
an opportunistic-spinning effect, which is good if that was the intent.
Either way, maybe you should write something about it, and about why the
scan cost is only accounted for select_idle_cpu(), not select_idle_core().

Second, I am still considering combining them a bit, as in the patch below.
More could be combined if you want.

P.S. The idle_smt_cpu recorded below may not be the first idle sibling
(later idle siblings in the target's core overwrite it), but one more i++
can make it so; see the sketch after the patch.

---

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 25bd5b0..648a15c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5281,7 +5281,7 @@ unlock:
 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 {
        struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
-       int core, cpu, wrap;
+       int core, cpu, wrap, i = 0, idle_smt_cpu = -1;
 
        if (!static_branch_likely(&sched_smt_present))
                return -1;
@@ -5298,8 +5298,14 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int
                        cpumask_clear_cpu(cpu, cpus);
                        if (!idle_cpu(cpu))
                                idle = false;
+                       /*
+                        * First smt must be target's smt, and any cpu here is allowed
+                        */
+                       else if (i == 0)
+                               idle_smt_cpu = cpu;
                }
 
+               i++;
                if (idle)
                        return core;
        }
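
Concretely, the "one more i++" from the P.S. could go right after the
recording, so later idle siblings no longer overwrite the first one
(untested sketch on top of the hunk above):

			else if (i == 0) {
				idle_smt_cpu = cpu;
				i++;	/* freeze at the first idle sibling */
			}

This keeps the recording restricted to the target's core, since i is
still 0 only while the first core is being scanned.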
