On 26/02/2021 17:40, Srikar Dronamraju wrote: [...]
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 8a8bd7b13634..d49bfcdc4a19 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -5869,6 +5869,36 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
>  	return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
>  }
>  
> +static int prefer_idler_llc(int this_cpu, int prev_cpu, int sync)
> +{
> +	struct sched_domain_shared *tsds, *psds;
> +	int pnr_busy, pllc_size, tnr_busy, tllc_size, diff;
> +
> +	tsds = rcu_dereference(per_cpu(sd_llc_shared, this_cpu));
> +	tnr_busy = atomic_read(&tsds->nr_busy_cpus);
> +	tllc_size = per_cpu(sd_llc_size, this_cpu);
> +
> +	psds = rcu_dereference(per_cpu(sd_llc_shared, prev_cpu));
> +	pnr_busy = atomic_read(&psds->nr_busy_cpus);
> +	pllc_size = per_cpu(sd_llc_size, prev_cpu);
> +
> +	/* No need to compare, if both LLCs are fully loaded */
> +	if (pnr_busy == pllc_size && tnr_busy == pllc_size)

                                               ^ shouldn't this be tllc_size ?

> +		return nr_cpumask_bits;
> +
> +	if (sched_feat(WA_WAKER) && tnr_busy < tllc_size)
> +		return this_cpu;
> +
> +	/* For better wakeup latency, prefer idler LLC to cache affinity */
> +	diff = tnr_busy * pllc_size - sync - pnr_busy * tllc_size;
> +	if (!diff)
> +		return nr_cpumask_bits;
> +	if (diff < 0)
> +		return this_cpu;
> +
> +	return prev_cpu;
> +}
> +