Hi Balbir,

I've kept the patch embedded in this thread as before; any comments are welcome.

Thanks,
-Aubrey
======================================================================

From d64455dcaf47329673903a68a9df1151400cdd7a Mon Sep 17 00:00:00 2001
From: Aubrey Li <aubrey...@linux.intel.com>
Date: Wed, 2 Dec 2020 13:53:30 +0000
Subject: [PATCH] sched: migration changes for core scheduling

 - Don't migrate if there is a cookie mismatch
     Load balancing tries to move a task from the busiest CPU to the
     destination CPU. When core scheduling is enabled, if the
     task's cookie does not match the destination CPU's core
     cookie, the task is skipped and not pulled to this CPU. This
     mitigates forced idle time on the destination CPU.

 - Select a cookie-matched idle CPU
     In the fast path of task wakeup, select the first idle CPU whose
     core cookie matches the task's cookie, instead of simply the
     first idle CPU.

 - Find a cookie-matched idlest CPU
     In the slow path of task wakeup, find the idlest CPU whose core
     cookie matches the task's cookie.

 - Don't migrate a task if the cookies do not match
     For NUMA load balancing, don't migrate a task to a CPU whose
     core cookie does not match the task's cookie (a simplified
     sketch of this cookie check follows below).
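
    For reference, here is a minimal userspace sketch of the decision the
    new sched_cpu/core/group_cookie_match() helpers implement. It is not
    kernel code; the struct and field names below are invented purely for
    illustration, and the idle-core special case is modeled with a flag:

	#include <stdbool.h>
	#include <stdio.h>

	struct core {
		unsigned long cookie;		/* cookie of the tasks running on the core */
		bool core_sched_enabled;	/* core scheduling on/off for this core */
		bool all_siblings_idle;		/* every SMT sibling on the core is idle */
	};

	struct task {
		unsigned long cookie;
	};

	/* Mirrors the placement decision: allowed without core scheduling,
	 * on a fully idle core, or when the cookies are equal. */
	static bool cookie_match(const struct core *c, const struct task *t)
	{
		if (!c->core_sched_enabled)
			return true;			/* no constraint without core scheduling */
		if (c->all_siblings_idle)
			return true;			/* an idle core can take any cookie */
		return c->cookie == t->cookie;		/* otherwise cookies must be equal */
	}

	int main(void)
	{
		struct core busy = { .cookie = 1, .core_sched_enabled = true,
				     .all_siblings_idle = false };
		struct task a = { .cookie = 1 }, b = { .cookie = 2 };

		printf("task a on busy core: %d\n", cookie_match(&busy, &a));	/* 1: cookies match */
		printf("task b on busy core: %d\n", cookie_match(&busy, &b));	/* 0: would force idle */
		return 0;
	}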

Cc: Balbir Singh <bsinghar...@gmail.com>
Cc: Vincent Guittot <vincent.guit...@linaro.org>
Tested-by: Julien Desfossez <jdesfos...@digitalocean.com>
Signed-off-by: Aubrey Li <aubrey...@linux.intel.com>
Signed-off-by: Tim Chen <tim.c.c...@linux.intel.com>
Signed-off-by: Vineeth Remanan Pillai <virem...@linux.microsoft.com>
Signed-off-by: Joel Fernandes (Google) <j...@joelfernandes.org>
---
 kernel/sched/fair.c  | 33 +++++++++++++++++---
 kernel/sched/sched.h | 71 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 100 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index de82f88ba98c..b8657766b660 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1921,6 +1921,13 @@ static void task_numa_find_cpu(struct task_numa_env *env,
                if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
                        continue;
 
+               /*
+                * Skip this cpu if source task's cookie does not match
+                * with CPU's core cookie.
+                */
+               if (!sched_core_cookie_match(cpu_rq(cpu), env->p))
+                       continue;
+
                env->dst_cpu = cpu;
                if (task_numa_compare(env, taskimp, groupimp, maymove))
                        break;
@@ -5867,11 +5874,15 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
 
        /* Traverse only the allowed CPUs */
        for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) {
+               struct rq *rq = cpu_rq(i);
+
+               if (!sched_core_cookie_match(rq, p))
+                       continue;
+
                if (sched_idle_cpu(i))
                        return i;
 
                if (available_idle_cpu(i)) {
-                       struct rq *rq = cpu_rq(i);
                        struct cpuidle_state *idle = idle_get_state(rq);
                        if (idle && idle->exit_latency < min_exit_latency) {
                                /*
@@ -6129,7 +6140,9 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
        for_each_cpu_wrap(cpu, cpus, target) {
                if (!--nr)
                        return -1;
-               if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
+
+               if ((available_idle_cpu(cpu) || sched_idle_cpu(cpu)) &&
+                   sched_cpu_cookie_match(cpu_rq(cpu), p))
                        break;
        }
 
@@ -7530,8 +7543,9 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
         * We do not migrate tasks that are:
         * 1) throttled_lb_pair, or
         * 2) cannot be migrated to this CPU due to cpus_ptr, or
-        * 3) running (obviously), or
-        * 4) are cache-hot on their current CPU.
+        * 3) task's cookie does not match this CPU's core cookie, or
+        * 4) running (obviously), or
+        * 5) are cache-hot on their current CPU.
         */
        if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
                return 0;
@@ -7566,6 +7580,13 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
                return 0;
        }
 
+       /*
+        * Don't migrate task if the task's cookie does not match
+        * with the destination CPU's core cookie.
+        */
+       if (!sched_core_cookie_match(cpu_rq(env->dst_cpu), p))
+               return 0;
+
        /* Record that we found atleast one task that could run on dst_cpu */
        env->flags &= ~LBF_ALL_PINNED;
 
@@ -8792,6 +8813,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                                        p->cpus_ptr))
                        continue;
 
+               /* Skip over this group if no cookie matched */
+               if (!sched_group_cookie_match(cpu_rq(this_cpu), p, group))
+                       continue;
+
                local_group = cpumask_test_cpu(this_cpu,
                                               sched_group_span(group));
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e72942a9ee11..e1adfffe6e39 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1135,6 +1135,61 @@ static inline raw_spinlock_t *rq_lockp(struct rq *rq)
 
 bool cfs_prio_less(struct task_struct *a, struct task_struct *b);
 
+/*
+ * Helpers to check if the CPU's core cookie matches with the task's cookie
+ * when core scheduling is enabled.
+ * A special case is that the task's cookie always matches the CPU's core
+ * cookie if the CPU is in an idle core.
+ */
+static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
+{
+       /* Ignore cookie match if core scheduler is not enabled on the CPU. */
+       if (!sched_core_enabled(rq))
+               return true;
+
+       return rq->core->core_cookie == p->core_cookie;
+}
+
+static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
+{
+       bool idle_core = true;
+       int cpu;
+
+       /* Ignore cookie match if core scheduler is not enabled on the CPU. */
+       if (!sched_core_enabled(rq))
+               return true;
+
+       for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
+               if (!available_idle_cpu(cpu)) {
+                       idle_core = false;
+                       break;
+               }
+       }
+
+       /*
+        * A CPU in an idle core is always the best choice for tasks with
+        * cookies.
+        */
+       return idle_core || sched_cpu_cookie_match(rq, p);
+}
+
+static inline bool sched_group_cookie_match(struct rq *rq,
+                                           struct task_struct *p,
+                                           struct sched_group *group)
+{
+       int cpu;
+
+       /* Ignore cookie match if core scheduler is not enabled on the CPU. */
+       if (!sched_core_enabled(rq))
+               return true;
+
+       for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
+               if (sched_core_cookie_match(cpu_rq(cpu), p))
+                       return true;
+       }
+       return false;
+}
+
 extern void queue_core_balance(struct rq *rq);
 
 #else /* !CONFIG_SCHED_CORE */
@@ -1153,6 +1208,22 @@ static inline void queue_core_balance(struct rq *rq)
 {
 }
 
+static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
+{
+       return true;
+}
+
+static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
+{
+       return true;
+}
+
+static inline bool sched_group_cookie_match(struct rq *rq,
+                                           struct task_struct *p,
+                                           struct sched_group *group)
+{
+       return true;
+}
 #endif /* CONFIG_SCHED_CORE */
 
 #ifdef CONFIG_SCHED_SMT
-- 
2.17.1
