It is possible for fbq_classify_rq to indicate that a CPU has tasks that
should be moved to another NUMA node, yet for migrate_improves_locality
and migrate_degrades_locality to fail to identify those tasks.

This patch always gives preference to the preferred node evaluation, and
only compares fault counts when evaluating a move between two
non-preferred nodes, which can only happen on a larger NUMA system.

On a two node system, fault counts are never compared: a task is either
about to be pulled off its preferred node, or migrated onto it.
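
As a purely illustrative sketch (not part of the patch; the enum and
helper name below are made up, not kernel API), the decision order that
both functions now share looks like this:

    /*
     * Hypothetical standalone sketch of the ordering shared by
     * migrate_improves_locality() and migrate_degrades_locality().
     */
    enum node_pref { PREF_DST, PREF_SRC, PREF_NEITHER };

    static int locality_delta(unsigned long src_faults,
                              unsigned long dst_faults,
                              enum node_pref pref)
    {
            /* Moving onto the preferred node always improves locality. */
            if (pref == PREF_DST)
                    return 1;

            /* Moving off the preferred node always degrades locality. */
            if (pref == PREF_SRC)
                    return -1;

            /*
             * Only a move between two non-preferred nodes, which cannot
             * happen on a two node system, is decided by fault counts.
             */
            if (dst_faults > src_faults)
                    return 1;
            if (dst_faults < src_faults)
                    return -1;
            return 0;
    }

With this ordering, migrate_improves_locality() corresponds to a
positive delta and migrate_degrades_locality() to a negative one.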

Signed-off-by: Rik van Riel <r...@redhat.com>
---
 kernel/sched/fair.c | 60 +++++++++++++++++++++++++++++------------------------
 1 file changed, 33 insertions(+), 27 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ffeaa4105e48..9c9f225b14fc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5467,10 +5467,15 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-/* Returns true if the destination node has incurred more faults */
+/*
+ * Returns true if the destination node is the preferred node.
+ * Needs to match fbq_classify_rq: if there is a runnable task
+ * that is not on its preferred node, we should identify it.
+ */
 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
 {
        struct numa_group *numa_group = rcu_dereference(p->numa_group);
+       unsigned long src_faults, dst_faults;
        int src_nid, dst_nid;
 
        if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
@@ -5484,29 +5489,30 @@ static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
        if (src_nid == dst_nid)
                return false;
 
-       if (numa_group) {
-               /* Task is already in the group's interleave set. */
-               if (node_isset(src_nid, numa_group->active_nodes))
-                       return false;
-
-               /* Task is moving into the group's interleave set. */
-               if (node_isset(dst_nid, numa_group->active_nodes))
-                       return true;
-
-               return group_faults(p, dst_nid) > group_faults(p, src_nid);
-       }
-
        /* Encourage migration to the preferred node. */
        if (dst_nid == p->numa_preferred_nid)
                return true;
 
-       return task_faults(p, dst_nid) > task_faults(p, src_nid);
+       /* Migrating away from the preferred node is bad. */
+       if (src_nid == p->numa_preferred_nid)
+               return false;
+
+       if (numa_group) {
+               src_faults = group_faults(p, src_nid);
+               dst_faults = group_faults(p, dst_nid);
+       } else {
+               src_faults = task_faults(p, src_nid);
+               dst_faults = task_faults(p, dst_nid);
+       }
+
+       return dst_faults > src_faults;
 }
 
 
 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 {
        struct numa_group *numa_group = rcu_dereference(p->numa_group);
+       unsigned long src_faults, dst_faults;
        int src_nid, dst_nid;
 
        if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
@@ -5521,23 +5527,23 @@ static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
        if (src_nid == dst_nid)
                return false;
 
-       if (numa_group) {
-               /* Task is moving within/into the group's interleave set. */
-               if (node_isset(dst_nid, numa_group->active_nodes))
-                       return false;
+       /* Migrating away from the preferred node is bad. */
+       if (src_nid == p->numa_preferred_nid)
+               return true;
 
-               /* Task is moving out of the group's interleave set. */
-               if (node_isset(src_nid, numa_group->active_nodes))
-                       return true;
+       /* Encourage migration to the preferred node. */
+       if (dst_nid == p->numa_preferred_nid)
+               return false;
 
-               return group_faults(p, dst_nid) < group_faults(p, src_nid);
+       if (numa_group) {
+               src_faults = group_faults(p, src_nid);
+               dst_faults = group_faults(p, dst_nid);
+       } else {
+               src_faults = task_faults(p, src_nid);
+               dst_faults = task_faults(p, dst_nid);
        }
 
-       /* Migrating away from the preferred node is always bad. */
-       if (src_nid == p->numa_preferred_nid)
-               return true;
-
-       return task_faults(p, dst_nid) < task_faults(p, src_nid);
+       return dst_faults < src_faults;
 }
 
 #else
--