The following commit has been merged into the sched/core branch of tip:

Commit-ID:     abeae76a47005aa3f07c9be12d8076365622e25c
Gitweb:        https://git.kernel.org/tip/abeae76a47005aa3f07c9be12d8076365622e25c
Author:        Mel Gorman <mgor...@techsingularity.net>
AuthorDate:    Fri, 20 Nov 2020 09:06:27 
Committer:     Peter Zijlstra <pet...@infradead.org>
CommitterDate: Tue, 24 Nov 2020 16:47:47 +01:00

sched/numa: Rename nr_running and break out the magic number

This is simply a preparation patch to make the following patches easier
to read. No functional change.

Signed-off-by: Mel Gorman <mgor...@techsingularity.net>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guit...@linaro.org>
Link: https://lkml.kernel.org/r/20201120090630.3286-2-mgor...@techsingularity.net
---
 kernel/sched/fair.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6691e28..9d10abe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1559,7 +1559,7 @@ struct task_numa_env {
 static unsigned long cpu_load(struct rq *rq);
 static unsigned long cpu_runnable(struct rq *rq);
 static unsigned long cpu_util(int cpu);
-static inline long adjust_numa_imbalance(int imbalance, int nr_running);
+static inline long adjust_numa_imbalance(int imbalance, int dst_running);
 
 static inline enum
 numa_type numa_classify(unsigned int imbalance_pct,
@@ -8991,7 +8991,9 @@ next_group:
        }
 }
 
-static inline long adjust_numa_imbalance(int imbalance, int nr_running)
+#define NUMA_IMBALANCE_MIN 2
+
+static inline long adjust_numa_imbalance(int imbalance, int dst_running)
 {
        unsigned int imbalance_min;
 
@@ -8999,8 +9001,8 @@ static inline long adjust_numa_imbalance(int imbalance, int nr_running)
         * Allow a small imbalance based on a simple pair of communicating
         * tasks that remain local when the source domain is almost idle.
         */
-       imbalance_min = 2;
-       if (nr_running <= imbalance_min)
+       imbalance_min = NUMA_IMBALANCE_MIN;
+       if (dst_running <= imbalance_min)
                return 0;
 
        return imbalance;

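For reference, this is how adjust_numa_imbalance() reads once the patch is
applied, assembled from the hunks above (a readability sketch, not an
authoritative copy of kernel/sched/fair.c):

/* Threshold used by adjust_numa_imbalance() below */
#define NUMA_IMBALANCE_MIN 2

static inline long adjust_numa_imbalance(int imbalance, int dst_running)
{
	unsigned int imbalance_min;

	/*
	 * Allow a small imbalance based on a simple pair of communicating
	 * tasks that remain local when the source domain is almost idle.
	 */
	imbalance_min = NUMA_IMBALANCE_MIN;
	if (dst_running <= imbalance_min)
		return 0;

	return imbalance;
}

The rename from nr_running to dst_running presumably makes explicit that the
count refers to the destination side of the balance decision, and
NUMA_IMBALANCE_MIN replaces the open-coded 2 so the follow-up patches can
reuse the constant.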