Commit-ID:  7ea241afbf4924c58d41078599f7a32ba49fb985
Gitweb:     http://git.kernel.org/tip/7ea241afbf4924c58d41078599f7a32ba49fb985
Author:     Yuyang Du <yuyang...@intel.com>
AuthorDate: Wed, 15 Jul 2015 08:04:42 +0800
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Mon, 3 Aug 2015 12:24:32 +0200

sched/fair: Clean up load average references

For cfs_rq, we have load.weight, runnable_load_avg, and load_avg.
Clean up how they are used:

  - First, since group sched_entity already largely uses load_avg, we now
    use load_avg in all cases.

  - Second, for CPU-wide load balancing we use runnable_load_avg in all
    cases, which is the same behaviour as before this series (a sketch of
    the resulting split follows below).
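
For illustration only (not part of the patch): a minimal, self-contained
userspace sketch of the accessor split, with cfs_rq/sched_avg reduced to the
two fields relevant here and the numeric values chosen arbitrarily. Group
entity / shares calculations read the full load_avg via cfs_rq_load_avg(),
while load-balancer paths such as weighted_cpuload() keep reading
runnable_load_avg via cfs_rq_runnable_load_avg().

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's sched_avg and cfs_rq. */
    struct sched_avg {
            unsigned long load_avg;                 /* runnable + blocked load */
    };

    struct cfs_rq {
            unsigned long runnable_load_avg;        /* load of runnable tasks only */
            struct sched_avg avg;
    };

    /* Group-entity / shares calculations read the full average... */
    static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
    {
            return cfs_rq->avg.load_avg;
    }

    /* ...while CPU-wide load balancing keeps using the runnable part. */
    static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
    {
            return cfs_rq->runnable_load_avg;
    }

    int main(void)
    {
            struct cfs_rq rq = {
                    .runnable_load_avg = 1024,
                    .avg = { .load_avg = 2048 },
            };

            printf("calc_cfs_shares()-style path sees:  %lu\n",
                   cfs_rq_load_avg(&rq));
            printf("weighted_cpuload()-style path sees: %lu\n",
                   cfs_rq_runnable_load_avg(&rq));
            return 0;
    }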

Signed-off-by: Yuyang Du <yuyang...@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Mike Galbraith <efa...@gmx.de>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: ar...@linux.intel.com
Cc: bseg...@google.com
Cc: dietmar.eggem...@arm.com
Cc: fengguang...@intel.com
Cc: len.br...@intel.com
Cc: morten.rasmus...@arm.com
Cc: p...@google.com
Cc: rafael.j.wyso...@intel.com
Cc: umgwanakikb...@gmail.com
Cc: vincent.guit...@linaro.org
Link: http://lkml.kernel.org/r/1436918682-4971-8-git-send-email-yuyang...@intel.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 kernel/sched/fair.c | 44 +++++++++++++++++++++++++++++---------------
 1 file changed, 29 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1a878d5..858b94a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -685,6 +685,9 @@ void init_entity_runnable_average(struct sched_entity *se)
        sa->util_sum = LOAD_AVG_MAX;
        /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
+
+static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
+static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2360,7 +2363,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
         */
        tg_weight = atomic_long_read(&tg->load_avg);
        tg_weight -= cfs_rq->tg_load_avg_contrib;
-       tg_weight += cfs_rq->avg.load_avg;
+       tg_weight += cfs_rq_load_avg(cfs_rq);
 
        return tg_weight;
 }
@@ -2370,7 +2373,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
        long tg_weight, load, shares;
 
        tg_weight = calc_tg_weight(tg, cfs_rq);
-       load = cfs_rq->avg.load_avg;
+       load = cfs_rq_load_avg(cfs_rq);
 
        shares = (tg->shares * load);
        if (tg_weight)
@@ -2796,6 +2799,16 @@ void idle_exit_fair(struct rq *this_rq)
 {
 }
 
+static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
+{
+       return cfs_rq->runnable_load_avg;
+}
+
+static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
+{
+       return cfs_rq->avg.load_avg;
+}
+
 static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
@@ -4270,6 +4283,12 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
        sched_avg_update(this_rq);
 }
 
+/* Used instead of source_load when we know the type == 0 */
+static unsigned long weighted_cpuload(const int cpu)
+{
+       return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
+}
+
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * There is no sane way to deal with nohz on smp when using jiffies because the
@@ -4291,7 +4310,7 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
 static void update_idle_cpu_load(struct rq *this_rq)
 {
        unsigned long curr_jiffies = READ_ONCE(jiffies);
-       unsigned long load = this_rq->cfs.avg.load_avg;
+       unsigned long load = weighted_cpuload(cpu_of(this_rq));
        unsigned long pending_updates;
 
        /*
@@ -4337,7 +4356,7 @@ void update_cpu_load_nohz(void)
  */
 void update_cpu_load_active(struct rq *this_rq)
 {
-       unsigned long load = this_rq->cfs.avg.load_avg;
+       unsigned long load = weighted_cpuload(cpu_of(this_rq));
        /*
         * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
         */
@@ -4345,12 +4364,6 @@ void update_cpu_load_active(struct rq *this_rq)
        __update_cpu_load(this_rq, load, 1);
 }
 
-/* Used instead of source_load when we know the type == 0 */
-static unsigned long weighted_cpuload(const int cpu)
-{
-       return cpu_rq(cpu)->cfs.avg.load_avg;
-}
-
 /*
  * Return a low guess at the load of a migration-source cpu weighted
  * according to the scheduling class and "nice" value.
@@ -4398,7 +4411,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
-       unsigned long load_avg = rq->cfs.avg.load_avg;
+       unsigned long load_avg = weighted_cpuload(cpu);
 
        if (nr_running)
                return load_avg / nr_running;
@@ -4517,7 +4530,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                /*
                 * w = rw_i + @wl
                 */
-               w = se->my_q->avg.load_avg + wl;
+               w = cfs_rq_load_avg(se->my_q) + wl;
 
                /*
                 * wl = S * s'_i; see (2)
@@ -5862,13 +5875,14 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
        }
 
        if (!se) {
-               cfs_rq->h_load = cfs_rq->avg.load_avg;
+               cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
                cfs_rq->last_h_load_update = now;
        }
 
        while ((se = cfs_rq->h_load_next) != NULL) {
                load = cfs_rq->h_load;
-       load = div64_ul(load * se->avg.load_avg, cfs_rq->avg.load_avg + 1);
+               load = div64_ul(load * se->avg.load_avg,
+                       cfs_rq_load_avg(cfs_rq) + 1);
                cfs_rq = group_cfs_rq(se);
                cfs_rq->h_load = load;
                cfs_rq->last_h_load_update = now;
@@ -5881,7 +5895,7 @@ static unsigned long task_h_load(struct task_struct *p)
 
        update_cfs_rq_h_load(cfs_rq);
        return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
-                       cfs_rq->avg.load_avg + 1);
+                       cfs_rq_load_avg(cfs_rq) + 1);
 }
 #else
 static inline void update_blocked_averages(int cpu)
--