Modulate the tracked load of a task by the ratio of the current to the
maximum compute capacity of the core it is executing on.
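
For reference, the scaling boils down to a simple fixed-point
computation: curr_scale is cap/max_cap expressed in units of
1 << SCHED_ARCH_SCALE_POWER_SHIFT, and every chunk of runnable time is
multiplied by it before being accumulated. The stand-alone sketch below
is illustrative only; the shift value and the capacity numbers are
assumptions, not values taken from any particular platform, and plain
integers stand in for compute_capacity_of()/max_compute_capacity_of():

    #include <stdio.h>

    /* stands in for SCHED_ARCH_SCALE_POWER_SHIFT (assumed value) */
    #define SCALE_SHIFT 10

    int main(void)
    {
        unsigned int cap = 512;      /* hypothetical current capacity */
        unsigned int max_cap = 1023; /* hypothetical maximum capacity */
        unsigned int delta = 1024;   /* runnable time to accrue */

        /* cap/max_cap as a fixed-point ratio; +1 avoids division by zero */
        unsigned int curr_scale = (cap << SCALE_SHIFT) / (max_cap + 1);

        /* runnable time is scaled before it is accumulated */
        unsigned int scaled = (delta * curr_scale) >> SCALE_SHIFT;

        printf("curr_scale=%u, delta %u scales to %u\n",
               curr_scale, delta, scaled);
        return 0;
    }

A core running at half of its maximum capacity therefore accrues
roughly half the runnable time into runnable_avg_sum for the same
wall-clock delta.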

Change-Id: If6aea806e631f2313fd925c8902260a522663dbd

Conflicts:

        kernel/sched/fair.c
---
 kernel/sched/fair.c |   51 +++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 43 insertions(+), 8 deletions(-)
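
A note on the hunks below: __compute_runnable_contrib() returns the
already-summed series \sum (1..n_period) 1024*y^i, and since
multiplication distributes over addition, scaling that total once is
equivalent to scaling every term individually; hence a single
multiply-and-shift on runnable_contrib is sufficient. Note also that
the accumulation into runnable_avg_period is deliberately moved ahead
of the scaling, so the period keeps counting unscaled wall-clock time
while only runnable_avg_sum and usage_avg_sum are scaled; the tracked
load (sum/period) thus shrinks in proportion to the capacity ratio.
When CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY is not set, the new cpu
argument is passed as -1 and never used.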

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f6bbe1e..3f3ee08 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1319,11 +1319,15 @@ static inline void update_cpu_capacity(int cpu)
 static __always_inline int __update_entity_runnable_avg(u64 now,
                                                        struct sched_avg *sa,
                                                        int runnable,
-                                                       int running)
+                                                       int running,
+                                                       int cpu)
 {
        u64 delta, periods;
        u32 runnable_contrib;
        int delta_w, decayed = 0;
+#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
+       u32 curr_scale = 1 << SCHED_ARCH_SCALE_POWER_SHIFT;
+#endif /* CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY */
 
        delta = now - sa->last_runnable_update;
        /*
@@ -1344,6 +1348,12 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
                return 0;
        sa->last_runnable_update = now;
 
+#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
+       update_cpu_capacity(cpu);
+       curr_scale = (compute_capacity_of(cpu) << SCHED_ARCH_SCALE_POWER_SHIFT)
+                       / (max_compute_capacity_of(cpu) + 1);
+#endif
+
        /* delta_w is the amount already accumulated against our next period */
        delta_w = sa->runnable_avg_period % 1024;
        if (delta + delta_w >= 1024) {
@@ -1356,13 +1366,17 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
                 * period and accrue it.
                 */
                delta_w = 1024 - delta_w;
+               sa->runnable_avg_period += delta_w;
+               delta -= delta_w;
+               /* scale runnable time if necessary */
+#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
+               delta_w = (delta_w * curr_scale)
+                               >> SCHED_ARCH_SCALE_POWER_SHIFT;
+#endif
                if (runnable)
                        sa->runnable_avg_sum += delta_w;
                if (running)
                        sa->usage_avg_sum += delta_w;
-               sa->runnable_avg_period += delta_w;
-
-               delta -= delta_w;
 
                /* Figure out how many additional periods this update spans */
                periods = delta / 1024;
@@ -1376,19 +1390,31 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 
                /* Efficiently calculate \sum (1..n_period) 1024*y^i */
                runnable_contrib = __compute_runnable_contrib(periods);
+               sa->runnable_avg_period += runnable_contrib;
+               /*
+                * Apply load scaling if necessary. Note that scaling
+                * the summed series is the same as scaling each term.
+                */
+#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
+               runnable_contrib = (runnable_contrib * curr_scale)
+                               >> SCHED_ARCH_SCALE_POWER_SHIFT;
+#endif
                if (runnable)
                        sa->runnable_avg_sum += runnable_contrib;
                if (running)
                        sa->usage_avg_sum += runnable_contrib;
-               sa->runnable_avg_period += runnable_contrib;
        }
 
        /* Remainder of delta accrued against u_0` */
+       sa->runnable_avg_period += delta;
+       /* scale if necessary */
+#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
+       delta = ((delta * curr_scale) >> SCHED_ARCH_SCALE_POWER_SHIFT);
+#endif
        if (runnable)
                sa->runnable_avg_sum += delta;
        if (running)
                sa->usage_avg_sum += delta;
-       sa->runnable_avg_period += delta;
 
        return decayed;
 }
@@ -1551,7 +1577,11 @@ static inline void update_entity_load_avg(struct sched_entity *se,
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        long contrib_delta;
        u64 now;
+       int cpu = -1;   /* not used in normal case */
 
+#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
+       cpu = cfs_rq->rq->cpu;
+#endif
        /*
         * For a group entity we need to use their owned cfs_rq_clock_task() in
         * case they are the parent of a throttled hierarchy.
@@ -1562,7 +1592,7 @@ static inline void update_entity_load_avg(struct sched_entity *se,
                now = cfs_rq_clock_task(group_cfs_rq(se));
 
        if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq,
-                                         cfs_rq->curr == se))
+                       cfs_rq->curr == se, cpu))
                return;
 
        contrib_delta = __update_entity_load_avg_contrib(se);
@@ -1607,8 +1637,13 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
        u32 contrib;
+       int cpu = -1;   /* not used in normal case */
+
+#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
+       cpu = rq->cpu;
+#endif
        __update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable,
-                                    runnable);
+                       runnable, cpu);
        __update_tg_runnable_avg(&rq->avg, &rq->cfs);
        contrib = rq->avg.runnable_avg_sum * scale_load_down(1024);
        contrib /= (rq->avg.runnable_avg_period + 1);
-- 
1.7.9.5