[PATCH v2 02/12] sched/fair: Rename variable names for sched averages

2016-05-02 Thread Yuyang Du
The names of the sched averages (including load_avg and util_avg) have
been changed and added to over the past couple of years; some of the
names are a bit confusing, especially to people reading them for the
first time. This patch attempts to make the names more self-explanatory,
and updates some comments as well.
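
For reference, two of the renamed constants (SCHED_AVG_MAX and
SCHED_AVG_MAX_N) can be reproduced outside the kernel. The following is
a minimal userspace sketch (not part of this patch; plain C, names
chosen for illustration) that accrues 1024 per period and applies the
fixed-point decay one period at a time until the running sum saturates:

#include <math.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* half-life of 32 periods: y^32 == 0.5 */
	const double y = pow(0.5, 1.0 / 32);
	/* 32-bit fixed-point multiplier for one period of decay (~0xfa83b2da) */
	const uint64_t y_inv = (uint64_t)(((1ULL << 32) - 1) * y);
	uint64_t sum = 0;
	int n = 0;

	for (;;) {
		/* decay the sum by one period, then accrue a full period */
		uint64_t next = ((sum * y_inv) >> 32) + 1024;
		if (next == sum)
			break;	/* saturated: this is the maximum sched avg */
		sum = next;
		n++;
	}
	printf("max = %llu after ~%d full periods\n",
	       (unsigned long long)sum, n);
	return 0;
}

With a half-life of 32 this converges to 47742 after roughly 345 full
periods, which is where SCHED_AVG_MAX and SCHED_AVG_MAX_N come from.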

Signed-off-by: Yuyang Du 
---
 kernel/sched/fair.c |  209 ++-
 1 file changed, 107 insertions(+), 102 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e803f11..74eaeab 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -660,13 +660,15 @@ static int select_idle_sibling(struct task_struct *p, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
 
 /*
- * We choose a half-life close to 1 scheduling period.
- * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
- * dependent on this value.
+ * Note: everything in the sched average calculation, including
+ * __decay_inv_multiply_N, __accumulated_sum_N, __accumulated_sum_N32,
+ * SCHED_AVG_MAX, and SCHED_AVG_MAX_N, depends on, and only on,
+ * (1) exponential decay, (2) a period of 1024*1024ns (~1ms), and (3)
+ * a half-life of 32 periods.
  */
-#define LOAD_AVG_PERIOD 32
-#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
-#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
+#define SCHED_AVG_HALFLIFE 32	/* number of periods as a half-life */
+#define SCHED_AVG_MAX 47742	/* maximum possible sched avg */
+#define SCHED_AVG_MAX_N 345	/* number of full periods to produce SCHED_AVG_MAX */
 
 /* Give new sched_entity start runnable values to heavy its load in infant time */
 void init_entity_runnable_average(struct sched_entity *se)
@@ -681,7 +683,7 @@ void init_entity_runnable_average(struct sched_entity *se)
 */
sa->period_contrib = 1023;
sa->load_avg = scale_load_down(se->load.weight);
-   sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
+   sa->load_sum = sa->load_avg * SCHED_AVG_MAX;
/*
 * At this point, util_avg won't be used in select_task_rq_fair anyway
 */
@@ -731,7 +733,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
} else {
sa->util_avg = cap;
}
-   sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
+   sa->util_sum = sa->util_avg * SCHED_AVG_MAX;
}
 }
 
@@ -1834,7 +1836,7 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
*period = now - p->last_task_numa_placement;
} else {
delta = p->se.avg.load_sum / p->se.load.weight;
-   *period = LOAD_AVG_MAX;
+   *period = SCHED_AVG_MAX;
}
 
p->last_sum_exec_runtime = runtime;
@@ -2583,7 +2585,7 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 
 #ifdef CONFIG_SMP
 /* Precomputed fixed inverse multiplies for multiplication by y^n */
-static const u32 runnable_avg_yN_inv[] = {
+static const u32 __decay_inv_multiply_N[] = {
	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
@@ -2596,7 +2598,7 @@ static const u32 runnable_avg_yN_inv[] = {
  * Precomputed \Sum y^k { 1<=k<=n }.  These are floor(true_value) to prevent
  * over-estimates when re-combining.
  */
-static const u32 runnable_avg_yN_sum[] = {
+static const u32 __accumulated_sum_N[] = {
0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
@@ -2612,16 +2614,18 @@ static const u32 __accumulated_sum_N32[] = {
 };
 
 /*
- * Approximate:
- *   val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
+ * val * y^n, where y^m ~= 0.5
+ *
+ * n is the number of periods elapsed; a period is ~1ms
+ * m is called the half-life in exponential decay; here it is SCHED_AVG_HALFLIFE=32.
  */
-static __always_inline u64 decay_load(u64 val, u64 n)
+static __always_inline u64 __decay_sum(u64 val, u64 n)
 {
unsigned int local_n;
 
if (!n)
return val;
-   else if (unlikely(n > LOAD_AVG_PERIOD * 63))
+   else if (unlikely(n > SCHED_AVG_HALFLIFE * 63))
return 0;
 
/* after bounds checking we can collapse to 32-bit */
@@ -2634,36 +2638,36 @@ static __always_inline u64 decay_load(u64 val, u64 n)
 *
 * To achieve constant time decay_load.
 */
-   if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
-   val >>= local_n / LOAD_AVG_PERIOD;
-   local_n %= LOAD_AVG_PERIOD;
+   if (unlikely(local_n >= SCHED_AVG_HALFLIFE)) {
+   val >>= local_n / SCHED_AVG_HALFLIFE;
+   local_n %= SCHED_AVG_HALFLIFE;
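
To make the constant-time decay concrete: below is a minimal userspace
sketch of what __decay_sum() computes (not part of the patch; the table
is truncated to the first two rows quoted above, whereas the kernel's
table has one entry per period of the half-life):

#include <stdint.h>
#include <stdio.h>

#define SCHED_AVG_HALFLIFE 32

/* first two rows of __decay_inv_multiply_N[], i.e. 2^32 * y^n */
static const uint32_t __decay_inv_multiply_N[] = {
	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
};

static uint64_t decay_sum(uint64_t val, uint64_t n)
{
	if (!n)
		return val;
	if (n > SCHED_AVG_HALFLIFE * 63)
		return 0;	/* decayed far below any tracked resolution */

	/* y^32 == 1/2: one right shift per whole half-life elapsed... */
	val >>= n / SCHED_AVG_HALFLIFE;
	n %= SCHED_AVG_HALFLIFE;

	/* ...then one fixed-point multiply for the remaining n < 32 periods */
	return (val * __decay_inv_multiply_N[n]) >> 32;
}

int main(void)
{
	/* one full half-life: 47742 -> 23870 (half, less fixed-point truncation) */
	printf("%llu\n", (unsigned long long)decay_sum(47742, 32));
	/* five periods: 1024 * 2^(-5/32) ~= 918 */
	printf("%llu\n", (unsigned long long)decay_sum(1024, 5));
	return 0;
}

Both calls stay inside the truncated table (n % 32 is 0 and 5 here);
with the kernel's full 32-entry table the same two lines handle any n.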