From: Olivier Cozette <olivier.coze...@arm.com>

These functions allow changing the load average period used
in the task load average computation through
/sys/kernel/hmp/load_avg_period_ms. This period is the time
in ms taken to go from a load average of 0 to 0.5 while
running, or from 1 to 0.5 while sleeping.

The default value is 32 and gives the same load_avg_ratio
computation as without this patch. These functions also allow
changing the up and down thresholds of HMP through
/sys/kernel/hmp/{up,down}_threshold. Both must be between 0 and
1024. The thresholds are divided by 1024 before being compared
to the load_avg_ratio.

If /sys/kernel/hmp/load_avg_period_ms is 128 and
/sys/kernel/hmp/up_threshold is 512, a task will be migrated
to a bigger cluster after running for 128ms, because after
load_avg_period_ms the load average is 0.5 and the real
up_threshold is 512 / 1024 = 0.5.
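
For reference, the tunables can be exercised at runtime like this
(the values are only illustrative):

  echo 128 > /sys/kernel/hmp/load_avg_period_ms
  echo 512 > /sys/kernel/hmp/up_threshold
  echo 256 > /sys/kernel/hmp/down_threshold

In general, a task that busy-loops starting from a load_avg_ratio of 0
reaches roughly 1 - 2^(-t/load_avg_period_ms) after t ms, so with the
values above it crosses up_threshold / 1024 = 0.5 at t = 128ms.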

Signed-off-by: Olivier Cozette <olivier.coze...@arm.com>
Signed-off-by: Chris Redpath <chris.redp...@arm.com>
---
 arch/arm/Kconfig    |   23 +++++++
 kernel/sched/fair.c |  183 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 204 insertions(+), 2 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f15c657..92ef121 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1603,6 +1603,29 @@ config HMP_SLOW_CPU_MASK
          Specify the cpuids of the slow CPUs in the system as a list string,
          e.g. cpuid 0+1 should be specified as 0-1.
 
+config HMP_VARIABLE_SCALE
+       bool "Allows changing the load tracking scale through sysfs"
+       depends on SCHED_HMP
+       help
+         When turned on, this option exports the thresholds and load average
+         period value for the load tracking patches through sysfs.
+         The values can be modified to change the rate of load accumulation
+         and the thresholds used for HMP migration.
+         The load_avg_period_ms is the time in ms to reach a load average of
+         0.5 for an idle task of 0 load average ratio that starts a busy loop.
+         The up_threshold and down_threshold are the thresholds used to
+         decide when a task moves to a faster CPU or back to a slower CPU.
+         The {up,down}_threshold are divided by 1024 before being compared
+         to the load average.
+         For example, with load_avg_period_ms = 128 and up_threshold = 512,
+         an idle task that starts a busy loop will be migrated to a bigger
+         CPU after 128ms, because after 128ms its load_avg_ratio is 0.5 and
+         the real up_threshold is 512 / 1024 = 0.5.
+         This patch has the same behavior as changing the Y of the load
+         average computation to
+               (1002/1024)^(LOAD_AVG_PERIOD/load_avg_period_ms)
+         but it removes intermediate overflows in the computation.
+
 config HAVE_ARM_SCU
        bool
        help
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e24f2e3..0108da6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -28,6 +28,10 @@
 #include <linux/interrupt.h>
 
 #include <trace/events/sched.h>
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+#include <linux/sysfs.h>
+#include <linux/vmalloc.h>
+#endif
 
 #include "sched.h"
 
@@ -1043,8 +1047,10 @@ static u32 __compute_runnable_contrib(u64 n)
        return contrib + runnable_avg_yN_sum[n];
 }
 
-/*
- * We can represent the historical contribution to runnable average as the
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+static u64 hmp_variable_scale_convert(u64 delta);
+#endif
+
+/*
+ * We can represent the historical contribution to runnable average as the
  * coefficients of a geometric series.  To do this we sub-divide our runnable
  * history into segments of approximately 1ms (1024us); label the segment that
  * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
@@ -1081,6 +1087,9 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
        int delta_w, decayed = 0;
 
        delta = now - sa->last_runnable_update;
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+       delta = hmp_variable_scale_convert(delta);
+#endif
        /*
         * This should only happen when time goes backwards, which it
         * unfortunately does during sched clock init when we swap over to TSC.
@@ -3325,6 +3334,176 @@ static inline void hmp_next_down_delay(struct sched_entity *se, int cpu)
        se->avg.hmp_last_down_migration = cfs_rq_clock_task(cfs_rq);
        se->avg.hmp_last_up_migration = 0;
 }
+
+#ifdef CONFIG_HMP_VARIABLE_SCALE
+/*
+ * Heterogeneous multiprocessor (HMP) optimizations
+ *
+ * These functions allow changing the growth rate of the load_avg_ratio;
+ * by default it goes from 0 to 0.5 in LOAD_AVG_PERIOD = 32ms.
+ * This can now be changed with /sys/kernel/hmp/load_avg_period_ms.
+ *
+ * These functions also allow changing the up and down thresholds of HMP
+ * using /sys/kernel/hmp/{up,down}_threshold.
+ * Both must be between 0 and 1024. The threshold that is compared
+ * to the load_avg_ratio is up_threshold/1024 and down_threshold/1024.
+ *
+ * For instance, if load_avg_period_ms = 64 and up_threshold = 512, an idle
+ * task with a load of 0 will reach the threshold after 64ms of busy loop.
+ *
+ * Changing load_avg_period_ms has the same effect as changing the
+ * default scaling factor Y=1002/1024 in the load_avg_ratio computation to
+ * (1002/1024.0)^(LOAD_AVG_PERIOD/load_avg_period_ms), but the latter
+ * could trigger intermediate overflows.
+ * For instance, with Y = 1023/1024 in __update_task_entity_contrib()
+ * "contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);"
+ * could overflow for a weight > 2^12 even if the load_avg_contrib
+ * should still fit in 32 bits. This does not happen when the delta time
+ * is instead multiplied by 1/22, i.e. load_avg_period_ms = 706.
+ */
+
+#define HMP_VARIABLE_SCALE_SHIFT 16ULL
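+
+/*
+ * Each tunable is exported as one sysfs attribute. The optional
+ * to_sysfs hook converts the stored value for display; from_sysfs
+ * validates/converts user input and returns a negative value on error.
+ */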
+struct hmp_global_attr {
+       struct attribute attr;
+       ssize_t (*show)(struct kobject *kobj,
+                       struct attribute *attr, char *buf);
+       ssize_t (*store)(struct kobject *a, struct attribute *b,
+                       const char *c, size_t count);
+       int *value;
+       int (*to_sysfs)(int);
+       int (*from_sysfs)(int);
+};
+
+#define HMP_DATA_SYSFS_MAX 3
+
+static struct hmp_data_struct {
+       int multiplier; /* used to scale the time delta */
+       struct attribute_group attr_group;
+       struct attribute *attributes[HMP_DATA_SYSFS_MAX + 1];
+       struct hmp_global_attr attr[HMP_DATA_SYSFS_MAX];
+} hmp_data;
+
+/*
+ * Scaling the delta time ends up increasing or decreasing the
+ * growth rate of the per-entity load_avg_ratio.
+ * The scale factor hmp_data.multiplier is a fixed-point
+ * number: (32-HMP_VARIABLE_SCALE_SHIFT).HMP_VARIABLE_SCALE_SHIFT
+ */
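+/*
+ * For example (illustrative values): load_avg_period_ms = 64 gives
+ * hmp_data.multiplier = (32 << 16) / 64 = 0x8000, i.e. 0.5 in this
+ * fixed-point format, so each delta is halved and the load average
+ * takes 64ms instead of 32ms to reach 0.5.
+ */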
+static u64 hmp_variable_scale_convert(u64 delta)
+{
+       u64 high = delta >> 32ULL;
+       u64 low = delta & 0xffffffffULL;
+
+       /* Scale each 32-bit half separately so the 64-bit intermediate
+        * products cannot overflow, then recombine the two halves. */
+       low *= hmp_data.multiplier;
+       high *= hmp_data.multiplier;
+       return (low >> HMP_VARIABLE_SCALE_SHIFT)
+                       + (high << (32ULL - HMP_VARIABLE_SCALE_SHIFT));
+}
+
+static ssize_t hmp_show(struct kobject *kobj,
+                               struct attribute *attr, char *buf)
+{
+       ssize_t ret = 0;
+       struct hmp_global_attr *hmp_attr =
+               container_of(attr, struct hmp_global_attr, attr);
+       int temp = *(hmp_attr->value);
+       if (hmp_attr->to_sysfs != NULL)
+               temp = hmp_attr->to_sysfs(temp);
+       ret = sprintf(buf, "%d\n", temp);
+       return ret;
+}
+
+static ssize_t hmp_store(struct kobject *a, struct attribute *attr,
+                               const char *buf, size_t count)
+{
+       int temp;
+       ssize_t ret = count;
+       struct hmp_global_attr *hmp_attr =
+               container_of(attr, struct hmp_global_attr, attr);
+       char *str = vmalloc(count + 1);
+       if (str == NULL)
+               return -ENOMEM;
+       memcpy(str, buf, count);
+       str[count] = 0;
+       if (sscanf(str, "%d", &temp) < 1)
+               ret = -EINVAL;
+       else {
+               if (hmp_attr->from_sysfs != NULL)
+                       temp = hmp_attr->from_sysfs(temp);
+               if (temp < 0)
+                       ret = -EINVAL;
+               else
+                       *(hmp_attr->value) = temp;
+       }
+       vfree(str);
+       return ret;
+}
+
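+/*
+ * Convert between load_avg_period_ms and the fixed-point multiplier.
+ * The mapping is its own inverse (up to integer rounding), which is why
+ * the same function serves as both the to_sysfs and from_sysfs handler.
+ */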
+static int hmp_period_tofrom_sysfs(int value)
+{
+       if (value <= 0)
+               return -1;      /* rejected as -EINVAL by hmp_store() */
+       return (LOAD_AVG_PERIOD << HMP_VARIABLE_SCALE_SHIFT) / value;
+}
+
+/* max value for threshold is 1024 */
+static int hmp_threshold_from_sysfs(int value)
+{
+       if (value > 1024)
+               return -1;
+       return value;
+}
+
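+/*
+ * Register one attribute in hmp_data. The attributes array must stay
+ * NULL-terminated, as required by sysfs attribute groups.
+ */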
+static void hmp_attr_add(
+       const char *name,
+       int *value,
+       int (*to_sysfs)(int),
+       int (*from_sysfs)(int))
+{
+       int i = 0;
+       while (hmp_data.attributes[i] != NULL) {
+               i++;
+               if (i >= HMP_DATA_SYSFS_MAX)
+                       return;
+       }
+       hmp_data.attr[i].attr.mode = 0644;
+       hmp_data.attr[i].show = hmp_show;
+       hmp_data.attr[i].store = hmp_store;
+       hmp_data.attr[i].attr.name = name;
+       hmp_data.attr[i].value = value;
+       hmp_data.attr[i].to_sysfs = to_sysfs;
+       hmp_data.attr[i].from_sysfs = from_sysfs;
+       hmp_data.attributes[i] = &hmp_data.attr[i].attr;
+       hmp_data.attributes[i + 1] = NULL;
+}
+
+static int hmp_attr_init(void)
+{
+       int ret;
+       memset(&hmp_data, 0, sizeof(hmp_data));
+       /*
+        * By default load_avg_period_ms == LOAD_AVG_PERIOD, meaning the
+        * load_avg_ratio computation is unchanged.
+        */
+       hmp_data.multiplier = hmp_period_tofrom_sysfs(LOAD_AVG_PERIOD);
+
+       hmp_attr_add("load_avg_period_ms",
+               &hmp_data.multiplier,
+               hmp_period_tofrom_sysfs,
+               hmp_period_tofrom_sysfs);
+       hmp_attr_add("up_threshold",
+               &hmp_up_threshold,
+               NULL,
+               hmp_theshold_from_sysfs);
+       hmp_attr_add("down_threshold",
+               &hmp_down_threshold,
+               NULL,
+               hmp_theshold_from_sysfs);
+
+       hmp_data.attr_group.name = "hmp";
+       hmp_data.attr_group.attrs = hmp_data.attributes;
+       ret = sysfs_create_group(kernel_kobj,
+               &hmp_data.attr_group);
+       return ret;
+}
+late_initcall(hmp_attr_init);
+#endif /* CONFIG_HMP_VARIABLE_SCALE */
 #endif /* CONFIG_SCHED_HMP */
 
 static inline bool is_buddy_busy(int cpu)
-- 
1.7.9.5