Add new fields to the 'rq' structure and a routine, called during fair class
setup, which initializes the HMP scheduler variables: the big and little cluster
masks. They are read from the kernel config (if set); otherwise default values
are used.

Signed-off-by: Tarek Dakhran <t.dakh...@samsung.com>
Signed-off-by: Sergey Dyasly <s.dya...@samsung.com>
Signed-off-by: Dmitriy Safonov <d.safo...@partner.samsung.com>
Signed-off-by: Arseniy Krasnov <a.kras...@samsung.com>
Signed-off-by: Ilya Maximets <i.maxim...@samsung.com>
---
 kernel/sched/core.c  |  4 ++++
 kernel/sched/fair.c  | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h | 15 +++++++++++++++
 3 files changed, 65 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e3a632f..8747e06 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7488,6 +7488,10 @@ void __init sched_init(void)
 #endif
                init_rq_hrtick(rq);
                atomic_set(&rq->nr_iowait, 0);
+#ifdef CONFIG_HPERF_HMP
+               rq->druntime_sum = 0;
+               rq->nr_hmp_tasks = 0;
+#endif
        }
 
        set_load_weight(&init_task);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9a5e60f..c57007f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -100,6 +100,11 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  */
 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
 
+#ifdef CONFIG_HPERF_HMP
+extern void hmp_set_cpu_masks(struct cpumask *, struct cpumask *);
+static unsigned int freq_scale_cpu_power[CONFIG_NR_CPUS];
+#endif /* CONFIG_HPERF_HMP */
+
 #ifdef CONFIG_CFS_BANDWIDTH
 /*
  * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
@@ -8305,8 +8310,38 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_SCHED_DEBUG */
 
+#ifdef CONFIG_HPERF_HMP
+static unsigned long default_fast_mask = 0x0F;
+static unsigned long default_slow_mask = 0xF0;
+
+void hmp_set_cpu_masks(struct cpumask *fast_mask, struct cpumask *slow_mask)
+{
+       cpumask_clear(fast_mask);
+       cpumask_clear(slow_mask);
+
+       /* try to parse CPU masks from config */
+       if (strlen(CONFIG_HMP_FAST_CPU_MASK) &&
+           strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+               if (cpumask_parse(CONFIG_HMP_FAST_CPU_MASK, fast_mask) ||
+                   cpumask_parse(CONFIG_HMP_SLOW_CPU_MASK, slow_mask))
+                       pr_err("hperf_hmp: Failed to get CPU masks from config!\n");
+               else
+                       return;
+       }
+
+       pr_err("hperf_hmp: Fast mask will be: %08lX, slow mask: %08lX\n",
+              default_fast_mask, default_slow_mask);
+
+       fast_mask->bits[0] = default_fast_mask;
+       slow_mask->bits[0] = default_slow_mask;
+}
+#endif
+
 __init void init_sched_fair_class(void)
 {
+#ifdef CONFIG_HPERF_HMP
+       int cpu;
+#endif
 #ifdef CONFIG_SMP
        open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
 
@@ -8315,6 +8350,17 @@ __init void init_sched_fair_class(void)
        zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
        cpu_notifier(sched_ilb_notifier, 0);
 #endif
+
+#ifdef CONFIG_HPERF_HMP
+       for_each_possible_cpu(cpu)
+               freq_scale_cpu_power[cpu] = SCHED_CAPACITY_SCALE;
+       hmp_set_cpu_masks(cpu_fastest_mask, cpu_slowest_mask);
+       pr_info("hperf_hmp: fast CPUs mask: %08X\n",
+               (unsigned int)cpumask_bits(cpu_fastest_mask)[0]);
+       pr_info("hperf_hmp: slow CPUs mask: %08X\n",
+               (unsigned int)cpumask_bits(cpu_slowest_mask)[0]);
+#endif
+
 #endif /* SMP */
 
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6d2a119..94828dc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -597,6 +597,11 @@ struct rq {
         */
        unsigned long nr_uninterruptible;
 
+#ifdef CONFIG_HPERF_HMP
+       /* shows the amount of accumulated unfairness by tasks of this rq */
+       long druntime_sum;
+       unsigned int nr_hmp_tasks;
+#endif
        struct task_struct *curr, *idle, *stop;
        unsigned long next_balance;
        struct mm_struct *prev_mm;
@@ -892,6 +897,16 @@ static inline unsigned int group_first_cpu(struct sched_group *group)
 
 extern int group_balance_cpu(struct sched_group *sg);
 
+#ifdef CONFIG_HPERF_HMP
+extern struct cpumask *cpu_fastest_mask;
+extern struct cpumask *cpu_slowest_mask;
+
+static inline bool cpu_is_fastest(int cpu)
+{
+       return cpumask_test_cpu(cpu, cpu_fastest_mask);
+}
+#endif
+
 #else
 
 static inline void sched_ttwu_pending(void) { }
-- 
1.9.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to