With scheduling domains sufficiently prepared, we can now initialize
the full hierarchy of runqueues and link it with the already existing
bottom level, which we set up earlier.
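
For illustration: on a hypothetical machine with two packages of two
CPUs each and two scheduling domain levels (package and system), the
resulting SD-RQ hierarchy looks roughly like this:

                      system rq              <-- top, initialized here
                     /         \
           package rq           package rq   <-- initialized here
            /      \             /      \
      CPU0 rq   CPU1 rq    CPU2 rq   CPU3 rq <-- bottom level, already live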

Signed-off-by: Jan H. Schönherr <jscho...@amazon.de>
---
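
Not part of the patch, just an illustration for reviewers: once
cosched_init_hierarchy() has run, each CPU's bottom-level sdrq can be
walked up to the root via its sd_parent links. A minimal sketch of such
a walk, using a hypothetical debug helper and assuming that struct
sdrq_data stores the level passed to init_sdrq_data() in a field named
'level':

	/* Hypothetical debug helper; the 'level' field name is an assumption. */
	static void cosched_dump_chain(int cpu)
	{
		struct sdrq *sdrq = &cpu_rq(cpu)->cfs.sdrq;

		while (sdrq) {
			pr_info("cpu%d: sdrq at level %d\n",
				cpu, sdrq->data->level);
			sdrq = sdrq->sd_parent;
		}
	}
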
 kernel/sched/core.c    |  1 +
 kernel/sched/cosched.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/sched.h   |  2 ++
 3 files changed, 79 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cc801f84bf97..5350cab7ac4a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5876,6 +5876,7 @@ void __init sched_init_smp(void)
         */
        mutex_lock(&sched_domains_mutex);
        sched_init_domains(cpu_active_mask);
+       cosched_init_hierarchy();
        mutex_unlock(&sched_domains_mutex);
 
        /* Move init over to a non-isolated CPU */
diff --git a/kernel/sched/cosched.c b/kernel/sched/cosched.c
index 7a793aa93114..48394050ec34 100644
--- a/kernel/sched/cosched.c
+++ b/kernel/sched/cosched.c
@@ -351,3 +351,79 @@ void cosched_init_topology(void)
        /* Make permanent */
        set_sched_topology(tl);
 }
+
+/*
+ * Build the SD-RQ hierarchy according to the scheduling domains.
+ *
+ * Note that the scheduler is already live at this point, but the scheduling
+ * domains have only just become available. That means we only set up
+ * everything above the bottom level of the SD-RQ hierarchy and link it with
+ * the already active bottom level.
+ *
+ * We can do this without any locks, as nothing will automatically traverse
+ * into these data structures yet: such traversal requires an update of the
+ * sdrq.is_root property, which will only happen later.
+ */
+void cosched_init_hierarchy(void)
+{
+       struct sched_domain *sd;
+       struct sdrq *sdrq;
+       int cpu, level = 1;
+
+       /* Only one CPU in the system, so we are finished here */
+       if (cpumask_weight(cpu_possible_mask) == 1)
+               return;
+
+       /* Determine and initialize top */
+       for_each_domain(0, sd) {
+               if (!sd->parent)
+                       break;
+               level++;
+       }
+
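+       /* sd now points at the topmost domain; its shared rq becomes the root */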
+       init_sdrq_data(&sd->shared->rq.sdrq_data, NULL, sched_domain_span(sd),
+                      level);
+       init_cfs_rq(&sd->shared->rq.cfs);
+       init_tg_cfs_entry(&root_task_group, &sd->shared->rq.cfs, NULL,
+                         &sd->shared->rq, NULL);
+       init_sdrq(&root_task_group, &sd->shared->rq.cfs.sdrq, NULL, NULL,
+                 &sd->shared->rq.sdrq_data);
+
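+       /* Remember the top of the hierarchy for the root task group */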
+       root_task_group.top_cfsrq = &sd->shared->rq.cfs;
+
+       /* Initialize others top-down, per CPU */
+       for_each_possible_cpu(cpu) {
+               /* Find the highest not-yet-initialized position for this CPU */
+               for_each_domain(cpu, sd) {
+                       if (sd->shared->rq.sdrq_data.span_weight)
+                               break;
+               }
+               if (WARN(!sd, "SD hierarchy seems to have multiple roots"))
+                       continue;
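+               /* sd is the lowest level set up so far; start at its child */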
+               sd = sd->child;
+
+               /* Initialize from there downwards */
+               for_each_lower_domain(sd) {
+                       init_sdrq_data(&sd->shared->rq.sdrq_data,
+                                      &sd->parent->shared->rq.sdrq_data,
+                                      sched_domain_span(sd), -1);
+                       init_cfs_rq(&sd->shared->rq.cfs);
+                       init_tg_cfs_entry(&root_task_group, &sd->shared->rq.cfs,
+                                         NULL, &sd->shared->rq, NULL);
+                       init_sdrq(&root_task_group, &sd->shared->rq.cfs.sdrq,
+                                 &sd->parent->shared->rq.cfs.sdrq, NULL,
+                                 &sd->shared->rq.sdrq_data);
+               }
+
+               /* Link up with local data structures */
+               sdrq = &cpu_rq(cpu)->cfs.sdrq;
+               sd = cpu_rq(cpu)->sd;
+
+               /* sdrq_data */
+               sdrq->data->parent = &sd->shared->rq.sdrq_data;
+
+               /* sdrq */
+               sdrq->sd_parent = &sd->shared->rq.cfs.sdrq;
+               list_add_tail(&sdrq->siblings, &sdrq->sd_parent->children);
+       }
+}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ed9c526b74ee..d65c98c34c13 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1132,9 +1132,11 @@ static inline struct cfs_rq *taskgroup_next_cfsrq(struct task_group *tg,
 #ifdef CONFIG_COSCHEDULING
 void cosched_init_bottom(void);
 void cosched_init_topology(void);
+void cosched_init_hierarchy(void);
 #else /* !CONFIG_COSCHEDULING */
 static inline void cosched_init_bottom(void) { }
 static inline void cosched_init_topology(void) { }
+static inline void cosched_init_hierarchy(void) { }
 #endif /* !CONFIG_COSCHEDULING */
 
 #ifdef CONFIG_SCHED_SMT
-- 
2.9.3.1.gcba166c.dirty