Move the allocation of topology-specific cpumasks into the topology
code.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/sched/core.c     |    4 +---
 kernel/sched/sched.h    |    4 +---
 kernel/sched/topology.c |    7 +++++--
 3 files changed, 7 insertions(+), 8 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5961,7 +5961,6 @@ void __init sched_init_smp(void)
        cpumask_var_t non_isolated_cpus;
 
        alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
-       alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
        sched_init_numa();
 
@@ -5971,7 +5970,7 @@ void __init sched_init_smp(void)
         * happen.
         */
        mutex_lock(&sched_domains_mutex);
-       init_sched_domains(cpu_active_mask);
+       sched_init_domains(cpu_active_mask);
        cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
        if (cpumask_empty(non_isolated_cpus))
                cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
@@ -6185,7 +6184,6 @@ void __init sched_init(void)
        calc_load_update = jiffies + LOAD_FREQ;
 
 #ifdef CONFIG_SMP
-       zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
        /* May be allocated at isolcpus cmdline parse time */
        if (cpu_isolated_map == NULL)
                zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -606,11 +606,9 @@ struct root_domain {
 
 extern struct root_domain def_root_domain;
 extern struct mutex sched_domains_mutex;
-extern cpumask_var_t fallback_doms;
-extern cpumask_var_t sched_domains_tmpmask;
 
 extern void init_defrootdomain(void);
-extern int init_sched_domains(const struct cpumask *cpu_map);
+extern int sched_init_domains(const struct cpumask *cpu_map);
 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
 
 #endif /* CONFIG_SMP */
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1519,7 +1519,7 @@ static struct sched_domain_attr                *dattr_cur;
  * cpumask) fails, then fallback to a single sched domain,
  * as determined by the single cpumask fallback_doms.
  */
-cpumask_var_t                          fallback_doms;
+static cpumask_var_t                   fallback_doms;
 
 /*
  * arch_update_cpu_topology lets virtualized architectures update the
@@ -1561,10 +1561,13 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
  * For now this just excludes isolated CPUs, but could be used to
  * exclude other special cases in the future.
  */
-int init_sched_domains(const struct cpumask *cpu_map)
+int sched_init_domains(const struct cpumask *cpu_map)
 {
        int err;
 
+       zalloc_cpumask_var(&sched_domains_tmpmask, GFP_KERNEL);
+       zalloc_cpumask_var(&fallback_doms, GFP_KERNEL);
+
        arch_update_cpu_topology();
        ndoms_cur = 1;
        doms_cur = alloc_sched_domains(ndoms_cur);


Reply via email to