From: Dietmar Eggemann <[email protected]>

Move the setting of the sched domain topology flag SD_ASYM_PACKING from the
weak function arch_sd_sibling_asym_packing() into the arch-specific topology
info table. The powerpc SMT level now provides the flag via
arch_cpu_smt_flags(), and the weak function is removed from the scheduler
core.

Signed-off-by: Dietmar Eggemann <[email protected]>
---
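For reference, a minimal sketch of the arch-facing interface this patch
relies on (the actual struct layout and the set_sd_topology_info()
prototype are introduced earlier in the series; the field names below are
an illustration, not taken from this patch):

	/*
	 * Sketch (assumed layout): each entry describes one topology level
	 * via a cpumask accessor, an optional per-cpu flags callback such as
	 * arch_cpu_smt_flags() below, and a name used for sched domain
	 * debugging.
	 */
	struct sched_domain_topology_info {
		const struct cpumask *(*mask)(int cpu);	/* span of the level */
		int (*flags)(int cpu);			/* SD_* topology flags */
	#ifdef CONFIG_SCHED_DEBUG
		char *name;				/* set via SD_NAME() */
	#endif
	};

	/* Register the arch table; the scheduler core builds domains from it. */
	void set_sd_topology_info(struct sched_domain_topology_info *ti,
				  unsigned int n);
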
 arch/powerpc/kernel/smp.c |   34 +++++++++++++++++++++++++---------
 kernel/sched/core.c       |   13 -------------
 2 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c1cf4a1522d9..f8ba79dd9147 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -760,6 +760,30 @@ int setup_profiling_timer(unsigned int multiplier)
        return 0;
 }
 
+#ifdef CONFIG_SCHED_SMT
+static inline int arch_cpu_smt_flags(int cpu)
+{
+       int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+
+       if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
+               printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
+               flags |= SD_ASYM_PACKING;
+       }
+
+       return flags;
+}
+#endif
+
+static struct sched_domain_topology_info topology_info[] = {
+#ifdef CONFIG_SCHED_SMT
+       { cpu_smt_mask, arch_cpu_smt_flags, SD_NAME(SIBLING) },
+#endif
+#ifdef CONFIG_SCHED_MC
+       { cpu_coregroup_mask, cpu_coregroup_flags, SD_NAME(MC) },
+#endif
+       { cpu_cpu_mask, SD_NAME(CPU) },
+};
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
        cpumask_var_t old_mask;
@@ -784,15 +808,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
        dump_numa_cpu_topology();
 
-}
-
-int arch_sd_sibling_asym_packing(void)
-{
-       if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
-               printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
-               return SD_ASYM_PACKING;
-       }
-       return 0;
+       set_sd_topology_info(topology_info, ARRAY_SIZE(topology_info));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 79f34cc5f547..a3e945021e97 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5233,11 +5233,6 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
        atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
 }
 
-int __weak arch_sd_sibling_asym_packing(void)
-{
-       return 0*SD_ASYM_PACKING;
-}
-
 /*
  * Initializers for schedule domains
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@ -5662,14 +5657,6 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
        if (sd->flags & SD_SHARE_CPUPOWER) {
                sd->imbalance_pct = 110;
                sd->smt_gain = 1178; /* ~15% */
-
-               /*
-                * Call SMT specific arch topology function.
-                * This goes away once the powerpc arch uses
-                * the new interface for scheduler domain
-                * setup.
-                */
-               sd->flags |= arch_sd_sibling_asym_packing();
        } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
                sd->cache_nice_tries = 1;
                sd->busy_idx = 2;
-- 
1.7.9.5

