Currently, update_top_cache_domain() is called whenever a sched domain is
built or destroyed. However, the following call path shows that both cases
go through the same path, so the update_top_cache_domain() call can be
skipped while destroying sched domains and done only when sched domains
are built:

        partition_sched_domains()
                detach_destroy_domain()
                        cpu_attach_domain()
                                update_top_cache_domain()
                build_sched_domains()
                        cpu_attach_domain()
                                update_top_cache_domain()

Signed-off-by: Rakib Mullick <rakib.mull...@gmail.com>
---

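For illustration, here is a minimal standalone sketch (plain C, outside the
kernel; all names below are made up for this example and are not kernel
APIs) of the pattern the patch applies: when a teardown is always followed
by a rebuild that recomputes the cached state anyway, the recomputation can
be gated behind a flag and skipped on the teardown side.

#include <stdbool.h>
#include <stdio.h>

static int cached_value;

/* stand-in for the per-cpu cache update done by the rebuild path */
static void recompute_cache(int cpu)
{
	cached_value = cpu * 2;
	printf("cache recomputed for cpu %d (value %d)\n", cpu, cached_value);
}

/* plays the role of cpu_attach_domain(): refresh the cache only on request */
static void attach(int cpu, bool update_cache)
{
	/* detach/attach work would go here */
	if (update_cache)
		recompute_cache(cpu);
}

int main(void)
{
	attach(0, false);	/* teardown: skip the redundant update */
	attach(0, true);	/* rebuild: the single place the cache is refreshed */
	return 0;
}

In the patch below, detach_destroy_domains() takes the 'false' side and
build_sched_domains() takes the 'true' side of this pattern.
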
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7c32cb..8c6fee4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5102,8 +5102,8 @@ static void update_top_cache_domain(int cpu)
  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  * hold the hotplug lock.
  */
-static void
-cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
+static void cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd,
+                               int cpu, bool update_cache_domain)
 {
        struct rq *rq = cpu_rq(cpu);
        struct sched_domain *tmp;
@@ -5138,7 +5138,8 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
        rcu_assign_pointer(rq->sd, sd);
        destroy_sched_domains(tmp, cpu);
 
-       update_top_cache_domain(cpu);
+       if (update_cache_domain)
+               update_top_cache_domain(cpu);
 }
 
 /* cpus with isolated domains */
@@ -6021,7 +6022,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
        rcu_read_lock();
        for_each_cpu(i, cpu_map) {
                sd = *per_cpu_ptr(d.sd, i);
-               cpu_attach_domain(sd, d.rd, i);
+               cpu_attach_domain(sd, d.rd, i, true);
        }
        rcu_read_unlock();
 
@@ -6109,7 +6110,7 @@ static void detach_destroy_domains(const struct cpumask *cpu_map)
 
        rcu_read_lock();
        for_each_cpu(i, cpu_map)
-               cpu_attach_domain(NULL, &def_root_domain, i);
+               cpu_attach_domain(NULL, &def_root_domain, i, false);
        rcu_read_unlock();
 }
 

