4.4-stable review patch.  If anyone has any objections, please let me know.

------------------

[ Upstream commit 6fe1f348b3dd1f700f9630562b7d38afd6949568 ]

When a cgroup's CPU runqueue is destroyed, it should remove its
remaining load accounting from its parent cgroup.

The current site for doing so is unsuited because it's far too late and
unordered against other cgroup removal (->css_free() will be, but we're
also in an RCU callback).
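
For context, in v4.4 this accounting is torn down from the group-free
path, which is deferred to an RCU callback (abbreviated sketch of
kernel/sched/core.c; unrelated details elided):

  void sched_destroy_group(struct task_group *tg)
  {
          /* the actual freeing is deferred past a grace period */
          call_rcu(&tg->rcu, free_sched_group_rcu);
  }

  static void free_sched_group_rcu(struct rcu_head *rhp)
  {
          /* runs well after the cgroup is gone - too late to unaccount */
          free_sched_group(container_of(rhp, struct task_group, rcu));
  }

free_sched_group() calls free_fair_sched_group(), which is where
remove_entity_load_avg() ran before this patch (see the first fair.c
hunk below).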

Put it in the ->css_offline() callback, which is the start of cgroup
destruction, right after the group has been made unavailable to
userspace. The ->css_offline() callbacks are called in hierarchical order
after the following v4.4 commit:

  aa226ff4a1ce ("cgroup: make sure a parent css isn't offlined before its children")
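
With that ordering guaranteed, the cpu controller's offline hook is a
suitable site; abbreviated sketch of the v4.4 call chain from
kernel/sched/core.c:

  static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
  {
          struct task_group *tg = css_tg(css);

          /* now also removes the group's residual load from its parent */
          sched_offline_group(tg);
  }

sched_offline_group() in turn calls the reworked
unregister_fair_sched_group() from the diff below.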

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Christian Borntraeger <borntrae...@de.ibm.com>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Li Zefan <lize...@huawei.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Oleg Nesterov <o...@redhat.com>
Cc: Paul E. McKenney <paul...@linux.vnet.ibm.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Tejun Heo <t...@kernel.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Link: http://lkml.kernel.org/r/20160121212416.gl6...@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mi...@kernel.org>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 kernel/sched/core.c  |  4 +---
 kernel/sched/fair.c  | 37 +++++++++++++++++++++----------------
 kernel/sched/sched.h |  2 +-
 3 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 65ed3501c2ca..4743e1f2a3d1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7817,11 +7817,9 @@ void sched_destroy_group(struct task_group *tg)
 void sched_offline_group(struct task_group *tg)
 {
        unsigned long flags;
-       int i;
 
        /* end participation in shares distribution */
-       for_each_possible_cpu(i)
-               unregister_fair_sched_group(tg, i);
+       unregister_fair_sched_group(tg);
 
        spin_lock_irqsave(&task_group_lock, flags);
        list_del_rcu(&tg->list);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3b136fb4422c..a0c5bb93a3ab 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8154,11 +8154,8 @@ void free_fair_sched_group(struct task_group *tg)
        for_each_possible_cpu(i) {
                if (tg->cfs_rq)
                        kfree(tg->cfs_rq[i]);
-               if (tg->se) {
-                       if (tg->se[i])
-                               remove_entity_load_avg(tg->se[i]);
+               if (tg->se)
                        kfree(tg->se[i]);
-               }
        }
 
        kfree(tg->cfs_rq);
@@ -8206,21 +8203,29 @@ err:
        return 0;
 }
 
-void unregister_fair_sched_group(struct task_group *tg, int cpu)
+void unregister_fair_sched_group(struct task_group *tg)
 {
-       struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
+       struct rq *rq;
+       int cpu;
 
-       /*
-       * Only empty task groups can be destroyed; so we can speculatively
-       * check on_list without danger of it being re-added.
-       */
-       if (!tg->cfs_rq[cpu]->on_list)
-               return;
+       for_each_possible_cpu(cpu) {
+               if (tg->se[cpu])
+                       remove_entity_load_avg(tg->se[cpu]);
 
-       raw_spin_lock_irqsave(&rq->lock, flags);
-       list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
-       raw_spin_unlock_irqrestore(&rq->lock, flags);
+               /*
+                * Only empty task groups can be destroyed; so we can speculatively
+                * check on_list without danger of it being re-added.
+                */
+               if (!tg->cfs_rq[cpu]->on_list)
+                       continue;
+
+               rq = cpu_rq(cpu);
+
+               raw_spin_lock_irqsave(&rq->lock, flags);
+               list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
+       }
 }
 
 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
@@ -8302,7 +8307,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
        return 1;
 }
 
-void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+void unregister_fair_sched_group(struct task_group *tg) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0c9ebd82a684..af8d8c3eb8ab 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -308,7 +308,7 @@ extern int tg_nop(struct task_group *tg, void *data);
 
 extern void free_fair_sched_group(struct task_group *tg);
 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
-extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void unregister_fair_sched_group(struct task_group *tg);
 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                        struct sched_entity *se, int cpu,
                        struct sched_entity *parent);
-- 
2.17.1