Use css_get/put instead of mem_cgroup_get/put.

We can't do a simple replacement, because mem_cgroup_put() is
called during mem_cgroup_css_free(), and mem_cgroup_css_free()
won't be called until the css refcnt drops to 0, so a css_put()
placed there would never run.

Instead, we increment the css refcnt in mem_cgroup_css_offline()
and then check whether there are still outstanding kmem charges.
If not, the css refcnt is decremented right away; otherwise it is
decremented when the kmem charges drop to 0.
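
To make the handoff concrete, here is a minimal userspace sketch of
the protocol using C11 atomics. Everything in it is an illustrative
stand-in, not kernel API: "dead" plays the role of the
KMEM_ACCOUNTED_DEAD bit, "usage" the kmem res_counter, and "refcnt"
the css refcount. Whichever side wins the test-and-clear on the dead
flag performs the final put, so the put happens exactly once:

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_bool dead;	/* ~ KMEM_ACCOUNTED_DEAD */
	atomic_long usage;	/* ~ kmem res_counter usage */
	atomic_long refcnt;	/* ~ css refcount */
};

/* Exactly one caller sees the flag set; that caller does the put. */
static bool test_and_clear_dead(struct obj *o)
{
	return atomic_exchange(&o->dead, false);
}

static void obj_offline(struct obj *o)	/* ~ kmem_cgroup_css_offline() */
{
	atomic_fetch_add(&o->refcnt, 1);	/* css_get() */
	atomic_store(&o->dead, true);		/* memcg_kmem_mark_dead() */

	if (atomic_load(&o->usage) != 0)
		return;		/* the last uncharge will do the put */

	/* charges already at 0, possibly racing with an uncharge */
	if (test_and_clear_dead(o))
		atomic_fetch_sub(&o->refcnt, 1);	/* css_put() */
}

static void obj_uncharge(struct obj *o, long size)	/* ~ memcg_uncharge_kmem() */
{
	if (atomic_fetch_sub(&o->usage, size) != size)
		return;		/* charges still outstanding */

	if (test_and_clear_dead(o))
		atomic_fetch_sub(&o->refcnt, 1);	/* css_put() */
}

The race described in the comments below is the window between the
mark_dead store and the usage read: if an uncharge slips in there and
does the put, the offline path's test-and-clear fails, so we never
put twice.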

Signed-off-by: Li Zefan <lize...@huawei.com>
---
 mm/memcontrol.c | 49 ++++++++++++++++++++++++++-----------------------
 1 file changed, 26 insertions(+), 23 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index dafacb8..877551d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3004,7 +3004,7 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
                return;
 
        if (memcg_kmem_test_and_clear_dead(memcg))
-               mem_cgroup_put(memcg);
+               css_put(&memcg->css);
 }
 
 void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
@@ -5089,14 +5089,6 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
                 * starts accounting before all call sites are patched
                 */
                memcg_kmem_set_active(memcg);
-
-               /*
-                * kmem charges can outlive the cgroup. In the case of slab
-                * pages, for instance, a page contain objects from various
-                * processes, so it is unfeasible to migrate them away. We
-                * need to reference count the memcg because of that.
-                */
-               mem_cgroup_get(memcg);
        } else
                ret = res_counter_set_limit(&memcg->kmem, val);
 out:
@@ -5129,12 +5121,11 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg)
                goto out;
 
        /*
-        * destroy(), called if we fail, will issue static_key_slow_inc() and
-        * mem_cgroup_put() if kmem is enabled. We have to either call them
-        * unconditionally, or clear the KMEM_ACTIVE flag. I personally find
-        * this more consistent, since it always leads to the same destroy path
+        * destroy(), called if we fail, will issue static_key_slow_dec() if
+        * kmem is enabled. We have to either call it unconditionally, or
+        * clear the KMEM_ACTIVE flag. I personally find this more consistent,
+        * since it always leads to the same destroy path.
         */
-       mem_cgroup_get(memcg);
        static_key_slow_inc(&memcg_kmem_enabled_key);
 
        mutex_lock(&set_limit_mutex);
@@ -5823,23 +5814,33 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
        return mem_cgroup_sockets_init(memcg, ss);
 };
 
-static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
+static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
 {
-       mem_cgroup_sockets_destroy(memcg);
+       /*
+        * kmem charges can outlive the cgroup. In the case of slab
+        * pages, for instance, a page can contain objects from various
+        * processes, so it is infeasible to migrate them away. We
+        * need to reference count the memcg because of that.
+        */
+       css_get(&memcg->css);
 
+       /*
+        * We need to call css_get() first, because memcg_uncharge_kmem()
+        * will call css_put() if it sees the memcg is dead.
+        */
        memcg_kmem_mark_dead(memcg);
 
        if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
                return;
 
        /*
-        * Charges already down to 0, undo mem_cgroup_get() done in the charge
-        * path here, being careful not to race with memcg_uncharge_kmem: it is
-        * possible that the charges went down to 0 between mark_dead and the
-        * res_counter read, so in that case, we don't need the put
+        * Charges are already down to 0, so undo the css_get() done
+        * previously, being careful not to race with memcg_uncharge_kmem():
+        * it is possible that the charges went down to 0 between mark_dead
+        * and the res_counter read, in which case we don't need the put.
         */
        if (memcg_kmem_test_and_clear_dead(memcg))
-               mem_cgroup_put(memcg);
+               css_put(&memcg->css);
 }
 #else
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
@@ -5847,7 +5848,7 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
        return 0;
 }
 
-static void kmem_cgroup_destroy(struct mem_cgroup *memcg)
+static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
 {
 }
 #endif
@@ -6274,6 +6275,8 @@ static void mem_cgroup_css_offline(struct cgroup *cont)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
+       kmem_cgroup_css_offline(memcg);
+
        mem_cgroup_invalidate_reclaim_iterators(memcg);
        mem_cgroup_reparent_charges(memcg);
        mem_cgroup_destroy_all_caches(memcg);
@@ -6283,7 +6286,7 @@ static void mem_cgroup_css_free(struct cgroup *cont)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 
-       kmem_cgroup_destroy(memcg);
+       mem_cgroup_sockets_destroy(memcg);
 
        mem_cgroup_put(memcg);
 }
-- 
1.8.0.2