Now that nr_deferred is tracked per memcg for memcg-aware shrinkers, the
deferred work accumulated by a memcg would be lost once it goes offline.
Reparent it instead: when a memcg is offlined, add its nr_deferred
counters to the corresponding counters of its parent.
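
For context, the per-memcg bookkeeping walked by the new loop looks
roughly like this (a sketch based on the earlier patches in this series;
treat the exact field layout as illustrative):

    struct shrinker_info {
            struct rcu_head rcu;
            /* one deferred-work counter per registered shrinker id */
            atomic_long_t *nr_deferred;
            /* bitmap of shrinker ids that may have objects to scan */
            unsigned long *map;
    };

Reparenting simply folds each child counter into the parent's slot for
the same shrinker id, so the deferred objects are still scanned the next
time the parent is reclaimed.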

Acked-by: Vlastimil Babka <vba...@suse.cz>
Acked-by: Kirill Tkhai <ktk...@virtuozzo.com>
Acked-by: Roman Gushchin <g...@fb.com>
Reviewed-by: Shakeel Butt <shake...@google.com>
Signed-off-by: Yang Shi <shy828...@gmail.com>
---
 include/linux/memcontrol.h |  1 +
 mm/memcontrol.c            |  1 +
 mm/vmscan.c                | 24 ++++++++++++++++++++++++
 3 files changed, 26 insertions(+)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 24e735434a46..4064c9dda534 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1537,6 +1537,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
 int alloc_shrinker_info(struct mem_cgroup *memcg);
 void free_shrinker_info(struct mem_cgroup *memcg);
 void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
+void reparent_shrinker_deferred(struct mem_cgroup *memcg);
 #else
 #define mem_cgroup_sockets_enabled 0
 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 35d44afdd9fc..a945dfc85156 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5167,6 +5167,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
        page_counter_set_low(&memcg->memory, 0);
 
        memcg_offline_kmem(memcg);
+       reparent_shrinker_deferred(memcg);
        wb_memcg_offline(memcg);
 
        drain_all_stock(memcg);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 324c34c6e5cf..d0791ebd6761 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -397,6 +397,30 @@ static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
        return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
 }
 
+void reparent_shrinker_deferred(struct mem_cgroup *memcg)
+{
+       int i, nid;
+       long nr;
+       struct mem_cgroup *parent;
+       struct shrinker_info *child_info, *parent_info;
+
+       parent = parent_mem_cgroup(memcg);
+       if (!parent)
+               parent = root_mem_cgroup;
+
+       /* Prevent concurrent shrinker_info expansion */
+       down_read(&shrinker_rwsem);
+       for_each_node(nid) {
+               child_info = shrinker_info_protected(memcg, nid);
+               parent_info = shrinker_info_protected(parent, nid);
+               for (i = 0; i < shrinker_nr_max; i++) {
+                       nr = atomic_long_read(&child_info->nr_deferred[i]);
+                       atomic_long_add(nr, &parent_info->nr_deferred[i]);
+               }
+       }
+       up_read(&shrinker_rwsem);
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
        return sc->target_mem_cgroup;
-- 
2.26.2
