The shrinker map management is not really memcg specific: it is just the
allocation and assignment of a structure, and the only memcg-specific bit is
that the map is stored in a memcg structure.  So move the shrinker_maps
handling code into vmscan.c for tighter integration with the shrinker code.
There is no functional change.

Signed-off-by: Yang Shi <shy828...@gmail.com>
---
 include/linux/memcontrol.h |   4 +-
 mm/memcontrol.c            | 124 ------------------------------------
 mm/vmscan.c                | 126 +++++++++++++++++++++++++++++++++++++
 3 files changed, 128 insertions(+), 126 deletions(-)
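
Note for reviewers, not part of the patch: after this move,
memcg_expand_shrinker_maps() becomes static, since its only caller is the
shrinker id allocation path in vmscan.c, while the alloc/free helpers stay
exported for the memcg lifecycle (they are invoked from the memcg css
online/free paths in mm/memcontrol.c).  The barrier in
memcg_set_shrinker_bit() still pairs with smp_mb__after_atomic() in
shrink_slab_memcg().  For context, a sketch of the in-file caller, roughly
following mainline prealloc_memcg_shrinker() and meant as illustration only:

	static int prealloc_memcg_shrinker(struct shrinker *shrinker)
	{
		int id, ret = -ENOMEM;

		down_write(&shrinker_rwsem);
		/* This may call shrinker, so it must use down_read_trylock() */
		id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
		if (id < 0)
			goto unlock;

		if (id >= shrinker_nr_max) {
			/* Grow every memcg's shrinker map to cover the new id */
			if (memcg_expand_shrinker_maps(id)) {
				idr_remove(&shrinker_idr, id);
				goto unlock;
			}
			shrinker_nr_max = id + 1;
		}
		shrinker->id = id;
		ret = 0;
	unlock:
		up_write(&shrinker_rwsem);
		return ret;
	}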

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d827bd7f3bfe..d128d2842f22 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1581,8 +1581,8 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
        return false;
 }
 
-extern int memcg_expand_shrinker_maps(int new_id);
-
+extern int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg);
+extern void memcg_free_shrinker_maps(struct mem_cgroup *memcg);
 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
                                   int nid, int shrinker_id);
 #else
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 605f671203ef..817dde366258 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -397,130 +397,6 @@ DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
 #endif
 
-static int memcg_shrinker_map_size;
-static DEFINE_MUTEX(memcg_shrinker_map_mutex);
-
-static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
-{
-       kvfree(container_of(head, struct memcg_shrinker_map, rcu));
-}
-
-static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
-                                        int size, int old_size)
-{
-       struct memcg_shrinker_map *new, *old;
-       int nid;
-
-       lockdep_assert_held(&memcg_shrinker_map_mutex);
-
-       for_each_node(nid) {
-               old = rcu_dereference_protected(
-                       mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
-               /* Not yet online memcg */
-               if (!old)
-                       return 0;
-
-               new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
-               if (!new)
-                       return -ENOMEM;
-
-               /* Set all old bits, clear all new bits */
-               memset(new->map, (int)0xff, old_size);
-               memset((void *)new->map + old_size, 0, size - old_size);
-
-               rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
-               call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
-       }
-
-       return 0;
-}
-
-static void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
-{
-       struct mem_cgroup_per_node *pn;
-       struct memcg_shrinker_map *map;
-       int nid;
-
-       if (mem_cgroup_is_root(memcg))
-               return;
-
-       for_each_node(nid) {
-               pn = mem_cgroup_nodeinfo(memcg, nid);
-               map = rcu_dereference_protected(pn->shrinker_map, true);
-               if (map)
-                       kvfree(map);
-               rcu_assign_pointer(pn->shrinker_map, NULL);
-       }
-}
-
-static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
-{
-       struct memcg_shrinker_map *map;
-       int nid, size, ret = 0;
-
-       if (mem_cgroup_is_root(memcg))
-               return 0;
-
-       mutex_lock(&memcg_shrinker_map_mutex);
-       size = memcg_shrinker_map_size;
-       for_each_node(nid) {
-               map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
-               if (!map) {
-                       memcg_free_shrinker_maps(memcg);
-                       ret = -ENOMEM;
-                       break;
-               }
-               rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
-       }
-       mutex_unlock(&memcg_shrinker_map_mutex);
-
-       return ret;
-}
-
-int memcg_expand_shrinker_maps(int new_id)
-{
-       int size, old_size, ret = 0;
-       struct mem_cgroup *memcg;
-
-       size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
-       old_size = memcg_shrinker_map_size;
-       if (size <= old_size)
-               return 0;
-
-       mutex_lock(&memcg_shrinker_map_mutex);
-       if (!root_mem_cgroup)
-               goto unlock;
-
-       for_each_mem_cgroup(memcg) {
-               if (mem_cgroup_is_root(memcg))
-                       continue;
-               ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
-               if (ret) {
-                       mem_cgroup_iter_break(NULL, memcg);
-                       goto unlock;
-               }
-       }
-unlock:
-       if (!ret)
-               memcg_shrinker_map_size = size;
-       mutex_unlock(&memcg_shrinker_map_mutex);
-       return ret;
-}
-
-void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
-{
-       if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
-               struct memcg_shrinker_map *map;
-
-               rcu_read_lock();
-               map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
-               /* Pairs with smp mb in shrink_slab() */
-               smp_mb__before_atomic();
-               set_bit(shrinker_id, map->map);
-               rcu_read_unlock();
-       }
-}
-
 /**
  * mem_cgroup_css_from_page - css of the memcg associated with a page
  * @page: page of interest
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cb24ef952efc..9db7b4d6d0ae 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -185,6 +185,132 @@ static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
 #ifdef CONFIG_MEMCG
+
+static int memcg_shrinker_map_size;
+static DEFINE_MUTEX(memcg_shrinker_map_mutex);
+
+static void memcg_free_shrinker_map_rcu(struct rcu_head *head)
+{
+       kvfree(container_of(head, struct memcg_shrinker_map, rcu));
+}
+
+static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg,
+                                        int size, int old_size)
+{
+       struct memcg_shrinker_map *new, *old;
+       int nid;
+
+       lockdep_assert_held(&memcg_shrinker_map_mutex);
+
+       for_each_node(nid) {
+               old = rcu_dereference_protected(
+                       mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true);
+               /* Not yet online memcg */
+               if (!old)
+                       return 0;
+
+               new = kvmalloc_node(sizeof(*new) + size, GFP_KERNEL, nid);
+               if (!new)
+                       return -ENOMEM;
+
+               /* Set all old bits, clear all new bits */
+               memset(new->map, (int)0xff, old_size);
+               memset((void *)new->map + old_size, 0, size - old_size);
+
+               rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new);
+               call_rcu(&old->rcu, memcg_free_shrinker_map_rcu);
+       }
+
+       return 0;
+}
+
+void memcg_free_shrinker_maps(struct mem_cgroup *memcg)
+{
+       struct mem_cgroup_per_node *pn;
+       struct memcg_shrinker_map *map;
+       int nid;
+
+       if (mem_cgroup_is_root(memcg))
+               return;
+
+       for_each_node(nid) {
+               pn = mem_cgroup_nodeinfo(memcg, nid);
+               map = rcu_dereference_protected(pn->shrinker_map, true);
+               if (map)
+                       kvfree(map);
+               rcu_assign_pointer(pn->shrinker_map, NULL);
+       }
+}
+
+int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg)
+{
+       struct memcg_shrinker_map *map;
+       int nid, size, ret = 0;
+
+       if (mem_cgroup_is_root(memcg))
+               return 0;
+
+       mutex_lock(&memcg_shrinker_map_mutex);
+       size = memcg_shrinker_map_size;
+       for_each_node(nid) {
+               map = kvzalloc_node(sizeof(*map) + size, GFP_KERNEL, nid);
+               if (!map) {
+                       memcg_free_shrinker_maps(memcg);
+                       ret = -ENOMEM;
+                       break;
+               }
+               rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map);
+       }
+       mutex_unlock(&memcg_shrinker_map_mutex);
+
+       return ret;
+}
+
+static int memcg_expand_shrinker_maps(int new_id)
+{
+       int size, old_size, ret = 0;
+       struct mem_cgroup *memcg;
+
+       size = DIV_ROUND_UP(new_id + 1, BITS_PER_LONG) * sizeof(unsigned long);
+       old_size = memcg_shrinker_map_size;
+       if (size <= old_size)
+               return 0;
+
+       mutex_lock(&memcg_shrinker_map_mutex);
+       if (!root_mem_cgroup)
+               goto unlock;
+
+       memcg = mem_cgroup_iter(NULL, NULL, NULL);
+       do {
+               if (mem_cgroup_is_root(memcg))
+                       continue;
+               ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
+               if (ret) {
+                       mem_cgroup_iter_break(NULL, memcg);
+                       goto unlock;
+               }
+       } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
+unlock:
+       if (!ret)
+               memcg_shrinker_map_size = size;
+       mutex_unlock(&memcg_shrinker_map_mutex);
+       return ret;
+}
+
+void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
+{
+       if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
+               struct memcg_shrinker_map *map;
+
+               rcu_read_lock();
+               map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
+               /* Pairs with smp mb in shrink_slab() */
+               smp_mb__before_atomic();
+               set_bit(shrinker_id, map->map);
+               rcu_read_unlock();
+       }
+}
+
 /*
  * We allow subsystems to populate their shrinker-related
  * LRU lists before register_shrinker_prepared() is called
-- 
2.26.2
