The patch introduces a shrinker::id number, which is used to enumerate
memcg-aware shrinkers. The numbers start from 0, and the code tries
to keep them as small as possible.

This will be used to represent a memcg-aware shrinker in the memcg
shrinkers map.

Since all memcg-aware shrinkers are based on list_lru, which is per-memcg
in case of CONFIG_MEMCG_KMEM only, the new functionality will be under
this config option.

Signed-off-by: Kirill Tkhai <ktk...@virtuozzo.com>
---
 include/linux/shrinker.h |    4 +++
 mm/vmscan.c              |   60 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+)

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 6794490f25b2..7ca9c18cf130 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -66,6 +66,10 @@ struct shrinker {
 
        /* These are for internal use */
        struct list_head list;
+#ifdef CONFIG_MEMCG_KMEM
+       /* ID in shrinker_idr */
+       int id;
+#endif
        /* objs pending delete, per node */
        atomic_long_t *nr_deferred;
 };
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 50055d72f294..3de12a9bdf85 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -169,6 +169,48 @@ unsigned long vm_total_pages;
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
+#ifdef CONFIG_MEMCG_KMEM
+static DEFINE_IDR(shrinker_idr);
+
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+       int id, ret;
+
+       shrinker->id = -1;
+       down_write(&shrinker_rwsem);
+       ret = id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
+       if (ret < 0)
+               goto unlock;
+       shrinker->id = id;
+       ret = 0;
+unlock:
+       up_write(&shrinker_rwsem);
+       return ret;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+       int id = shrinker->id;
+
+       if (id < 0)
+               return;
+
+       down_write(&shrinker_rwsem);
+       idr_remove(&shrinker_idr, id);
+       up_write(&shrinker_rwsem);
+       shrinker->id = -1;
+}
+#else /* CONFIG_MEMCG_KMEM */
+static int prealloc_memcg_shrinker(struct shrinker *shrinker)
+{
+       return 0;
+}
+
+static void unregister_memcg_shrinker(struct shrinker *shrinker)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
 #ifdef CONFIG_MEMCG
 static bool global_reclaim(struct scan_control *sc)
 {
@@ -306,6 +348,7 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum 
lru_list lru, int zone
 int prealloc_shrinker(struct shrinker *shrinker)
 {
        size_t size = sizeof(*shrinker->nr_deferred);
+       int ret;
 
        if (shrinker->flags & SHRINKER_NUMA_AWARE)
                size *= nr_node_ids;
@@ -313,11 +356,26 @@ int prealloc_shrinker(struct shrinker *shrinker)
        shrinker->nr_deferred = kzalloc(size, GFP_KERNEL);
        if (!shrinker->nr_deferred)
                return -ENOMEM;
+
+       if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
+               ret = prealloc_memcg_shrinker(shrinker);
+               if (ret)
+                       goto free_deferred;
+       }
+
        return 0;
+
+free_deferred:
+       kfree(shrinker->nr_deferred);
+       shrinker->nr_deferred = NULL;
+       return -ENOMEM;
 }
 
 void free_prealloced_shrinker(struct shrinker *shrinker)
 {
+       if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+               unregister_memcg_shrinker(shrinker);
+
        kfree(shrinker->nr_deferred);
        shrinker->nr_deferred = NULL;
 }
@@ -347,6 +405,8 @@ void unregister_shrinker(struct shrinker *shrinker)
 {
        if (!shrinker->nr_deferred)
                return;
+       if (shrinker->flags & SHRINKER_MEMCG_AWARE)
+               unregister_memcg_shrinker(shrinker);
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);

Reply via email to