Use the per-memcg nr_deferred for memcg-aware shrinkers.  The shrinker's
own nr_deferred will be used in the following cases:
    1. Non-memcg-aware shrinkers
    2. !CONFIG_MEMCG
    3. memcg is disabled by a boot parameter
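
For reference, below is a minimal sketch (not part of this patch) of the
per-memcg, per-node state that count_nr_deferred_memcg() and
set_nr_deferred_memcg() index into.  It assumes the nr_deferred array
added to shrinker_info earlier in this series; the exact field layout
here is illustrative:

    /*
     * Sketch only: per-memcg, per-node shrinker state.  Assumes the
     * nr_deferred array introduced earlier in this series.
     */
    struct shrinker_info {
            struct rcu_head rcu;            /* RCU-deferred freeing */
            atomic_long_t *nr_deferred;     /* one counter per shrinker id */
            unsigned long *map;             /* registered-shrinker bitmap */
    };

The helpers look this structure up with rcu_dereference_protected()
because every caller on this path holds shrinker_rwsem, as the
lockdep_is_held() annotations below assert.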

Signed-off-by: Yang Shi <[email protected]>
---
 mm/vmscan.c | 87 ++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 73 insertions(+), 14 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 20be0db291fe..e1f8960f5cf6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -205,7 +205,8 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
 
        for_each_node(nid) {
                old = rcu_dereference_protected(
-                       mem_cgroup_nodeinfo(memcg, nid)->shrinker_info, true);
+                       mem_cgroup_nodeinfo(memcg, nid)->shrinker_info,
+                       lockdep_is_held(&shrinker_rwsem));
                /* Not yet online memcg */
                if (!old)
                        return 0;
@@ -239,7 +240,8 @@ void free_shrinker_info(struct mem_cgroup *memcg)
 
        for_each_node(nid) {
                pn = mem_cgroup_nodeinfo(memcg, nid);
-               info = rcu_dereference_protected(pn->shrinker_info, true);
+               info = rcu_dereference_protected(pn->shrinker_info,
+                                                lockdep_is_held(&shrinker_rwsem));
                if (info)
                        kvfree(info);
                rcu_assign_pointer(pn->shrinker_info, NULL);
@@ -360,6 +362,27 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
        up_write(&shrinker_rwsem);
 }
 
+static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+                                   struct mem_cgroup *memcg)
+{
+       struct shrinker_info *info;
+
+       info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+                                        lockdep_is_held(&shrinker_rwsem));
+       return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
+}
+
+static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+                                 struct mem_cgroup *memcg)
+{
+       struct shrinker_info *info;
+
+       info = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
+                                        lockdep_is_held(&shrinker_rwsem));
+
+       return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
        return sc->target_mem_cgroup;
@@ -398,6 +421,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
 {
 }
 
+static long count_nr_deferred_memcg(int nid, struct shrinker *shrinker,
+                                   struct mem_cgroup *memcg)
+{
+       return 0;
+}
+
+static long set_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
+                                 struct mem_cgroup *memcg)
+{
+       return 0;
+}
+
 static bool cgroup_reclaim(struct scan_control *sc)
 {
        return false;
@@ -409,6 +444,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
 }
 #endif
 
+static long count_nr_deferred(struct shrinker *shrinker,
+                             struct shrink_control *sc)
+{
+       int nid = sc->nid;
+
+       if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+               nid = 0;
+
+       if (sc->memcg &&
+           (shrinker->flags & SHRINKER_MEMCG_AWARE))
+               return count_nr_deferred_memcg(nid, shrinker,
+                                              sc->memcg);
+
+       return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+}
+
+
+static long set_nr_deferred(long nr, struct shrinker *shrinker,
+                           struct shrink_control *sc)
+{
+       int nid = sc->nid;
+
+       if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
+               nid = 0;
+
+       if (sc->memcg &&
+           (shrinker->flags & SHRINKER_MEMCG_AWARE))
+               return set_nr_deferred_memcg(nr, nid, shrinker,
+                                            sc->memcg);
+
+       return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
+}
+
 /*
  * This misses isolated pages which are not accounted for to save counters.
  * As the data only determines if reclaim or compaction continues, it is
@@ -545,14 +613,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
        long freeable;
        long nr;
        long new_nr;
-       int nid = shrinkctl->nid;
        long batch_size = shrinker->batch ? shrinker->batch
                                          : SHRINK_BATCH;
        long scanned = 0, next_deferred;
 
-       if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
-               nid = 0;
-
        freeable = shrinker->count_objects(shrinker, shrinkctl);
        if (freeable == 0 || freeable == SHRINK_EMPTY)
                return freeable;
@@ -562,7 +626,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
         * and zero it so that other concurrent shrinker invocations
         * don't also do this scanning work.
         */
-       nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
+       nr = count_nr_deferred(shrinker, shrinkctl);
 
        total_scan = nr;
        if (shrinker->seeks) {
@@ -653,14 +717,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
                next_deferred = 0;
        /*
         * move the unused scan count back into the shrinker in a
-        * manner that handles concurrent updates. If we exhausted the
-        * scan, there is no need to do an update.
+        * manner that handles concurrent updates.
         */
-       if (next_deferred > 0)
-               new_nr = atomic_long_add_return(next_deferred,
-                                               &shrinker->nr_deferred[nid]);
-       else
-               new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
+       new_nr = set_nr_deferred(next_deferred, shrinker, shrinkctl);
 
        trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr,
                                        total_scan);
        return freed;
-- 
2.26.2
