shrink_slab_memcg() races with mem_cgroup_css_online(). Seeing the CSS_ONLINE
flag in shrink_slab_memcg()->mem_cgroup_online() does not guarantee that we will
also see memcg->nodeinfo[nid]->shrinker_map != NULL. This may occur because of
store/load reordering on non-x86 architectures.

The race looks like the below case:

        CPU A                   CPU B
store shrinker_map              load CSS_ONLINE
store CSS_ONLINE                load shrinker_map

So the memory ordering needs to be enforced by an smp_wmb()/smp_rmb() pair.
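
A minimal sketch of the pattern the pair enforces (illustrative only, not the
actual memcg code; writer()/reader() and the shared_map/online variables are
made-up stand-ins for shrinker_map and CSS_ONLINE):

    static void *shared_map;        /* stands in for shrinker_map */
    static int online;              /* stands in for CSS_ONLINE */

    static void writer(void *map)   /* mem_cgroup_css_online() side */
    {
            WRITE_ONCE(shared_map, map);    /* store shrinker_map */
            smp_wmb();                      /* order map store before flag store */
            WRITE_ONCE(online, 1);          /* store CSS_ONLINE */
    }

    static void *reader(void)       /* shrink_slab_memcg() side */
    {
            if (!READ_ONCE(online))         /* load CSS_ONLINE */
                    return NULL;
            smp_rmb();                      /* order flag load before map load */
            return READ_ONCE(shared_map);   /* guaranteed to see the map */
    }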

The same barrier pair will also guarantee the ordering between shrinker_deferred
and CSS_ONLINE for the following patches in this series.

Signed-off-by: Yang Shi <[email protected]>
---
 mm/memcontrol.c | 7 +++++++
 mm/vmscan.c     | 8 +++++---
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ed942734235f..3d4ddbb84a01 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5406,6 +5406,13 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
                return -ENOMEM;
        }
 
+       /*
+        * Barrier for CSS_ONLINE, so that shrink_slab_memcg() sees shrinker_maps
+        * and shrinker_deferred before CSS_ONLINE. It pairs with the read barrier
+        * in shrink_slab_memcg().
+        */
+       smp_wmb();
+
        /* Online state pins memcg ID, memcg ID pins CSS */
        refcount_set(&memcg->id.ref, 1);
        css_get(css);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 912c044301dd..9b31b9c419ec 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -552,13 +552,15 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
        if (!mem_cgroup_online(memcg))
                return 0;
 
+       /* Pairs with write barrier in mem_cgroup_css_online() */
+       smp_rmb();
+
        if (!down_read_trylock(&shrinker_rwsem))
                return 0;
 
+       /* Once memcg is online it can't be NULL */
        map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map,
                                        true);
-       if (unlikely(!map))
-               goto unlock;
 
        for_each_set_bit(i, map->map, shrinker_nr_max) {
                struct shrink_control sc = {
@@ -612,7 +614,7 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        break;
                }
        }
-unlock:
+
        up_read(&shrinker_rwsem);
        return freed;
 }
-- 
2.26.2
