If we are already holding list_lock, don't call kmalloc(). Otherwise we
will run into a deadlock, because the kmalloc() slow path also tries to
grab the same lock.
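
A sketch of the offending call chain (intermediate frames omitted; the
two lock sites match the lockdep splat below, the kmalloc() comes from
the bitmap_zalloc() call this patch removes):

  __kmem_cache_shutdown()
    free_partial()                  /* takes n->list_lock */
      list_slab_objects()
        bitmap_zalloc(GFP_ATOMIC)   /* kmalloc() under the lock */
          ___slab_alloc()           /* wants n->list_lock again */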

Instead, statically allocate the bitmap in struct kmem_cache_node. Given
that page->objects currently has 15 bits, this bloats the per-node struct
by 4KB. We waste some memory, but only when CONFIG_SLUB_DEBUG is enabled.
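
For the record, that 4KB works out as follows (assuming 64-bit longs; a
32-bit build ends up at the same size):

  sizeof(object_map) = BITS_TO_LONGS(MAX_OBJS_PER_PAGE) * sizeof(long)
                     = DIV_ROUND_UP(32767, 64) * 8
                     = 512 * 8
                     = 4096 bytes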

  WARNING: possible recursive locking detected
  --------------------------------------------
  mount-encrypted/4921 is trying to acquire lock:
  (&(&n->list_lock)->rlock){-.-.}, at: ___slab_alloc+0x104/0x437

  but task is already holding lock:
  (&(&n->list_lock)->rlock){-.-.}, at: __kmem_cache_shutdown+0x81/0x3cb

  other info that might help us debug this:
   Possible unsafe locking scenario:

         CPU0
         ----
    lock(&(&n->list_lock)->rlock);
    lock(&(&n->list_lock)->rlock);

   *** DEADLOCK ***

Signed-off-by: Yu Zhao <yuz...@google.com>
---
 include/linux/slub_def.h |  4 ++++
 mm/slab.h                |  1 +
 mm/slub.c                | 44 ++++++++++++++--------------------------
 3 files changed, 20 insertions(+), 29 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index d2153789bd9f..719d43574360 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -9,6 +9,10 @@
  */
 #include <linux/kobject.h>
 
+#define OO_SHIFT       15
+#define OO_MASK                ((1 << OO_SHIFT) - 1)
+#define MAX_OBJS_PER_PAGE      32767 /* since page.objects is u15 */
+
 enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
        ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
diff --git a/mm/slab.h b/mm/slab.h
index 9057b8056b07..2d8639835db1 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -556,6 +556,7 @@ struct kmem_cache_node {
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
+       unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
 #endif
 #endif
 
diff --git a/mm/slub.c b/mm/slub.c
index 62053ceb4464..f28072c9f2ce 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -187,10 +187,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
  */
 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 
-#define OO_SHIFT       15
-#define OO_MASK                ((1 << OO_SHIFT) - 1)
-#define MAX_OBJS_PER_PAGE      32767 /* since page.objects is u15 */
-
 /* Internal SLUB flags */
 /* Poison object */
 #define __OBJECT_POISON                ((slab_flags_t __force)0x80000000U)
@@ -454,6 +450,8 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
        void *p;
        void *addr = page_address(page);
 
+       bitmap_zero(map, page->objects);
+
        for (p = page->freelist; p; p = get_freepointer(s, p))
                set_bit(slab_index(p, s, addr), map);
 }
@@ -3680,14 +3678,12 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 }
 
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
-                                                       const char *text)
+                             unsigned long *map, const char *text)
 {
 #ifdef CONFIG_SLUB_DEBUG
        void *addr = page_address(page);
        void *p;
-       unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
-       if (!map)
-               return;
+
        slab_err(s, page, text, s->name);
        slab_lock(page);
 
@@ -3699,8 +3695,8 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                        print_tracking(s, p);
                }
        }
+
        slab_unlock(page);
-       bitmap_free(map);
 #endif
 }
 
@@ -3721,7 +3717,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                        remove_partial(n, page);
                        list_add(&page->slab_list, &discard);
                } else {
-                       list_slab_objects(s, page,
+                       list_slab_objects(s, page, n->object_map,
                        "Objects remaining in %s on __kmem_cache_shutdown()");
                }
        }
@@ -4397,7 +4393,6 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
                return 0;
 
        /* Now we know that a valid freelist exists */
-       bitmap_zero(map, page->objects);
 
        get_map(s, page, map);
        for_each_object(p, s, addr, page->objects) {
@@ -4422,7 +4417,7 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 }
 
 static int validate_slab_node(struct kmem_cache *s,
-               struct kmem_cache_node *n, unsigned long *map)
+               struct kmem_cache_node *n)
 {
        unsigned long count = 0;
        struct page *page;
@@ -4431,7 +4426,7 @@ static int validate_slab_node(struct kmem_cache *s,
        spin_lock_irqsave(&n->list_lock, flags);
 
        list_for_each_entry(page, &n->partial, slab_list) {
-               validate_slab_slab(s, page, map);
+               validate_slab_slab(s, page, n->object_map);
                count++;
        }
        if (count != n->nr_partial)
@@ -4442,7 +4437,7 @@ static int validate_slab_node(struct kmem_cache *s,
                goto out;
 
        list_for_each_entry(page, &n->full, slab_list) {
-               validate_slab_slab(s, page, map);
+               validate_slab_slab(s, page, n->object_map);
                count++;
        }
        if (count != atomic_long_read(&n->nr_slabs))
@@ -4459,15 +4454,11 @@ static long validate_slab_cache(struct kmem_cache *s)
        int node;
        unsigned long count = 0;
        struct kmem_cache_node *n;
-       unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
-
-       if (!map)
-               return -ENOMEM;
 
        flush_all(s);
        for_each_kmem_cache_node(s, node, n)
-               count += validate_slab_node(s, n, map);
-       bitmap_free(map);
+               count += validate_slab_node(s, n);
+
        return count;
 }
 /*
@@ -4603,9 +4594,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
        void *addr = page_address(page);
        void *p;
 
-       bitmap_zero(map, page->objects);
        get_map(s, page, map);
-
        for_each_object(p, s, addr, page->objects)
                if (!test_bit(slab_index(p, s, addr), map))
                        add_location(t, s, get_track(s, p, alloc));
@@ -4619,11 +4608,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
        struct loc_track t = { 0, 0, NULL };
        int node;
        struct kmem_cache_node *n;
-       unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
 
-       if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
-                                    GFP_KERNEL)) {
-               bitmap_free(map);
+       if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
+                            GFP_KERNEL)) {
                return sprintf(buf, "Out of memory\n");
        }
        /* Push back cpu slabs */
@@ -4638,9 +4625,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 
                spin_lock_irqsave(&n->list_lock, flags);
                list_for_each_entry(page, &n->partial, slab_list)
-                       process_slab(&t, s, page, alloc, map);
+                       process_slab(&t, s, page, alloc, n->object_map);
                list_for_each_entry(page, &n->full, slab_list)
-                       process_slab(&t, s, page, alloc, map);
+                       process_slab(&t, s, page, alloc, n->object_map);
                spin_unlock_irqrestore(&n->list_lock, flags);
        }
 
@@ -4689,7 +4676,6 @@ static int list_locations(struct kmem_cache *s, char *buf,
        }
 
        free_loc_track(&t);
-       bitmap_free(map);
        if (!t.count)
                len += sprintf(buf, "No data\n");
        return len;
-- 
2.23.0.162.g0b9fbb3734-goog
