If we are already under list_lock, don't call kmalloc(). Otherwise we
will run into a deadlock because kmalloc() also tries to grab the same
lock.
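
For illustration only, here is a minimal userspace sketch of the
recursive-lock pattern (an assumption-laden analogy, not kernel code): a
non-recursive pthread mutex stands in for n->list_lock, and the stub
functions stand in for the path in the lockdep report below, where
__kmem_cache_shutdown() holds n->list_lock while list_slab_objects()
calls bitmap_zalloc()/kmalloc() and ___slab_alloc() tries to take the
same lock. Running it typically hangs on the second acquisition, which is
the self-deadlock lockdep complains about.

  #include <pthread.h>
  #include <stdio.h>

  /* stands in for n->list_lock (non-recursive) */
  static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

  /* stands in for bitmap_zalloc() -> kmalloc() -> ___slab_alloc(),
   * which (per the lockdep report below) takes the same lock again */
  static void *alloc_bitmap(void)
  {
          pthread_mutex_lock(&list_lock);  /* second acquisition: hangs here */
          pthread_mutex_unlock(&list_lock);
          return NULL;
  }

  /* stands in for list_slab_objects(), which runs under list_lock */
  static void list_objects(void)
  {
          void *map = alloc_bitmap();
          (void)map;
  }

  int main(void)
  {
          pthread_mutex_lock(&list_lock);  /* first acquisition, as in __kmem_cache_shutdown() */
          list_objects();                  /* never returns */
          pthread_mutex_unlock(&list_lock);
          printf("unreachable\n");
          return 0;
  }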

Instead, allocate pages directly. Given that page->objects currently has
15 bits, we only need one page for the bitmap. We may waste some memory,
but only when slub debug is on.
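
For reference, a quick userspace sketch of the size math (assuming 4K
pages; DIV_ROUND_UP and get_order here are simplified stand-ins for the
kernel macro/helper): with page->objects limited to 15 bits, the bitmap
needs at most 4096 bytes, i.e. a single order-0 page.

  #include <stdio.h>

  #define BITS_PER_BYTE           8
  #define PAGE_SIZE               4096UL  /* assumption: 4K pages */
  #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

  /* simplified stand-in for the kernel's get_order() */
  static int get_order(unsigned long size)
  {
          unsigned long pages = DIV_ROUND_UP(size, PAGE_SIZE);
          int order = 0;

          while ((1UL << order) < pages)
                  order++;
          return order;
  }

  int main(void)
  {
          unsigned long max_objects = (1UL << 15) - 1;  /* 15-bit page->objects */
          unsigned long bytes = DIV_ROUND_UP(max_objects, BITS_PER_BYTE);

          /* prints "bitmap: 4096 bytes, order 0" -> one page is enough */
          printf("bitmap: %lu bytes, order %d\n", bytes, get_order(bytes));
          return 0;
  }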

  WARNING: possible recursive locking detected
  --------------------------------------------
  mount-encrypted/4921 is trying to acquire lock:
  (&(&n->list_lock)->rlock){-.-.}, at: ___slab_alloc+0x104/0x437

  but task is already holding lock:
  (&(&n->list_lock)->rlock){-.-.}, at: __kmem_cache_shutdown+0x81/0x3cb

  other info that might help us debug this:
   Possible unsafe locking scenario:

         CPU0
         ----
    lock(&(&n->list_lock)->rlock);
    lock(&(&n->list_lock)->rlock);

   *** DEADLOCK ***

Signed-off-by: Yu Zhao <yuz...@google.com>
---
 mm/slub.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 8834563cdb4b..574a53ee31e1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3683,7 +3683,11 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_SLUB_DEBUG
        void *addr = page_address(page);
        void *p;
-       unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
+       int order;
+       unsigned long *map;
+
+       order = get_order(DIV_ROUND_UP(page->objects, BITS_PER_BYTE));
+       map = (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
        if (!map)
                return;
        slab_err(s, page, text, s->name);
@@ -3698,7 +3702,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                }
        }
        slab_unlock(page);
-       bitmap_free(map);
+       free_pages((unsigned long)map, order);
 #endif
 }
 
-- 
2.23.0.187.g17f5b7556c-goog
