Running z3fold stress testing with address sanitization showed that
zhdr->slots was being used after it was freed:

z3fold_free(z3fold_pool, handle)
  free_handle(handle)
    kmem_cache_free(pool->c_handle, zhdr->slots)
  release_z3fold_page_locked_list(kref)
    __release_z3fold_page(zhdr, true)
      zhdr_to_pool(zhdr)
        slots_to_pool(zhdr->slots)  *BOOM*
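
The pool lookup is what blows up: slots_to_pool() reads the pool
back-pointer out of the slots object itself, so by this point it is
dereferencing memory that free_handle() has already returned to the
slab cache. Roughly (abridged from mm/z3fold.c):

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	/* the pool back-pointer lives inside the slots object itself */
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}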

To fix this, split free_handle() into two functions, release_handle()
and free_slots(). release_handle() is used in place of free_handle(),
and free_slots() does the kmem_cache_free() only once
__release_z3fold_page() is done using the slots.
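
With this change the teardown order becomes (abridged, matching the
diff below):

z3fold_free(z3fold_pool, handle)
  release_handle(handle)            /* clears the handle, keeps the slots */
  release_z3fold_page_locked_list(kref)
    __release_z3fold_page(zhdr, true)
      slots_to_pool(zhdr->slots)    /* slots still live here */
      ...
      free_slots(slots)
        kmem_cache_free(pool->c_handle, slots)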

Fixes: 7c2b8baa61fe ("mm/z3fold.c: add structure for buddy handles")
Signed-off-by: Henry Burns <henrybu...@google.com>
---
 mm/z3fold.c | 33 ++++++++++++++-------------------
 1 file changed, 14 insertions(+), 19 deletions(-)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index f7993ff778df..e174d1549734 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -213,31 +213,24 @@ static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
        return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
 }
 
-static inline void free_handle(unsigned long handle)
+static inline void release_handle(unsigned long handle)
 {
-       struct z3fold_buddy_slots *slots;
-       int i;
-       bool is_free;
-
        if (handle & (1 << PAGE_HEADLESS))
                return;
 
        WARN_ON(*(unsigned long *)handle == 0);
        *(unsigned long *)handle = 0;
-       slots = handle_to_slots(handle);
-       is_free = true;
-       for (i = 0; i <= BUDDY_MASK; i++) {
-               if (slots->slot[i]) {
-                       is_free = false;
-                       break;
-               }
-       }
+}
 
-       if (is_free) {
-               struct z3fold_pool *pool = slots_to_pool(slots);
+/* At this point all of the slots should be empty */
+static inline void free_slots(struct z3fold_buddy_slots *slots)
+{
+       struct z3fold_pool *pool = slots_to_pool(slots);
+       int i;
 
-               kmem_cache_free(pool->c_handle, slots);
-       }
+       for (i = 0; i <= BUDDY_MASK; i++)
+               VM_BUG_ON(slots->slot[i]);
+       kmem_cache_free(pool->c_handle, slots);
 }
 
 static struct dentry *z3fold_do_mount(struct file_system_type *fs_type,
@@ -431,7 +424,8 @@ static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
 static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
 {
        struct page *page = virt_to_page(zhdr);
-       struct z3fold_pool *pool = zhdr_to_pool(zhdr);
+       struct z3fold_buddy_slots *slots = zhdr->slots;
+       struct z3fold_pool *pool = slots_to_pool(slots);
 
        WARN_ON(!list_empty(&zhdr->buddy));
        set_bit(PAGE_STALE, &page->private);
@@ -442,6 +436,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
        spin_unlock(&pool->lock);
        if (locked)
                z3fold_page_unlock(zhdr);
+       free_slots(slots);
        spin_lock(&pool->stale_lock);
        list_add(&zhdr->buddy, &pool->stale);
        queue_work(pool->release_wq, &pool->work);
@@ -1009,7 +1004,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                return;
        }
 
-       free_handle(handle);
+       release_handle(handle);
        if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
                atomic64_dec(&pool->pages_nr);
                return;
-- 
2.22.0.410.gd8fdbe21b5-goog
