Currently, putback_zspage frees the zspage under class->lock when its
fullness group becomes ZS_EMPTY, but that makes it hard to implement
the locking scheme for upcoming zspage migration.
So, this patch separates free_zspage from putback_zspage and frees the
zspage outside of class->lock, as preparation for zspage migration.
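
The compaction path, for example, now drops the lock before freeing.
A minimal sketch of the resulting pattern (simplified from the
__zs_compact hunk below, not literal kernel code):

	spin_lock(&class->lock);
	if (putback_zspage(pool, class, src_page) == ZS_EMPTY) {
		spin_unlock(&class->lock);
		/* stat updates now happen inside free_zspage() itself */
		free_zspage(pool, class, src_page);
	} else {
		spin_unlock(&class->lock);
	}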

Signed-off-by: Minchan Kim <minc...@kernel.org>
---
 mm/zsmalloc.c | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index f86f8aaeb902..49ae6531b7ad 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -945,7 +945,8 @@ static void reset_page(struct page *page)
        page_mapcount_reset(page);
 }
 
-static void free_zspage(struct page *first_page)
+static void free_zspage(struct zs_pool *pool, struct size_class *class,
+                       struct page *first_page)
 {
        struct page *nextp, *tmp, *head_extra;
 
@@ -968,6 +969,11 @@ static void free_zspage(struct page *first_page)
        }
        reset_page(head_extra);
        __free_page(head_extra);
+
+       zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+                       class->size, class->pages_per_zspage));
+       atomic_long_sub(class->pages_per_zspage,
+                               &pool->pages_allocated);
 }
 
 /* Initialize a newly allocated zspage */
@@ -1557,13 +1563,8 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
        spin_lock(&class->lock);
        obj_free(class, obj);
        fullness = fix_fullness_group(class, first_page);
-       if (fullness == ZS_EMPTY) {
-               zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-                               class->size, class->pages_per_zspage));
-               atomic_long_sub(class->pages_per_zspage,
-                               &pool->pages_allocated);
-               free_zspage(first_page);
-       }
+       if (fullness == ZS_EMPTY)
+               free_zspage(pool, class, first_page);
        spin_unlock(&class->lock);
        unpin_tag(handle);
 
@@ -1750,7 +1751,7 @@ static struct page *isolate_target_page(struct size_class *class)
  * @class: destination class
  * @first_page: target page
  *
- * Return @fist_page's fullness_group
+ * Return @first_page's updated fullness_group
  */
 static enum fullness_group putback_zspage(struct zs_pool *pool,
                        struct size_class *class,
@@ -1762,15 +1763,6 @@ static enum fullness_group putback_zspage(struct zs_pool *pool,
        insert_zspage(class, fullness, first_page);
        set_zspage_mapping(first_page, class->index, fullness);
 
-       if (fullness == ZS_EMPTY) {
-               zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-                       class->size, class->pages_per_zspage));
-               atomic_long_sub(class->pages_per_zspage,
-                               &pool->pages_allocated);
-
-               free_zspage(first_page);
-       }
-
        return fullness;
 }
 
@@ -1833,23 +1825,31 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
                        if (!migrate_zspage(pool, class, &cc))
                                break;
 
-                       putback_zspage(pool, class, dst_page);
+                       VM_BUG_ON_PAGE(putback_zspage(pool, class,
+                               dst_page) == ZS_EMPTY, dst_page);
                }
 
                /* Stop if we couldn't find slot */
                if (dst_page == NULL)
                        break;
 
-               putback_zspage(pool, class, dst_page);
-               if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
+               VM_BUG_ON_PAGE(putback_zspage(pool, class,
+                               dst_page) == ZS_EMPTY, dst_page);
+               if (putback_zspage(pool, class, src_page) == ZS_EMPTY) {
                        pool->stats.pages_compacted += class->pages_per_zspage;
-               spin_unlock(&class->lock);
+                       spin_unlock(&class->lock);
+                       free_zspage(pool, class, src_page);
+               } else {
+                       spin_unlock(&class->lock);
+               }
+
                cond_resched();
                spin_lock(&class->lock);
        }
 
        if (src_page)
-               putback_zspage(pool, class, src_page);
+               VM_BUG_ON_PAGE(putback_zspage(pool, class,
+                               src_page) == ZS_EMPTY, src_page);
 
        spin_unlock(&class->lock);
 }
-- 
1.9.1
