Currently, putback_zspage frees the zspage under class->lock when its
fullness becomes ZS_EMPTY, but that makes it hard to implement the
locking scheme for the upcoming zspage migration.
So this patch separates free_zspage from putback_zspage and frees the
zspage outside class->lock, as preparation for zspage migration.
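
For illustration only (not part of the diff), the caller pattern after
the separation looks roughly like this, mirroring the __zs_compact()
hunk below:

	/*
	 * Sketch: putback_zspage() now only reinserts the zspage and
	 * reports its fullness; the caller decides whether to tear it
	 * down.
	 */
	fullness = putback_zspage(class, zspage);
	if (fullness == ZS_EMPTY) {
		/* drop the object/page accounting for this zspage ... */
		zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
				class->size, class->pages_per_zspage));
		atomic_long_sub(class->pages_per_zspage,
				&pool->pages_allocated);
		/* ... and free the zspage itself */
		free_zspage(pool, zspage);
	}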

Cc: Sergey Senozhatsky <sergey.senozhat...@gmail.com>
Signed-off-by: Minchan Kim <minc...@kernel.org>
---
 mm/zsmalloc.c | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 4b5ead85c7e7..9522bca068de 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1684,14 +1684,12 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source)
 
 /*
  * putback_zspage - add @zspage into right class's fullness list
- * @pool: target pool
  * @class: destination class
  * @zspage: target page
  *
  * Return @zspage's fullness_group
  */
-static enum fullness_group putback_zspage(struct zs_pool *pool,
-                       struct size_class *class,
+static enum fullness_group putback_zspage(struct size_class *class,
                        struct zspage *zspage)
 {
        enum fullness_group fullness;
@@ -1700,15 +1698,6 @@ static enum fullness_group putback_zspage(struct zs_pool *pool,
        insert_zspage(class, zspage, fullness);
        set_zspage_mapping(zspage, class->index, fullness);
 
-       if (fullness == ZS_EMPTY) {
-               zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-                       class->size, class->pages_per_zspage));
-               atomic_long_sub(class->pages_per_zspage,
-                               &pool->pages_allocated);
-
-               free_zspage(pool, zspage);
-       }
-
        return fullness;
 }
 
@@ -1754,23 +1743,29 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
                        if (!migrate_zspage(pool, class, &cc))
                                break;
 
-                       putback_zspage(pool, class, dst_zspage);
+                       putback_zspage(class, dst_zspage);
                }
 
                /* Stop if we couldn't find slot */
                if (dst_zspage == NULL)
                        break;
 
-               putback_zspage(pool, class, dst_zspage);
-               if (putback_zspage(pool, class, src_zspage) == ZS_EMPTY)
+               putback_zspage(class, dst_zspage);
+               if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
+                       zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+                                       class->size, class->pages_per_zspage));
+                       atomic_long_sub(class->pages_per_zspage,
+                                       &pool->pages_allocated);
+                       free_zspage(pool, src_zspage);
                        pool->stats.pages_compacted += class->pages_per_zspage;
+               }
                spin_unlock(&class->lock);
                cond_resched();
                spin_lock(&class->lock);
        }
 
        if (src_zspage)
-               putback_zspage(pool, class, src_zspage);
+               putback_zspage(class, src_zspage);
 
        spin_unlock(&class->lock);
 }
-- 
1.9.1
