With this clean-up phase, I want to use zram's wrapper functions
to lock table access, which is more consistent with zram's other
functions.

Signed-off-by: Minchan Kim <minc...@kernel.org>
---
 drivers/block/zram/zram_drv.c | 42 ++++++++++++++++++++++++++++--------------
 1 file changed, 28 insertions(+), 14 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7938f4b98b01..71b0a584bc85 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -415,24 +415,39 @@ static DEVICE_ATTR_RO(io_stat);
 static DEVICE_ATTR_RO(mm_stat);
 static DEVICE_ATTR_RO(debug_stat);
 
+
+static void zram_slot_lock(struct zram *zram, u32 index)
+{
+       struct zram_meta *meta = zram->meta;
+
+       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+}
+
+static void zram_slot_unlock(struct zram *zram, u32 index)
+{
+       struct zram_meta *meta = zram->meta;
+
+       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+}
+
 static bool zram_special_page_read(struct zram *zram, u32 index,
                                struct page *page,
                                unsigned int offset, unsigned int len)
 {
        struct zram_meta *meta = zram->meta;
 
-       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_slot_lock(zram, index);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_SAME)) {
                void *mem;
 
-               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_slot_unlock(zram, index);
                mem = kmap_atomic(page);
                zram_fill_page(mem + offset, len, meta->table[index].element);
                kunmap_atomic(mem);
                return true;
        }
-       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_slot_unlock(zram, index);
 
        return false;
 }
@@ -448,11 +463,11 @@ static bool zram_special_page_write(struct zram *zram, u32 index,
 
                kunmap_atomic(mem);
                /* Free memory associated with this sector now. */
-               bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_slot_lock(zram, index);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_SAME);
                zram_set_element(meta, index, element);
-               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_slot_unlock(zram, index);
 
                atomic64_inc(&zram->stats.same_pages);
                return true;
@@ -559,7 +574,7 @@ static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
        if (zram_special_page_read(zram, index, page, 0, PAGE_SIZE))
                return 0;
 
-       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_slot_lock(zram, index);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);
 
@@ -578,7 +593,7 @@ static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
                zcomp_stream_put(zram->comp);
        }
        zs_unmap_object(meta->mem_pool, handle);
-       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_slot_unlock(zram, index);
 
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
@@ -731,11 +746,11 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
-       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_slot_lock(zram, index);
        zram_free_page(zram, index);
        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, comp_len);
-       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_slot_unlock(zram, index);
 
        /* Update stats */
        atomic64_add(comp_len, &zram->stats.compr_data_size);
@@ -793,7 +808,6 @@ static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
 {
        size_t n = bio->bi_iter.bi_size;
-       struct zram_meta *meta = zram->meta;
 
        /*
         * zram manages data in physical block size units. Because logical block
@@ -814,9 +828,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
        }
 
        while (n >= PAGE_SIZE) {
-               bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_slot_lock(zram, index);
                zram_free_page(zram, index);
-               bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+               zram_slot_unlock(zram, index);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
@@ -925,9 +939,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
        zram = bdev->bd_disk->private_data;
        meta = zram->meta;
 
-       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_slot_lock(zram, index);
        zram_free_page(zram, index);
-       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+       zram_slot_unlock(zram, index);
        atomic64_inc(&zram->stats.notify_free);
 }
 
-- 
2.7.4

Reply via email to