From: Joonsoo Kim <iamjoonsoo....@lge.com>

The benefit of deduplication depends on the workload, so it is not
preferable to always enable it. Therefore, make it optional.

Signed-off-by: Joonsoo Kim <iamjoonsoo....@lge.com>
---
 drivers/block/zram/zram_drv.c | 80 ++++++++++++++++++++++++++++++++++++++-----
 drivers/block/zram/zram_drv.h |  1 +
 2 files changed, 73 insertions(+), 8 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 012425f..e45aa9f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -328,6 +328,39 @@ static ssize_t comp_algorithm_store(struct device *dev,
        return len;
 }
 
+static ssize_t use_dedup_show(struct device *dev,
+               struct device_attribute *attr, char *buf)
+{
+       int val;
+       struct zram *zram = dev_to_zram(dev);
+
+       down_read(&zram->init_lock);
+       val = zram->use_dedup;
+       up_read(&zram->init_lock);
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t use_dedup_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t len)
+{
+       int val;
+       struct zram *zram = dev_to_zram(dev);
+
+       if (kstrtoint(buf, 10, &val) || (val != 0 && val != 1))
+               return -EINVAL;
+
+       down_write(&zram->init_lock);
+       if (init_done(zram)) {
+               up_write(&zram->init_lock);
+               pr_info("Can't change dedup usage for initialized device\n");
+               return -EBUSY;
+       }
+       zram->use_dedup = val;
+       up_write(&zram->init_lock);
+       return len;
+}
+
 static ssize_t compact_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -422,11 +455,23 @@ static ssize_t debug_stat_show(struct device *dev,
 static DEVICE_ATTR_RO(mm_stat);
 static DEVICE_ATTR_RO(debug_stat);
 
-static u32 zram_calc_checksum(unsigned char *mem)
+static u32 zram_calc_checksum(struct zram *zram, unsigned char *mem)
 {
+       if (!zram->use_dedup)
+               return 0;
+
        return jhash(mem, PAGE_SIZE, 0);
 }
 
+static unsigned long zram_entry_handle(struct zram *zram,
+                               struct zram_entry *entry)
+{
+       if (!zram->use_dedup)
+               return (unsigned long)entry;
+
+       return entry->handle;
+}
+
 static struct zram_entry *zram_entry_alloc(struct zram *zram,
                                        unsigned int len, gfp_t flags)
 {
@@ -438,6 +483,9 @@ static struct zram_entry *zram_entry_alloc(struct zram *zram,
        if (!handle)
                return NULL;
 
+       if (!zram->use_dedup)
+               return (struct zram_entry *)handle;
+
        entry = kzalloc(sizeof(*entry), flags);
        if (!entry) {
                zs_free(meta->mem_pool, handle);
@@ -462,6 +510,9 @@ static void zram_entry_insert(struct zram *zram, struct zram_entry *new,
        struct rb_node **rb_node, *parent = NULL;
        struct zram_entry *entry;
 
+       if (!zram->use_dedup)
+               return;
+
        new->checksum = checksum;
        hash = &meta->hash[checksum % meta->hash_size];
        rb_root = &hash->rb_root;
@@ -492,7 +543,8 @@ static bool zram_entry_match(struct zram *zram, struct zram_entry *entry,
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm;
 
-       cmem = zs_map_object(meta->mem_pool, entry->handle, ZS_MM_RO);
+       cmem = zs_map_object(meta->mem_pool,
+                       zram_entry_handle(zram, entry), ZS_MM_RO);
        if (entry->len == PAGE_SIZE) {
                match = !memcmp(mem, cmem, PAGE_SIZE);
        } else {
@@ -501,7 +553,7 @@ static bool zram_entry_match(struct zram *zram, struct zram_entry *entry,
                        match = !memcmp(mem, zstrm->buffer, PAGE_SIZE);
                zcomp_stream_put(zram->comp);
        }
-       zs_unmap_object(meta->mem_pool, entry->handle);
+       zs_unmap_object(meta->mem_pool, zram_entry_handle(zram, entry));
 
        return match;
 }
@@ -521,6 +573,11 @@ static bool zram_entry_put(struct zram *zram, struct zram_meta *meta,
        struct zram_hash *hash;
        u32 checksum;
 
+       if (!zram->use_dedup) {
+               zs_free(meta->mem_pool, zram_entry_handle(zram, entry));
+               return false;
+       }
+
        if (!populated)
                goto free;
 
@@ -551,6 +608,9 @@ static struct zram_entry *zram_entry_get(struct zram *zram,
        struct zram_entry *entry;
        struct rb_node *rb_node;
 
+       if (!zram->use_dedup)
+               return NULL;
+
        hash = &meta->hash[checksum % meta->hash_size];
 
        spin_lock(&hash->lock);
@@ -713,7 +773,8 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
                return 0;
        }
 
-       cmem = zs_map_object(meta->mem_pool, entry->handle, ZS_MM_RO);
+       cmem = zs_map_object(meta->mem_pool,
+                       zram_entry_handle(zram, entry), ZS_MM_RO);
        if (size == PAGE_SIZE) {
                copy_page(mem, cmem);
        } else {
@@ -722,7 +783,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
                ret = zcomp_decompress(zstrm, cmem, size, mem);
                zcomp_stream_put(zram->comp);
        }
-       zs_unmap_object(meta->mem_pool, entry->handle);
+       zs_unmap_object(meta->mem_pool, zram_entry_handle(zram, entry));
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
        /* Should NEVER happen. Return bio error if it does. */
@@ -840,7 +901,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                goto out;
        }
 
-       checksum = zram_calc_checksum(uncmem);
+       checksum = zram_calc_checksum(zram, uncmem);
        if (!entry) {
                entry = zram_entry_get(zram, uncmem, checksum);
                if (entry) {
@@ -915,7 +976,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                goto out;
        }
 
-       cmem = zs_map_object(meta->mem_pool, entry->handle, ZS_MM_WO);
+       cmem = zs_map_object(meta->mem_pool,
+                       zram_entry_handle(zram, entry), ZS_MM_WO);
 
        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
@@ -927,7 +989,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 
        zcomp_stream_put(zram->comp);
        zstrm = NULL;
-       zs_unmap_object(meta->mem_pool, entry->handle);
+       zs_unmap_object(meta->mem_pool, zram_entry_handle(zram, entry));
        zram_entry_insert(zram, entry, checksum);
 
 found_duplication:
@@ -1310,6 +1372,7 @@ static int zram_open(struct block_device *bdev, fmode_t mode)
 static DEVICE_ATTR_WO(mem_used_max);
 static DEVICE_ATTR_RW(max_comp_streams);
 static DEVICE_ATTR_RW(comp_algorithm);
+static DEVICE_ATTR_RW(use_dedup);
 
 static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
@@ -1320,6 +1383,7 @@ static int zram_open(struct block_device *bdev, fmode_t mode)
        &dev_attr_mem_used_max.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
+       &dev_attr_use_dedup.attr,
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,
        &dev_attr_debug_stat.attr,
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 07d1f8d..b823555 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -141,5 +141,6 @@ struct zram {
         * zram is claimed so open request will be failed
         */
        bool claim; /* Protected by bdev->bd_mutex */
+       int use_dedup;
 };
 #endif
-- 
1.9.1

Reply via email to