Re: [PATCH 2/5] zram: partial IO refactoring

2017-04-03 Thread Minchan Kim
Hi Sergey,

On Tue, Apr 04, 2017 at 11:17:06AM +0900, Sergey Senozhatsky wrote:
> Hello,
> 
> On (04/03/17 14:17), Minchan Kim wrote:
> > +static bool zram_special_page_read(struct zram *zram, u32 index,
> > +   struct page *page,
> > +   unsigned int offset, unsigned int len)
> > +{
> > +   struct zram_meta *meta = zram->meta;
> > +
> > +   bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> > +   if (unlikely(!meta->table[index].handle) ||
> > +   zram_test_flag(meta, index, ZRAM_SAME)) {
> > +   void *mem;
> > +
> > +   bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> > +   mem = kmap_atomic(page);
> > +   zram_fill_page(mem + offset, len, meta->table[index].element);
> > +   kunmap_atomic(mem);
> > +   return true;
> > +   }
> > +   bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> > +
> > +   return false;
> > +}
> > +
> > +static bool zram_special_page_write(struct zram *zram, u32 index,
> > +   struct page *page)
> > +{
> > +   unsigned long element;
> > +   void *mem = kmap_atomic(page);
> > +
> > +   if (page_same_filled(mem, &element)) {
> > +   struct zram_meta *meta = zram->meta;
> > +
> > +   kunmap_atomic(mem);
> > +   /* Free memory associated with this sector now. */
> > +   bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> > +   zram_free_page(zram, index);
> > +   zram_set_flag(meta, index, ZRAM_SAME);
> > +   zram_set_element(meta, index, element);
> > +   bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> > +
> > +   atomic64_inc(&zram->stats.same_pages);
> > +   return true;
> > +   }
> > +   kunmap_atomic(mem);
> > +
> > +   return false;
> > +}
> 
> zram_special_page_read() and zram_special_page_write() have slightly
> different locking semantics.
> 
> zram_special_page_read() copies out the ZRAM_SAME page with the slot
> unlocked (can the slot get overwritten in the meantime?), while

IMHO, yes, it can be overwritten, but that doesn't corrupt the kernel.
I mean, if such a race happens, it is the user's fault; the user should
guard against the race. zram is a dumb block device, so it reads and
writes blocks as the user requests, but the one promise we should keep
is that it never corrupts the kernel. From that point of view, I don't
think it is a problem for zram_special_page_read to return stale data.

> zram_special_page_write() keeps the slot locked throughout the entire
> operation.

zram_special_page_write is different because it updates the zram table's
slot via zram_set_[flag|element], so it must be protected by the zram
slot lock.
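
For illustration, a minimal sketch (editor's addition, not code from the
patch) of the stricter alternative implied by the question above: snapshot
the element while still holding the slot lock, so the fill can never see a
slot that was rewritten after the unlock. It assumes the zram_meta helpers
from this patch; the _strict name is hypothetical.

static bool zram_special_page_read_strict(struct zram *zram, u32 index,
					struct page *page,
					unsigned int offset, unsigned int len)
{
	struct zram_meta *meta = zram->meta;
	unsigned long element;
	void *mem;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (likely(meta->table[index].handle) &&
			!zram_test_flag(meta, index, ZRAM_SAME)) {
		/* ordinary slot: not special, caller takes the slow path */
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		return false;
	}
	/* snapshot the element before dropping the slot lock */
	element = meta->table[index].element;
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	mem = kmap_atomic(page);
	zram_fill_page(mem + offset, len, element);
	kunmap_atomic(mem);
	return true;
}

The trade-off is a slightly longer lock hold in exchange for never filling
from a slot that has already been rewritten.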

> 
> >  static void zram_meta_free(struct zram_meta *meta, u64 disksize)
> >  {
> > size_t num_pages = disksize >> PAGE_SHIFT;
> > @@ -504,169 +548,104 @@ static void zram_free_page(struct zram *zram, 
> > size_t index)
> > zram_set_obj_size(meta, index, 0);
> >  }
> >  
> > -static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> > +static int zram_decompress_page(struct zram *zram, struct page *page, u32 
> > index)
> >  {
> > -   int ret = 0;
> > -   unsigned char *cmem;
> > -   struct zram_meta *meta = zram->meta;
> > +   int ret;
> > unsigned long handle;
> > unsigned int size;
> > +   void *src, *dst;
> > +   struct zram_meta *meta = zram->meta;
> > +
> > +   if (zram_special_page_read(zram, index, page, 0, PAGE_SIZE))
> > +   return 0;
> >  
> > bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> > handle = meta->table[index].handle;
> > size = zram_get_obj_size(meta, index);
> >  
> > -   if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
> > -   bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> > -   zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
> > -   return 0;
> > -   }
> > -
> > -   cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
> > +   src = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
> > if (size == PAGE_SIZE) {
> > -   copy_page(mem, cmem);
> > +   dst = kmap_atomic(page);
> > +   copy_page(dst, src);
> > +   kunmap_atomic(dst);
> > +   ret = 0;
> > } else {
> > struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
> >  
> > -   ret = zcomp_decompress(zstrm, cmem, size, mem);
> > +   dst = kmap_atomic(page);
> > +   ret = zcomp_decompress(zstrm, src, size, dst);
> > +   kunmap_atomic(dst);
> > zcomp_stream_put(zram->comp);
> > }
> > zs_unmap_object(meta->mem_pool, handle);
> > bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> >  
> > /* Should NEVER happen. Return bio error if it does. */
> > -   if (unlikely(ret)) {
> > +   if (unlikely(ret))
> > pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
> > -   return ret;
> > -   }
> >  
> > -   return 0;
> > +   return ret;
> >  }
> >  
> >  static int 

Re: [PATCH 2/5] zram: partial IO refactoring

2017-04-03 Thread Sergey Senozhatsky
Hello,

On (04/03/17 14:17), Minchan Kim wrote:
> +static bool zram_special_page_read(struct zram *zram, u32 index,
> + struct page *page,
> + unsigned int offset, unsigned int len)
> +{
> + struct zram_meta *meta = zram->meta;
> +
> + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> + if (unlikely(!meta->table[index].handle) ||
> + zram_test_flag(meta, index, ZRAM_SAME)) {
> + void *mem;
> +
> + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> + mem = kmap_atomic(page);
> + zram_fill_page(mem + offset, len, meta->table[index].element);
> + kunmap_atomic(mem);
> + return true;
> + }
> + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> +
> + return false;
> +}
> +
> +static bool zram_special_page_write(struct zram *zram, u32 index,
> + struct page *page)
> +{
> + unsigned long element;
> + void *mem = kmap_atomic(page);
> +
> + if (page_same_filled(mem, &element)) {
> + struct zram_meta *meta = zram->meta;
> +
> + kunmap_atomic(mem);
> + /* Free memory associated with this sector now. */
> + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> + zram_free_page(zram, index);
> + zram_set_flag(meta, index, ZRAM_SAME);
> + zram_set_element(meta, index, element);
> + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> +
> + atomic64_inc(&zram->stats.same_pages);
> + return true;
> + }
> + kunmap_atomic(mem);
> +
> + return false;
> +}

zram_special_page_read() and zram_special_page_write() have slightly
different locking semantics.

zram_special_page_read() copies out the ZRAM_SAME page with the slot
unlocked (can the slot get overwritten in the meantime?), while
zram_special_page_write() keeps the slot locked throughout the entire
operation.
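
As background, the primitive in play embeds the lock in the slot itself:
ZRAM_ACCESS names a bit inside table[index].value, the same word that holds
the slot's flags and object size, so each slot carries a one-bit spinlock
and no separate lock array is needed. A minimal sketch of the pattern
(editor's illustration; slot_set_flag is a hypothetical name, ZRAM_ACCESS
is assumed from the zram headers):

#include <linux/bit_spinlock.h>

static void slot_set_flag(unsigned long *value, unsigned long flag_bit)
{
	/* spin until the ZRAM_ACCESS bit is clear, then set it */
	bit_spin_lock(ZRAM_ACCESS, value);
	/* the other bits of the word may only change while the bit is held */
	*value |= flag_bit;
	/* clear the bit and release any spinning waiters */
	bit_spin_unlock(ZRAM_ACCESS, value);
}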

>  static void zram_meta_free(struct zram_meta *meta, u64 disksize)
>  {
>   size_t num_pages = disksize >> PAGE_SHIFT;
> @@ -504,169 +548,104 @@ static void zram_free_page(struct zram *zram, size_t 
> index)
>   zram_set_obj_size(meta, index, 0);
>  }
>  
> -static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> +static int zram_decompress_page(struct zram *zram, struct page *page, u32 
> index)
>  {
> - int ret = 0;
> - unsigned char *cmem;
> - struct zram_meta *meta = zram->meta;
> + int ret;
>   unsigned long handle;
>   unsigned int size;
> + void *src, *dst;
> + struct zram_meta *meta = zram->meta;
> +
> + if (zram_special_page_read(zram, index, page, 0, PAGE_SIZE))
> + return 0;
>  
>   bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
>   handle = meta->table[index].handle;
>   size = zram_get_obj_size(meta, index);
>  
> - if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
> - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> - zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
> - return 0;
> - }
> -
> - cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
> + src = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
>   if (size == PAGE_SIZE) {
> - copy_page(mem, cmem);
> + dst = kmap_atomic(page);
> + copy_page(dst, src);
> + kunmap_atomic(dst);
> + ret = 0;
>   } else {
>   struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
>  
> - ret = zcomp_decompress(zstrm, cmem, size, mem);
> + dst = kmap_atomic(page);
> + ret = zcomp_decompress(zstrm, src, size, dst);
> + kunmap_atomic(dst);
>   zcomp_stream_put(zram->comp);
>   }
>   zs_unmap_object(meta->mem_pool, handle);
>   bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
>  
>   /* Should NEVER happen. Return bio error if it does. */
> - if (unlikely(ret)) {
> + if (unlikely(ret))
>   pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
> - return ret;
> - }
>  
> - return 0;
> + return ret;
>  }
>  
>  static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> -   u32 index, int offset)
> + u32 index, int offset)
>  {
>   int ret;
>   struct page *page;
> - unsigned char *user_mem, *uncmem = NULL;
> - struct zram_meta *meta = zram->meta;
> - page = bvec->bv_page;
>  
> - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> - if (unlikely(!meta->table[index].handle) ||
> - zram_test_flag(meta, index, ZRAM_SAME)) {
> - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> - handle_same_page(bvec, meta->table[index].element);
> + page = bvec->bv_page;
> + if 

Re: [PATCH 2/5] zram: partial IO refactoring

2017-04-03 Thread Mika Penttilä
On 04/03/2017 09:12 AM, Minchan Kim wrote:
> Hi Mika,
> 
> On Mon, Apr 03, 2017 at 08:52:33AM +0300, Mika Penttilä wrote:
>>
>> Hi!
>>
>> On 04/03/2017 08:17 AM, Minchan Kim wrote:
>>> For architectures with PAGE_SIZE > 4K, zram has supported partial IO.
>>> However, the mixed normal/partial IO handling code is too messy and
>>> error-prone to modify the IO handler functions for an upcoming feature,
>>> so this patch aims to clean up zram's IO handling functions.
>>>
>>> Signed-off-by: Minchan Kim 
>>> ---
>>>  drivers/block/zram/zram_drv.c | 333 
>>> +++---
>>>  1 file changed, 184 insertions(+), 149 deletions(-)
>>>
>>> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
>>> index 28c2836f8c96..7938f4b98b01 100644
>>> --- a/drivers/block/zram/zram_drv.c
>>> +++ b/drivers/block/zram/zram_drv.c
>>> @@ -45,6 +45,8 @@ static const char *default_compressor = "lzo";
>>>  /* Module params (documentation at end) */
>>>  static unsigned int num_devices = 1;
>>>  
>>> +static void zram_free_page(struct zram *zram, size_t index);
>>> +
>>>  static inline bool init_done(struct zram *zram)
>>>  {
>>> return zram->disksize;
>>> @@ -98,10 +100,17 @@ static void zram_set_obj_size(struct zram_meta *meta,
>>> meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
>>>  }
>>>  
>>> +#if PAGE_SIZE != 4096
>>>  static inline bool is_partial_io(struct bio_vec *bvec)
>>>  {
>>> return bvec->bv_len != PAGE_SIZE;
>>>  }
>>> +#else
>>
>> For a page size of 4096, bv_len can still be < 4096, and partial pages
>> should be supported (uncompress before write, etc.), no?
> 
> zram declares this.
> 
> #define ZRAM_LOGICAL_BLOCK_SIZE (1<<12)
> 
>   blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
>   blk_queue_logical_block_size(zram->disk->queue,
>   ZRAM_LOGICAL_BLOCK_SIZE);
> 
> So I thought there is no such partial IO on a 4096-page architecture.
> Am I missing something? Could you describe the scenario if it happens?

I think you're right. At least swap operates with a minimum size of 4096.

> 
> Thanks!
> 
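
One note on the #if PAGE_SIZE != 4096 hunk quoted above: the value of the
constant-false stub is that the compiler can discard every partial-IO
branch at build time on 4K-page systems. A small self-contained sketch
(editor's illustration; handle_full is a hypothetical caller):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096			/* assumed 4K-page build */

struct bio_vec { unsigned int bv_len; };

#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	(void)bvec;
	return false;			/* compile-time constant */
}
#endif

static void handle_full(struct bio_vec *bvec)
{
	printf("full page IO, len=%u\n", bvec->bv_len);
}

int main(void)
{
	struct bio_vec bv = { .bv_len = PAGE_SIZE };

	/* with the constant-false stub, this branch is eliminated entirely */
	if (is_partial_io(&bv))
		printf("partial IO path (unreachable on 4K pages)\n");
	else
		handle_full(&bv);
	return 0;
}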



Re: [PATCH 2/5] zram: partial IO refactoring

2017-04-03 Thread Minchan Kim
Hi Mika,

On Mon, Apr 03, 2017 at 08:52:33AM +0300, Mika Penttilä wrote:
> 
> Hi!
> 
> On 04/03/2017 08:17 AM, Minchan Kim wrote:
> > For architectures with PAGE_SIZE > 4K, zram has supported partial IO.
> > However, the mixed normal/partial IO handling code is too messy and
> > error-prone to modify the IO handler functions for an upcoming feature,
> > so this patch aims to clean up zram's IO handling functions.
> > 
> > Signed-off-by: Minchan Kim 
> > ---
> >  drivers/block/zram/zram_drv.c | 333 
> > +++---
> >  1 file changed, 184 insertions(+), 149 deletions(-)
> > 
> > diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> > index 28c2836f8c96..7938f4b98b01 100644
> > --- a/drivers/block/zram/zram_drv.c
> > +++ b/drivers/block/zram/zram_drv.c
> > @@ -45,6 +45,8 @@ static const char *default_compressor = "lzo";
> >  /* Module params (documentation at end) */
> >  static unsigned int num_devices = 1;
> >  
> > +static void zram_free_page(struct zram *zram, size_t index);
> > +
> >  static inline bool init_done(struct zram *zram)
> >  {
> > return zram->disksize;
> > @@ -98,10 +100,17 @@ static void zram_set_obj_size(struct zram_meta *meta,
> > meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
> >  }
> >  
> > +#if PAGE_SIZE != 4096
> >  static inline bool is_partial_io(struct bio_vec *bvec)
> >  {
> > return bvec->bv_len != PAGE_SIZE;
> >  }
> > +#else
> 
> For a page size of 4096, bv_len can still be < 4096, and partial pages
> should be supported (uncompress before write, etc.), no?

zram declares this.

#define ZRAM_LOGICAL_BLOCK_SIZE (1<<12)

blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
blk_queue_logical_block_size(zram->disk->queue,
ZRAM_LOGICAL_BLOCK_SIZE);

So I thought there is no such partial IO on a 4096-page architecture.
Am I missing something? Could you describe the scenario if it happens?

Thanks!
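
To spell out the arithmetic behind this (editor's note): the block layer
never issues an IO smaller than the logical block size, so every request
zram sees is a multiple of 4096 bytes, and splitting such a request into
per-page bvecs with PAGE_SIZE == 4096 always yields full pages. A minimal
self-contained check under those assumptions:

#include <assert.h>

#define PAGE_SIZE		4096
#define ZRAM_LOGICAL_BLOCK_SIZE	(1 << 12)	/* 4096, as zram declares */

int main(void)
{
	unsigned int len;

	/* walk a few request sizes, each a multiple of the logical block */
	for (len = ZRAM_LOGICAL_BLOCK_SIZE; len <= 8u * PAGE_SIZE;
			len += ZRAM_LOGICAL_BLOCK_SIZE) {
		unsigned int remaining = len;

		while (remaining) {
			/* split the request into per-page bio vectors */
			unsigned int bv_len =
				remaining < PAGE_SIZE ? remaining : PAGE_SIZE;

			/* a bvec is never sub-page here, so is_partial_io()
			 * can never return true on this configuration */
			assert(bv_len == PAGE_SIZE);
			remaining -= bv_len;
		}
	}
	return 0;
}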



Re: [PATCH 2/5] zram: partial IO refactoring

2017-04-02 Thread Mika Penttilä

Hi!

On 04/03/2017 08:17 AM, Minchan Kim wrote:
> For architectures with PAGE_SIZE > 4K, zram has supported partial IO.
> However, the mixed normal/partial IO handling code is too messy and
> error-prone to modify the IO handler functions for an upcoming feature,
> so this patch aims to clean up zram's IO handling functions.
> 
> Signed-off-by: Minchan Kim 
> ---
>  drivers/block/zram/zram_drv.c | 333 
> +++---
>  1 file changed, 184 insertions(+), 149 deletions(-)
> 
> diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
> index 28c2836f8c96..7938f4b98b01 100644
> --- a/drivers/block/zram/zram_drv.c
> +++ b/drivers/block/zram/zram_drv.c
> @@ -45,6 +45,8 @@ static const char *default_compressor = "lzo";
>  /* Module params (documentation at end) */
>  static unsigned int num_devices = 1;
>  
> +static void zram_free_page(struct zram *zram, size_t index);
> +
>  static inline bool init_done(struct zram *zram)
>  {
>   return zram->disksize;
> @@ -98,10 +100,17 @@ static void zram_set_obj_size(struct zram_meta *meta,
>   meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
>  }
>  
> +#if PAGE_SIZE != 4096
>  static inline bool is_partial_io(struct bio_vec *bvec)
>  {
>   return bvec->bv_len != PAGE_SIZE;
>  }
> +#else

For a page size of 4096, bv_len can still be < 4096, and partial pages
should be supported (uncompress before write, etc.), no?
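
For readers unfamiliar with the PAGE_SIZE > 4K case referred to here, the
"uncompress before write" pattern is a read-modify-write: decompress the
whole page, splice in the sub-page data, then compress and store the
result. An editor's sketch, reusing the patch's zram_decompress_page()
signature; zram_compress_and_store is a hypothetical helper:

static int zram_partial_write_sketch(struct zram *zram, u32 index,
				struct page *bounce, /* full PAGE_SIZE buffer */
				const void *src, unsigned int offset,
				unsigned int len)
{
	void *dst;
	int ret;

	/* read: decompress the currently stored page into the bounce page */
	ret = zram_decompress_page(zram, bounce, index);
	if (ret)
		return ret;

	/* modify: overwrite only the sub-page range being written */
	dst = kmap_atomic(bounce);
	memcpy(dst + offset, src, len);
	kunmap_atomic(dst);

	/* write: compress the whole page and store it back in the slot */
	return zram_compress_and_store(zram, index, bounce);
}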

> +static inline bool is_partial_io(struct bio_vec *bvec)
> +{
> + return false;
> +}
> +#endif
>  
>  static void zram_revalidate_disk(struct zram *zram)
>  {
> @@ -191,18 +200,6 @@ static bool page_same_filled(void *ptr, unsigned long 
> *element)
>   return true;
>  }
>  
> -static void handle_same_page(struct bio_vec *bvec, unsigned long element)
> -{
> - struct page *page = bvec->bv_page;
> - void *user_mem;
> -
> - user_mem = kmap_atomic(page);
> - zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element);
> - kunmap_atomic(user_mem);
> -
> - flush_dcache_page(page);
> -}
> -
>  static ssize_t initstate_show(struct device *dev,
>   struct device_attribute *attr, char *buf)
>  {
> @@ -418,6 +415,53 @@ static DEVICE_ATTR_RO(io_stat);
>  static DEVICE_ATTR_RO(mm_stat);
>  static DEVICE_ATTR_RO(debug_stat);
>  
> +static bool zram_special_page_read(struct zram *zram, u32 index,
> + struct page *page,
> + unsigned int offset, unsigned int len)
> +{
> + struct zram_meta *meta = zram->meta;
> +
> + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> + if (unlikely(!meta->table[index].handle) ||
> + zram_test_flag(meta, index, ZRAM_SAME)) {
> + void *mem;
> +
> + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> + mem = kmap_atomic(page);
> + zram_fill_page(mem + offset, len, meta->table[index].element);
> + kunmap_atomic(mem);
> + return true;
> + }
> + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> +
> + return false;
> +}
> +
> +static bool zram_special_page_write(struct zram *zram, u32 index,
> + struct page *page)
> +{
> + unsigned long element;
> + void *mem = kmap_atomic(page);
> +
> + if (page_same_filled(mem, &element)) {
> + struct zram_meta *meta = zram->meta;
> +
> + kunmap_atomic(mem);
> + /* Free memory associated with this sector now. */
> + bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> + zram_free_page(zram, index);
> + zram_set_flag(meta, index, ZRAM_SAME);
> + zram_set_element(meta, index, element);
> + bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> +
> + atomic64_inc(&zram->stats.same_pages);
> + return true;
> + }
> + kunmap_atomic(mem);
> +
> + return false;
> +}
> +
>  static void zram_meta_free(struct zram_meta *meta, u64 disksize)
>  {
>   size_t num_pages = disksize >> PAGE_SHIFT;
> @@ -504,169 +548,104 @@ static void zram_free_page(struct zram *zram, size_t 
> index)
>   zram_set_obj_size(meta, index, 0);
>  }
>  
> -static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> +static int zram_decompress_page(struct zram *zram, struct page *page, u32 
> index)
>  {
> - int ret = 0;
> - unsigned char *cmem;
> - struct zram_meta *meta = zram->meta;
> + int ret;
>   unsigned long handle;
>   unsigned int size;
> + void *src, *dst;
> + struct zram_meta *meta = zram->meta;
> +
> + if (zram_special_page_read(zram, index, page, 0, PAGE_SIZE))
> + return 0;
>  
>   bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
>   handle = meta->table[index].handle;
>   size = zram_get_obj_size(meta, index);
>  
> - if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
> - 

[PATCH 2/5] zram: partial IO refactoring

2017-04-02 Thread Minchan Kim
For architectures with PAGE_SIZE > 4K, zram has supported partial IO.
However, the mixed normal/partial IO handling code is too messy and
error-prone to modify the IO handler functions for an upcoming feature,
so this patch aims to clean up zram's IO handling functions.

Signed-off-by: Minchan Kim 
---
 drivers/block/zram/zram_drv.c | 333 +++---
 1 file changed, 184 insertions(+), 149 deletions(-)

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 28c2836f8c96..7938f4b98b01 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -45,6 +45,8 @@ static const char *default_compressor = "lzo";
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
 
+static void zram_free_page(struct zram *zram, size_t index);
+
 static inline bool init_done(struct zram *zram)
 {
return zram->disksize;
@@ -98,10 +100,17 @@ static void zram_set_obj_size(struct zram_meta *meta,
meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
+#if PAGE_SIZE != 4096
 static inline bool is_partial_io(struct bio_vec *bvec)
 {
return bvec->bv_len != PAGE_SIZE;
 }
+#else
+static inline bool is_partial_io(struct bio_vec *bvec)
+{
+   return false;
+}
+#endif
 
 static void zram_revalidate_disk(struct zram *zram)
 {
@@ -191,18 +200,6 @@ static bool page_same_filled(void *ptr, unsigned long 
*element)
return true;
 }
 
-static void handle_same_page(struct bio_vec *bvec, unsigned long element)
-{
-   struct page *page = bvec->bv_page;
-   void *user_mem;
-
-   user_mem = kmap_atomic(page);
-   zram_fill_page(user_mem + bvec->bv_offset, bvec->bv_len, element);
-   kunmap_atomic(user_mem);
-
-   flush_dcache_page(page);
-}
-
 static ssize_t initstate_show(struct device *dev,
struct device_attribute *attr, char *buf)
 {
@@ -418,6 +415,53 @@ static DEVICE_ATTR_RO(io_stat);
 static DEVICE_ATTR_RO(mm_stat);
 static DEVICE_ATTR_RO(debug_stat);
 
+static bool zram_special_page_read(struct zram *zram, u32 index,
+   struct page *page,
+   unsigned int offset, unsigned int len)
+{
+   struct zram_meta *meta = zram->meta;
+
+   bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+   if (unlikely(!meta->table[index].handle) ||
+   zram_test_flag(meta, index, ZRAM_SAME)) {
+   void *mem;
+
+   bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+   mem = kmap_atomic(page);
+   zram_fill_page(mem + offset, len, meta->table[index].element);
+   kunmap_atomic(mem);
+   return true;
+   }
+   bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+
+   return false;
+}
+
+static bool zram_special_page_write(struct zram *zram, u32 index,
+   struct page *page)
+{
+   unsigned long element;
+   void *mem = kmap_atomic(page);
+
+   if (page_same_filled(mem, &element)) {
+   struct zram_meta *meta = zram->meta;
+
+   kunmap_atomic(mem);
+   /* Free memory associated with this sector now. */
+   bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+   zram_free_page(zram, index);
+   zram_set_flag(meta, index, ZRAM_SAME);
+   zram_set_element(meta, index, element);
+   bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+
+   atomic64_inc(&zram->stats.same_pages);
+   return true;
+   }
+   kunmap_atomic(mem);
+
+   return false;
+}
+
 static void zram_meta_free(struct zram_meta *meta, u64 disksize)
 {
size_t num_pages = disksize >> PAGE_SHIFT;
@@ -504,169 +548,104 @@ static void zram_free_page(struct zram *zram, size_t 
index)
zram_set_obj_size(meta, index, 0);
 }
 
-static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
+static int zram_decompress_page(struct zram *zram, struct page *page, u32 
index)
 {
-   int ret = 0;
-   unsigned char *cmem;
-   struct zram_meta *meta = zram->meta;
+   int ret;
unsigned long handle;
unsigned int size;
+   void *src, *dst;
+   struct zram_meta *meta = zram->meta;
+
+   if (zram_special_page_read(zram, index, page, 0, PAGE_SIZE))
+   return 0;
 
bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
handle = meta->table[index].handle;
size = zram_get_obj_size(meta, index);

-   if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
-   bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
-   zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
-   return 0;
-   }
-
-   cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+   src = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
-   copy_page(mem, cmem);
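
For reference, the two helpers the special-page paths above rely on,
page_same_filled() and zram_fill_page(), look roughly as follows. This is
an editor's sketch modeled on the mainline zram helpers; the exact bodies
in this series may differ.

/* true iff the page is one repeated word; report that word via *element */
static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned long *page = ptr;
	unsigned long val = page[0];
	unsigned int pos;

	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;
	return true;
}

/* reconstruct such a page: fill len bytes at ptr with the saved word */
static void zram_fill_page(char *ptr, unsigned long len, unsigned long value)
{
	unsigned long *page = (unsigned long *)ptr;
	unsigned int pos;

	if (value == 0) {
		memset(ptr, 0, len);
		return;
	}
	for (pos = 0; pos < len / sizeof(*page); pos++)
		page[pos] = value;
}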