Hello,

On (04/03/17 14:17), Minchan Kim wrote:
> +static bool zram_special_page_read(struct zram *zram, u32 index,
> +                             struct page *page,
> +                             unsigned int offset, unsigned int len)
> +{
> +     struct zram_meta *meta = zram->meta;
> +
> +     bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> +     if (unlikely(!meta->table[index].handle) ||
> +                     zram_test_flag(meta, index, ZRAM_SAME)) {
> +             void *mem;
> +
> +             bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> +             mem = kmap_atomic(page);
> +             zram_fill_page(mem + offset, len, meta->table[index].element);
> +             kunmap_atomic(mem);
> +             return true;
> +     }
> +     bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> +
> +     return false;
> +}
> +
> +static bool zram_special_page_write(struct zram *zram, u32 index,
> +                                     struct page *page)
> +{
> +     unsigned long element;
> +     void *mem = kmap_atomic(page);
> +
> +     if (page_same_filled(mem, &element)) {
> +             struct zram_meta *meta = zram->meta;
> +
> +             kunmap_atomic(mem);
> +             /* Free memory associated with this sector now. */
> +             bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> +             zram_free_page(zram, index);
> +             zram_set_flag(meta, index, ZRAM_SAME);
> +             zram_set_element(meta, index, element);
> +             bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> +
> +             atomic64_inc(&zram->stats.same_pages);
> +             return true;
> +     }
> +     kunmap_atomic(mem);
> +
> +     return false;
> +}

zram_special_page_read() and zram_special_page_write() have slightly
different locking semantics.

zram_special_page_read() copies out a ZRAM_SAME page with the slot
unlocked (can the slot get overwritten in the meantime?), while
zram_special_page_write() keeps the slot locked throughout the entire
operation.
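
Just to illustrate what I mean, a sketch of a copy-out that keeps the
slot locked across the fill (re-using the helpers from the patch;
kmap_atomic() nests fine under the bit spinlock, both are non-sleeping):

static bool zram_special_page_read(struct zram *zram, u32 index,
				struct page *page,
				unsigned int offset, unsigned int len)
{
	struct zram_meta *meta = zram->meta;
	void *mem;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (likely(meta->table[index].handle &&
			!zram_test_flag(meta, index, ZRAM_SAME))) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		return false;
	}

	/* read ->element and fill the page while the slot is still locked */
	mem = kmap_atomic(page);
	zram_fill_page(mem + offset, len, meta->table[index].element);
	kunmap_atomic(mem);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	return true;
}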

>  static void zram_meta_free(struct zram_meta *meta, u64 disksize)
>  {
>       size_t num_pages = disksize >> PAGE_SHIFT;
> @@ -504,169 +548,104 @@ static void zram_free_page(struct zram *zram, size_t index)
>       zram_set_obj_size(meta, index, 0);
>  }
>  
> -static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
> +static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
>  {
> -     int ret = 0;
> -     unsigned char *cmem;
> -     struct zram_meta *meta = zram->meta;
> +     int ret;
>       unsigned long handle;
>       unsigned int size;
> +     void *src, *dst;
> +     struct zram_meta *meta = zram->meta;
> +
> +     if (zram_special_page_read(zram, index, page, 0, PAGE_SIZE))
> +             return 0;
>  
>       bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
>       handle = meta->table[index].handle;
>       size = zram_get_obj_size(meta, index);
>  
> -     if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
> -             bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> -             zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
> -             return 0;
> -     }
> -
> -     cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
> +     src = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
>       if (size == PAGE_SIZE) {
> -             copy_page(mem, cmem);
> +             dst = kmap_atomic(page);
> +             copy_page(dst, src);
> +             kunmap_atomic(dst);
> +             ret = 0;
>       } else {
>               struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
>  
> -             ret = zcomp_decompress(zstrm, cmem, size, mem);
> +             dst = kmap_atomic(page);
> +             ret = zcomp_decompress(zstrm, src, size, dst);
> +             kunmap_atomic(dst);
>               zcomp_stream_put(zram->comp);
>       }
>       zs_unmap_object(meta->mem_pool, handle);
>       bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
>  
>       /* Should NEVER happen. Return bio error if it does. */
> -     if (unlikely(ret)) {
> +     if (unlikely(ret))
>               pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
> -             return ret;
> -     }
>  
> -     return 0;
> +     return ret;
>  }
>  
>  static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
> -                       u32 index, int offset)
> +                             u32 index, int offset)
>  {
>       int ret;
>       struct page *page;
> -     unsigned char *user_mem, *uncmem = NULL;
> -     struct zram_meta *meta = zram->meta;
> -     page = bvec->bv_page;
>  
> -     bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
> -     if (unlikely(!meta->table[index].handle) ||
> -                     zram_test_flag(meta, index, ZRAM_SAME)) {
> -             bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> -             handle_same_page(bvec, meta->table[index].element);
> +     page = bvec->bv_page;
> +     if (zram_special_page_read(zram, index, page, bvec->bv_offset,
> +                             bvec->bv_len))

so, I think the zram_bvec_read() path calls zram_special_page_read() twice:

  a) a direct zram_special_page_read() call

  b) zram_decompress_page()->zram_special_page_read()

is that intentional?
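
If the double check is not intentional, one possible shape (just a
sketch, assuming zram_decompress_page() keeps its internal check for
its other callers) would be to do the direct copy-out only for partial
I/O, where it saves the temporary page:

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		if (zram_special_page_read(zram, index, page,
					bvec->bv_offset, bvec->bv_len))
			return 0;
		/* Use a temporary buffer to decompress the page */
		page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
	}

	ret = zram_decompress_page(zram, page, index);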

>               return 0;
> -     }
> -     bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
> -
> -     if (is_partial_io(bvec))
> -             /* Use  a temporary buffer to decompress the page */
> -             uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
> -
> -     user_mem = kmap_atomic(page);
> -     if (!is_partial_io(bvec))
> -             uncmem = user_mem;
>  
> -     if (!uncmem) {
> -             pr_err("Unable to allocate temp memory\n");
> -             ret = -ENOMEM;
> -             goto out_cleanup;
> +     if (is_partial_io(bvec)) {
> +             /* Use a temporary buffer to decompress the page */
> +             page = alloc_page(GFP_NOIO|__GFP_HIGHMEM);
> +             if (!page)
> +                     return -ENOMEM;
>       }
>  
> -     ret = zram_decompress_page(zram, uncmem, index);
> -     /* Should NEVER happen. Return bio error if it does. */
> +     ret = zram_decompress_page(zram, page, index);
>       if (unlikely(ret))
> -             goto out_cleanup;
> +             goto out;
>  
> -     if (is_partial_io(bvec))
> -             memcpy(user_mem + bvec->bv_offset, uncmem + offset,
> -                             bvec->bv_len);
> +     if (is_partial_io(bvec)) {
> +             void *dst = kmap_atomic(bvec->bv_page);
> +             void *src = kmap_atomic(page);
>  
> -     flush_dcache_page(page);
> -     ret = 0;
> -out_cleanup:
> -     kunmap_atomic(user_mem);
> +             memcpy(dst + bvec->bv_offset, src + offset, bvec->bv_len);
> +             kunmap_atomic(src);
> +             kunmap_atomic(dst);
> +     }
> +out:
>       if (is_partial_io(bvec))
> -             kfree(uncmem);
> +             __free_page(page);
> +
>       return ret;
>  }
>  
> -static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
> -                        int offset)
> +static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm,
> +                     struct page *page,
> +                     unsigned long *out_handle, unsigned int *out_comp_len)

ok, I see why... not super happy with the `zstrm' magic, but OK. can do.
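
I guess the double pointer is there so that zram_compress() can drop
the stream and hand back a (possibly different) one -- speculative,
just reading the signature, roughly this on the caller side:

	struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
	unsigned long handle;
	unsigned int comp_len;
	int ret;

	ret = zram_compress(zram, &zstrm, page, &handle, &comp_len);
	/*
	 * On return zstrm may point to a different per-CPU stream than
	 * the one taken above, so any later use has to go through the
	 * updated pointer before zcomp_stream_put().
	 */
	zcomp_stream_put(zram->comp);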

        -ss
