Re: [PATCH v3 2/5] zero-filled pages awareness
On Fri, Mar 15, 2013 at 10:34:17AM +0800, Wanpeng Li wrote:
> Compression of zero-filled pages can unnecessarily cause internal
> fragmentation, and thus waste memory. This special case can be
> optimized.
>
> This patch captures zero-filled pages, and marks their corresponding
> zcache backing page entry as zero-filled. Whenever such a zero-filled
> page is retrieved, we fill the page frame with zeros.
>
> Acked-by: Dan Magenheimer
> Signed-off-by: Wanpeng Li

This patch applies with a bunch of fuzz, meaning it wasn't made against
the latest tree, which worries me.

Care to redo it, and the rest of the series, and resend it?

thanks,

greg k-h
Re: [PATCH v3 2/5] zero-filled pages awareness
On 03/15/2013 10:34 AM, Wanpeng Li wrote:
> Compression of zero-filled pages can unnecessarily cause internal
> fragmentation, and thus waste memory. This special case can be
> optimized.
>
> This patch captures zero-filled pages, and marks their corresponding
> zcache backing page entry as zero-filled. Whenever such a zero-filled
> page is retrieved, we fill the page frame with zeros.
>
> Acked-by: Dan Magenheimer
> Signed-off-by: Wanpeng Li
> ---
>  drivers/staging/zcache/zcache-main.c | 81 +++---
>  1 files changed, 75 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
> index d73dd4b..6c35c7d 100644
> --- a/drivers/staging/zcache/zcache-main.c
> +++ b/drivers/staging/zcache/zcache-main.c
> @@ -59,6 +59,12 @@ static inline void frontswap_tmem_exclusive_gets(bool b)
>  }
>  #endif
>
> +/*
> + * mark pampd with a special value so that a later
> + * retrieve will identify zero-filled pages
> + */
> +#define ZERO_FILLED 0x2
> +
>  /* enable (or fix code) when Seth's patches are accepted upstream */
>  #define zcache_writeback_enabled 0
>
> @@ -543,7 +549,23 @@ static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
>  {
>  	void *pampd = NULL, *cdata = data;
>  	unsigned clen = size;
> +	bool zero_filled = false;
>  	struct page *page = (struct page *)(data), *newpage;
> +	char *user_mem;
> +
> +	user_mem = kmap_atomic(page);
> +
> +	/*
> +	 * Compressing zero-filled pages will waste memory and introduce
> +	 * serious fragmentation; skip them to avoid the overhead
> +	 */
> +	if (page_is_zero_filled(user_mem)) {
> +		kunmap_atomic(user_mem);
> +		clen = 0;
> +		zero_filled = true;
> +		goto got_pampd;
> +	}
> +	kunmap_atomic(user_mem);
>
>  	if (!raw) {
>  		zcache_compress(page, &cdata, &clen);
> @@ -592,6 +614,8 @@ got_pampd:
>  		zcache_eph_zpages_max = zcache_eph_zpages;
>  	if (ramster_enabled && raw)
>  		ramster_count_foreign_pages(true, 1);
> +	if (zero_filled)
> +		pampd = (void *)ZERO_FILLED;
>  out:
>  	return pampd;
>  }
> @@ -601,14 +625,31 @@ static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
>  {
>  	void *pampd = NULL, *cdata = data;
>  	unsigned clen = size;
> +	bool zero_filled = false;
>  	struct page *page = (struct page *)(data), *newpage;
>  	unsigned long zbud_mean_zsize;
>  	unsigned long curr_pers_zpages, total_zsize;
> +	char *user_mem;
>
>  	if (data == NULL) {
>  		BUG_ON(!ramster_enabled);
>  		goto create_pampd;
>  	}
> +
> +	user_mem = kmap_atomic(page);
> +
> +	/*
> +	 * Compressing zero-filled pages will waste memory and introduce
> +	 * serious fragmentation; skip them to avoid the overhead
> +	 */
> +	if (page_is_zero_filled(page)) {
> +		kunmap_atomic(user_mem);
> +		clen = 0;
> +		zero_filled = true;
> +		goto got_pampd;
> +	}
> +	kunmap_atomic(user_mem);
> +

Maybe we can add a function for this code? It seems a bit duplicated.

>  	curr_pers_zpages = zcache_pers_zpages;
>  	/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
>  	if (!raw)
> @@ -674,6 +715,8 @@ got_pampd:
>  		zcache_pers_zbytes_max = zcache_pers_zbytes;
>  	if (ramster_enabled && raw)
>  		ramster_count_foreign_pages(false, 1);
> +	if (zero_filled)
> +		pampd = (void *)ZERO_FILLED;
>  out:
>  	return pampd;
>  }
> @@ -735,7 +778,8 @@ out:
>   */
>  void zcache_pampd_create_finish(void *pampd, bool eph)
>  {
> -	zbud_create_finish((struct zbudref *)pampd, eph);
> +	if (pampd != (void *)ZERO_FILLED)
> +		zbud_create_finish((struct zbudref *)pampd, eph);
>  }
>
>  /*
> @@ -780,6 +824,14 @@ static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
>  	BUG_ON(preemptible());
>  	BUG_ON(eph);	/* fix later if shared pools get implemented */
>  	BUG_ON(pampd_is_remote(pampd));
> +
> +	if (pampd == (void *)ZERO_FILLED) {
> +		handle_zero_filled_page(data);
> +		if (!raw)
> +			*sizep = PAGE_SIZE;
> +		return 0;
> +	}
> +
>  	if (raw)
>  		ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
>  					sizep, eph);
> @@ -801,12 +853,21 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
>  					struct tmem_oid *oid, uint32_t index)
>  {
>  	int ret;
> -	bool eph = !is_persistent(pool);
> +	bool eph = !is_persistent(pool), zero_filled = false;
>  	struct page *page = NULL;
>  	unsigned int zsize, zpages;
>
>  	BUG_ON(preemptible());
>  	BUG_ON(pampd_is_remote(pampd));
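The duplication the review comment points at, the kmap_atomic()/page_is_zero_filled()/kunmap_atomic() sequence repeated in both create paths, could be factored out roughly as follows. This is only a sketch; the helper name and exact shape are assumptions, not something posted in this thread:

/*
 * Hypothetical helper (not part of the posted patch): fold the
 * kmap_atomic()/page_is_zero_filled()/kunmap_atomic() sequence that is
 * open-coded in both zcache_pampd_eph_create() and
 * zcache_pampd_pers_create().
 */
static bool zcache_page_zero_filled(struct page *page)
{
	bool zero_filled;
	char *user_mem;

	user_mem = kmap_atomic(page);
	zero_filled = page_is_zero_filled(user_mem);
	kunmap_atomic(user_mem);

	return zero_filled;
}

Both create paths could then call this once and jump to got_pampd with clen = 0, instead of repeating the mapping and check inline.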
[PATCH v3 2/5] zero-filled pages awareness
Compression of zero-filled pages can unnecessarily cause internal
fragmentation, and thus waste memory. This special case can be
optimized.

This patch captures zero-filled pages, and marks their corresponding
zcache backing page entry as zero-filled. Whenever such a zero-filled
page is retrieved, we fill the page frame with zeros.

Acked-by: Dan Magenheimer
Signed-off-by: Wanpeng Li
---
 drivers/staging/zcache/zcache-main.c | 81 +++---
 1 files changed, 75 insertions(+), 6 deletions(-)

diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index d73dd4b..6c35c7d 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -59,6 +59,12 @@ static inline void frontswap_tmem_exclusive_gets(bool b)
 }
 #endif
 
+/*
+ * mark pampd with a special value so that a later
+ * retrieve will identify zero-filled pages
+ */
+#define ZERO_FILLED 0x2
+
 /* enable (or fix code) when Seth's patches are accepted upstream */
 #define zcache_writeback_enabled 0
 
@@ -543,7 +549,23 @@ static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
 {
 	void *pampd = NULL, *cdata = data;
 	unsigned clen = size;
+	bool zero_filled = false;
 	struct page *page = (struct page *)(data), *newpage;
+	char *user_mem;
+
+	user_mem = kmap_atomic(page);
+
+	/*
+	 * Compressing zero-filled pages will waste memory and introduce
+	 * serious fragmentation; skip them to avoid the overhead
+	 */
+	if (page_is_zero_filled(user_mem)) {
+		kunmap_atomic(user_mem);
+		clen = 0;
+		zero_filled = true;
+		goto got_pampd;
+	}
+	kunmap_atomic(user_mem);
 
 	if (!raw) {
 		zcache_compress(page, &cdata, &clen);
@@ -592,6 +614,8 @@ got_pampd:
 		zcache_eph_zpages_max = zcache_eph_zpages;
 	if (ramster_enabled && raw)
 		ramster_count_foreign_pages(true, 1);
+	if (zero_filled)
+		pampd = (void *)ZERO_FILLED;
 out:
 	return pampd;
 }
@@ -601,14 +625,31 @@ static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
 {
 	void *pampd = NULL, *cdata = data;
 	unsigned clen = size;
+	bool zero_filled = false;
 	struct page *page = (struct page *)(data), *newpage;
 	unsigned long zbud_mean_zsize;
 	unsigned long curr_pers_zpages, total_zsize;
+	char *user_mem;
 
 	if (data == NULL) {
 		BUG_ON(!ramster_enabled);
 		goto create_pampd;
 	}
+
+	user_mem = kmap_atomic(page);
+
+	/*
+	 * Compressing zero-filled pages will waste memory and introduce
+	 * serious fragmentation; skip them to avoid the overhead
+	 */
+	if (page_is_zero_filled(page)) {
+		kunmap_atomic(user_mem);
+		clen = 0;
+		zero_filled = true;
+		goto got_pampd;
+	}
+	kunmap_atomic(user_mem);
+
 	curr_pers_zpages = zcache_pers_zpages;
 	/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
 	if (!raw)
@@ -674,6 +715,8 @@ got_pampd:
 		zcache_pers_zbytes_max = zcache_pers_zbytes;
 	if (ramster_enabled && raw)
 		ramster_count_foreign_pages(false, 1);
+	if (zero_filled)
+		pampd = (void *)ZERO_FILLED;
 out:
 	return pampd;
 }
@@ -735,7 +778,8 @@ out:
  */
 void zcache_pampd_create_finish(void *pampd, bool eph)
 {
-	zbud_create_finish((struct zbudref *)pampd, eph);
+	if (pampd != (void *)ZERO_FILLED)
+		zbud_create_finish((struct zbudref *)pampd, eph);
 }
 
 /*
@@ -780,6 +824,14 @@ static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
 	BUG_ON(preemptible());
 	BUG_ON(eph);	/* fix later if shared pools get implemented */
 	BUG_ON(pampd_is_remote(pampd));
+
+	if (pampd == (void *)ZERO_FILLED) {
+		handle_zero_filled_page(data);
+		if (!raw)
+			*sizep = PAGE_SIZE;
+		return 0;
+	}
+
 	if (raw)
 		ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
 					sizep, eph);
@@ -801,12 +853,21 @@ static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
 					struct tmem_oid *oid, uint32_t index)
 {
 	int ret;
-	bool eph = !is_persistent(pool);
+	bool eph = !is_persistent(pool), zero_filled = false;
 	struct page *page = NULL;
 	unsigned int zsize, zpages;
 
 	BUG_ON(preemptible());
 	BUG_ON(pampd_is_remote(pampd));
+
+	if (pampd == (void *)ZERO_FILLED) {
+		handle_zero_filled_page(data);
+		zero_filled = true;
+		if (!raw)
+			*sizep = PAGE_SIZE;
+		goto zero_fill;
+
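The hunks visible above call page_is_zero_filled() and handle_zero_filled_page(), whose definitions fall outside the quoted portion of the patch. A minimal sketch of what such helpers could look like, consistent with how they are used above but not necessarily identical to the patch's own definitions (assumes the headers already pulled in by zcache-main.c, e.g. <linux/highmem.h> and <linux/mm.h>):

/* Sketch only: scan a kmapped page one word at a time for non-zero data. */
static bool page_is_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page = (unsigned long *)ptr;

	for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++)
		if (page[pos])
			return false;

	return true;
}

/* Sketch only: satisfy a get of a zero-filled entry by zeroing the
 * destination page frame instead of decompressing anything. */
static void handle_zero_filled_page(void *p)
{
	struct page *page = (struct page *)p;
	void *user_mem;

	user_mem = kmap_atomic(page);
	memset(user_mem, 0, PAGE_SIZE);
	kunmap_atomic(user_mem);
	flush_dcache_page(page);
}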