Re: [PATCH 3/4] staging: zsmalloc: add page alloc/free callbacks

2013-01-27 Thread Minchan Kim
On Sat, Jan 26, 2013 at 2:46 AM, Seth Jennings
<sjenn...@linux.vnet.ibm.com> wrote:
> This patch allows users of zsmalloc to register the
> allocation and free routines used by zsmalloc to obtain
> more pages for the memory pool.  This allows the user
> more control over zsmalloc pool policy and behavior.
>
> If the user does not wish to control this, alloc_page() and
> __free_page() are used by default.
>
> Acked-by: Nitin Gupta <ngu...@vflare.org>
> Signed-off-by: Seth Jennings <sjenn...@linux.vnet.ibm.com>
Acked-by: Minchan Kim <minc...@kernel.org>

-- 
Kind regards,
Minchan Kim
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH 3/4] staging: zsmalloc: add page alloc/free callbacks

2013-01-27 Thread Minchan Kim
On Sat, Jan 26, 2013 at 2:46 AM, Seth Jennings
<sjenn...@linux.vnet.ibm.com> wrote:
 This patch allows users of zsmalloc to register the
 allocation and free routines used by zsmalloc to obtain
 more pages for the memory pool.  This allows the user
 more control over zsmalloc pool policy and behavior.

 If the user does not wish to control this, alloc_page() and
 __free_page() are used by default.

 Acked-by: Nitin Gupta <ngu...@vflare.org>
 Signed-off-by: Seth Jennings <sjenn...@linux.vnet.ibm.com>
Acked-by: Minchan Kim <minc...@kernel.org>

-- 
Kind regards,
Minchan Kim
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[PATCH 3/4] staging: zsmalloc: add page alloc/free callbacks

2013-01-25 Thread Seth Jennings
This patch allows users of zsmalloc to register the
allocation and free routines used by zsmalloc to obtain
more pages for the memory pool.  This allows the user
more control over zsmalloc pool policy and behavior.

If the user does not wish to control this, alloc_page() and
__free_page() are used by default.

Acked-by: Nitin Gupta <ngu...@vflare.org>
Signed-off-by: Seth Jennings <sjenn...@linux.vnet.ibm.com>
---
 drivers/staging/zram/zram_drv.c  |2 +-
 drivers/staging/zsmalloc/zsmalloc-main.c |   44 ++
 drivers/staging/zsmalloc/zsmalloc.h  |8 +-
 3 files changed, 41 insertions(+), 13 deletions(-)

diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2086682..32323c6 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -565,7 +565,7 @@ int zram_init_device(struct zram *zram)
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
 
-   zram->mem_pool = zs_create_pool(GFP_KERNEL);
+   zram->mem_pool = zs_create_pool(GFP_KERNEL, NULL);
if (!zram->mem_pool) {
pr_err("Error creating memory pool\n");
ret = -ENOMEM;
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c 
b/drivers/staging/zsmalloc/zsmalloc-main.c
index 711a854..12f66c3 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -205,7 +205,7 @@ struct link_free {
 
 struct zs_pool {
struct size_class size_class[ZS_SIZE_CLASSES];
-   const char *name;
+   struct zs_ops *ops;
 };
 
 /*
@@ -240,6 +240,21 @@ struct mapping_area {
enum zs_mapmode vm_mm; /* mapping mode */
 };
 
+/* default page alloc/free ops */
+struct page *zs_alloc_page(gfp_t flags)
+{
+   return alloc_page(flags);
+}
+
+void zs_free_page(struct page *page)
+{
+   __free_page(page);
+}
+
+struct zs_ops zs_default_ops = {
+   .alloc = zs_alloc_page,
+   .free = zs_free_page
+};
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
@@ -476,7 +491,7 @@ static void reset_page(struct page *page)
reset_page_mapcount(page);
 }
 
-static void free_zspage(struct page *first_page)
+static void free_zspage(struct zs_ops *ops, struct page *first_page)
 {
struct page *nextp, *tmp, *head_extra;
 
@@ -486,7 +501,7 @@ static void free_zspage(struct page *first_page)
head_extra = (struct page *)page_private(first_page);
 
reset_page(first_page);
-   __free_page(first_page);
+   ops->free(first_page);
 
/* zspage with only 1 system page */
if (!head_extra)
@@ -495,10 +510,10 @@ static void free_zspage(struct page *first_page)
list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
list_del(&nextp->lru);
reset_page(nextp);
-   __free_page(nextp);
+   ops->free(nextp);
}
reset_page(head_extra);
-   __free_page(head_extra);
+   ops->free(head_extra);
 }
 
 /* Initialize a newly allocated zspage */
@@ -550,7 +565,8 @@ static void init_zspage(struct page *first_page, struct 
size_class *class)
 /*
  * Allocate a zspage for the given size class
  */
-static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+static struct page *alloc_zspage(struct zs_ops *ops, struct size_class *class,
+   gfp_t flags)
 {
int i, error;
struct page *first_page = NULL, *uninitialized_var(prev_page);
@@ -570,7 +586,7 @@ static struct page *alloc_zspage(struct size_class *class, 
gfp_t flags)
for (i = 0; i < class->pages_per_zspage; i++) {
struct page *page;
 
-   page = alloc_page(flags);
+   page = ops->alloc(flags);
if (!page)
goto cleanup;
 
@@ -602,7 +618,7 @@ static struct page *alloc_zspage(struct size_class *class, 
gfp_t flags)
 
 cleanup:
if (unlikely(error) && first_page) {
-   free_zspage(first_page);
+   free_zspage(ops, first_page);
first_page = NULL;
}
 
@@ -799,6 +815,7 @@ fail:
 /**
  * zs_create_pool - Creates an allocation pool to work from.
  * @flags: allocation flags used to allocate pool metadata
+ * @ops: allocation/free callbacks for expanding the pool
  *
  * This function must be called before anything when using
  * the zsmalloc allocator.
@@ -806,7 +823,7 @@ fail:
  * On success, a pointer to the newly created pool is returned,
  * otherwise NULL.
  */
-struct zs_pool *zs_create_pool(gfp_t flags)
+struct zs_pool *zs_create_pool(gfp_t flags, struct zs_ops *ops)
 {
int i, ovhd_size;
struct zs_pool *pool;
@@ -832,6 +849,11 @@ struct zs_pool *zs_create_pool(gfp_t flags)
 
}
 
+   if (ops)
+   pool->ops = ops;
+   else
+   pool->ops = &zs_default_ops;
+
   

[PATCH 3/4] staging: zsmalloc: add page alloc/free callbacks

2013-01-25 Thread Seth Jennings
This patch allows users of zsmalloc to register the
allocation and free routines used by zsmalloc to obtain
more pages for the memory pool.  This allows the user
more control over zsmalloc pool policy and behavior.

If the user does not wish to control this, alloc_page() and
__free_page() are used by default.

Acked-by: Nitin Gupta <ngu...@vflare.org>
Signed-off-by: Seth Jennings <sjenn...@linux.vnet.ibm.com>
---
 drivers/staging/zram/zram_drv.c  |2 +-
 drivers/staging/zsmalloc/zsmalloc-main.c |   44 ++
 drivers/staging/zsmalloc/zsmalloc.h  |8 +-
 3 files changed, 41 insertions(+), 13 deletions(-)

diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2086682..32323c6 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -565,7 +565,7 @@ int zram_init_device(struct zram *zram)
/* zram devices sort of resembles non-rotational disks */
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
 
-   zram->mem_pool = zs_create_pool(GFP_KERNEL);
+   zram->mem_pool = zs_create_pool(GFP_KERNEL, NULL);
if (!zram->mem_pool) {
pr_err("Error creating memory pool\n");
ret = -ENOMEM;
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c 
b/drivers/staging/zsmalloc/zsmalloc-main.c
index 711a854..12f66c3 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -205,7 +205,7 @@ struct link_free {
 
 struct zs_pool {
struct size_class size_class[ZS_SIZE_CLASSES];
-   const char *name;
+   struct zs_ops *ops;
 };
 
 /*
@@ -240,6 +240,21 @@ struct mapping_area {
enum zs_mapmode vm_mm; /* mapping mode */
 };
 
+/* default page alloc/free ops */
+struct page *zs_alloc_page(gfp_t flags)
+{
+   return alloc_page(flags);
+}
+
+void zs_free_page(struct page *page)
+{
+   __free_page(page);
+}
+
+struct zs_ops zs_default_ops = {
+   .alloc = zs_alloc_page,
+   .free = zs_free_page
+};
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
@@ -476,7 +491,7 @@ static void reset_page(struct page *page)
reset_page_mapcount(page);
 }
 
-static void free_zspage(struct page *first_page)
+static void free_zspage(struct zs_ops *ops, struct page *first_page)
 {
struct page *nextp, *tmp, *head_extra;
 
@@ -486,7 +501,7 @@ static void free_zspage(struct page *first_page)
head_extra = (struct page *)page_private(first_page);
 
reset_page(first_page);
-   __free_page(first_page);
+   ops->free(first_page);
 
/* zspage with only 1 system page */
if (!head_extra)
@@ -495,10 +510,10 @@ static void free_zspage(struct page *first_page)
list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
list_del(&nextp->lru);
reset_page(nextp);
-   __free_page(nextp);
+   ops->free(nextp);
}
reset_page(head_extra);
-   __free_page(head_extra);
+   ops->free(head_extra);
 }
 
 /* Initialize a newly allocated zspage */
@@ -550,7 +565,8 @@ static void init_zspage(struct page *first_page, struct 
size_class *class)
 /*
  * Allocate a zspage for the given size class
  */
-static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+static struct page *alloc_zspage(struct zs_ops *ops, struct size_class *class,
+   gfp_t flags)
 {
int i, error;
struct page *first_page = NULL, *uninitialized_var(prev_page);
@@ -570,7 +586,7 @@ static struct page *alloc_zspage(struct size_class *class, 
gfp_t flags)
for (i = 0; i < class->pages_per_zspage; i++) {
struct page *page;
 
-   page = alloc_page(flags);
+   page = ops->alloc(flags);
if (!page)
goto cleanup;
 
@@ -602,7 +618,7 @@ static struct page *alloc_zspage(struct size_class *class, 
gfp_t flags)
 
 cleanup:
if (unlikely(error) && first_page) {
-   free_zspage(first_page);
+   free_zspage(ops, first_page);
first_page = NULL;
}
 
@@ -799,6 +815,7 @@ fail:
 /**
  * zs_create_pool - Creates an allocation pool to work from.
  * @flags: allocation flags used to allocate pool metadata
+ * @ops: allocation/free callbacks for expanding the pool
  *
  * This function must be called before anything when using
  * the zsmalloc allocator.
@@ -806,7 +823,7 @@ fail:
  * On success, a pointer to the newly created pool is returned,
  * otherwise NULL.
  */
-struct zs_pool *zs_create_pool(gfp_t flags)
+struct zs_pool *zs_create_pool(gfp_t flags, struct zs_ops *ops)
 {
int i, ovhd_size;
struct zs_pool *pool;
@@ -832,6 +849,11 @@ struct zs_pool *zs_create_pool(gfp_t flags)
 
}
 
+   if (ops)
+   pool->ops = ops;
+   else
+