Re: [RFC PATCH 1/3] CMA: generalize CMA reserved area management functionality

2014-06-09 Thread Joonsoo Kim
On Tue, Jun 03, 2014 at 08:56:00AM +0200, Michal Nazarewicz wrote:
 On Tue, Jun 03 2014, Joonsoo Kim wrote:
  Currently, there are two users on CMA functionality, one is the DMA
  subsystem and the other is the kvm on powerpc. They have their own code
  to manage CMA reserved area even if they looks really similar.
  From my guess, it is caused by some needs on bitmap management. Kvm side
  wants to maintain bitmap not for 1 page, but for more size. Eventually it
  use bitmap where one bit represents 64 pages.
 
  When I implement CMA related patches, I should change those two places
  to apply my change and it seem to be painful to me. I want to change
  this situation and reduce future code management overhead through
  this patch.
 
  This change could also help developer who want to use CMA in their
  new feature development, since they can use CMA easily without
  copying & pasting this reserved area management code.
 
  Signed-off-by: Joonsoo Kim iamjoonsoo@lge.com
 
 Some small comments below, but in general
 
 Acked-by: Michal Nazarewicz min...@mina86.com

Hello, Michal.

Thanks!

 
 
  diff --git a/include/linux/cma.h b/include/linux/cma.h
  new file mode 100644
  index 000..60ba06f
  --- /dev/null
  +++ b/include/linux/cma.h
  @@ -0,0 +1,28 @@
  +/*
  + * Contiguous Memory Allocator
  + *
  + * Copyright LG Electronics Inc., 2014
  + * Written by:
  + * Joonsoo Kim iamjoonsoo@lge.com
  + *
  + * This program is free software; you can redistribute it and/or
  + * modify it under the terms of the GNU General Public License as
  + * published by the Free Software Foundation; either version 2 of the
  + * License or (at your optional) any later version of the license.
  + *
 
 Superfluous empty comment line.
 
 Also, I'm not certain whether this copyright notice is appropriate here,
 but that's another story.

Yeah, I will remove copyright notice in .h file.

 
  + */
  +
  +#ifndef __CMA_H__
  +#define __CMA_H__
  +
  +struct cma;
  +
  +extern struct page *cma_alloc(struct cma *cma, unsigned long count,
  +   unsigned long align);
  +extern bool cma_release(struct cma *cma, struct page *pages,
  +   unsigned long count);
  +extern int __init cma_declare_contiguous(phys_addr_t size, phys_addr_t 
  base,
  +   phys_addr_t limit, phys_addr_t alignment,
  +   unsigned long bitmap_shift, bool fixed,
  +   struct cma **res_cma);
  +#endif
 
  diff --git a/mm/cma.c b/mm/cma.c
  new file mode 100644
  index 000..0dae88d
  --- /dev/null
  +++ b/mm/cma.c
  @@ -0,0 +1,329 @@
 
  +static int __init cma_activate_area(struct cma *cma)
  +{
  +   int max_bitmapno = cma_bitmap_max_no(cma);
  +   int bitmap_size = BITS_TO_LONGS(max_bitmapno) * sizeof(long);
  +   unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
  +   unsigned i = cma->count >> pageblock_order;
  +   struct zone *zone;
  +
  +   pr_debug("%s()\n", __func__);
  +   if (!cma->count)
  +   return 0;
 
 Alternatively:
 
 + if (!i)
 + return 0;

I prefer cma->count over i, since it represents what it does itself.

  +
  +   cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
  +   if (!cma->bitmap)
  +   return -ENOMEM;
  +
  +   WARN_ON_ONCE(!pfn_valid(pfn));
  +   zone = page_zone(pfn_to_page(pfn));
  +
  +   do {
  +   unsigned j;
  +
  +   base_pfn = pfn;
  +   for (j = pageblock_nr_pages; j; --j, pfn++) {
  +   WARN_ON_ONCE(!pfn_valid(pfn));
  +   /*
  +* alloc_contig_range requires the pfn range
  +* specified to be in the same zone. Make this
  +* simple by forcing the entire CMA resv range
  +* to be in the same zone.
  +*/
  +   if (page_zone(pfn_to_page(pfn)) != zone)
  +   goto err;
  +   }
  +   init_cma_reserved_pageblock(pfn_to_page(base_pfn));
  +   } while (--i);
  +
  +   mutex_init(&cma->lock);
  +   return 0;
  +
  +err:
  +   kfree(cma->bitmap);
  +   return -EINVAL;
  +}
 
  +static int __init cma_init_reserved_areas(void)
  +{
  +   int i;
  +
  +   for (i = 0; i < cma_area_count; i++) {
  +   int ret = cma_activate_area(cma_areas[i]);
  +
  +   if (ret)
  +   return ret;
  +   }
  +
  +   return 0;
  +}
 
 Or even:
 
 static int __init cma_init_reserved_areas(void)
 {
   int i, ret = 0;
    for (i = 0; !ret && i < cma_area_count; ++i)
   ret = cma_activate_area(cma_areas[i]);
   return ret;
 }

I think that originial implementation is better, since it seems
more readable to me.

  +int __init cma_declare_contiguous(phys_addr_t size, phys_addr_t base,
  +   phys_addr_t limit, phys_addr_t alignment,
  +   unsigned long bitmap_shift, bool fixed,
  +   

Re: [RFC PATCH 1/3] CMA: generalize CMA reserved area management functionality

2014-06-09 Thread Joonsoo Kim
On Thu, Jun 05, 2014 at 11:09:05PM +0530, Aneesh Kumar K.V wrote:
 Joonsoo Kim iamjoonsoo@lge.com writes:
 
  Currently, there are two users on CMA functionality, one is the DMA
  subsystem and the other is the kvm on powerpc. They have their own code
  to manage CMA reserved area even if they looks really similar.
  From my guess, it is caused by some needs on bitmap management. Kvm side
  wants to maintain bitmap not for 1 page, but for more size. Eventually it
  use bitmap where one bit represents 64 pages.
 
  When I implement CMA related patches, I should change those two places
  to apply my change and it seem to be painful to me. I want to change
  this situation and reduce future code management overhead through
  this patch.
 
  This change could also help developer who want to use CMA in their
  new feature development, since they can use CMA easily without
  copying & pasting this reserved area management code.
 
  Signed-off-by: Joonsoo Kim iamjoonsoo@lge.com
 
 
 The way patches are split makes it difficult to review. Will it be
 possible to make changes against one implementation and them move that
 to generic code. That helps in finding out what exactly changed.
 

Hello,

You are right! I will respin this patchset as the form you
recommended.

 
  diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
  index 00e13ce..b3fe1cc 100644
  --- a/drivers/base/Kconfig
  +++ b/drivers/base/Kconfig
  @@ -283,7 +283,7 @@ config CMA_ALIGNMENT
 
If unsure, leave the default value 8.
 
  -config CMA_AREAS
  +config DMA_CMA_AREAS
   int "Maximum count of the CMA device-private areas"
  default 7
  help
 
 for ex: The above can be a seperate patch along with further DMA related
 cleanups . 

Okay.

 
  diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
  index 83969f8..48cdac8 100644
  --- a/drivers/base/dma-contiguous.c
  +++ b/drivers/base/dma-contiguous.c
  @@ -186,7 +186,7 @@ static int __init cma_activate_area(struct cma *cma)
  return 0;
   }
 
  -static struct cma cma_areas[MAX_CMA_AREAS];
  +static struct cma cma_areas[MAX_DMA_CMA_AREAS];
   static unsigned cma_area_count;
 
   static int __init cma_init_reserved_areas(void)
  diff --git a/include/linux/cma.h b/include/linux/cma.h
  new file mode 100644
  index 000..60ba06f
  --- /dev/null
  +++ b/include/linux/cma.h
  @@ -0,0 +1,28 @@
  +/*
  + * Contiguous Memory Allocator
  + *
  + * Copyright LG Electronics Inc., 2014
  + * Written by:
  + * Joonsoo Kim iamjoonsoo@lge.com
  + *
  + * This program is free software; you can redistribute it and/or
  + * modify it under the terms of the GNU General Public License as
  + * published by the Free Software Foundation; either version 2 of the
  + * License or (at your optional) any later version of the license.
  + *
  + */
  +
  +#ifndef __CMA_H__
  +#define __CMA_H__
  +
  +struct cma;
  +
  +extern struct page *cma_alloc(struct cma *cma, unsigned long count,
  +   unsigned long align);
  +extern bool cma_release(struct cma *cma, struct page *pages,
  +   unsigned long count);
  +extern int __init cma_declare_contiguous(phys_addr_t size, phys_addr_t 
  base,
  +   phys_addr_t limit, phys_addr_t alignment,
  +   unsigned long bitmap_shift, bool fixed,
  +   struct cma **res_cma);
  +#endif
  diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
  index 772eab5..dfb1dc9 100644
  --- a/include/linux/dma-contiguous.h
  +++ b/include/linux/dma-contiguous.h
  @@ -63,7 +63,7 @@ struct device;
* There is always at least global CMA area and a few optional device
* private areas configured in kernel .config.
*/
  -#define MAX_CMA_AREAS  (1 + CONFIG_CMA_AREAS)
  +#define MAX_DMA_CMA_AREAS  (1 + CONFIG_DMA_CMA_AREAS)
 
   extern struct cma *dma_contiguous_default_area;
 
  @@ -123,7 +123,7 @@ bool dma_release_from_contiguous(struct device *dev, 
  struct page *pages,
 
   #else
 
  -#define MAX_CMA_AREAS  (0)
  +#define MAX_DMA_CMA_AREAS  (0)
 
   static inline struct cma *dev_get_cma_area(struct device *dev)
   {
  diff --git a/mm/Kconfig b/mm/Kconfig
  index 7511b4a..0877ddc 100644
  --- a/mm/Kconfig
  +++ b/mm/Kconfig
  @@ -515,6 +515,17 @@ config CMA_DEBUG
processing calls such as dma_alloc_from_contiguous().
This option does not affect warning and error messages.
 
  +config CMA_AREAS
   +   int "Maximum count of the CMA areas"
  +   depends on CMA
  +   default 7
  +   help
  + CMA allows to create CMA areas for particular purpose, mainly,
  + used as device private area. This parameter sets the maximum
  + number of CMA area in the system.
  +
  + If unsure, leave the default value 7.
  +
 
 If we have 7 declare continugous request, a kvm cma allocation request will 
 fail ?

Yes. If you need more, you can increase it. :)

   config ZBUD
  tristate
   

Re: [RFC PATCH 1/3] CMA: generalize CMA reserved area management functionality

2014-06-05 Thread Aneesh Kumar K.V
Joonsoo Kim iamjoonsoo@lge.com writes:

 Currently, there are two users on CMA functionality, one is the DMA
 subsystem and the other is the kvm on powerpc. They have their own code
 to manage CMA reserved area even if they looks really similar.
 From my guess, it is caused by some needs on bitmap management. Kvm side
 wants to maintain bitmap not for 1 page, but for more size. Eventually it
 use bitmap where one bit represents 64 pages.

 When I implement CMA related patches, I should change those two places
 to apply my change and it seem to be painful to me. I want to change
 this situation and reduce future code management overhead through
 this patch.

 This change could also help developer who want to use CMA in their
 new feature development, since they can use CMA easily without
 copying & pasting this reserved area management code.

 Signed-off-by: Joonsoo Kim iamjoonsoo@lge.com


The way patches are split makes it difficult to review. Will it be
possible to make changes against one implementation and them move that
to generic code. That helps in finding out what exactly changed.


 diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
 index 00e13ce..b3fe1cc 100644
 --- a/drivers/base/Kconfig
 +++ b/drivers/base/Kconfig
 @@ -283,7 +283,7 @@ config CMA_ALIGNMENT

 If unsure, leave the default value 8.

 -config CMA_AREAS
 +config DMA_CMA_AREAS
   int Maximum count of the CMA device-private areas
   default 7
   help

for ex: The above can be a seperate patch along with further DMA related
cleanups . 


 diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
 index 83969f8..48cdac8 100644
 --- a/drivers/base/dma-contiguous.c
 +++ b/drivers/base/dma-contiguous.c
 @@ -186,7 +186,7 @@ static int __init cma_activate_area(struct cma *cma)
   return 0;
  }

 -static struct cma cma_areas[MAX_CMA_AREAS];
 +static struct cma cma_areas[MAX_DMA_CMA_AREAS];
  static unsigned cma_area_count;

  static int __init cma_init_reserved_areas(void)
 diff --git a/include/linux/cma.h b/include/linux/cma.h
 new file mode 100644
 index 000..60ba06f
 --- /dev/null
 +++ b/include/linux/cma.h
 @@ -0,0 +1,28 @@
 +/*
 + * Contiguous Memory Allocator
 + *
 + * Copyright LG Electronics Inc., 2014
 + * Written by:
 + *   Joonsoo Kim iamjoonsoo@lge.com
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License as
 + * published by the Free Software Foundation; either version 2 of the
 + * License or (at your optional) any later version of the license.
 + *
 + */
 +
 +#ifndef __CMA_H__
 +#define __CMA_H__
 +
 +struct cma;
 +
 +extern struct page *cma_alloc(struct cma *cma, unsigned long count,
 + unsigned long align);
 +extern bool cma_release(struct cma *cma, struct page *pages,
 + unsigned long count);
 +extern int __init cma_declare_contiguous(phys_addr_t size, phys_addr_t base,
 + phys_addr_t limit, phys_addr_t alignment,
 + unsigned long bitmap_shift, bool fixed,
 + struct cma **res_cma);
 +#endif
 diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
 index 772eab5..dfb1dc9 100644
 --- a/include/linux/dma-contiguous.h
 +++ b/include/linux/dma-contiguous.h
 @@ -63,7 +63,7 @@ struct device;
   * There is always at least global CMA area and a few optional device
   * private areas configured in kernel .config.
   */
 -#define MAX_CMA_AREAS(1 + CONFIG_CMA_AREAS)
 +#define MAX_DMA_CMA_AREAS  (1 + CONFIG_DMA_CMA_AREAS)

  extern struct cma *dma_contiguous_default_area;

 @@ -123,7 +123,7 @@ bool dma_release_from_contiguous(struct device *dev, 
 struct page *pages,

  #else

 -#define MAX_CMA_AREAS(0)
 +#define MAX_DMA_CMA_AREAS(0)

  static inline struct cma *dev_get_cma_area(struct device *dev)
  {
 diff --git a/mm/Kconfig b/mm/Kconfig
 index 7511b4a..0877ddc 100644
 --- a/mm/Kconfig
 +++ b/mm/Kconfig
 @@ -515,6 +515,17 @@ config CMA_DEBUG
 processing calls such as dma_alloc_from_contiguous().
 This option does not affect warning and error messages.

 +config CMA_AREAS
 + int Maximum count of the CMA areas
 + depends on CMA
 + default 7
 + help
 +   CMA allows to create CMA areas for particular purpose, mainly,
 +   used as device private area. This parameter sets the maximum
 +   number of CMA area in the system.
 +
 +   If unsure, leave the default value 7.
 +

If we have 7 declare continugous request, a kvm cma allocation request will 
fail ?

  config ZBUD
   tristate
   default n
 diff --git a/mm/Makefile b/mm/Makefile
 index 1eaa70b..bc0422b 100644
 --- a/mm/Makefile
 +++ b/mm/Makefile
 @@ -62,3 +62,4 @@ obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
  obj-$(CONFIG_ZBUD)   += zbud.o
  obj-$(CONFIG_ZSMALLOC)   += zsmalloc.o
  

Re: [RFC PATCH 1/3] CMA: generalize CMA reserved area management functionality

2014-06-03 Thread Michal Nazarewicz
On Tue, Jun 03 2014, Joonsoo Kim wrote:
 Currently, there are two users on CMA functionality, one is the DMA
 subsystem and the other is the kvm on powerpc. They have their own code
 to manage CMA reserved area even if they looks really similar.
 From my guess, it is caused by some needs on bitmap management. Kvm side
 wants to maintain bitmap not for 1 page, but for more size. Eventually it
 use bitmap where one bit represents 64 pages.

 When I implement CMA related patches, I should change those two places
 to apply my change and it seem to be painful to me. I want to change
 this situation and reduce future code management overhead through
 this patch.

 This change could also help developer who want to use CMA in their
 new feature development, since they can use CMA easily without
 copying & pasting this reserved area management code.

 Signed-off-by: Joonsoo Kim iamjoonsoo@lge.com

Some small comments below, but in general

Acked-by: Michal Nazarewicz min...@mina86.com


 diff --git a/include/linux/cma.h b/include/linux/cma.h
 new file mode 100644
 index 000..60ba06f
 --- /dev/null
 +++ b/include/linux/cma.h
 @@ -0,0 +1,28 @@
 +/*
 + * Contiguous Memory Allocator
 + *
 + * Copyright LG Electronics Inc., 2014
 + * Written by:
 + *   Joonsoo Kim iamjoonsoo@lge.com
 + *
 + * This program is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU General Public License as
 + * published by the Free Software Foundation; either version 2 of the
 + * License or (at your optional) any later version of the license.
 + *

Superfluous empty comment line.

Also, I'm not certain whether this copyright notice is appropriate here,
but that's another story.

 + */
 +
 +#ifndef __CMA_H__
 +#define __CMA_H__
 +
 +struct cma;
 +
 +extern struct page *cma_alloc(struct cma *cma, unsigned long count,
 + unsigned long align);
 +extern bool cma_release(struct cma *cma, struct page *pages,
 + unsigned long count);
 +extern int __init cma_declare_contiguous(phys_addr_t size, phys_addr_t base,
 + phys_addr_t limit, phys_addr_t alignment,
 + unsigned long bitmap_shift, bool fixed,
 + struct cma **res_cma);
 +#endif

 diff --git a/mm/cma.c b/mm/cma.c
 new file mode 100644
 index 000..0dae88d
 --- /dev/null
 +++ b/mm/cma.c
 @@ -0,0 +1,329 @@

 +static int __init cma_activate_area(struct cma *cma)
 +{
 + int max_bitmapno = cma_bitmap_max_no(cma);
 + int bitmap_size = BITS_TO_LONGS(max_bitmapno) * sizeof(long);
 + unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
 + unsigned i = cma->count >> pageblock_order;
 + struct zone *zone;
 +
 + pr_debug("%s()\n", __func__);
 + if (!cma->count)
 + return 0;

Alternatively:

+   if (!i)
+   return 0;

 +
 + cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 + if (!cma->bitmap)
 + return -ENOMEM;
 +
 + WARN_ON_ONCE(!pfn_valid(pfn));
 + zone = page_zone(pfn_to_page(pfn));
 +
 + do {
 + unsigned j;
 +
 + base_pfn = pfn;
 + for (j = pageblock_nr_pages; j; --j, pfn++) {
 + WARN_ON_ONCE(!pfn_valid(pfn));
 + /*
 +  * alloc_contig_range requires the pfn range
 +  * specified to be in the same zone. Make this
 +  * simple by forcing the entire CMA resv range
 +  * to be in the same zone.
 +  */
 + if (page_zone(pfn_to_page(pfn)) != zone)
 + goto err;
 + }
 + init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 + } while (--i);
 +
 + mutex_init(&cma->lock);
 + return 0;
 +
 +err:
 + kfree(cma->bitmap);
 + return -EINVAL;
 +}

 +static int __init cma_init_reserved_areas(void)
 +{
 + int i;
 +
 + for (i = 0; i < cma_area_count; i++) {
 + int ret = cma_activate_area(cma_areas[i]);
 +
 + if (ret)
 + return ret;
 + }
 +
 + return 0;
 +}

Or even:

static int __init cma_init_reserved_areas(void)
{
int i, ret = 0;
 for (i = 0; !ret && i < cma_area_count; ++i)
ret = cma_activate_area(cma_areas[i]);
return ret;
}

 +int __init cma_declare_contiguous(phys_addr_t size, phys_addr_t base,
 + phys_addr_t limit, phys_addr_t alignment,
 + unsigned long bitmap_shift, bool fixed,
 + struct cma **res_cma)
 +{
 + struct cma *cma = cma_areas[cma_area_count];

Perhaps it would make sense to move this initialisation to the far end
of this function?

 + int ret = 0;
 +
 + pr_debug("%s(size %lx, base %08lx, limit %08lx, alignment %08lx)\n",
 + __func__, (unsigned long)size, (unsigned long)base,