Re: [RFC] mm: cma: move init_cma_reserved_pageblock to cma.c

2014-06-26 Thread Joonsoo Kim
On Wed, Jun 25, 2014 at 12:48:02AM +0200, Michal Nazarewicz wrote:
> With the [f495d26: “generalize CMA reserved area management
> functionality”] patch, CMA has its place under the mm directory, so
> there is no need to shoehorn highly CMA-specific functions inside of
> page_alloc.c.
> 
> As such move init_cma_reserved_pageblock from mm/page_alloc.c to
> mm/cma.c, rename it to cma_init_reserved_pageblock and refactor
> a little.
> 
> Most importantly, if a PFN for which !pfn_valid(pfn) holds is
> encountered, just return -EINVAL instead of warning and trying to
> continue the initialisation of the area.  It's not clear, to me at
> least, what good comes of continuing the work on a PFN that is known
> to be invalid.
> 
> Signed-off-by: Michal Nazarewicz 

Acked-by: Joonsoo Kim 

One question below.

> ---
>  include/linux/gfp.h |  3 --
>  mm/cma.c| 85 +
>  mm/page_alloc.c | 31 ---
>  3 files changed, 66 insertions(+), 53 deletions(-)
> 
> diff --git a/include/linux/gfp.h b/include/linux/gfp.h
> index 5e7219d..107793e9 100644
> --- a/include/linux/gfp.h
> +++ b/include/linux/gfp.h
> @@ -415,9 +415,6 @@ extern int alloc_contig_range(unsigned long start, unsigned long end,
> unsigned migratetype);
>  extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
>  
> -/* CMA stuff */
> -extern void init_cma_reserved_pageblock(struct page *page);
> -
>  #endif
>  
>  #endif /* __LINUX_GFP_H */
> diff --git a/mm/cma.c b/mm/cma.c
> index c17751c..843b2b6 100644
> --- a/mm/cma.c
> +++ b/mm/cma.c
> @@ -28,11 +28,14 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  #include 
>  #include 
>  
> +#include "internal.h"
> +
>  struct cma {
>   unsigned long   base_pfn;
>   unsigned long   count;
> @@ -83,37 +86,81 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
>   mutex_unlock(&cma->lock);
>  }
>  
> +/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
> +static int __init cma_init_reserved_pageblock(struct zone *zone,
> +   unsigned long pageblock_pfn)
> +{
> + unsigned long pfn, nr_pages, i;
> + struct page *page, *p;
> + unsigned order;
> +
> + pfn = pageblock_pfn;
> + if (!pfn_valid(pfn))
> + goto invalid_pfn;
> + page = pfn_to_page(pfn);
> +
> + p = page;
> + i = pageblock_nr_pages;
> + do {
> + if (!pfn_valid(pfn))
> + goto invalid_pfn;
> +
> + /*
> +  * alloc_contig_range requires the pfn range specified to be
> +  * in the same zone. Make this simple by forcing the entire
> +  * CMA resv range to be in the same zone.
> +  */
> + if (page_zone(p) != zone) {
> + pr_err("pfn %lu belongs to %s, expecting %s\n",
> +pfn, page_zone(p)->name, zone->name);
> + return -EINVAL;
> + }
> +
> + __ClearPageReserved(p);
> + set_page_count(p, 0);
> + } while (++p, ++pfn, --i);

So, when we hit the failure condition, __ClearPageReserved() and
set_page_count() have already been executed for some pages. Is that
not a problem?

Thanks.
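
For reference, a minimal sketch (hypothetical, not code from the thread)
of one way to avoid the partial-initialisation issue raised above:
validate every PFN in the pageblock first, so the error paths run before
any struct page is modified.  It reuses only names already present in
the patch.

/*
 * Hypothetical sketch: do all validation in a first pass so an error
 * exit leaves no page with PG_reserved cleared or its refcount reset.
 */
static int __init cma_init_reserved_pageblock(struct zone *zone,
					      unsigned long pageblock_pfn)
{
	unsigned long pfn = pageblock_pfn;
	unsigned long i;
	struct page *p;

	/* Pass 1: checks only, no modification of page state. */
	for (i = 0; i < pageblock_nr_pages; i++, pfn++) {
		if (!pfn_valid(pfn)) {
			pr_err("invalid pfn: %lu\n", pfn);
			return -EINVAL;
		}
		if (page_zone(pfn_to_page(pfn)) != zone) {
			pr_err("pfn %lu belongs to %s, expecting %s\n",
			       pfn, page_zone(pfn_to_page(pfn))->name,
			       zone->name);
			return -EINVAL;
		}
	}

	/* Pass 2: the whole pageblock is known valid, so modify it. */
	p = pfn_to_page(pageblock_pfn);
	for (i = 0; i < pageblock_nr_pages; i++, p++) {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}

	/* Freeing to the buddy allocator then proceeds as in the patch. */
	return 0;
}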


[RFC] mm: cma: move init_cma_reserved_pageblock to cma.c

2014-06-24 Thread Michal Nazarewicz
With the [f495d26: “generalize CMA reserved area management
functionality”] patch, CMA has its place under the mm directory, so
there is no need to shoehorn highly CMA-specific functions inside of
page_alloc.c.

As such move init_cma_reserved_pageblock from mm/page_alloc.c to
mm/cma.c, rename it to cma_init_reserved_pageblock and refactor
a little.

Most importantly, if a PFN for which !pfn_valid(pfn) holds is
encountered, just return -EINVAL instead of warning and trying to
continue the initialisation of the area.  It's not clear, to me at
least, what good comes of continuing the work on a PFN that is known
to be invalid.

Signed-off-by: Michal Nazarewicz 
---
 include/linux/gfp.h |  3 --
 mm/cma.c| 85 +
 mm/page_alloc.c | 31 ---
 3 files changed, 66 insertions(+), 53 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 5e7219d..107793e9 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -415,9 +415,6 @@ extern int alloc_contig_range(unsigned long start, unsigned long end,
  unsigned migratetype);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
 
-/* CMA stuff */
-extern void init_cma_reserved_pageblock(struct page *page);
-
 #endif
 
 #endif /* __LINUX_GFP_H */
diff --git a/mm/cma.c b/mm/cma.c
index c17751c..843b2b6 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -28,11 +28,14 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 
+#include "internal.h"
+
 struct cma {
unsigned long   base_pfn;
unsigned long   count;
@@ -83,37 +86,81 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, int count)
mutex_unlock(&cma->lock);
 }
 
+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
+static int __init cma_init_reserved_pageblock(struct zone *zone,
+ unsigned long pageblock_pfn)
+{
+   unsigned long pfn, nr_pages, i;
+   struct page *page, *p;
+   unsigned order;
+
+   pfn = pageblock_pfn;
+   if (!pfn_valid(pfn))
+   goto invalid_pfn;
+   page = pfn_to_page(pfn);
+
+   p = page;
+   i = pageblock_nr_pages;
+   do {
+   if (!pfn_valid(pfn))
+   goto invalid_pfn;
+
+   /*
+* alloc_contig_range requires the pfn range specified to be
+* in the same zone. Make this simple by forcing the entire
+* CMA resv range to be in the same zone.
+*/
+   if (page_zone(p) != zone) {
+   pr_err("pfn %lu belongs to %s, expecting %s\n",
+  pfn, page_zone(p)->name, zone->name);
+   return -EINVAL;
+   }
+
+   __ClearPageReserved(p);
+   set_page_count(p, 0);
+   } while (++p, ++pfn, --i);
+
+   /* Return all the pages to buddy allocator as MIGRATE_CMA. */
+   set_pageblock_migratetype(page, MIGRATE_CMA);
+
+   order = min_t(unsigned, pageblock_order, MAX_ORDER - 1);
+   nr_pages = min_t(unsigned long, pageblock_nr_pages, MAX_ORDER_NR_PAGES);
+
+   p = page;
+   i = pageblock_nr_pages;
+   do {
+   set_page_refcounted(p);
+   __free_pages(p, order);
+   p += nr_pages;
+   } while (i -= nr_pages);
+
+   adjust_managed_page_count(page, pageblock_nr_pages);
+   return 0;
+
+invalid_pfn:
+   pr_err("invalid pfn: %lu\n", pfn);
+   return -EINVAL;
+}
+
 static int __init cma_activate_area(struct cma *cma)
 {
int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
-   unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
unsigned i = cma->count >> pageblock_order;
+   unsigned long pfn = cma->base_pfn;
struct zone *zone;
 
-   cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+   if (WARN_ON(!pfn_valid(pfn)))
+   return -EINVAL;
 
+   cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!cma->bitmap)
return -ENOMEM;
 
-   WARN_ON_ONCE(!pfn_valid(pfn));
zone = page_zone(pfn_to_page(pfn));
-
do {
-   unsigned j;
-
-   base_pfn = pfn;
-   for (j = pageblock_nr_pages; j; --j, pfn++) {
-   WARN_ON_ONCE(!pfn_valid(pfn));
-   /*
-* alloc_contig_range requires the pfn range
-* specified to be in the same zone. Make this
-* simple by forcing the entire CMA resv range
-* to be in the same zone.
-*/
-   if (page_zone(pfn_to_page(pfn)) != zone)
-   goto err;
-   }
-   init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+   if (cma_init_reserved_