Re: [RFC PATCH 5/5] mm, hugetlb: further simplify hugetlb allocation API

2017-12-14 Thread Mike Kravetz
On 12/04/2017 06:01 AM, Michal Hocko wrote:
> From: Michal Hocko 
> 
> The hugetlb allocator has several layers of allocation functions depending
> on the purpose of the allocation. There are two allocators, depending on
> whether the page can be allocated from the page allocator or whether we
> need the contiguous allocator. This is currently open coded in
> alloc_fresh_huge_page, which is the only path that might allocate giga
> pages, which require the latter allocator. Create a new
> alloc_fresh_huge_page which hides this implementation detail and use it in
> all callers which hardcoded the buddy allocator path
> (__hugetlb_alloc_buddy_huge_page). This shouldn't introduce any functional
> change because both the migration and surplus allocators exclude giga
> pages explicitly.
> 
> While we are at it, let's do some renaming. The current scheme is not
> consistent and is overly painful to read and understand. Get rid of the
> prefix underscores from most functions. There is no real reason to make
> the names longer.
> * alloc_fresh_huge_page is the new layer to abstract underlying
>   allocator
> * __hugetlb_alloc_buddy_huge_page becomes shorter and neater
>   alloc_buddy_huge_page.
> * Former alloc_fresh_huge_page becomes alloc_pool_huge_page because we put
>   the new page directly into the pool
> * alloc_surplus_huge_page can drop the open coded prep_new_huge_page code
>   as it uses alloc_fresh_huge_page now
> * others lose their excessive prefix underscores to make names shorter
> 

This patch will need to be modified to take into account the incremental
diff to patch 4 in this series.  Other than that, the changes look good.

Reviewed-by: Mike Kravetz 
-- 
Mike Kravetz

> Signed-off-by: Michal Hocko 
> ---
>  mm/hugetlb.c | 74 +---
>  1 file changed, 41 insertions(+), 33 deletions(-)
> 
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 0c7dc269b6c0..73c74851a304 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1376,7 +1376,7 @@ pgoff_t __basepage_index(struct page *page)
>   return (index << compound_order(page_head)) + compound_idx;
>  }
>  
> -static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
> +static struct page *alloc_buddy_huge_page(struct hstate *h,
>   gfp_t gfp_mask, int nid, nodemask_t *nmask)
>  {
>   int order = huge_page_order(h);
> @@ -1394,34 +1394,49 @@ static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
>   return page;
>  }
>  
> +/*
> + * Common helper to allocate a fresh hugetlb page. All specific allocators
> + * should use this function to get new hugetlb pages
> + */
> +static struct page *alloc_fresh_huge_page(struct hstate *h,
> + gfp_t gfp_mask, int nid, nodemask_t *nmask)
> +{
> + struct page *page;
> +
> + if (hstate_is_gigantic(h))
> + page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
> + else
> + page = alloc_buddy_huge_page(h, gfp_mask,
> + nid, nmask);
> + if (!page)
> + return NULL;
> +
> + if (hstate_is_gigantic(h))
> + prep_compound_gigantic_page(page, huge_page_order(h));
> + prep_new_huge_page(h, page, page_to_nid(page));
> +
> + return page;
> +}
> +
>  /*
>   * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
>   * manner.
>   */
> -static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
> +static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
>  {
>   struct page *page;
>   int nr_nodes, node;
>   gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
>  
>   for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
> - if (hstate_is_gigantic(h))
> - page = alloc_gigantic_page(h, gfp_mask,
> - node, nodes_allowed);
> - else
> - page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
> - node, nodes_allowed);
> + page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
>   if (page)
>   break;
> -
>   }
>  
>   if (!page)
>   return 0;
>  
> - if (hstate_is_gigantic(h))
> - prep_compound_gigantic_page(page, huge_page_order(h));
> - prep_new_huge_page(h, page, page_to_nid(page));
>   put_page(page); /* free it into the hugepage allocator */
>  
>   return 1;
> @@ -1535,7 +1550,7 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
>  /*
>   * Allocates a fresh surplus page from the page allocator.
>   */
> -static struct page *__alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
> +static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
>   int nid, nodemask_t *nmask)
>  {
>   struct page *page = NULL;
> @@ -1548,7 +1563,7 @@ static struct page *__alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,

[RFC PATCH 5/5] mm, hugetlb: further simplify hugetlb allocation API

2017-12-04 Thread Michal Hocko
From: Michal Hocko 

The hugetlb allocator has several layers of allocation functions depending
on the purpose of the allocation. There are two allocators, depending on
whether the page can be allocated from the page allocator or whether we
need the contiguous allocator. This is currently open coded in
alloc_fresh_huge_page, which is the only path that might allocate giga
pages, which require the latter allocator. Create a new
alloc_fresh_huge_page which hides this implementation detail and use it in
all callers which hardcoded the buddy allocator path
(__hugetlb_alloc_buddy_huge_page). This shouldn't introduce any functional
change because both the migration and surplus allocators exclude giga
pages explicitly.

While we are at it, let's do some renaming. The current scheme is not
consistent and is overly painful to read and understand. Get rid of the
prefix underscores from most functions. There is no real reason to make
the names longer.
* alloc_fresh_huge_page is the new layer to abstract underlying
  allocator
* __hugetlb_alloc_buddy_huge_page becomes shorter and neater
  alloc_buddy_huge_page.
* Former alloc_fresh_huge_page becomes alloc_pool_huge_page because we put
  the new page directly into the pool
* alloc_surplus_huge_page can drop the open coded prep_new_huge_page code
  as it uses alloc_fresh_huge_page now
* others lose their excessive prefix underscores to make names shorter

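For illustration, the new common helper boils down to the following (a
simplified sketch of the corresponding hunk in the diff below; the
allocator is picked based on the hstate and the page is prepped before it
is returned):

	static struct page *alloc_fresh_huge_page(struct hstate *h,
			gfp_t gfp_mask, int nid, nodemask_t *nmask)
	{
		struct page *page;

		/* gigantic hstates need the contiguous allocator */
		if (hstate_is_gigantic(h))
			page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
		else
			page = alloc_buddy_huge_page(h, gfp_mask, nid, nmask);
		if (!page)
			return NULL;

		/* gigantic pages get their compound metadata set up first */
		if (hstate_is_gigantic(h))
			prep_compound_gigantic_page(page, huge_page_order(h));
		prep_new_huge_page(h, page, page_to_nid(page));

		return page;
	}
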
Signed-off-by: Michal Hocko 
---
 mm/hugetlb.c | 74 +---
 1 file changed, 41 insertions(+), 33 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0c7dc269b6c0..73c74851a304 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1376,7 +1376,7 @@ pgoff_t __basepage_index(struct page *page)
return (index << compound_order(page_head)) + compound_idx;
 }
 
-static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
+static struct page *alloc_buddy_huge_page(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask)
 {
int order = huge_page_order(h);
@@ -1394,34 +1394,49 @@ static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
return page;
 }
 
+/*
+ * Common helper to allocate a fresh hugetlb page. All specific allocators
+ * should use this function to get new hugetlb pages
+ */
+static struct page *alloc_fresh_huge_page(struct hstate *h,
+   gfp_t gfp_mask, int nid, nodemask_t *nmask)
+{
+   struct page *page;
+
+   if (hstate_is_gigantic(h))
+   page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
+   else
+   page = alloc_buddy_huge_page(h, gfp_mask,
+   nid, nmask);
+   if (!page)
+   return NULL;
+
+   if (hstate_is_gigantic(h))
+   prep_compound_gigantic_page(page, huge_page_order(h));
+   prep_new_huge_page(h, page, page_to_nid(page));
+
+   return page;
+}
+
 /*
  * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
  * manner.
  */
-static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
+static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 {
struct page *page;
int nr_nodes, node;
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
 
for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
-   if (hstate_is_gigantic(h))
-   page = alloc_gigantic_page(h, gfp_mask,
-   node, nodes_allowed);
-   else
-   page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask,
-   node, nodes_allowed);
+   page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
if (page)
break;
-
}
 
if (!page)
return 0;
 
-   if (hstate_is_gigantic(h))
-   prep_compound_gigantic_page(page, huge_page_order(h));
-   prep_new_huge_page(h, page, page_to_nid(page));
put_page(page); /* free it into the hugepage allocator */
 
return 1;
@@ -1535,7 +1550,7 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 /*
  * Allocates a fresh surplus page from the page allocator.
  */
-static struct page *__alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
+static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nmask)
 {
struct page *page = NULL;
@@ -1548,7 +1563,7 @@ static struct page *__alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
goto out_unlock;
spin_unlock(&hugetlb_lock);
 
-   page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask, nid, nmask);
+   page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
if (!page)
goto out_unlock;
 
@@ -1567,12 +1582,6 @@ static struct page *__alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,