> 
> "mm, memcg: sync allocation and memcg charge gfp flags for THP" in -mm
> introduces a formal to pass the gfp mask for khugepaged's hugepage
> allocation.  This is just too ugly to live.
> 
> alloc_hugepage_gfpmask() cannot differ between NUMA and UMA configs by
> anything in GFP_RECLAIM_MASK, which is the only thing that matters for
> memcg reclaim, so just determine the gfp flags once in
> collapse_huge_page() and avoid the complexity.
> 
> Signed-off-by: David Rientjes <rient...@google.com>
> ---
Acked-by: Hillf Danton <hillf...@alibaba-inc.com>
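
For anyone following the reasoning in the changelog: khugepaged builds
its gfp mask via alloc_hugepage_gfpmask(), which at this point in the
tree is roughly the following (a sketch from memory, not quoted from
the authoritative header):

	static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
	{
		/* defrag only toggles __GFP_WAIT; extra_gfp carries
		 * node-placement hints such as __GFP_OTHER_NODE */
		return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
	}

The only NUMA-vs-UMA difference between the callers is the extra
__GFP_OTHER_NODE | __GFP_THISNODE on the NUMA side, and neither bit is
in GFP_RECLAIM_MASK (mm/internal.h), the subset the memcg charge path
actually honors.  So computing the mask once in collapse_huge_page()
yields identical reclaim behavior for both the allocation and the
charge, as the patch below does.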

>  -mm: intended to be folded into
>       mm-memcg-sync-allocation-and-memcg-charge-gfp-flags-for-thp.patch
> 
>  mm/huge_memory.c | 21 ++++++++-------------
>  1 file changed, 8 insertions(+), 13 deletions(-)
> 
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2373,16 +2373,12 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
>  }
> 
>  static struct page *
> -khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
> +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
>                      struct vm_area_struct *vma, unsigned long address,
>                      int node)
>  {
>       VM_BUG_ON_PAGE(*hpage, *hpage);
> 
> -     /* Only allocate from the target node */
> -     *gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
> -             __GFP_THISNODE;
> -
>       /*
>        * Before allocating the hugepage, release the mmap_sem read lock.
>        * The allocation can take potentially a long time if it involves
> @@ -2391,7 +2387,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
>        */
>       up_read(&mm->mmap_sem);
> 
> -     *hpage = alloc_pages_exact_node(node, *gfp, HPAGE_PMD_ORDER);
> +     *hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
>       if (unlikely(!*hpage)) {
>               count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
>               *hpage = ERR_PTR(-ENOMEM);
> @@ -2445,18 +2441,13 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
>  }
> 
>  static struct page *
> -khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
> +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
>                      struct vm_area_struct *vma, unsigned long address,
>                      int node)
>  {
>       up_read(&mm->mmap_sem);
>       VM_BUG_ON(!*hpage);
> 
> -     /*
> -      * khugepaged_alloc_hugepage is doing the preallocation, use the same
> -      * gfp flags here.
> -      */
> -     *gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), 0);
>       return  *hpage;
>  }
>  #endif
> @@ -2495,8 +2486,12 @@ static void collapse_huge_page(struct mm_struct *mm,
> 
>       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
> 
> +     /* Only allocate from the target node */
> +     gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
> +             __GFP_THISNODE;
> +
>       /* release the mmap_sem read lock. */
> -     new_page = khugepaged_alloc_page(hpage, &gfp, mm, vma, address, node);
> +     new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
>       if (!new_page)
>               return;
> 