Convert vma_alloc_anon_folio_pmd() to pass __GFP_ZERO instead of
zeroing at the call site. post_alloc_hook() uses the fault address
passed through vma_alloc_folio() for cache-friendly zeroing.

Signed-off-by: Michael S. Tsirkin <[email protected]>
Reviewed-by: Gregory Price <[email protected]>
Assisted-by: Claude:claude-opus-4-6
---
 mm/huge_memory.c | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d689e6491ddb..9845c920c29c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1333,7 +1333,7 @@ EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
                unsigned long addr)
 {
-       gfp_t gfp = vma_thp_gfp_mask(vma);
+       gfp_t gfp = vma_thp_gfp_mask(vma) | __GFP_ZERO;
        const int order = HPAGE_PMD_ORDER;
        struct folio *folio;
 
@@ -1356,14 +1356,6 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
        }
        folio_throttle_swaprate(folio, gfp);
 
-       /*
-       * When a folio is not zeroed during allocation (__GFP_ZERO not used)
-       * or user folios require special handling, folio_zero_user() is used to
-       * make sure that the page corresponding to the faulting address will be
-       * hot in the cache after zeroing.
-       */
-       if (user_alloc_needs_zeroing())
-               folio_zero_user(folio, addr);
        /*
         * The memory barrier inside __folio_mark_uptodate makes sure that
         * folio_zero_user writes become visible before the set_pmd_at()
-- 
MST


Reply via email to