Use __GFP_PREZEROED and folio_test_clear_prezeroed() to skip
folio_zero_user() in the PMD THP anonymous page allocation path
when the page is already zeroed.

Signed-off-by: Michael S. Tsirkin <[email protected]>
Assisted-by: Claude:claude-opus-4-6
---
 mm/huge_memory.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8e2746ea74ad..3b9b53fad0f1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1256,7 +1256,7 @@ EXPORT_SYMBOL_GPL(thp_get_unmapped_area);
 static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
                unsigned long addr)
 {
-       gfp_t gfp = vma_thp_gfp_mask(vma);
+       gfp_t gfp = vma_thp_gfp_mask(vma) | __GFP_PREZEROED;
        const int order = HPAGE_PMD_ORDER;
        struct folio *folio;
 
@@ -1285,7 +1285,7 @@ static struct folio *vma_alloc_anon_folio_pmd(struct vm_area_struct *vma,
        * make sure that the page corresponding to the faulting address will be
        * hot in the cache after zeroing.
        */
-       if (user_alloc_needs_zeroing())
+       if (user_alloc_needs_zeroing() && !folio_test_clear_prezeroed(folio))
                folio_zero_user(folio, addr);
        /*
         * The memory barrier inside __folio_mark_uptodate makes sure that
-- 
MST


Reply via email to