Use __GFP_PREZEROED and folio_test_clear_prezeroed() to skip folio_zero_user() in the mTHP anonymous page allocation path when the page is already zeroed.
Signed-off-by: Michael S. Tsirkin <[email protected]>
Assisted-by: Claude:claude-opus-4-6
---
 mm/memory.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 07778814b4a8..2f61321a81fd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5176,7 +5176,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 		goto fallback;
 
 	/* Try allocating the highest of the remaining orders. */
-	gfp = vma_thp_gfp_mask(vma);
+	gfp = vma_thp_gfp_mask(vma) | __GFP_PREZEROED;
 	while (orders) {
 		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
 		folio = vma_alloc_folio(gfp, order, vma, addr);
@@ -5194,7 +5194,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 			 * that the page corresponding to the faulting address
 			 * will be hot in the cache after zeroing.
 			 */
-			if (user_alloc_needs_zeroing())
+			if (user_alloc_needs_zeroing() &&
+			    !folio_test_clear_prezeroed(folio))
 				folio_zero_user(folio, vmf->address);
 			return folio;
 		}
-- 
MST

