Move prep_compound_page() before post_alloc_hook() in prep_new_page().

The next patch adds a folio_zero_user() call to post_alloc_hook(),
which uses folio_nr_pages() to determine how many pages to zero.
Without compound metadata set up first, folio_nr_pages() returns 1
for higher-order allocations, so only the first page would be zeroed.

All other operations in post_alloc_hook() (arch_alloc_page, KASAN,
debug, page owner, etc.) use raw page pointers with explicit order
counts and are unaffected by this reordering.

Also reorder compaction_alloc_noprof() for consistency. Compaction
currently passes USER_ADDR_NONE so folio_zero_user() is not called
there, but keeping the same ordering avoids a future tripping hazard.

Reviewed-by: Gregory Price <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
Assisted-by: Claude:claude-opus-4-6
Assisted-by: cursor-agent:GPT-5.4-xhigh
---
 mm/compaction.c | 4 ++--
 mm/page_alloc.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 72684fe81e83..4336e433c99b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1849,10 +1849,10 @@ static struct folio *compaction_alloc_noprof(struct folio *src, unsigned long da
                set_page_private(&freepage[size], start_order);
        }
        dst = (struct folio *)freepage;
-       post_alloc_hook(&dst->page, order, __GFP_MOVABLE, USER_ADDR_NONE);
-       set_page_refcounted(&dst->page);
        if (order)
                prep_compound_page(&dst->page, order);
+       post_alloc_hook(&dst->page, order, __GFP_MOVABLE, USER_ADDR_NONE);
+       set_page_refcounted(&dst->page);
        cc->nr_freepages -= 1 << order;
        cc->nr_migratepages -= 1 << order;
        return page_rmappable_folio(&dst->page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8c673095c11f..db387dd6b813 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1874,11 +1874,11 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
                                                        unsigned int alloc_flags,
                                                        unsigned long user_addr)
 {
-       post_alloc_hook(page, order, gfp_flags, user_addr);
-
        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);
 
+       post_alloc_hook(page, order, gfp_flags, user_addr);
+
        /*
         * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
         * allocate the page. The expectation is that the caller is taking
-- 
MST


Reply via email to