The previous patch skips zeroing in post_alloc_hook() when
__GFP_ZERO is used.  However, several page allocation paths
zero pages via folio_zero_user() or clear_user_highpage() after
allocation, not via __GFP_ZERO.

Add __GFP_PREZEROED gfp flag that tells post_alloc_hook() to
preserve the MAGIC_PAGE_ZEROED sentinel in page->private so the
caller can detect pre-zeroed pages and skip its own zeroing.
Add folio_test_clear_prezeroed() helper to check and clear
the sentinel.

Signed-off-by: Michael S. Tsirkin <[email protected]>
Assisted-by: Claude:claude-opus-4-6
---
 include/linux/gfp_types.h |  5 +++++
 include/linux/mm.h        | 16 ++++++++++++++++
 mm/page_alloc.c           |  8 +++++++-
 3 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 6c75df30a281..903f87c7fec9 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -56,6 +56,7 @@ enum {
        ___GFP_NOLOCKDEP_BIT,
 #endif
        ___GFP_NO_OBJ_EXT_BIT,
+       ___GFP_PREZEROED_BIT,
        ___GFP_LAST_BIT
 };
 
@@ -97,6 +98,7 @@ enum {
 #define ___GFP_NOLOCKDEP       0
 #endif
 #define ___GFP_NO_OBJ_EXT       BIT(___GFP_NO_OBJ_EXT_BIT)
+#define ___GFP_PREZEROED       BIT(___GFP_PREZEROED_BIT)
 
 /*
  * Physical address zone modifiers (see linux/mmzone.h - low four bits)
@@ -292,6 +294,9 @@ enum {
 #define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
 #define __GFP_SKIP_KASAN ((__force gfp_t)___GFP_SKIP_KASAN)
 
+/* Caller handles pre-zeroed pages; preserve MAGIC_PAGE_ZEROED in private */
+#define __GFP_PREZEROED ((__force gfp_t)___GFP_PREZEROED)
+
 /* Disable lockdep for GFP context tracking */
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 59fc77c4c90e..caa1de31bbca 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4820,6 +4820,22 @@ static inline bool user_alloc_needs_zeroing(void)
  */
 #define MAGIC_PAGE_ZEROED      0x5A45524FU     /* ZERO */
 
+/**
+ * folio_test_clear_prezeroed - test and clear the pre-zeroed marker.
+ * @folio: the folio to test.
+ *
+ * Returns true if the folio was pre-zeroed by the host, and clears
+ * the marker.  Callers can skip their own zeroing.
+ */
+static inline bool folio_test_clear_prezeroed(struct folio *folio)
+{
+       if (page_private(&folio->page) == MAGIC_PAGE_ZEROED) {
+               set_page_private(&folio->page, 0);
+               return true;
+       }
+       return false;
+}
+
 int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
 int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
 int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index efb65eee826b..fba8321c45ed 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1867,7 +1867,13 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
        bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
        int i;
 
-       set_page_private(page, 0);
+       /*
+        * If the page is pre-zeroed and the caller opted in via
+        * __GFP_PREZEROED, preserve the marker so the caller can
+        * skip its own zeroing.  Otherwise always clear private.
+        */
+       if (!(prezeroed && (gfp_flags & __GFP_PREZEROED)))
+               set_page_private(page, 0);
 
        /*
         * If the page is pre-zeroed, skip memory initialization.
-- 
MST


Reply via email to