Similarly to should_failslab(), remove the overhead of unconditionally
calling the noinline function should_fail_alloc_page() by guarding the
allocation hotpath callsite with a static key that is controlled by the
fault and error injection frameworks.

Signed-off-by: Vlastimil Babka <vba...@suse.cz>
---
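
For reference, a minimal sketch of the pattern with illustrative names
(not the actual mm code):

  #include <linux/jump_label.h>

  /* False by default, so the guarded branch is patched out to a no-op. */
  DEFINE_STATIC_KEY_FALSE(example_fault_active);

  noinline bool example_should_fail(void)
  {
          /* Expensive policy checks stay out of line, behind the key. */
          return false;
  }

  void example_hotpath(void)
  {
          /*
           * While the key is disabled this costs a single nop on the
           * hotpath and the noinline function is never called.
           */
          if (static_branch_unlikely(&example_fault_active) &&
              example_should_fail())
                  return;

          /* ... fast path ... */
  }

The frameworks are expected to flip such a key only while injection is
actually configured, so kernels with the hook unused pay nothing for it.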
 mm/fail_page_alloc.c |  3 ++-
 mm/internal.h        |  2 ++
 mm/page_alloc.c      |  9 ++++++---
 3 files changed, 10 insertions(+), 4 deletions(-)
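
On the fault injection side, FAULT_ATTR_INITIALIZER_KEY() hands the key
to the fault-inject core so it can arm and disarm the callsite. Roughly
(the "active" member name is an assumption about the fault-inject
counterpart of this series, not something this patch adds):

  /* When a fault rule is configured for the attribute: */
  if (attr->active)
          static_key_enable(attr->active);    /* patch the branch in */

  /* When the rule is removed again: */
  if (attr->active)
          static_key_disable(attr->active);   /* back to a no-op */

ALLOW_ERROR_INJECTION_KEY() plays the same role for the error injection
framework, so e.g. a BPF override attached to should_fail_alloc_page()
can enable the key for as long as it is active.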

diff --git a/mm/fail_page_alloc.c b/mm/fail_page_alloc.c
index b1b09cce9394..0906b76d78e8 100644
--- a/mm/fail_page_alloc.c
+++ b/mm/fail_page_alloc.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/fault-inject.h>
 #include <linux/mm.h>
+#include "internal.h"
 
 static struct {
        struct fault_attr attr;
@@ -9,7 +10,7 @@ static struct {
        bool ignore_gfp_reclaim;
        u32 min_order;
 } fail_page_alloc = {
-       .attr = FAULT_ATTR_INITIALIZER,
+       .attr = FAULT_ATTR_INITIALIZER_KEY(&should_fail_alloc_page_active.key),
        .ignore_gfp_reclaim = true,
        .ignore_gfp_highmem = true,
        .min_order = 1,
diff --git a/mm/internal.h b/mm/internal.h
index b2c75b12014e..8539e39b02e6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -410,6 +410,8 @@ extern char * const zone_names[MAX_NR_ZONES];
 /* perform sanity checks on struct pages being allocated or freed */
 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
 
+DECLARE_STATIC_KEY_FALSE(should_fail_alloc_page_active);
+
 extern int min_free_kbytes;
 
 void setup_per_zone_wmarks(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2e22ce5675ca..e5dc3bafa549 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -274,6 +274,8 @@ int user_min_free_kbytes = -1;
 static int watermark_boost_factor __read_mostly = 15000;
 static int watermark_scale_factor = 10;
 
+DEFINE_STATIC_KEY_FALSE(should_fail_alloc_page_active);
+
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 int movable_zone;
 EXPORT_SYMBOL(movable_zone);
@@ -3012,7 +3014,7 @@ noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
        return __should_fail_alloc_page(gfp_mask, order);
 }
-ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
+ALLOW_ERROR_INJECTION_KEY(should_fail_alloc_page, TRUE, &should_fail_alloc_page_active);
 
 static inline long __zone_watermark_unusable_free(struct zone *z,
                                unsigned int order, unsigned int alloc_flags)
@@ -4430,8 +4432,9 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 
        might_alloc(gfp_mask);
 
-       if (should_fail_alloc_page(gfp_mask, order))
-               return false;
+       if (static_branch_unlikely(&should_fail_alloc_page_active) &&
+           should_fail_alloc_page(gfp_mask, order))
+               return false;
 
        *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
 

-- 
2.45.1

