Various locations in mm/ open-code cpuset filtering with:

  cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
  !__cpuset_zone_allowed(zone, gfp_mask)

This pattern only applies cpuset filtering: when cpusets are disabled
(or ALLOC_CPUSET is not set), nothing excludes N_MEMORY_PRIVATE nodes,
so private-node zones can leak into allocation paths that should only
see general-purpose memory.

Add numa_zone_alloc_allowed() to consolidate this zone filtering. It
gates N_MEMORY_PRIVATE zones behind __GFP_PRIVATE unconditionally, and
additionally checks cpuset membership when cpusets are enabled and
ALLOC_CPUSET is set.

Replace the open-coded patterns in mm/ with the new helper.
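
Callers that previously called cpuset_zone_allowed() directly (hugetlb,
slub) pass ALLOC_CPUSET explicitly so their existing cpuset filtering
is preserved, e.g. in dequeue_hugetlb_folio_nodemask():

  if (!numa_zone_alloc_allowed(ALLOC_CPUSET, zone, gfp_mask))
          continue;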

Signed-off-by: Gregory Price <[email protected]>
---
 mm/compaction.c |  6 ++----
 mm/hugetlb.c    |  2 +-
 mm/internal.h   |  7 +++++++
 mm/page_alloc.c | 31 ++++++++++++++++++++-----------
 mm/slub.c       |  3 ++-
 5 files changed, 32 insertions(+), 17 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 1e8f8eca318c..6a65145b03d8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2829,10 +2829,8 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
                                        ac->highest_zoneidx, ac->nodemask) {
                enum compact_result status;
 
-               if (cpusets_enabled() &&
-                       (alloc_flags & ALLOC_CPUSET) &&
-                       !__cpuset_zone_allowed(zone, gfp_mask))
-                               continue;
+               if (!numa_zone_alloc_allowed(alloc_flags, zone, gfp_mask))
+                       continue;
 
                if (prio > MIN_COMPACT_PRIORITY
                                        && compaction_deferred(zone, order)) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 51273baec9e5..f2b914ab5910 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1353,7 +1353,7 @@ static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_
        for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
                struct folio *folio;
 
-               if (!cpuset_zone_allowed(zone, gfp_mask))
+               if (!numa_zone_alloc_allowed(ALLOC_CPUSET, zone, gfp_mask))
                        continue;
                /*
                 * no need to ask again on the same node. Pool is node rather than
diff --git a/mm/internal.h b/mm/internal.h
index 23ee14790227..97023748e6a9 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1206,6 +1206,8 @@ extern int node_reclaim_mode;
 
 extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
 extern int find_next_best_node(int node, nodemask_t *used_node_mask);
+extern bool numa_zone_alloc_allowed(int alloc_flags, struct zone *zone,
+                             gfp_t gfp_mask);
 #else
 #define node_reclaim_mode 0
 
@@ -1218,6 +1220,11 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
 {
        return NUMA_NO_NODE;
 }
+static inline bool numa_zone_alloc_allowed(int alloc_flags, struct zone *zone,
+                                    gfp_t gfp_mask)
+{
+       return true;
+}
 #endif
 
 static inline bool node_reclaim_enabled(void)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2facee0805da..47f2619d3840 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3690,6 +3690,21 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
        return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
                                node_reclaim_distance;
 }
+
+/* Returns true if allocation from this zone is permitted */
+bool numa_zone_alloc_allowed(int alloc_flags, struct zone *zone, gfp_t gfp_mask)
+{
+       /* Gate N_MEMORY_PRIVATE zones behind __GFP_PRIVATE */
+       if (!(gfp_mask & __GFP_PRIVATE) &&
+           node_state(zone_to_nid(zone), N_MEMORY_PRIVATE))
+               return false;
+
+       /* If cpusets are in use, check mems_allowed */
+       if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET))
+               return cpuset_zone_allowed(zone, gfp_mask);
+
+       return true;
+}
 #else  /* CONFIG_NUMA */
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
@@ -3781,10 +3796,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                struct page *page;
                unsigned long mark;
 
-               if (cpusets_enabled() &&
-                       (alloc_flags & ALLOC_CPUSET) &&
-                       !__cpuset_zone_allowed(zone, gfp_mask))
-                               continue;
+               if (!numa_zone_alloc_allowed(alloc_flags, zone, gfp_mask))
+                       continue;
                /*
                 * When allocating a page cache page for writing, we
                 * want to get it from a node that is within its dirty
@@ -4585,10 +4598,8 @@ should_reclaim_retry(gfp_t gfp_mask, unsigned order,
                unsigned long min_wmark = min_wmark_pages(zone);
                bool wmark;
 
-               if (cpusets_enabled() &&
-                       (alloc_flags & ALLOC_CPUSET) &&
-                       !__cpuset_zone_allowed(zone, gfp_mask))
-                               continue;
+               if (!numa_zone_alloc_allowed(alloc_flags, zone, gfp_mask))
+                       continue;
 
                available = reclaimable = zone_reclaimable_pages(zone);
                available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
@@ -5084,10 +5095,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
        for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
                unsigned long mark;
 
-               if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
-                   !__cpuset_zone_allowed(zone, gfp)) {
+               if (!numa_zone_alloc_allowed(alloc_flags, zone, gfp))
                        continue;
-               }
 
                if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
                    zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
diff --git a/mm/slub.c b/mm/slub.c
index 861592ac5425..e4bd6ede81d1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3595,7 +3595,8 @@ static struct slab *get_any_partial(struct kmem_cache *s,
 
                        n = get_node(s, zone_to_nid(zone));
 
-                       if (n && cpuset_zone_allowed(zone, pc->flags) &&
+                       if (n && numa_zone_alloc_allowed(ALLOC_CPUSET, zone,
+                                                  pc->flags) &&
                                        n->nr_partial > s->min_partial) {
                                slab = get_partial_node(s, n, pc);
                                if (slab) {
-- 
2.53.0

