Some private node services may need to update internal metadata when
a THP folio is split.  ZONE_DEVICE already has a split callback via
pgmap->ops; private nodes can provide the same capability.

Just like zone_device, some private node services may want to know
when a folio is being split.  Add this optional callback to the ops
struct, and add a wrapper that consolidates the zone_device and
private node callback dispatch into a single call site.

Wire this into __folio_split() where the zone_device check was made.

Signed-off-by: Gregory Price <[email protected]>
---
 include/linux/node_private.h | 33 +++++++++++++++++++++++++++++++++
 mm/huge_memory.c             |  6 ++++--
 2 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/include/linux/node_private.h b/include/linux/node_private.h
index 09ea7c4cb13c..f9dd2d25c8a5 100644
--- a/include/linux/node_private.h
+++ b/include/linux/node_private.h
@@ -3,6 +3,7 @@
 #define _LINUX_NODE_PRIVATE_H
 
 #include <linux/completion.h>
+#include <linux/memremap.h>
 #include <linux/mm.h>
 #include <linux/nodemask.h>
 #include <linux/rcupdate.h>
@@ -44,11 +45,19 @@ struct vm_fault;
  *   Returns: true if handled (skip return to buddy)
  *            false if no op (return to buddy)
  *
+ * @folio_split: Notification that a folio on this private node is being split.
+ *     [folio-referenced callback]
+ *     Called from the folio split path via folio_managed_split_cb().
+ *     @folio is the original folio; @new_folio is the newly created folio,
+ *     or NULL when called for the final (original) folio after all sub-folios
+ *     have been split off.
+ *
  * @flags: Operation exclusion flags (NP_OPS_* constants).
  *
  */
 struct node_private_ops {
        bool (*free_folio)(struct folio *folio);
+       void (*folio_split)(struct folio *folio, struct folio *new_folio);
        unsigned long flags;
 };
 
@@ -150,6 +159,24 @@ static inline bool zone_private_flags(struct zone *z, unsigned long flag)
        return node_private_flags(zone_to_nid(z)) & flag;
 }
 
+static inline void node_private_split_cb(struct folio *folio,
+                                        struct folio *new_folio)
+{
+       const struct node_private_ops *ops = folio_node_private_ops(folio);
+
+       if (ops && ops->folio_split)
+               ops->folio_split(folio, new_folio);
+}
+
+static inline void folio_managed_split_cb(struct folio *original_folio,
+                                         struct folio *new_folio)
+{
+       if (folio_is_zone_device(original_folio))
+               zone_device_private_split_cb(original_folio, new_folio);
+       else if (folio_is_private_node(original_folio))
+               node_private_split_cb(original_folio, new_folio);
+}
+
 #else /* !CONFIG_NUMA */
 
 static inline bool folio_is_private_node(struct folio *folio)
@@ -198,6 +225,12 @@ static inline bool zone_private_flags(struct zone *z, unsigned long flag)
        return false;
 }
 
+static inline void folio_managed_split_cb(struct folio *original_folio,
+                                         struct folio *new_folio)
+{
+       if (folio_is_zone_device(original_folio))
+               zone_device_private_split_cb(original_folio, new_folio);
+}
 #endif /* CONFIG_NUMA */
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40cf59301c21..2ecae494291a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -24,6 +24,7 @@
 #include <linux/freezer.h>
 #include <linux/mman.h>
 #include <linux/memremap.h>
+#include <linux/node_private.h>
 #include <linux/pagemap.h>
 #include <linux/debugfs.h>
 #include <linux/migrate.h>
@@ -3850,7 +3851,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
 
                        next = folio_next(new_folio);
 
-                       zone_device_private_split_cb(folio, new_folio);
+                       folio_managed_split_cb(folio, new_folio);
 
                        folio_ref_unfreeze(new_folio,
                                           folio_cache_ref_count(new_folio) + 1);
@@ -3889,7 +3890,8 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
                        folio_put_refs(new_folio, nr_pages);
                }
 
-               zone_device_private_split_cb(folio, NULL);
+               folio_managed_split_cb(folio, NULL);
+
                /*
                 * Unfreeze @folio only after all page cache entries, which
                 * used to point to it, have been updated with new folios.
-- 
2.53.0


Reply via email to