From: Matthew Brost <[email protected]>

Add the folio_split_unref() helper, which splits an unreferenced folio
(refcount == 0) into individual pages. It is intended to be called on
special pages (e.g., device-private or DAX) when returning the folio to
the free page pool.

Cc: Balbir Singh <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: David Hildenbrand <[email protected]>
Cc: Lorenzo Stoakes <[email protected]>
Cc: Zi Yan <[email protected]>
Cc: Baolin Wang <[email protected]>
Cc: "Liam R. Howlett" <[email protected]>
Cc: Nico Pache <[email protected]>
Cc: Ryan Roberts <[email protected]>
Cc: Dev Jain <[email protected]>
Cc: Barry Song <[email protected]>
Cc: Lance Yang <[email protected]>
Cc: [email protected]
Cc: [email protected]
Suggested-by: Alistair Popple <[email protected]>
Signed-off-by: Matthew Brost <[email protected]>
Signed-off-by: Francois Dugast <[email protected]>
---
 include/linux/huge_mm.h |  1 +
 mm/huge_memory.c        | 39 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a4d9f964dfde..18cb9728d8f1 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -369,6 +369,7 @@ enum split_type {
        SPLIT_TYPE_NON_UNIFORM,
 };
 
+void folio_split_unref(struct folio *folio);
 int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
                unsigned int new_order);
 int folio_split_unmapped(struct folio *folio, unsigned int new_order);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40cf59301c21..0eb9e6ad8639 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3580,6 +3580,45 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
                ClearPageCompound(&folio->page);
 }
 
+/**
+ * folio_split_unref() - split an unreferenced folio (refcount == 0)
+ * @folio: the to-be-split folio
+ *
+ * Split an unreferenced folio (refcount == 0) into individual pages.
+ * Intended to be called on special pages (e.g., device-private, DAX, etc.)
+ * when returning the folio to the free page pool.
+ */
+void folio_split_unref(struct folio *folio)
+{
+       struct dev_pagemap *pgmap = page_pgmap(&folio->page);
+       int order, i;
+
+       folio->mapping = NULL;
+       order = folio_order(folio);
+       if (!order)
+               return;
+
+       folio_reset_order(folio);
+
+       for (i = 0; i < (1UL << order); i++) {
+               struct page *page = folio_page(folio, i);
+               struct folio *new_folio = (struct folio *)page;
+
+               ClearPageHead(page);
+               clear_compound_head(page);
+
+               new_folio->mapping = NULL;
+               /*
+                * Reset pgmap which was over-written by
+                * prep_compound_page().
+                */
+               new_folio->pgmap = pgmap;
+       new_folio->share = 0;   /* fsdax only, unused for device private */
+               VM_WARN_ON_FOLIO(folio_ref_count(new_folio), new_folio);
+       }
+}
+EXPORT_SYMBOL_GPL(folio_split_unref);
+
 /**
  * __split_unmapped_folio() - splits an unmapped @folio to lower order folios in
  * two ways: uniform split or non-uniform split.
-- 
2.43.0

Reply via email to