After READ_ONLY_THP_FOR_FS is removed, a filesystem either supports large folios or it does not. folio_split() can be used on a filesystem with large folio support without worrying about encountering a THP on a filesystem without large folio support. Remove the now-unneeded try_folio_split_to_order() fallback wrapper and call folio_split() directly from the truncation path.
Signed-off-by: Zi Yan <[email protected]>
---
 include/linux/huge_mm.h | 28 ----------------------------
 mm/truncate.c           |  8 ++++----
 2 files changed, 4 insertions(+), 32 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1258fa37e85b..77c8f73f8839 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -389,27 +389,6 @@ static inline int split_huge_page_to_order(struct page *page, unsigned int new_o
 	return split_huge_page_to_list_to_order(page, NULL, new_order);
 }
 
-/**
- * try_folio_split_to_order() - try to split a @folio at @page to @new_order
- * using non uniform split.
- * @folio: folio to be split
- * @page: split to @new_order at the given page
- * @new_order: the target split order
- *
- * Try to split a @folio at @page using non uniform split to @new_order, if
- * non uniform split is not supported, fall back to uniform split. After-split
- * folios are put back to LRU list. Use min_order_for_split() to get the lower
- * bound of @new_order.
- *
- * Return: 0 - split is successful, otherwise split failed.
- */
-static inline int try_folio_split_to_order(struct folio *folio,
-		struct page *page, unsigned int new_order)
-{
-	if (folio_check_splittable(folio, new_order, SPLIT_TYPE_NON_UNIFORM))
-		return split_huge_page_to_order(&folio->page, new_order);
-	return folio_split(folio, new_order, page, NULL);
-}
 static inline int split_huge_page(struct page *page)
 {
 	return split_huge_page_to_list_to_order(page, NULL, 0);
@@ -641,13 +620,6 @@ static inline int split_folio_to_list(struct folio *folio, struct list_head *lis
 	return -EINVAL;
 }
 
-static inline int try_folio_split_to_order(struct folio *folio,
-		struct page *page, unsigned int new_order)
-{
-	VM_WARN_ON_ONCE_FOLIO(1, folio);
-	return -EINVAL;
-}
-
 static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
 #define split_huge_pmd(__vma, __pmd, __address)	\
 	do { } while (0)
diff --git a/mm/truncate.c b/mm/truncate.c
index 2931d66c16d0..6973b05ec4b8 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -177,7 +177,7 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 	return 0;
 }
 
-static int try_folio_split_or_unmap(struct folio *folio, struct page *split_at,
+static int folio_split_or_unmap(struct folio *folio, struct page *split_at,
 		unsigned long min_order)
 {
 	enum ttu_flags ttu_flags =
@@ -186,7 +186,7 @@ static int try_folio_split_or_unmap(struct folio *folio, struct page *split_at,
 		TTU_IGNORE_MLOCK;
 	int ret;
 
-	ret = try_folio_split_to_order(folio, split_at, min_order);
+	ret = folio_split(folio, min_order, split_at, NULL);
 
 	/*
 	 * If the split fails, unmap the folio, so it will be refaulted
@@ -252,7 +252,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 
 	min_order = mapping_min_folio_order(folio->mapping);
 	split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
-	if (!try_folio_split_or_unmap(folio, split_at, min_order)) {
+	if (!folio_split_or_unmap(folio, split_at, min_order)) {
 		/*
 		 * try to split at offset + length to make sure folios within
 		 * the range can be dropped, especially to avoid memory waste
@@ -279,7 +279,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 	/* make sure folio2 is large and does not change its mapping */
 	if (folio_test_large(folio2) &&
 	    folio2->mapping == folio->mapping)
-		try_folio_split_or_unmap(folio2, split_at2, min_order);
+		folio_split_or_unmap(folio2, split_at2, min_order);
 
 	folio_unlock(folio2);
 out:
-- 
2.43.0
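For illustration only, not part of the patch: a minimal sketch of the caller-side pattern after this change, modeled on the mm/truncate.c hunks above. The helper name split_large_folio_at() is hypothetical; folio_split(), folio_test_large(), and mapping_min_folio_order() are the kernel APIs the diff itself uses.

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helper: split a large folio at @split_at, clamped to the
 * mapping's minimum folio order. The caller must hold the folio lock,
 * as the truncation path above does.
 */
static int split_large_folio_at(struct folio *folio, struct page *split_at)
{
	unsigned int min_order;

	if (!folio_test_large(folio))
		return 0;	/* nothing to split */

	/* Never split below the order the mapping requires. */
	min_order = mapping_min_folio_order(folio->mapping);

	/*
	 * Non-uniform split at @split_at down to @min_order; after-split
	 * folios go back to the LRU list. Returns 0 on success.
	 */
	return folio_split(folio, min_order, split_at, NULL);
}

In mm/truncate.c this logic lives in folio_split_or_unmap(), which additionally unmaps the folio when the split fails so the truncated range gets refaulted later.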

