Add collapse_allowable_orders() to generalize THP order eligibility. The
function determines which THP orders are permitted based on collapse
context (khugepaged vs madvise_collapse).

This consolidates collapse configuration logic and provides a clean
interface for future mTHP collapse support where the orders may be
different.

Reviewed-by: Baolin Wang <[email protected]>
Signed-off-by: Nico Pache <[email protected]>
---
 include/linux/khugepaged.h        |  6 ++----
 mm/huge_memory.c                  |  2 +-
 mm/khugepaged.c                   | 20 ++++++++++++++------
 mm/vma.c                          |  6 +++---
 tools/testing/vma/include/stubs.h |  3 +--
 5 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index d7a9053ff4fe..e87df2fa6931 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -13,8 +13,7 @@ extern void khugepaged_destroy(void);
 extern int start_stop_khugepaged(void);
 extern void __khugepaged_enter(struct mm_struct *mm);
 extern void __khugepaged_exit(struct mm_struct *mm);
-extern void khugepaged_enter_vma(struct vm_area_struct *vma,
-                                vm_flags_t vm_flags);
+extern void khugepaged_enter_vma(struct vm_area_struct *vma);
 extern void khugepaged_min_free_kbytes_update(void);
 extern bool current_is_khugepaged(void);
 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
@@ -38,8 +37,7 @@ static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm
 static inline void khugepaged_exit(struct mm_struct *mm)
 {
 }
-static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
-                                       vm_flags_t vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma)
 {
 }
 static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5c128cdec810..1023698a8b96 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1557,7 +1557,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
        ret = vmf_anon_prepare(vmf);
        if (ret)
                return ret;
-       khugepaged_enter_vma(vma, vma->vm_flags);
+       khugepaged_enter_vma(vma);
 
        if (!(vmf->flags & FAULT_FLAG_WRITE) &&
                        !mm_forbids_zeropage(vma->vm_mm) &&
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a4f1c570b69b..fdbdc1a1cdd9 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -447,7 +447,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
                 * register it here without waiting a page fault that
                 * may not happen any time soon.
                 */
-               khugepaged_enter_vma(vma, *vm_flags);
+               khugepaged_enter_vma(vma);
                break;
        case MADV_NOHUGEPAGE:
                *vm_flags &= ~VM_HUGEPAGE;
@@ -546,12 +546,20 @@ void __khugepaged_enter(struct mm_struct *mm)
                wake_up_interruptible(&khugepaged_wait);
 }
 
-void khugepaged_enter_vma(struct vm_area_struct *vma,
-                         vm_flags_t vm_flags)
+/* Check what orders are allowed based on the vma and collapse type */
+static unsigned long collapse_allowable_orders(struct vm_area_struct *vma,
+               enum tva_type tva_flags)
+{
+       unsigned long orders = BIT(HPAGE_PMD_ORDER);
+
+       return thp_vma_allowable_orders(vma, vma->vm_flags, tva_flags, orders);
+}
+
+void khugepaged_enter_vma(struct vm_area_struct *vma)
 {
        if (!mm_flags_test(MMF_VM_HUGEPAGE, vma->vm_mm) &&
            hugepage_pmd_enabled()) {
-               if (thp_vma_allowable_order(vma, vm_flags, TVA_KHUGEPAGED, PMD_ORDER))
+               if (collapse_allowable_orders(vma, TVA_KHUGEPAGED))
                        __khugepaged_enter(vma->vm_mm);
        }
 }
@@ -2664,7 +2672,7 @@ static void collapse_scan_mm_slot(unsigned int progress_max,
                        cc->progress++;
                        break;
                }
-               if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_KHUGEPAGED, PMD_ORDER)) {
+               if (!collapse_allowable_orders(vma, TVA_KHUGEPAGED)) {
                        cc->progress++;
                        continue;
                }
@@ -2973,7 +2981,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
        BUG_ON(vma->vm_start > start);
        BUG_ON(vma->vm_end < end);
 
-       if (!thp_vma_allowable_order(vma, vma->vm_flags, TVA_FORCED_COLLAPSE, PMD_ORDER))
+       if (!collapse_allowable_orders(vma, TVA_FORCED_COLLAPSE))
                return -EINVAL;
 
        cc = kmalloc_obj(*cc);
diff --git a/mm/vma.c b/mm/vma.c
index 377321b48734..c0398fb597b3 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -989,7 +989,7 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
                goto abort;
 
        vma_set_flags_mask(vmg->target, sticky_flags);
-       khugepaged_enter_vma(vmg->target, vmg->vm_flags);
+       khugepaged_enter_vma(vmg->target);
        vmg->state = VMA_MERGE_SUCCESS;
        return vmg->target;
 
@@ -1110,7 +1110,7 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg)
         * following VMA if we have VMAs on both sides.
         */
        if (vmg->target && !vma_expand(vmg)) {
-               khugepaged_enter_vma(vmg->target, vmg->vm_flags);
+               khugepaged_enter_vma(vmg->target);
                vmg->state = VMA_MERGE_SUCCESS;
                return vmg->target;
        }
@@ -2589,7 +2589,7 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap,
         * call covers the non-merge case.
         */
        if (!vma_is_anonymous(vma))
-               khugepaged_enter_vma(vma, map->vm_flags);
+               khugepaged_enter_vma(vma);
        *vmap = vma;
        return 0;
 
diff --git a/tools/testing/vma/include/stubs.h b/tools/testing/vma/include/stubs.h
index a30b8bc84955..3d9a2daa2712 100644
--- a/tools/testing/vma/include/stubs.h
+++ b/tools/testing/vma/include/stubs.h
@@ -182,8 +182,7 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
        return true;
 }
 
-static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
-                         vm_flags_t vm_flags)
+static inline void khugepaged_enter_vma(struct vm_area_struct *vma)
 {
 }
 
-- 
2.53.0


Reply via email to