On Wed, Oct 22, 2025 at 12:37:07PM -0600, Nico Pache wrote:
> generalize the order of the __collapse_huge_page_* functions
> to support future mTHP collapse.
>
> mTHP collapse will not honor the khugepaged_max_ptes_shared or
> khugepaged_max_ptes_swap parameters, and will fail if it encounters a
> shared or swapped entry.
>
> No functional changes in this patch.
>
> Reviewed-by: Baolin Wang <[email protected]>
> Acked-by: David Hildenbrand <[email protected]>
> Co-developed-by: Dev Jain <[email protected]>
> Signed-off-by: Dev Jain <[email protected]>
> Signed-off-by: Nico Pache <[email protected]>

Thanks for addressing the v10 feedback (I didn't check it at v11).

Overall LGTM, so:

Reviewed-by: Lorenzo Stoakes <[email protected]>

A few minor nits below.

> ---
>  mm/khugepaged.c | 78 ++++++++++++++++++++++++++++++-------------------
>  1 file changed, 48 insertions(+), 30 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 36ee659acfbb..4ccebf5dda97 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -537,25 +537,25 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
>  }
>
>  static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
> -                                     unsigned long start_addr,
> -                                     pte_t *pte,
> -                                     struct collapse_control *cc,
> -                                     struct list_head *compound_pagelist)
> +             unsigned long start_addr, pte_t *pte, struct collapse_control *cc,
> +             unsigned int order, struct list_head *compound_pagelist)

This series isn't the right place for it, but god do we need helper structs in
this code... :)
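
Not for this series, but to illustrate what I mean, a rough sketch (the
struct name and exact fields are hypothetical):

	/*
	 * Hypothetical: bundle the arguments that every
	 * __collapse_huge_page_*() helper threads through.
	 */
	struct collapse_ctx {
		struct vm_area_struct *vma;
		unsigned long start_addr;
		pte_t *pte;
		struct collapse_control *cc;
		unsigned int order;
		struct list_head *compound_pagelist;
	};

	static int __collapse_huge_page_isolate(struct collapse_ctx *ctx);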

>  {
>       struct page *page = NULL;
>       struct folio *folio = NULL;
>       unsigned long addr = start_addr;
>       pte_t *_pte;
>       int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
> +     const unsigned long nr_pages = 1UL << order;
> +     int max_ptes_none = khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - order);

Nit, but we should const-ify this too.
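
i.e. just (matching nr_pages above):

	const int max_ptes_none = khugepaged_max_ptes_none >>
				  (HPAGE_PMD_ORDER - order);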

>
> -     for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
> +     for (_pte = pte; _pte < pte + nr_pages;
>            _pte++, addr += PAGE_SIZE) {
>               pte_t pteval = ptep_get(_pte);
>               if (pte_none_or_zero(pteval)) {
>                       ++none_or_zero;
>                       if (!userfaultfd_armed(vma) &&
>                           (!cc->is_khugepaged ||
> -                          none_or_zero <= khugepaged_max_ptes_none)) {
> +                          none_or_zero <= max_ptes_none)) {
>                               continue;
>                       } else {
>                               result = SCAN_EXCEED_NONE_PTE;
> @@ -583,8 +583,14 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
>               /* See collapse_scan_pmd(). */
>               if (folio_maybe_mapped_shared(folio)) {
>                       ++shared;
> -                     if (cc->is_khugepaged &&
> -                         shared > khugepaged_max_ptes_shared) {
> +                     /*
> +                      * TODO: Support shared pages without leading to further
> +                      * mTHP collapses. Currently bringing in new pages via
> +                      * shared may cause a future higher order collapse on a
> +                      * rescan of the same range.
> +                      */

Yeah, I wish we could address this some other way, but given the mire of THP
code, putting this comment here for now is probably the only sensible option.

> +                     if (order != HPAGE_PMD_ORDER || (cc->is_khugepaged &&
> +                         shared > khugepaged_max_ptes_shared)) {
>                               result = SCAN_EXCEED_SHARED_PTE;
>                               count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
>                               goto out;
> @@ -677,18 +683,18 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
>  }
>
>  static void __collapse_huge_page_copy_succeeded(pte_t *pte,
> -                                             struct vm_area_struct *vma,
> -                                             unsigned long address,
> -                                             spinlock_t *ptl,
> -                                             struct list_head *compound_pagelist)
> +             struct vm_area_struct *vma, unsigned long address,
> +             spinlock_t *ptl, unsigned int order,
> +             struct list_head *compound_pagelist)
>  {
> -     unsigned long end = address + HPAGE_PMD_SIZE;
> +     unsigned long end = address + (PAGE_SIZE << order);
>       struct folio *src, *tmp;
>       pte_t pteval;
>       pte_t *_pte;
>       unsigned int nr_ptes;
> +     const unsigned long nr_pages = 1UL << order;
>
> -     for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte += nr_ptes,
> +     for (_pte = pte; _pte < pte + nr_pages; _pte += nr_ptes,
>            address += nr_ptes * PAGE_SIZE) {
>               nr_ptes = 1;
>               pteval = ptep_get(_pte);
> @@ -741,13 +747,11 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
>  }
>
>  static void __collapse_huge_page_copy_failed(pte_t *pte,
> -                                          pmd_t *pmd,
> -                                          pmd_t orig_pmd,
> -                                          struct vm_area_struct *vma,
> -                                          struct list_head *compound_pagelist)
> +             pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
> +             unsigned int order, struct list_head *compound_pagelist)
>  {
>       spinlock_t *pmd_ptl;
> -
> +     const unsigned long nr_pages = 1UL << order;
>       /*
>        * Re-establish the PMD to point to the original page table
>        * entry. Restoring PMD needs to be done prior to releasing
> @@ -761,7 +765,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
>        * Release both raw and compound pages isolated
>        * in __collapse_huge_page_isolate.
>        */
> -     release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
> +     release_pte_pages(pte, pte + nr_pages, compound_pagelist);
>  }
>
>  /*
> @@ -781,16 +785,16 @@ static void __collapse_huge_page_copy_failed(pte_t *pte,
>   */
>  static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
>               pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
> -             unsigned long address, spinlock_t *ptl,
> +             unsigned long address, spinlock_t *ptl, unsigned int order,
>               struct list_head *compound_pagelist)
>  {
>       unsigned int i;
>       int result = SCAN_SUCCEED;
> -
> +     const unsigned long nr_pages = 1UL << order;
>       /*
>        * Copying pages' contents is subject to memory poison at any iteration.
>        */
> -     for (i = 0; i < HPAGE_PMD_NR; i++) {
> +     for (i = 0; i < nr_pages; i++) {
>               pte_t pteval = ptep_get(pte + i);
>               struct page *page = folio_page(folio, i);
>               unsigned long src_addr = address + i * PAGE_SIZE;
> @@ -809,10 +813,10 @@ static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
>
>       if (likely(result == SCAN_SUCCEED))
>               __collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
> -                                                 compound_pagelist);
> +                                                 order, compound_pagelist);
>       else
>               __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
> -                                              compound_pagelist);
> +                                              order, compound_pagelist);
>
>       return result;
>  }
> @@ -985,13 +989,12 @@ static int check_pmd_still_valid(struct mm_struct *mm,
>   * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
>   */
>  static int __collapse_huge_page_swapin(struct mm_struct *mm,
> -                                    struct vm_area_struct *vma,
> -                                    unsigned long start_addr, pmd_t *pmd,
> -                                    int referenced)
> +             struct vm_area_struct *vma, unsigned long start_addr,
> +             pmd_t *pmd, int referenced, unsigned int order)

Nit, super nit really, but since other __collapse_huge_page_*() functions have
..., order, param) as their last parameters, perhaps worth flipping referenced +
order here?

Not a big deal though.
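
i.e. something like:

	static int __collapse_huge_page_swapin(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long start_addr,
			pmd_t *pmd, unsigned int order, int referenced)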

>  {
>       int swapped_in = 0;
>       vm_fault_t ret = 0;
> -     unsigned long addr, end = start_addr + (HPAGE_PMD_NR * PAGE_SIZE);
> +     unsigned long addr, end = start_addr + (PAGE_SIZE << order);
>       int result;
>       pte_t *pte = NULL;
>       spinlock_t *ptl;
> @@ -1022,6 +1025,19 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
>               if (!is_swap_pte(vmf.orig_pte))
>                       continue;
>
> +             /*
> +              * TODO: Support swapin without leading to further mTHP
> +              * collapses. Currently bringing in new pages via swapin may
> +              * cause a future higher order collapse on a rescan of the same
> +              * range.
> +              */

Same comment as above re: this, i.e. that it's a pity but probably unavoidable
for now.

> +             if (order != HPAGE_PMD_ORDER) {
> +                     pte_unmap(pte);
> +                     mmap_read_unlock(mm);
> +                     result = SCAN_EXCEED_SWAP_PTE;
> +                     goto out;
> +             }
> +
>               vmf.pte = pte;
>               vmf.ptl = ptl;
>               ret = do_swap_page(&vmf);
> @@ -1142,7 +1158,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>                * that case.  Continuing to collapse causes inconsistency.
>                */
>               result = __collapse_huge_page_swapin(mm, vma, address, pmd,
> -                                                  referenced);
> +                                                  referenced, HPAGE_PMD_ORDER);
>               if (result != SCAN_SUCCEED)
>                       goto out_nolock;
>       }
> @@ -1190,6 +1206,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>       pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
>       if (pte) {
>               result = __collapse_huge_page_isolate(vma, address, pte, cc,
> +                                                   HPAGE_PMD_ORDER,
>                                                     &compound_pagelist);
>               spin_unlock(pte_ptl);
>       } else {
> @@ -1220,6 +1237,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>
>       result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
>                                          vma, address, pte_ptl,
> +                                        HPAGE_PMD_ORDER,
>                                          &compound_pagelist);
>       pte_unmap(pte);
>       if (unlikely(result != SCAN_SUCCEED))
> --
> 2.51.0
>
