OK, I'll stop reporting the merge conflicts from that series, but yeah, here too I guess it kills a whole bunch :)
On Mon, Dec 01, 2025 at 10:46:14AM -0700, Nico Pache wrote:
> For khugepaged to support different mTHP orders, we must generalize this
> to check if the PMD is not shared by another VMA and that the order is
> enabled.
>
> No functional change in this patch. Also correct a comment about the
> functionality of the revalidation.
>
> Reviewed-by: Wei Yang <[email protected]>
> Reviewed-by: Lance Yang <[email protected]>
> Reviewed-by: Baolin Wang <[email protected]>
> Reviewed-by: Lorenzo Stoakes <[email protected]>
> Acked-by: David Hildenbrand <[email protected]>
> Co-developed-by: Dev Jain <[email protected]>
> Signed-off-by: Dev Jain <[email protected]>
> Signed-off-by: Nico Pache <[email protected]>
> ---
>  mm/khugepaged.c | 20 +++++++++++---------
>  1 file changed, 11 insertions(+), 9 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 433ea7283488..69fc6b41f010 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -892,14 +892,13 @@ static int collapse_find_target_node(struct collapse_control *cc)
>
>  /*
>   * If mmap_lock temporarily dropped, revalidate vma
> - * before taking mmap_lock.
> + * after taking the mmap_lock again.
>   * Returns enum scan_result value.
>   */
>
>  static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
> -				   bool expect_anon,
> -				   struct vm_area_struct **vmap,
> -				   struct collapse_control *cc)
> +				   bool expect_anon, struct vm_area_struct **vmap,
> +				   struct collapse_control *cc, unsigned int order)
>  {
>  	struct vm_area_struct *vma;
>  	enum tva_type type = cc->is_khugepaged ? TVA_KHUGEPAGED :
> @@ -912,15 +911,16 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>  	if (!vma)
>  		return SCAN_VMA_NULL;
>
> +	/* Always check the PMD order to ensure its not shared by another VMA */
>  	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
>  		return SCAN_ADDRESS_RANGE;
> -	if (!thp_vma_allowable_order(vma, vma->vm_flags, type, PMD_ORDER))
> +	if (!thp_vma_allowable_orders(vma, vma->vm_flags, type, BIT(order)))
>  		return SCAN_VMA_CHECK;
>  	/*
>  	 * Anon VMA expected, the address may be unmapped then
>  	 * remapped to file after khugepaged reaquired the mmap_lock.
>  	 *
> -	 * thp_vma_allowable_order may return true for qualified file
> +	 * thp_vma_allowable_orders may return true for qualified file
>  	 * vmas.
>  	 */
>  	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
> @@ -1117,7 +1117,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>  		goto out_nolock;
>
>  	mmap_read_lock(mm);
> -	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
> +	result = hugepage_vma_revalidate(mm, address, true, &vma, cc,
> +					 HPAGE_PMD_ORDER);
>  	if (result != SCAN_SUCCEED) {
>  		mmap_read_unlock(mm);
>  		goto out_nolock;
> @@ -1151,7 +1152,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
>  	 * mmap_lock.
>  	 */
>  	mmap_write_lock(mm);
> -	result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
> +	result = hugepage_vma_revalidate(mm, address, true, &vma, cc,
> +					 HPAGE_PMD_ORDER);
>  	if (result != SCAN_SUCCEED)
>  		goto out_up_write;
>  	/* check if the pmd is still valid */
> @@ -2814,7 +2816,7 @@ int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
>  	mmap_read_lock(mm);
>  	mmap_locked = true;
>  	result = hugepage_vma_revalidate(mm, addr, false, &vma,
> -					 cc);
> +					 cc, HPAGE_PMD_ORDER);
>  	if (result != SCAN_SUCCEED) {
>  		last_fail = result;
>  		goto out_nolock;
> --
> 2.51.1
>
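FWIW, a quick sketch of where this generalization is headed, assuming
only the new hugepage_vma_revalidate() signature from this patch (the
caller below is hypothetical, not part of the series): a future mTHP
collapse path can pass whatever order it is attempting, and the
BIT(order) check in thp_vma_allowable_orders() then tests exactly that
order, while PMD_ORDER suitability is still verified unconditionally.

	/*
	 * Hypothetical mTHP caller, for illustration only: revalidate
	 * the VMA for an arbitrary order after retaking the mmap_lock.
	 */
	static int mthp_revalidate_example(struct mm_struct *mm,
					   unsigned long address,
					   struct vm_area_struct **vmap,
					   struct collapse_control *cc,
					   unsigned int order)
	{
		int result;

		mmap_read_lock(mm);
		/* order != HPAGE_PMD_ORDER is now accepted */
		result = hugepage_vma_revalidate(mm, address, true, vmap,
						 cc, order);
		mmap_read_unlock(mm);
		return result;
	}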
