On Fri, Feb 27, 2026 at 09:08:39PM +0100, David Hildenbrand (Arm) wrote:
> __zap_vma_range() has two callers, of which
> zap_page_range_single_batched() documents that the range must fit into
> the VMA range.
>
> So move adjusting the range to unmap_vmas(), where it is actually
> required, and add a safety check in __zap_vma_range() instead. In
> unmap_vmas(), we never expect to see empty ranges (otherwise, why would
> the VMA be in there in the first place?).
>
> __zap_vma_range() will no longer be called with start == end, so
> clean up the function a bit. While at it, simplify the overly long
> comment to its core message.
>
> We will no longer call uprobe_munmap() for start == end, which actually
> seems to be the right thing to do.
>
> Note that hugetlb_zap_begin()->...->adjust_range_if_pmd_sharing_possible()
> cannot result in the range exceeding the VMA range.
>
> Signed-off-by: David Hildenbrand (Arm) <[email protected]>

LGTM, so:

Reviewed-by: Lorenzo Stoakes (Oracle) <[email protected]>
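
(Aside for anyone reading along: the new VM_WARN_ON_ONCE() leans on
range_in_vma(); if memory serves, the helper in include/linux/mm.h is
roughly

	/* does [start, end) lie entirely within the VMA? */
	static inline bool range_in_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
	{
		return (vma && vma->vm_start <= start && end <= vma->vm_end);
	}

so together with the start >= end check, the warning fires whenever a
caller passes an empty range or one that spills outside the VMA, which is
exactly the contract zap_page_range_single_batched() documents.)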

> ---
>  mm/memory.c | 58 +++++++++++++++++++++--------------------------------
>  1 file changed, 23 insertions(+), 35 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index f0aaec57a66b..fdcd2abf29c2 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2073,44 +2073,28 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
>       tlb_end_vma(tlb, vma);
>  }
>
> -
> -static void __zap_vma_range(struct mmu_gather *tlb,
> -             struct vm_area_struct *vma, unsigned long start_addr,
> -             unsigned long end_addr, struct zap_details *details)
> +static void __zap_vma_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
> +             unsigned long start, unsigned long end,
> +             struct zap_details *details)
>  {
> -     unsigned long start = max(vma->vm_start, start_addr);
> -     unsigned long end;
> -
> -     if (start >= vma->vm_end)
> -             return;
> -     end = min(vma->vm_end, end_addr);
> -     if (end <= vma->vm_start)
> -             return;
> +     VM_WARN_ON_ONCE(start >= end || !range_in_vma(vma, start, end));
>
>       if (vma->vm_file)
>               uprobe_munmap(vma, start, end);
>
> -     if (start != end) {
> -             if (unlikely(is_vm_hugetlb_page(vma))) {
> -                     /*
> -                      * It is undesirable to test vma->vm_file as it
> -                      * should be non-null for valid hugetlb area.
> -                      * However, vm_file will be NULL in the error
> -                      * cleanup path of mmap_region. When
> -                      * hugetlbfs ->mmap method fails,
> -                      * mmap_region() nullifies vma->vm_file
> -                      * before calling this function to clean up.
> -                      * Since no pte has actually been setup, it is
> -                      * safe to do nothing in this case.
> -                      */
> -                     if (vma->vm_file) {
> -                             zap_flags_t zap_flags = details ?
> -                                 details->zap_flags : 0;
> -                             __unmap_hugepage_range(tlb, vma, start, end,
> -                                                          NULL, zap_flags);
> -                     }
> -             } else
> -                     unmap_page_range(tlb, vma, start, end, details);
> +     if (unlikely(is_vm_hugetlb_page(vma))) {
> +             zap_flags_t zap_flags = details ? details->zap_flags : 0;
> +
> +             /*
> +              * vm_file will be NULL when we fail early while instantiating
> +              * a new mapping. In this case, no pages were mapped yet and
> +              * there is nothing to do.
> +              */
> +             if (!vma->vm_file)
> +                     return;
> +             __unmap_hugepage_range(tlb, vma, start, end, NULL, zap_flags);
> +     } else {
> +             unmap_page_range(tlb, vma, start, end, details);
>       }
>  }
>
> @@ -2174,8 +2158,9 @@ void unmap_vmas(struct mmu_gather *tlb, struct unmap_desc *unmap)
>                               unmap->vma_start, unmap->vma_end);
>       mmu_notifier_invalidate_range_start(&range);
>       do {
> -             unsigned long start = unmap->vma_start;
> -             unsigned long end = unmap->vma_end;
> +             unsigned long start = max(vma->vm_start, unmap->vma_start);
> +             unsigned long end = min(vma->vm_end, unmap->vma_end);
> +
>               hugetlb_zap_begin(vma, &start, &end);
>               __zap_vma_range(tlb, vma, start, end, &details);
>               hugetlb_zap_end(vma, &details);
> @@ -2204,6 +2189,9 @@ void zap_page_range_single_batched(struct mmu_gather *tlb,
>
>       VM_WARN_ON_ONCE(!tlb || tlb->mm != vma->vm_mm);
>
> +     if (unlikely(!size))
> +             return;
> +
>       mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
>                               address, end);
>       hugetlb_zap_begin(vma, &range.start, &range.end);
> --
> 2.43.0
>
