Rather than lumping everything together in do_mremap(), add a new helper
function, check_prep_vma(), to perform the per-VMA checks and preparation
work.

This further lays the groundwork for subsequent patches that will allow
batched VMA mremap().

Additionally, set vrm->new_addr == vrm->addr when prepping the VMA; this
avoids having to do so in the mlocked case in expand_vma().

No functional change intended.
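
To illustrate the shape of the refactoring (a minimal, self-contained
userspace sketch, not kernel code; names such as check_prep_obj() and
struct remap_state are hypothetical stand-ins), the per-object checks
and preparation move into a single errno-returning helper, reducing the
caller to a lookup, one check, and dispatch:

  #include <errno.h>

  /* Hypothetical stand-in for vma_remap_struct; illustration only. */
  struct remap_state {
          void *obj;              /* result of the caller's lookup */
          int sealed;             /* analogue of an mseal()'d VMA */
          int moves;              /* does this remap imply a move? */
          unsigned long addr;
          unsigned long new_addr;
  };

  /* Consolidated per-object checks, in the style of check_prep_vma(). */
  static int check_prep_obj(struct remap_state *st)
  {
          if (!st->obj)
                  return -EFAULT; /* nothing at the requested address */
          if (st->sealed)
                  return -EPERM;  /* modification is prohibited */

          /* For convenience, set new_addr even when nothing moves. */
          if (!st->moves)
                  st->new_addr = st->addr;

          return 0;
  }

  static long do_remap(struct remap_state *st)
  {
          long res = check_prep_obj(st);

          if (res)
                  return res;     /* one error path, no goto chains */

          /* ... dispatch to the move/expand implementation ... */
          return st->new_addr;
  }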

Signed-off-by: Lorenzo Stoakes <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
---
 mm/mremap.c | 58 ++++++++++++++++++++++++++---------------------------
 1 file changed, 28 insertions(+), 30 deletions(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index a00da0288c37..d57645573e0d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1636,7 +1636,6 @@ static bool align_hugetlb(struct vma_remap_struct *vrm)
 static unsigned long expand_vma(struct vma_remap_struct *vrm)
 {
        unsigned long err;
-       unsigned long addr = vrm->addr;
 
        err = remap_is_valid(vrm);
        if (err)
@@ -1651,16 +1650,8 @@ static unsigned long expand_vma(struct vma_remap_struct *vrm)
                if (err)
                        return err;
 
-               /*
-                * We want to populate the newly expanded portion of the VMA to
-                * satisfy the expectation that mlock()'ing a VMA maintains all
-                * of its pages in memory.
-                */
-               if (vrm->mlocked)
-                       vrm->new_addr = addr;
-
                /* OK we're done! */
-               return addr;
+               return vrm->addr;
        }
 
        /*
@@ -1716,10 +1707,33 @@ static unsigned long mremap_at(struct vma_remap_struct *vrm)
        return -EINVAL;
 }
 
+static int check_prep_vma(struct vma_remap_struct *vrm)
+{
+       struct vm_area_struct *vma = vrm->vma;
+
+       if (!vma)
+               return -EFAULT;
+
+       /* If mseal()'d, mremap() is prohibited. */
+       if (!can_modify_vma(vma))
+               return -EPERM;
+
+       /* Align to hugetlb page size, if required. */
+       if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm))
+               return -EINVAL;
+
+       vrm_set_delta(vrm);
+       vrm->remap_type = vrm_remap_type(vrm);
+       /* For convenience, we set new_addr even if VMA won't move. */
+       if (!vrm_implies_new_addr(vrm))
+               vrm->new_addr = vrm->addr;
+
+       return 0;
+}
+
 static unsigned long do_mremap(struct vma_remap_struct *vrm)
 {
        struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
        unsigned long res;
 
        vrm->old_len = PAGE_ALIGN(vrm->old_len);
@@ -1733,26 +1747,10 @@ static unsigned long do_mremap(struct vma_remap_struct *vrm)
                return -EINTR;
        vrm->mmap_locked = true;
 
-       vma = vrm->vma = vma_lookup(mm, vrm->addr);
-       if (!vma) {
-               res = -EFAULT;
-               goto out;
-       }
-
-       /* If mseal()'d, mremap() is prohibited. */
-       if (!can_modify_vma(vma)) {
-               res = -EPERM;
-               goto out;
-       }
-
-       /* Align to hugetlb page size, if required. */
-       if (is_vm_hugetlb_page(vma) && !align_hugetlb(vrm)) {
-               res = -EINVAL;
+       vrm->vma = vma_lookup(current->mm, vrm->addr);
+       res = check_prep_vma(vrm);
+       if (res)
                goto out;
-       }
-
-       vrm_set_delta(vrm);
-       vrm->remap_type = vrm_remap_type(vrm);
 
        /* Actually execute mremap. */
        res = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
-- 
2.50.0

