Write-locking VMAs before isolating them ensures that page fault
handlers don't operate on isolated VMAs.
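
For context, here is a minimal userspace sketch of the handshake this
patch relies on. It models the series' vma_start_write()/vma_start_read()
primitives with pthreads; the struct and function names mirror the
kernel's, but the implementation below is illustrative only and glosses
over the atomics (READ_ONCE/WRITE_ONCE) and the sequence-overflow
handling the real code needs:

	/*
	 * Illustrative userspace model of the per-VMA lock handshake.
	 * Not the kernel implementation.
	 */
	#include <assert.h>
	#include <pthread.h>
	#include <stdbool.h>

	struct mm {
		pthread_rwlock_t mmap_lock;  /* models mmap_lock */
		unsigned long mm_lock_seq;   /* bumped at mmap_write_unlock() */
	};

	struct vma {
		struct mm *mm;
		pthread_rwlock_t lock;       /* models vma->vm_lock */
		unsigned long vm_lock_seq;   /* == mm_lock_seq: write-locked */
	};

	/* Caller holds mm->mmap_lock for writing, as munmap does. */
	static void vma_start_write(struct vma *vma)
	{
		if (vma->vm_lock_seq == vma->mm->mm_lock_seq)
			return; /* already write-locked in this cycle */
		pthread_rwlock_wrlock(&vma->lock);
		/* Mark write-locked until the next mmap_write_unlock(). */
		vma->vm_lock_seq = vma->mm->mm_lock_seq;
		pthread_rwlock_unlock(&vma->lock);
	}

	/* Page fault path: try to use the VMA without mmap_lock. */
	static bool vma_start_read(struct vma *vma)
	{
		if (vma->vm_lock_seq == vma->mm->mm_lock_seq)
			return false; /* write-locked, fall back */
		if (pthread_rwlock_tryrdlock(&vma->lock) != 0)
			return false;
		/* Recheck: a writer may have won the race for vma->lock. */
		if (vma->vm_lock_seq == vma->mm->mm_lock_seq) {
			pthread_rwlock_unlock(&vma->lock);
			return false;
		}
		return true; /* read lock held; caller releases it */
	}

	int main(void)
	{
		struct mm mm = { .mm_lock_seq = 0 };
		/* vm_lock_seq != mm_lock_seq means "not write-locked" */
		struct vma v = { .mm = &mm, .vm_lock_seq = (unsigned long)-1 };

		pthread_rwlock_init(&mm.mmap_lock, NULL);
		pthread_rwlock_init(&v.lock, NULL);

		pthread_rwlock_wrlock(&mm.mmap_lock); /* mmap_write_lock() */
		vma_start_write(&v);                  /* isolate the VMA */
		assert(!vma_start_read(&v));          /* faults fall back */
		pthread_rwlock_unlock(&mm.mmap_lock); /* mmap_write_unlock() */
		mm.mm_lock_seq++;                     /* vma_end_write_all() */

		assert(vma_start_read(&v));           /* faults proceed again */
		return 0;
	}

With munmap_sidetree() calling vma_start_write() before the VMA is
stored in the detached tree, a concurrent fault either fails
vma_start_read() and retries under mmap_lock, or already holds the
per-VMA read lock and completes before the write lock is granted.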

Signed-off-by: Suren Baghdasaryan <sur...@google.com>
---
 mm/mmap.c  | 1 +
 mm/nommu.c | 5 +++++
 2 files changed, 6 insertions(+)

diff --git a/mm/mmap.c b/mm/mmap.c
index b3c247073aa0..5bdfd087b632 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2261,6 +2261,7 @@ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 static inline int munmap_sidetree(struct vm_area_struct *vma,
                                   struct ma_state *mas_detach)
 {
+       vma_start_write(vma);
        mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
        if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
                return -ENOMEM;
diff --git a/mm/nommu.c b/mm/nommu.c
index 57ba243c6a37..2ab162d773e2 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -588,6 +588,7 @@ static int delete_vma_from_mm(struct vm_area_struct *vma)
                       current->pid);
                return -ENOMEM;
        }
+       vma_start_write(vma);
        cleanup_vma_from_mm(vma);
 
        /* remove from the MM's tree and list */
@@ -1519,6 +1520,10 @@ void exit_mmap(struct mm_struct *mm)
         */
        mmap_write_lock(mm);
        for_each_vma(vmi, vma) {
+               /*
+                * No need to lock the VMA because this is the only mm user
+                * and no page fault handler can race with it.
+                */
                cleanup_vma_from_mm(vma);
                delete_vma(mm, vma);
                cond_resched();
-- 
2.39.1
