mremap() modifies the VMA layout and must therefore be protected against the speculative page fault handler.
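For context, the write-side markers added below pair with the usual
seqcount read/retry pattern on the speculative fault path. The sketch
that follows is not part of this patch: the helpers spf_vma_snapshot()
and spf_vma_changed() are made-up names for illustration, and only the
vm_sequence field introduced by this series is assumed.

	#include <linux/mm_types.h>
	#include <linux/seqlock.h>

	/* Illustrative read side: detect a concurrent move_vma(). */
	static bool spf_vma_snapshot(struct vm_area_struct *vma,
				     unsigned int *seq)
	{
		/* An odd count means a writer such as move_vma() is in flight. */
		*seq = raw_read_seqcount(&vma->vm_sequence);
		return !(*seq & 1);
	}

	static bool spf_vma_changed(struct vm_area_struct *vma,
				    unsigned int seq)
	{
		/* Non-zero if the VMA layout changed since the snapshot. */
		return read_seqcount_retry(&vma->vm_sequence, seq);
	}

Since both VMAs' vm_sequence counts belong to the same lockdep class,
the write side takes the second one with write_seqcount_begin_nested()
and SINGLE_DEPTH_NESTING so that lockdep does not report the nested
acquisition as a deadlock.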
Signed-off-by: Laurent Dufour <[email protected]>
---
 mm/mremap.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/mm/mremap.c b/mm/mremap.c
index cd8a1b199ef9..9c7f69c9e80f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -300,6 +300,10 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	if (!new_vma)
 		return -ENOMEM;
 
+	write_seqcount_begin(&vma->vm_sequence);
+	write_seqcount_begin_nested(&new_vma->vm_sequence,
+				    SINGLE_DEPTH_NESTING);
+
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
 				     need_rmap_locks);
 	if (moved_len < old_len) {
@@ -316,6 +320,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		 */
 		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
 				 true);
+		write_seqcount_end(&vma->vm_sequence);
 		vma = new_vma;
 		old_len = new_len;
 		old_addr = new_addr;
@@ -324,7 +329,9 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		mremap_userfaultfd_prep(new_vma, uf);
 		arch_remap(mm, old_addr, old_addr + old_len,
 			   new_addr, new_addr + new_len);
+		write_seqcount_end(&vma->vm_sequence);
 	}
+	write_seqcount_end(&new_vma->vm_sequence);
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT) {
-- 
2.7.4

