From: Shaohua Li <s...@fb.com>

Add an API to enable/disable write protection on a vma range. Unlike
mprotect, this doesn't split/merge vmas.
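
As a rough illustration of the intended use, a caller that already holds
the target mm could drive the helper along these lines (the wrapper
function, its name and the pr_debug message below are only a sketch for
this changelog, not part of the patch):

  /*
   * Illustrative sketch only: write-protect a page-aligned range and
   * let the caller retry when mappings changed underneath us.
   */
  static int wp_range_example(struct mm_struct *mm, unsigned long start,
			      unsigned long len, bool *mmap_changing)
  {
	/* enable_wp == true write-protects; false resolves the protection */
	int err = mwriteprotect_range(mm, start, len, true, mmap_changing);

	if (err == -EAGAIN)
		pr_debug("mwriteprotect_range: mappings changed, retry\n");
	return err;
  }

On -EAGAIN the non-cooperative user is expected to re-issue the request
once the concurrent mmap change has been handled.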

Cc: Andrea Arcangeli <aarca...@redhat.com>
Cc: Rik van Riel <r...@redhat.com>
Cc: Kirill A. Shutemov <kir...@shutemov.name>
Cc: Mel Gorman <mgor...@suse.de>
Cc: Hugh Dickins <hu...@google.com>
Cc: Johannes Weiner <han...@cmpxchg.org>
Signed-off-by: Shaohua Li <s...@fb.com>
Signed-off-by: Andrea Arcangeli <aarca...@redhat.com>
[peterx:
 - use the helper to find VMA;
 - return -ENOENT if not found to match mcopy case;
 - use the new MM_CP_UFFD_WP* flags for change_protection
 - check against mmap_changing for failures]
Reviewed-by: Jerome Glisse <jgli...@redhat.com>
Reviewed-by: Mike Rapoport <r...@linux.vnet.ibm.com>
Signed-off-by: Peter Xu <pet...@redhat.com>
---
 include/linux/userfaultfd_k.h |  3 ++
 mm/userfaultfd.c              | 54 +++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)

diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index dcd33172b728..a8e5f3ea9bb2 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -41,6 +41,9 @@ extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
                              unsigned long dst_start,
                              unsigned long len,
                              bool *mmap_changing);
+extern int mwriteprotect_range(struct mm_struct *dst_mm,
+                              unsigned long start, unsigned long len,
+                              bool enable_wp, bool *mmap_changing);
 
 /* mm helpers */
 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 6b9dd5b66f64..4208592c7ca3 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -638,3 +638,57 @@ ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
 {
        return __mcopy_atomic(dst_mm, start, 0, len, true, mmap_changing, 0);
 }
+
+int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start,
+                       unsigned long len, bool enable_wp, bool *mmap_changing)
+{
+       struct vm_area_struct *dst_vma;
+       pgprot_t newprot;
+       int err;
+
+       /*
+        * Sanitize the command parameters:
+        */
+       BUG_ON(start & ~PAGE_MASK);
+       BUG_ON(len & ~PAGE_MASK);
+
+       /* Does the address range wrap, or is the span zero-sized? */
+       BUG_ON(start + len <= start);
+
+       down_read(&dst_mm->mmap_sem);
+
+       /*
+        * If memory mappings are changing because of non-cooperative
+        * operation (e.g. mremap) running in parallel, bail out and
+        * request the user to retry later
+        */
+       err = -EAGAIN;
+       if (mmap_changing && READ_ONCE(*mmap_changing))
+               goto out_unlock;
+
+       err = -ENOENT;
+       dst_vma = vma_find_uffd(dst_mm, start, len);
+       /*
+        * Make sure the vma is not shared, that the dst range is
+        * both valid and fully within a single existing vma.
+        */
+       if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
+               goto out_unlock;
+       if (!userfaultfd_wp(dst_vma))
+               goto out_unlock;
+       if (!vma_is_anonymous(dst_vma))
+               goto out_unlock;
+
+       if (enable_wp)
+               newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
+       else
+               newprot = vm_get_page_prot(dst_vma->vm_flags);
+
+       change_protection(dst_vma, start, start + len, newprot,
+                         enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);
+
+       err = 0;
+out_unlock:
+       up_read(&dst_mm->mmap_sem);
+       return err;
+}
-- 
2.21.0
