Use per-vma locks to stabilize the vma after lookup, without taking
mmap_lock during PROCMAP_QUERY ioctl execution. Under contention we may
still take mmap_lock for reading, but only momentarily, to lock the
vma.

This reduces mmap_lock contention and prevents PROCMAP_QUERY ioctl
calls from blocking address space updates.

Signed-off-by: Suren Baghdasaryan <sur...@google.com>
---
 fs/proc/task_mmu.c | 56 ++++++++++++++++++++++++++++++++++++----------
 1 file changed, 44 insertions(+), 12 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 36d883c4f394..93ba35a84975 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -550,28 +550,60 @@ static int pid_maps_open(struct inode *inode, struct file *file)
                PROCMAP_QUERY_VMA_FLAGS                         \
 )
 
-static int query_vma_setup(struct mm_struct *mm)
+#ifdef CONFIG_PER_VMA_LOCK
+
+static int query_vma_setup(struct proc_maps_private *priv)
 {
-       return mmap_read_lock_killable(mm);
+       rcu_read_lock();
+       priv->locked_vma = NULL;
+       priv->mmap_locked = false;
+
+       return 0;
 }
 
-static void query_vma_teardown(struct mm_struct *mm, struct vm_area_struct *vma)
+static void query_vma_teardown(struct proc_maps_private *priv)
 {
-       mmap_read_unlock(mm);
+       unlock_vma(priv);
+       rcu_read_unlock();
+}
+
+static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_private *priv,
+                                                    unsigned long addr)
+{
+       vma_iter_init(&priv->iter, priv->mm, addr);
+       return get_next_vma(priv, addr);
+}
+
+#else /* CONFIG_PER_VMA_LOCK */
+
+static int query_vma_setup(struct proc_maps_private *priv)
+{
+       return mmap_read_lock_killable(priv->mm);
+}
+
+static void query_vma_teardown(struct proc_maps_private *priv)
+{
+       mmap_read_unlock(priv->mm);
 }
 
-static struct vm_area_struct *query_vma_find_by_addr(struct mm_struct *mm, unsigned long addr)
+static struct vm_area_struct *query_vma_find_by_addr(struct proc_maps_private *priv,
+                                                    unsigned long addr)
 {
-       return find_vma(mm, addr);
+       return find_vma(priv->mm, addr);
 }
 
-static struct vm_area_struct *query_matching_vma(struct mm_struct *mm,
+#endif  /* CONFIG_PER_VMA_LOCK */
+
+static struct vm_area_struct *query_matching_vma(struct proc_maps_private *priv,
                                                 unsigned long addr, u32 flags)
 {
        struct vm_area_struct *vma;
 
 next_vma:
-       vma = query_vma_find_by_addr(mm, addr);
+       vma = query_vma_find_by_addr(priv, addr);
+       if (IS_ERR(vma))
+               return vma;
+
        if (!vma)
                goto no_vma;
 
@@ -647,13 +679,13 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
        if (!mm || !mmget_not_zero(mm))
                return -ESRCH;
 
-       err = query_vma_setup(mm);
+       err = query_vma_setup(priv);
        if (err) {
                mmput(mm);
                return err;
        }
 
-       vma = query_matching_vma(mm, karg.query_addr, karg.query_flags);
+       vma = query_matching_vma(priv, karg.query_addr, karg.query_flags);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                vma = NULL;
@@ -738,7 +770,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
        }
 
        /* unlock vma or mmap_lock, and put mm_struct before copying data to user */
-       query_vma_teardown(mm, vma);
+       query_vma_teardown(priv);
        mmput(mm);
 
        if (karg.vma_name_size && copy_to_user(u64_to_user_ptr(karg.vma_name_addr),
@@ -758,7 +790,7 @@ static int do_procmap_query(struct proc_maps_private *priv, void __user *uarg)
        return 0;
 
 out:
-       query_vma_teardown(mm, vma);
+       query_vma_teardown(priv);
        mmput(mm);
        kfree(name_buf);
        return err;
-- 
2.49.0.1266.g31b7d2e469-goog

