From: Davidlohr Bueso <d...@stgolabs.net>

The binder_alloc_free_page() shrinker callback can call
zap_page_range(), which requires mmap_sem to be held. Convert the
open-coded mmap_sem calls to the mm locking wrappers; no change in
semantics.

Signed-off-by: Davidlohr Bueso <dbu...@suse.de>
---
 drivers/android/binder_alloc.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 5a426c877dfb..191724983638 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -194,6 +194,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = NULL;
        bool need_mm = false;
+       DEFINE_RANGE_LOCK_FULL(mmrange);
 
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: %s pages %pK-%pK\n", alloc->pid,
@@ -219,7 +220,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                mm = alloc->vma_vm_mm;
 
        if (mm) {
-               down_write(&mm->mmap_sem);
+               mm_write_lock(mm, &mmrange);
                vma = alloc->vma;
        }
 
@@ -288,7 +289,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
-               up_write(&mm->mmap_sem);
+               mm_write_unlock(mm, &mmrange);
                mmput(mm);
        }
        return 0;
@@ -321,7 +322,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
        }
 err_no_vma:
        if (mm) {
-               up_write(&mm->mmap_sem);
+               mm_write_unlock(mm, &mmrange);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
@@ -914,6 +915,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
        uintptr_t page_addr;
        size_t index;
        struct vm_area_struct *vma;
+       DEFINE_RANGE_LOCK_FULL(mmrange);
 
        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
@@ -929,7 +931,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
                mm = alloc->vma_vm_mm;
-               if (!down_write_trylock(&mm->mmap_sem))
+               if (!mm_write_trylock(mm, &mmrange))
                        goto err_down_write_mmap_sem_failed;
        }
 
@@ -945,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
                trace_binder_unmap_user_end(alloc, index);
 
-               up_write(&mm->mmap_sem);
+               mm_write_unlock(mm, &mmrange);
                mmput(mm);
        }
 
-- 
2.13.6

Reply via email to