From: Davidlohr Bueso <d...@stgolabs.net>

... and use mm locking wrappers -- no change in semantics.

Signed-off-by: Davidlohr Bueso <dbu...@suse.de>
---
 kernel/exit.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/exit.c b/kernel/exit.c
index 42ca71a44c9a..a9540f157eb2 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -495,6 +495,7 @@ static void exit_mm(void)
 {
        struct mm_struct *mm = current->mm;
        struct core_state *core_state;
+       DEFINE_RANGE_LOCK_FULL(mmrange);
 
        mm_release(current, mm);
        if (!mm)
@@ -507,12 +508,12 @@ static void exit_mm(void)
         * will increment ->nr_threads for each thread in the
         * group with ->mm != NULL.
         */
-       down_read(&mm->mmap_sem);
+       mm_read_lock(mm, &mmrange);
        core_state = mm->core_state;
        if (core_state) {
                struct core_thread self;
 
-               up_read(&mm->mmap_sem);
+               mm_read_unlock(mm, &mmrange);
 
                self.task = current;
                self.next = xchg(&core_state->dumper.next, &self);
@@ -530,14 +531,14 @@ static void exit_mm(void)
                        freezable_schedule();
                }
                __set_current_state(TASK_RUNNING);
-               down_read(&mm->mmap_sem);
+               mm_read_lock(mm, &mmrange);
        }
        mmgrab(mm);
        BUG_ON(mm != current->active_mm);
        /* more a memory barrier than a real lock */
        task_lock(current);
        current->mm = NULL;
-       up_read(&mm->mmap_sem);
+       mm_read_unlock(mm, &mmrange);
        enter_lazy_tlb(mm, current);
        task_unlock(current);
        mm_update_next_owner(mm);
-- 
2.13.6

Reply via email to