On Tue, 19 May 2020 22:29:01 -0700 Michel Lespinasse <[email protected]> wrote:

> Convert the last few remaining mmap_sem rwsem calls to use the new
> mmap locking API. These were missed by coccinelle for some reason
> (I think coccinelle does not support some of the preprocessor
> constructs in these files?)
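
For anyone following along, the conversion below is mechanical: the new
wrappers from Michel's series are thin inlines over the old rwsem calls.
A minimal sketch of the mapping, assuming the wrappers at this point in
the series still operate on the mmap_sem field (simplified from what
include/linux/mmap_lock.h provides, not the authoritative definitions):

	#include <linux/mm_types.h>	/* struct mm_struct */
	#include <linux/rwsem.h>	/* down_read()/up_read() */

	/* sketch only: the old spelling, wrapped by the new API */
	static inline void mmap_read_lock(struct mm_struct *mm)
	{
		down_read(&mm->mmap_sem);
	}

	static inline void mmap_read_unlock(struct mm_struct *mm)
	{
		up_read(&mm->mmap_sem);
	}

So each down_read(&mm->mmap_sem)/up_read(&mm->mmap_sem) pair becomes
mmap_read_lock(mm)/mmap_read_unlock(mm), with no change in locking
behavior.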


From: Andrew Morton <[email protected]>
Subject: mmap-locking-api-convert-mmap_sem-call-sites-missed-by-coccinelle-fix

convert linux-next leftovers

Cc: Michel Lespinasse <[email protected]>
Cc: Daniel Jordan <[email protected]>
Cc: Laurent Dufour <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Jason Gunthorpe <[email protected]>
Cc: Jerome Glisse <[email protected]>
Cc: John Hubbard <[email protected]>
Cc: Liam Howlett <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ying Han <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---

 arch/arm64/kvm/mmu.c |   14 +++++++-------
 lib/test_hmm.c       |   14 +++++++-------
 2 files changed, 14 insertions(+), 14 deletions(-)

--- a/lib/test_hmm.c~mmap-locking-api-convert-mmap_sem-call-sites-missed-by-coccinelle-fix
+++ a/lib/test_hmm.c
@@ -243,9 +243,9 @@ static int dmirror_range_fault(struct dm
                }
 
                range->notifier_seq = mmu_interval_read_begin(range->notifier);
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                ret = hmm_range_fault(range);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                if (ret) {
                        if (ret == -EBUSY)
                                continue;
@@ -684,7 +684,7 @@ static int dmirror_migrate(struct dmirro
        if (!mmget_not_zero(mm))
                return -EINVAL;
 
-       down_read(&mm->mmap_sem);
+       mmap_read_lock(mm);
        for (addr = start; addr < end; addr = next) {
                vma = find_vma(mm, addr);
                if (!vma || addr < vma->vm_start ||
@@ -711,7 +711,7 @@ static int dmirror_migrate(struct dmirro
                dmirror_migrate_finalize_and_map(&args, dmirror);
                migrate_vma_finalize(&args);
        }
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
 
        /* Return the migrated data for verification. */
@@ -731,7 +731,7 @@ static int dmirror_migrate(struct dmirro
        return ret;
 
 out:
-       up_read(&mm->mmap_sem);
+       mmap_read_unlock(mm);
        mmput(mm);
        return ret;
 }
@@ -823,9 +823,9 @@ static int dmirror_range_snapshot(struct
 
                range->notifier_seq = mmu_interval_read_begin(range->notifier);
 
-               down_read(&mm->mmap_sem);
+               mmap_read_lock(mm);
                ret = hmm_range_fault(range);
-               up_read(&mm->mmap_sem);
+               mmap_read_unlock(mm);
                if (ret) {
                        if (ret == -EBUSY)
                                continue;
--- a/arch/arm64/kvm/mmu.c~mmap-locking-api-convert-mmap_sem-call-sites-missed-by-coccinelle-fix
+++ a/arch/arm64/kvm/mmu.c
@@ -1084,7 +1084,7 @@ void stage2_unmap_vm(struct kvm *kvm)
        int idx;
 
        idx = srcu_read_lock(&kvm->srcu);
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        spin_lock(&kvm->mmu_lock);
 
        slots = kvm_memslots(kvm);
@@ -1092,7 +1092,7 @@ void stage2_unmap_vm(struct kvm *kvm)
                stage2_unmap_memslot(kvm, memslot);
 
        spin_unlock(&kvm->mmu_lock);
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -1848,11 +1848,11 @@ static int user_mem_abort(struct kvm_vcp
        }
 
        /* Let's check if we will get back a huge page backed by hugetlbfs */
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        vma = find_vma_intersection(current->mm, hva, hva + 1);
        if (unlikely(!vma)) {
                kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
-               up_read(&current->mm->mmap_sem);
+               mmap_read_unlock(current->mm);
                return -EFAULT;
        }
 
@@ -1879,7 +1879,7 @@ static int user_mem_abort(struct kvm_vcp
        if (vma_pagesize == PMD_SIZE ||
            (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
                gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
 
        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
@@ -2456,7 +2456,7 @@ int kvm_arch_prepare_memory_region(struc
            (kvm_phys_size(kvm) >> PAGE_SHIFT))
                return -EFAULT;
 
-       down_read(&current->mm->mmap_sem);
+       mmap_read_lock(current->mm);
        /*
         * A memory region could potentially cover multiple VMAs, and any holes
         * between them, so iterate over all of them to find out if we can map
@@ -2515,7 +2515,7 @@ int kvm_arch_prepare_memory_region(struc
                stage2_flush_memslot(kvm, memslot);
        spin_unlock(&kvm->mmu_lock);
 out:
-       up_read(&current->mm->mmap_sem);
+       mmap_read_unlock(current->mm);
        return ret;
 }
 
_
