Given that in kvm_create_vm() there is:
kvm->mm = current->mm;

And that on every kvm_*_ioctl we have:
if (kvm->mm != current->mm)
        return -EIO;

I see no reason to keep using current->mm instead of kvm->mm.

By doing so, we would reduce the use of 'global' variables in the code, relying
more on the contents of the kvm struct.

Signed-off-by: Leonardo Bras <leona...@linux.ibm.com>
---
 arch/powerpc/kvm/book3s_64_mmu_hv.c |  4 ++--
 arch/powerpc/kvm/book3s_64_vio.c    | 10 ++++++----
 arch/powerpc/kvm/book3s_hv.c        | 10 +++++-----
 3 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index d381526c5c9b..6c372f5c61b6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -284,7 +284,7 @@ static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
        /* Protect linux PTE lookup from page table destruction */
        rcu_read_lock_sched();  /* this disables preemption too */
        ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
-                               current->mm->pgd, false, pte_idx_ret);
+                               kvm->mm->pgd, false, pte_idx_ret);
        rcu_read_unlock_sched();
        if (ret == H_TOO_HARD) {
                /* this can't happen */
@@ -573,7 +573,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
        is_ci = false;
        pfn = 0;
        page = NULL;
-       mm = current->mm;
+       mm = kvm->mm;
        pte_size = PAGE_SIZE;
        writing = (dsisr & DSISR_ISSTORE) != 0;
        /* If writing != 0, then the HPTE must allow writing, if we get here */
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 883a66e76638..ee6c103bb7d5 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -253,10 +253,11 @@ static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
                }
        }
 
+       account_locked_vm(kvm->mm,
+               kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
+
        kvm_put_kvm(stt->kvm);
 
-       account_locked_vm(current->mm,
-               kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
        call_rcu(&stt->rcu, release_spapr_tce_table);
 
        return 0;
@@ -272,6 +273,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 {
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
+       struct mm_struct *mm = kvm->mm;
        unsigned long npages, size = args->size;
        int ret = -ENOMEM;
 
@@ -280,7 +282,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                return -EINVAL;
 
        npages = kvmppc_tce_pages(size);
-       ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
+       ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;
 
@@ -326,7 +328,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 
        kfree(stt);
  fail_acct:
-       account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);
+       account_locked_vm(mm, kvmppc_stt_pages(npages), false);
        return ret;
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ec5c0379296a..d3baa23396e6 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4263,7 +4263,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
        user_vrsave = mfspr(SPRN_VRSAVE);
 
        vcpu->arch.wqp = &vcpu->arch.vcore->wq;
-       vcpu->arch.pgdir = current->mm->pgd;
+       vcpu->arch.pgdir = kvm->mm->pgd;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
        do {
@@ -4595,14 +4595,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 
        /* Look up the VMA for the start of this memory slot */
        hva = memslot->userspace_addr;
-       down_read(&current->mm->mmap_sem);
-       vma = find_vma(current->mm, hva);
+       down_read(&kvm->mm->mmap_sem);
+       vma = find_vma(kvm->mm, hva);
        if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
                goto up_out;
 
        psize = vma_kernel_pagesize(vma);
 
-       up_read(&current->mm->mmap_sem);
+       up_read(&kvm->mm->mmap_sem);
 
        /* We can handle 4k, 64k or 16M pages in the VRMA */
        if (psize >= 0x1000000)
@@ -4635,7 +4635,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
        return err;
 
  up_out:
-       up_read(&current->mm->mmap_sem);
+       up_read(&kvm->mm->mmap_sem);
        goto out_srcu;
 }
 
-- 
2.23.0

Reply via email to