This splits kvmppc_core_vcpu_create_hv() into three functions and
adds a new kvmppc_free_vcores() to free the kvmppc_vcore structures
that we allocate for a guest, which are currently being leaked.
The reason for the split is to make the split-out code available
for later use when converting PR kvm_vcpu structs for HV use.

Signed-off-by: Paul Mackerras <pau...@samba.org>
---
 arch/powerpc/kvm/book3s_hv.c | 95 +++++++++++++++++++++++++++-----------------
 1 file changed, 59 insertions(+), 36 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 13f79dd..c524d6b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -891,32 +891,51 @@ int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
        return r;
 }
 
-struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, unsigned int id)
+static int kvmppc_alloc_vcore(struct kvm_vcpu *vcpu, unsigned int id)
 {
-       struct kvm_vcpu *vcpu;
-       int err = -EINVAL;
-       int core;
+       struct kvm *kvm = vcpu->kvm;
        struct kvmppc_vcore *vcore;
+       int core;
 
        core = id / threads_per_core;
        if (core >= KVM_MAX_VCORES)
-               goto out;
+               return -EINVAL;
 
-       err = -ENOMEM;
-       vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-       if (!vcpu)
-               goto out;
+       vcore = kvm->arch.vcores[core];
+       if (!vcore) {
+               vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
+               if (!vcore)
+                       return -ENOMEM;
+               INIT_LIST_HEAD(&vcore->runnable_threads);
+               spin_lock_init(&vcore->lock);
+               init_waitqueue_head(&vcore->wq);
+               vcore->preempt_tb = TB_NIL;
+               kvm->arch.vcores[core] = vcore;
+               kvm->arch.online_vcores++;
+       }
 
-       err = kvm_vcpu_init(vcpu, kvm, id);
-       if (err)
-               goto free_vcpu;
+       spin_lock(&vcore->lock);
+       ++vcore->num_threads;
+       spin_unlock(&vcore->lock);
+       vcpu->arch.vcore = vcore;
+
+       return 0;
+}
 
+static void kvmppc_free_vcores(struct kvm *kvm)
+{
+       long int i;
+
+       for (i = 0; i < KVM_MAX_VCORES; ++i)
+               kfree(kvm->arch.vcores[i]);
+       kvm->arch.online_vcores = 0;
+}
+
+static void kvmppc_setup_hv_vcpu(struct kvm_vcpu *vcpu)
+{
        vcpu->arch.shared = &vcpu->arch.shregs;
        vcpu->arch.mmcr[0] = MMCR0_FC;
        vcpu->arch.ctrl = CTRL_RUNLATCH;
-       /* default to host PVR, since we can't spoof it */
-       vcpu->arch.pvr = mfspr(SPRN_PVR);
-       kvmppc_set_pvr_hv(vcpu, vcpu->arch.pvr);
        spin_lock_init(&vcpu->arch.vpa_update_lock);
        spin_lock_init(&vcpu->arch.tbacct_lock);
        vcpu->arch.busy_preempt = TB_NIL;
@@ -927,31 +946,34 @@ struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, unsigned int id)
 
        init_waitqueue_head(&vcpu->arch.cpu_run);
 
-       mutex_lock(&kvm->lock);
-       vcore = kvm->arch.vcores[core];
-       if (!vcore) {
-               vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
-               if (vcore) {
-                       INIT_LIST_HEAD(&vcore->runnable_threads);
-                       spin_lock_init(&vcore->lock);
-                       init_waitqueue_head(&vcore->wq);
-                       vcore->preempt_tb = TB_NIL;
-               }
-               kvm->arch.vcores[core] = vcore;
-               kvm->arch.online_vcores++;
-       }
-       mutex_unlock(&kvm->lock);
+       vcpu->arch.cpu_type = KVM_CPU_3S_64;
+       kvmppc_sanity_check(vcpu);
+}
 
-       if (!vcore)
+struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, unsigned int id)
+{
+       struct kvm_vcpu *vcpu;
+       int err = -EINVAL;
+
+       err = -ENOMEM;
+       vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+       if (!vcpu)
+               goto out;
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+       if (err)
                goto free_vcpu;
 
-       spin_lock(&vcore->lock);
-       ++vcore->num_threads;
-       spin_unlock(&vcore->lock);
-       vcpu->arch.vcore = vcore;
+       /* default to host PVR, since we can't spoof it */
+       vcpu->arch.pvr = mfspr(SPRN_PVR);
 
-       vcpu->arch.cpu_type = KVM_CPU_3S_64;
-       kvmppc_sanity_check(vcpu);
+       mutex_lock(&kvm->lock);
+       err = kvmppc_alloc_vcore(vcpu, id);
+       mutex_unlock(&kvm->lock);
+       if (err)
+               goto free_vcpu;
+
+       kvmppc_setup_hv_vcpu(vcpu);
 
        return vcpu;
 
@@ -1890,6 +1912,7 @@ void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
 {
        uninhibit_secondary_onlining();
 
+       kvmppc_free_vcores(kvm);
        if (kvm->arch.rma) {
                kvm_release_rma(kvm->arch.rma);
                kvm->arch.rma = NULL;
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to