Map the vmx vcpu structures into the KVM address space when a vcpu is
created, and unmap them when the vcpu is freed.

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
 arch/x86/kvm/vmx/vmx.c |   65 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 65 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5b52e8c..cbbaf58 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6564,10 +6564,69 @@ static void vmx_vm_free(struct kvm *kvm)
        vfree(to_kvm_vmx(kvm));
 }
 
+static void vmx_unmap_vcpu(struct vcpu_vmx *vmx)
+{
+       pr_debug("unmapping vmx %p", vmx);
+
+       kvm_clear_range_mapping(vmx);
+       if (enable_pml)
+               kvm_clear_range_mapping(vmx->pml_pg);
+       kvm_clear_range_mapping(vmx->guest_msrs);
+       kvm_clear_range_mapping(vmx->vmcs01.vmcs);
+       kvm_clear_range_mapping(vmx->vmcs01.msr_bitmap);
+       kvm_clear_range_mapping(vmx->vcpu.arch.pio_data);
+       kvm_clear_range_mapping(vmx->vcpu.arch.apic);
+}
+
+static int vmx_map_vcpu(struct vcpu_vmx *vmx)
+{
+       int rv;
+
+       pr_debug("mapping vmx %p", vmx);
+
+       rv = kvm_copy_ptes(vmx, sizeof(struct vcpu_vmx));
+       if (rv)
+               goto out_unmap_vcpu;
+
+       if (enable_pml) {
+               rv = kvm_copy_ptes(vmx->pml_pg, PAGE_SIZE);
+               if (rv)
+                       goto out_unmap_vcpu;
+       }
+
+       rv = kvm_copy_ptes(vmx->guest_msrs, PAGE_SIZE);
+       if (rv)
+               goto out_unmap_vcpu;
+
+       rv = kvm_copy_ptes(vmx->vmcs01.vmcs, PAGE_SIZE << vmcs_config.order);
+       if (rv)
+               goto out_unmap_vcpu;
+
+       rv = kvm_copy_ptes(vmx->vmcs01.msr_bitmap, PAGE_SIZE);
+       if (rv)
+               goto out_unmap_vcpu;
+
+       rv = kvm_copy_ptes(vmx->vcpu.arch.pio_data, PAGE_SIZE);
+       if (rv)
+               goto out_unmap_vcpu;
+
+       rv = kvm_copy_ptes(vmx->vcpu.arch.apic, sizeof(struct kvm_lapic));
+       if (rv)
+               goto out_unmap_vcpu;
+
+       return 0;
+
+out_unmap_vcpu:
+       vmx_unmap_vcpu(vmx);
+       return rv;
+}
+
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+       if (kvm_isolation())
+               vmx_unmap_vcpu(vmx);
        if (enable_pml)
                vmx_destroy_pml_buffer(vmx);
        free_vpid(vmx->vpid);
@@ -6679,6 +6738,12 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 
        vmx->ept_pointer = INVALID_PAGE;
 
+       if (kvm_isolation()) {
+               err = vmx_map_vcpu(vmx);
+               if (err)
+                       goto free_vmcs;
+       }
+
        return &vmx->vcpu;
 
 free_vmcs:
-- 
1.7.1

Reply via email to