This patch implements the VMPTRLD instruction: it reads the guest-physical
VMCS pointer from the instruction's memory operand, verifies that it is
page-aligned, maps the referenced guest page and checks its revision id, and
then makes this vmcs12 the vcpu's current nested VMCS (releasing the mapping
and page reference held on any previously current one).

Signed-off-by: Nadav Har'El <n...@il.ibm.com>
---
 arch/x86/kvm/vmx.c |   64 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 63 insertions(+), 1 deletion(-)

--- .before/arch/x86/kvm/vmx.c  2011-01-26 18:06:04.000000000 +0200
+++ .after/arch/x86/kvm/vmx.c   2011-01-26 18:06:04.000000000 +0200
@@ -4643,6 +4643,68 @@ static int handle_vmclear(struct kvm_vcp
        return 1;
 }
 
+/*
+ * Emulate the VMPTRLD instruction: read the guest-physical address of a
+ * vmcs12 (the VMCS an L1 hypervisor maintains for its L2 guest) from the
+ * instruction's memory operand, validate it, and make it the current VMCS.
+ */
+static int handle_vmptrld(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       gva_t gva;
+       gpa_t vmcs12_addr;
+
+       /* Fail (e.g. inject #UD) unless the guest may execute VMX instructions. */
+       if (!nested_vmx_check_permission(vcpu))
+               return 1;
+
+       /* Decode the guest-virtual address of the 64-bit VMCS-pointer operand. */
+       if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+                       vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+               return 1;
+
+       if (kvm_read_guest_virt(gva, &vmcs12_addr, sizeof(vmcs12_addr),
+                               vcpu, NULL)) {
+               /*
+                * NOTE(review): a bare #PF is queued here with no error code
+                * or faulting address -- confirm this matches how the other
+                * VMX instruction handlers report guest-virtual read faults.
+                */
+               kvm_queue_exception(vcpu, PF_VECTOR);
+               return 1;
+       }
+
+       /* The architecture requires the VMCS pointer to be page-aligned. */
+       if (!IS_ALIGNED(vmcs12_addr, PAGE_SIZE)) {
+               nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+               skip_emulated_instruction(vcpu);
+               return 1;
+       }
+
+       /* Re-loading the already-current pointer is a no-op (just succeed). */
+       if (vmx->nested.current_vmptr != vmcs12_addr) {
+               struct vmcs12 *new_vmcs12;
+               struct page *page;
+               page = nested_get_page(vcpu, vmcs12_addr);
+               if (page == NULL) {
+                       /* Pointer does not map to a valid guest page. */
+                       nested_vmx_failInvalid(vcpu);
+                       skip_emulated_instruction(vcpu);
+                       return 1;
+               }
+               new_vmcs12 = kmap(page);
+               if (new_vmcs12->revision_id != VMCS12_REVISION) {
+                       /* Wrong revision: undo the map and page reference. */
+                       kunmap(page);
+                       nested_release_page_clean(page);
+                       nested_vmx_failValid(vcpu,
+                               VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
+                       skip_emulated_instruction(vcpu);
+                       return 1;
+               }
+               /* Drop the mapping and reference held on the previous vmcs12. */
+               if (vmx->nested.current_vmptr != -1ull) {
+                       kunmap(vmx->nested.current_vmcs12_page);
+                       nested_release_page(vmx->nested.current_vmcs12_page);
+               }
+
+               /*
+                * The new page stays kmap()ed for as long as it is current;
+                * it is unmapped by the branch above when a different pointer
+                * is later loaded.
+                */
+               vmx->nested.current_vmptr = vmcs12_addr;
+               vmx->nested.current_vmcs12 = new_vmcs12;
+               vmx->nested.current_vmcs12_page = page;
+
+               /*
+                * NOTE(review): every other path in this handler returns 1;
+                * confirm the exit-handler caller copes with a -ENOMEM return
+                * (and that the vmcs12 state set above is safe to leave in
+                * place on this failure path).
+                */
+               if (nested_create_current_vmcs(vcpu))
+                       return -ENOMEM;
+       }
+
+       nested_vmx_succeed(vcpu);
+       skip_emulated_instruction(vcpu);
+       return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -4666,7 +4728,7 @@ static int (*kvm_vmx_exit_handlers[])(st
        [EXIT_REASON_VMCALL]                  = handle_vmcall,
        [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
        [EXIT_REASON_VMLAUNCH]                = handle_vmx_insn,
-       [EXIT_REASON_VMPTRLD]                 = handle_vmx_insn,
+       [EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
        [EXIT_REASON_VMPTRST]                 = handle_vmx_insn,
        [EXIT_REASON_VMREAD]                  = handle_vmx_insn,
        [EXIT_REASON_VMRESUME]                = handle_vmx_insn,
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to