This patch adds checks for the CRx read and write intercepts, and
for the selective-cr0 intercept, to instructions emulated in
software by KVM.

Signed-off-by: Joerg Roedel <joerg.roe...@amd.com>
---
 arch/x86/kvm/svm.c |   93 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 92 insertions(+), 1 deletions(-)
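
For context, a rough sketch of how the emulator side might consult this
hook (a hypothetical call site for illustration only; the
insn_intercepted callback name and its invocation point are assumptions,
not part of this patch):

	/* sketch: in the emulator, after instruction decode */
	rc = kvm_x86_ops->insn_intercepted(vcpu, ctxt);
	if (rc == X86EMUL_INTERCEPTED)
		return rc;	/* L1 intercepts this; nested #VMEXIT pending */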

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d1721c2..29f0491 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3622,7 +3622,98 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
 static int svm_insn_intercepted(struct kvm_vcpu *vcpu,
                                struct x86_emulate_ctxt *ctxt)
 {
-       return X86EMUL_CONTINUE;
+       struct decode_cache *c = &ctxt->decode;
+       struct vcpu_svm *svm = to_svm(vcpu);
+       struct vmcb *vmcb = svm->vmcb;
+       int vmexit, ret;
+
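+       /* Intercept checks only matter while running a nested guest */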
+       if (!is_nested(svm))
+               return X86EMUL_CONTINUE;
+
+       ret = X86EMUL_CONTINUE;
+
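+       /* All CRx accesses checked here use two-byte (0x0f-prefixed) opcodes */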
+       if (!c->twobyte)
+               goto out;
+
+       switch (c->b) {
+       case 0x01:
+               /* 0x0f 0x01 with modrm_mod == 3 encodes special instructions */
+               if (c->modrm_mod == 3)
+                       goto out;
+
+               switch (c->modrm_reg) {
+               case 0x04: /* SMSW */
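+                       /* SMSW reads the machine status word, CR0[15:0] */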
+                       vmcb->control.exit_code = SVM_EXIT_READ_CR0;
+                       break;
+               case 0x06:  { /* LMSW */
+                       u64 cr0, val;
+
+                       vmcb->control.exit_code = SVM_EXIT_WRITE_CR0;
+
+                       if (svm->nested.intercept_cr_write & INTERCEPT_CR0_MASK)
+                               break;
+
+                       /*
+                        * Check for the selective-cr0 special case: changes
+                        * to the bits in SVM_CR0_SELECTIVE_MASK alone do not
+                        * trigger the intercept. LMSW writes only the low
+                        * four bits of CR0, hence the additional 0xf mask.
+                        */
+                       cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK & 0xf;
+                       val = c->src.val     & ~SVM_CR0_SELECTIVE_MASK & 0xf;
+
+                       if (cr0 ^ val)
+                               vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+
+                       break;
+                       }
+               default:
+                       goto out;
+               }
+               break;
+       case 0x06: /* CLTS */
+               vmcb->control.exit_code = SVM_EXIT_WRITE_CR0;
+               break;
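+       /* For mov to/from CRx the CR number is encoded in modrm_reg */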
+       case 0x20: /* CR read  */
+               vmcb->control.exit_code = SVM_EXIT_READ_CR0 + c->modrm_reg;
+               break;
+       case 0x22: /* CR write */
+               vmcb->control.exit_code = SVM_EXIT_WRITE_CR0 + c->modrm_reg;
+               if (c->modrm_reg == 0 &&
+                   !(svm->nested.intercept_cr_write & INTERCEPT_CR0_MASK)) {
+                       /* check for selective-cr0 special case */
+                       u64 cr0, val;
+
+                       cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
+                       val = c->src.val     & ~SVM_CR0_SELECTIVE_MASK;
+
+                       if (cr0 ^ val)
+                               vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+               }
+               break;
+       default:
+               /* No CRx intercepts apply to this instruction */
+               goto out;
+       }
+
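+       /*
+        * Let the nested-svm exit handler decide whether the L1
+        * hypervisor intercepts the constructed exit code; if so,
+        * report the instruction as intercepted to the emulator.
+        */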
+       vmcb->control.next_rip = ctxt->eip;
+       vmexit = nested_svm_exit_handled(svm);
+
+       ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
+                                          : X86EMUL_CONTINUE;
+
+out:
+
+       return ret;
 }
 
 static struct kvm_x86_ops svm_x86_ops = {
-- 
1.7.1

