From: Tom Lendacky <thomas.lenda...@amd.com>

For SEV-ES guests, intercepting control register write access is not
recommended. Control register interception occurs before the control
register is modified, and the hypervisor is unable to modify the
control register itself because the register is located in the
encrypted register state.

SEV-ES support introduces new control register write traps. These traps
intercept a control register write after the control register has been
modified. The new control register value is provided in the VMCB
EXITINFO1 field, allowing the hypervisor to track the setting of the
guest control registers.
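
As a rough illustration, here is a standalone userspace sketch of how a
handler decodes such a trap (not kernel code; the vmcb_control struct and
decode_cr_write_trap helper are hypothetical stand-ins, while the exit
codes match the ones added by this patch). The CR number is simply the
offset of the exit code from SVM_EXIT_CR0_WRITE_TRAP, and EXITINFO1
already carries the value the guest wrote:

#include <stdint.h>
#include <stdio.h>

/* Exit codes as defined below in arch/x86/include/uapi/asm/svm.h. */
#define SVM_EXIT_CR0_WRITE_TRAP         0x090
#define SVM_EXIT_CR15_WRITE_TRAP        0x09f

/* Hypothetical stand-in for the relevant VMCB control-area fields. */
struct vmcb_control {
        uint64_t exit_code;
        uint64_t exit_info_1;   /* new CR value, already written by the guest */
};

/*
 * Decode a CRn write trap: returns the CR number, or -1 if the exit
 * code is not a CR write trap. *new_value receives the value the
 * guest wrote.
 */
static int decode_cr_write_trap(const struct vmcb_control *ctl,
                                uint64_t *new_value)
{
        if (ctl->exit_code < SVM_EXIT_CR0_WRITE_TRAP ||
            ctl->exit_code > SVM_EXIT_CR15_WRITE_TRAP)
                return -1;

        *new_value = ctl->exit_info_1;
        return (int)(ctl->exit_code - SVM_EXIT_CR0_WRITE_TRAP);
}

int main(void)
{
        /* Example: the guest set CR0.PG | CR0.PE. */
        struct vmcb_control ctl = {
                .exit_code   = SVM_EXIT_CR0_WRITE_TRAP,
                .exit_info_1 = 0x80000001,
        };
        uint64_t val;
        int cr = decode_cr_write_trap(&ctl, &val);

        printf("CR%d write trap, new value 0x%llx\n",
               cr, (unsigned long long)val);
        return 0;
}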

Add support to track the value of the guest CR0 register using the control
register write trap so that the hypervisor can determine the guest operating
mode.
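
To make the payoff concrete, a minimal sketch of deriving the operating
mode from a tracked CR0 value (illustrative only: it uses the
architectural CR0 bit positions rather than KVM's helpers, and a full
long-mode determination would also need EFER.LMA, elided here):

#include <stdio.h>

/* Architectural x86 CR0 bits. */
#define X86_CR0_PE      (1UL << 0)      /* Protection Enable */
#define X86_CR0_PG      (1UL << 31)     /* Paging */

/*
 * A trapped CR0 write tells the hypervisor which mode the guest is
 * entering, even though it can no longer rewrite the register itself.
 */
static const char *guest_mode(unsigned long cr0)
{
        if (!(cr0 & X86_CR0_PE))
                return "real mode";
        if (!(cr0 & X86_CR0_PG))
                return "protected mode, paging disabled";
        return "paged protected mode (long mode if EFER.LMA is also set)";
}

int main(void)
{
        printf("%s\n", guest_mode(0x80000001UL));       /* PE | PG */
        return 0;
}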

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/include/uapi/asm/svm.h | 17 ++++++++++++++
 arch/x86/kvm/svm/svm.c          | 24 +++++++++++++++++++
 arch/x86/kvm/x86.c              | 41 +++++++++++++++++++--------------
 4 files changed, 66 insertions(+), 17 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4fe718e339c9..068853bcbc74 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1473,6 +1473,7 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
                    int reason, bool has_error_code, u32 error_code);
 
+int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0);
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 6e3f92e17655..14b0d97b50e2 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -78,6 +78,22 @@
 #define SVM_EXIT_XSETBV        0x08d
 #define SVM_EXIT_RDPRU         0x08e
 #define SVM_EXIT_EFER_WRITE_TRAP               0x08f
+#define SVM_EXIT_CR0_WRITE_TRAP                        0x090
+#define SVM_EXIT_CR1_WRITE_TRAP                        0x091
+#define SVM_EXIT_CR2_WRITE_TRAP                        0x092
+#define SVM_EXIT_CR3_WRITE_TRAP                        0x093
+#define SVM_EXIT_CR4_WRITE_TRAP                        0x094
+#define SVM_EXIT_CR5_WRITE_TRAP                        0x095
+#define SVM_EXIT_CR6_WRITE_TRAP                        0x096
+#define SVM_EXIT_CR7_WRITE_TRAP                        0x097
+#define SVM_EXIT_CR8_WRITE_TRAP                        0x098
+#define SVM_EXIT_CR9_WRITE_TRAP                        0x099
+#define SVM_EXIT_CR10_WRITE_TRAP               0x09a
+#define SVM_EXIT_CR11_WRITE_TRAP               0x09b
+#define SVM_EXIT_CR12_WRITE_TRAP               0x09c
+#define SVM_EXIT_CR13_WRITE_TRAP               0x09d
+#define SVM_EXIT_CR14_WRITE_TRAP               0x09e
+#define SVM_EXIT_CR15_WRITE_TRAP               0x09f
 #define SVM_EXIT_INVPCID       0x0a2
 #define SVM_EXIT_NPF           0x400
 #define SVM_EXIT_AVIC_INCOMPLETE_IPI           0x401
@@ -186,6 +202,7 @@
        { SVM_EXIT_MWAIT,       "mwait" }, \
        { SVM_EXIT_XSETBV,      "xsetbv" }, \
        { SVM_EXIT_EFER_WRITE_TRAP,     "write_efer_trap" }, \
+       { SVM_EXIT_CR0_WRITE_TRAP,      "write_cr0_trap" }, \
        { SVM_EXIT_INVPCID,     "invpcid" }, \
        { SVM_EXIT_NPF,         "npf" }, \
        { SVM_EXIT_AVIC_INCOMPLETE_IPI,         "avic_incomplete_ipi" }, \
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index e16c1b49b34f..7a5adc2326fe 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2466,6 +2466,29 @@ static int cr_interception(struct vcpu_svm *svm)
        return kvm_complete_insn_gp(&svm->vcpu, err);
 }
 
+static int cr_trap(struct vcpu_svm *svm)
+{
+       unsigned long old_value, new_value;
+       unsigned int cr;
+       int ret;
+
+       new_value = (unsigned long)svm->vmcb->control.exit_info_1;
+
+       cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
+       switch (cr) {
+       case 0:
+               old_value = kvm_read_cr0(&svm->vcpu);
+
+               ret = __kvm_set_cr0(&svm->vcpu, old_value, new_value);
+               break;
+       default:
+               WARN(1, "unhandled CR%d write trap", cr);
+               ret = 1;
+       }
+
+       return kvm_complete_insn_gp(&svm->vcpu, ret);
+}
+
 static int dr_interception(struct vcpu_svm *svm)
 {
        int reg, dr;
@@ -3047,6 +3070,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_XSETBV]                       = xsetbv_interception,
        [SVM_EXIT_RDPRU]                        = rdpru_interception,
        [SVM_EXIT_EFER_WRITE_TRAP]              = efer_trap,
+       [SVM_EXIT_CR0_WRITE_TRAP]               = cr_trap,
        [SVM_EXIT_INVPCID]                      = invpcid_interception,
        [SVM_EXIT_NPF]                          = npf_interception,
        [SVM_EXIT_RSM]                          = rsm_interception,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5f1835cca28d..bc9beb1c4c8c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -804,11 +804,33 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(pdptrs_changed);
 
+int __kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
+{
+       unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
+
+       kvm_x86_ops.set_cr0(vcpu, cr0);
+
+       if ((cr0 ^ old_cr0) & X86_CR0_PG) {
+               kvm_clear_async_pf_completion_queue(vcpu);
+               kvm_async_pf_hash_reset(vcpu);
+       }
+
+       if ((cr0 ^ old_cr0) & update_bits)
+               kvm_mmu_reset_context(vcpu);
+
+       if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
+           kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
+           !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+               kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(__kvm_set_cr0);
+
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        unsigned long old_cr0 = kvm_read_cr0(vcpu);
        unsigned long pdptr_bits = X86_CR0_CD | X86_CR0_NW | X86_CR0_PG;
-       unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
 
        cr0 |= X86_CR0_ET;
 
@@ -845,22 +867,7 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
                return 1;
 
-       kvm_x86_ops.set_cr0(vcpu, cr0);
-
-       if ((cr0 ^ old_cr0) & X86_CR0_PG) {
-               kvm_clear_async_pf_completion_queue(vcpu);
-               kvm_async_pf_hash_reset(vcpu);
-       }
-
-       if ((cr0 ^ old_cr0) & update_bits)
-               kvm_mmu_reset_context(vcpu);
-
-       if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
-           kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
-           !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
-               kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
-
-       return 0;
+       return __kvm_set_cr0(vcpu, old_cr0, cr0);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
-- 
2.28.0
