For AMD SEV guests, update cr3_lm_rsvd_bits to mask the memory
encryption bit (the C-bit) out of the reserved bits, so that a guest
setting the C-bit in CR3 does not fail the reserved-bit check. The
C-bit position is reported in CPUID Fn8000_001F[EBX] bits 5:0.

Signed-off-by: Babu Moger <[email protected]>
---
 arch/x86/kvm/svm/svm.c |   11 +++++++++++
 1 file changed, 11 insertions(+)
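
As background for the hunk below: the C-bit position comes from CPUID
Fn8000_001F[EBX] bits 5:0. A minimal user-space sketch (illustrative
only, not part of this patch) that reads the same field and computes
the mask the patch clears from cr3_lm_rsvd_bits:

	/* Hypothetical demo, not kernel code: query the SEV C-bit
	 * position via CPUID Fn8000_001F and build the corresponding
	 * mask. */
	#include <stdio.h>
	#include <stdint.h>
	#include <cpuid.h>	/* GCC/Clang builtin header */

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Leaf 0x8000001F, subleaf 0: AMD memory encryption info */
		if (!__get_cpuid_count(0x8000001F, 0, &eax, &ebx, &ecx, &edx))
			return 1;	/* leaf not supported */

		unsigned int cbit = ebx & 0x3f;	/* EBX[5:0] = C-bit position */
		uint64_t mask = 1ULL << cbit;

		printf("C-bit %u, mask 0x%016llx\n",
		       cbit, (unsigned long long)mask);
		return 0;
	}

The patch ANDs the complement of this mask into cr3_lm_rsvd_bits, so a
CR3 value with the C-bit set is no longer treated as having reserved
bits set.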

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 2f32fd09e259..b418eeabcccc 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3741,6 +3741,7 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+       struct kvm_cpuid_entry2 *best;
 
        vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
                                    boot_cpu_has(X86_FEATURE_XSAVE) &&
@@ -3753,6 +3754,16 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        /* Check again if INVPCID interception if required */
        svm_check_invpcid(svm);
 
+       /*
+        * For SEV guests, update the cr3_lm_rsvd_bits to mask the memory
+        * encryption bit from reserved bits
+        */
+       if (sev_guest(vcpu->kvm)) {
+               best = kvm_find_cpuid_entry(vcpu, 0x8000001F, 0);
+               if (best)
+                       vcpu->arch.cr3_lm_rsvd_bits &= ~(1UL << (best->ebx & 0x3f));
+       }
+
        if (!kvm_vcpu_apicv_active(vcpu))
                return;
 
