Now that the set of host user MSRs that need to be individually
saved/restored is the same with and without SEV-ES, we can drop the
.sev_es_restored flag and just iterate through the list unconditionally
for both cases. A subsequent patch can then move these loops to a
common path.

Signed-off-by: Michael Roth <michael.r...@amd.com>
---
 arch/x86/kvm/svm/sev.c | 16 ++++------------
 arch/x86/kvm/svm/svm.c |  6 ++----
 arch/x86/kvm/svm/svm.h |  7 ++-----
 3 files changed, 8 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index a3e2b29f484d..87167ef8ca23 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2083,12 +2083,8 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
         * Certain MSRs are restored on VMEXIT, only save ones that aren't
         * restored.
         */
-       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
-               if (host_save_user_msrs[i].sev_es_restored)
-                       continue;
-
-               rdmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
-       }
+       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+               rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
        /* XCR0 is restored on VMEXIT, save the current host value */
        hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
@@ -2109,12 +2105,8 @@ void sev_es_vcpu_put(struct vcpu_svm *svm)
         * Certain MSRs are restored on VMEXIT and were saved with vmsave in
         * sev_es_vcpu_load() above. Only restore ones that weren't.
         */
-       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++) {
-               if (host_save_user_msrs[i].sev_es_restored)
-                       continue;
-
-               wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
-       }
+       for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
+               wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
 
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index bdc1921094dc..ae897aaa4471 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1423,8 +1423,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                sev_es_vcpu_load(svm, cpu);
        } else {
                for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-                       rdmsrl(host_save_user_msrs[i].index,
-                              svm->host_user_msrs[i]);
+                       rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
                vmsave(__sme_page_pa(sd->save_area));
        }
@@ -1459,8 +1458,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
                sev_es_vcpu_put(svm);
        } else {
                for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
-                       wrmsrl(host_save_user_msrs[i].index,
-                              svm->host_user_msrs[i]);
+                       wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
        }
 }
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 525f1bf57917..66d83dfefe18 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -23,11 +23,8 @@
 
 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
 
-static const struct svm_host_save_msrs {
-       u32 index;              /* Index of the MSR */
-       bool sev_es_restored;   /* True if MSR is restored on SEV-ES VMEXIT */
-} host_save_user_msrs[] = {
-       { .index = MSR_TSC_AUX,                 .sev_es_restored = false },
+static const u32 host_save_user_msrs[] = {
+       MSR_TSC_AUX,
 };
 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
 
-- 
2.25.1

Reply via email to