4.9-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Tom Lendacky <thomas.lenda...@amd.com>

commit bc226f07dcd3c9ef0b7f6236fe356ea4a9cb4769 upstream

Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
speculative store bypass disable (SSBD) under SVM.  This will allow guests
to use SSBD on hardware that uses non-architectural mechanisms for enabling
SSBD.
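
[ Editor's note, for illustration only and not part of this patch: a guest
  that sees the VIRT_SSBD CPUID bit is expected to toggle SSBD through the
  virtualized MSR, roughly as in the sketch below.  The MSR number and bit
  position are the ones from msr-index.h; the helper itself is hypothetical
  and assumes kernel context (wrmsrl). ]

/*
 * Guest-side sketch: request SSBD through the paravirtualized MSR.  The
 * hypervisor intercepts the access via the MSR_AMD64_VIRT_SPEC_CTRL
 * handling added to svm.c below.
 */
#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
#define SPEC_CTRL_SSBD			(1ULL << 2)

static void guest_set_ssbd(bool enable)
{
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, enable ? SPEC_CTRL_SSBD : 0);
}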

[ tglx: Folded the migration fixup from Paolo Bonzini ]

Signed-off-by: Tom Lendacky <thomas.lenda...@amd.com>
Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Signed-off-by: David Woodhouse <d...@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gre...@linuxfoundation.org>
---
 arch/x86/include/asm/kvm_host.h |    2 +-
 arch/x86/kernel/cpu/common.c    |    3 ++-
 arch/x86/kvm/cpuid.c            |   11 +++++++++--
 arch/x86/kvm/cpuid.h            |    9 +++++++++
 arch/x86/kvm/svm.c              |   21 +++++++++++++++++++--
 arch/x86/kvm/vmx.c              |   18 +++++++++++++++---
 arch/x86/kvm/x86.c              |   13 ++++---------
 7 files changed, 59 insertions(+), 18 deletions(-)

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -864,7 +864,7 @@ struct kvm_x86_ops {
        int (*hardware_setup)(void);               /* __init */
        void (*hardware_unsetup)(void);            /* __exit */
        bool (*cpu_has_accelerated_tpr)(void);
-       bool (*cpu_has_high_real_mode_segbase)(void);
+       bool (*has_emulated_msr)(int index);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
        int (*vm_init)(struct kvm *kvm);
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -735,7 +735,8 @@ static void init_speculation_control(str
        if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
                set_cpu_cap(c, X86_FEATURE_STIBP);
 
-       if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+       if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+           cpu_has(c, X86_FEATURE_VIRT_SSBD))
                set_cpu_cap(c, X86_FEATURE_SSBD);
 
        if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -357,7 +357,7 @@ static inline int __do_cpuid_ent(struct
 
        /* cpuid 0x80000008.ebx */
        const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-               F(AMD_IBPB) | F(AMD_IBRS);
+               F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
 
        /* cpuid 0xC0000001.edx */
        const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -618,13 +618,20 @@ static inline int __do_cpuid_ent(struct
                        g_phys_as = phys_as;
                entry->eax = g_phys_as | (virt_as << 8);
                entry->edx = 0;
-               /* IBRS and IBPB aren't necessarily present in hardware cpuid */
+               /*
+                * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+                * hardware cpuid
+                */
                if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
                        entry->ebx |= F(AMD_IBPB);
                if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
                        entry->ebx |= F(AMD_IBRS);
+               if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+                       entry->ebx |= F(VIRT_SSBD);
                entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
                cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+               if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+                       entry->ebx |= F(VIRT_SSBD);
                break;
        }
        case 0x80000019:
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -190,6 +190,15 @@ static inline bool guest_cpuid_has_arch_
        return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
 }
 
+static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+       return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
+}
+
+
 
 /*
  * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3557,6 +3557,13 @@ static int svm_get_msr(struct kvm_vcpu *
 
                msr_info->data = svm->spec_ctrl;
                break;
+       case MSR_AMD64_VIRT_SPEC_CTRL:
+               if (!msr_info->host_initiated &&
+                   !guest_cpuid_has_virt_ssbd(vcpu))
+                       return 1;
+
+               msr_info->data = svm->virt_spec_ctrl;
+               break;
        case MSR_IA32_UCODE_REV:
                msr_info->data = 0x01000065;
                break;
@@ -3691,6 +3698,16 @@ static int svm_set_msr(struct kvm_vcpu *
                        break;
                set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
                break;
+       case MSR_AMD64_VIRT_SPEC_CTRL:
+               if (!msr->host_initiated &&
+                   !guest_cpuid_has_virt_ssbd(vcpu))
+                       return 1;
+
+               if (data & ~SPEC_CTRL_SSBD)
+                       return 1;
+
+               svm->virt_spec_ctrl = data;
+               break;
        case MSR_STAR:
                svm->vmcb->save.star = data;
                break;
@@ -5150,7 +5167,7 @@ static bool svm_cpu_has_accelerated_tpr(
        return false;
 }
 
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
 {
        return true;
 }
@@ -5467,7 +5484,7 @@ static struct kvm_x86_ops svm_x86_ops __
        .hardware_enable = svm_hardware_enable,
        .hardware_disable = svm_hardware_disable,
        .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-       .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+       .has_emulated_msr = svm_has_emulated_msr,
 
        .vcpu_create = svm_create_vcpu,
        .vcpu_free = svm_free_vcpu,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8691,9 +8691,21 @@ static void vmx_handle_external_intr(str
        }
 }
 
-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
 {
-       return enable_unrestricted_guest || emulate_invalid_guest_state;
+       switch (index) {
+       case MSR_IA32_SMBASE:
+               /*
+                * We cannot do SMM unless we can run the guest in big
+                * real mode.
+                */
+               return enable_unrestricted_guest || emulate_invalid_guest_state;
+       case MSR_AMD64_VIRT_SPEC_CTRL:
+               /* This is AMD only.  */
+               return false;
+       default:
+               return true;
+       }
 }
 
 static bool vmx_mpx_supported(void)
@@ -11346,7 +11358,7 @@ static struct kvm_x86_ops vmx_x86_ops __
        .hardware_enable = hardware_enable,
        .hardware_disable = hardware_disable,
        .cpu_has_accelerated_tpr = report_flexpriority,
-       .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+       .has_emulated_msr = vmx_has_emulated_msr,
 
        .vcpu_create = vmx_create_vcpu,
        .vcpu_free = vmx_free_vcpu,
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1002,6 +1002,7 @@ static u32 emulated_msrs[] = {
        MSR_IA32_MCG_CTL,
        MSR_IA32_MCG_EXT_CTL,
        MSR_IA32_SMBASE,
+       MSR_AMD64_VIRT_SPEC_CTRL,
 };
 
 static unsigned num_emulated_msrs;
@@ -2664,7 +2665,7 @@ int kvm_vm_ioctl_check_extension(struct
                 * fringe case that is not enabled except via specific settings
                 * of the module parameters.
                 */
-               r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+               r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
@@ -4226,14 +4227,8 @@ static void kvm_init_msr_list(void)
        num_msrs_to_save = j;
 
        for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-               switch (emulated_msrs[i]) {
-               case MSR_IA32_SMBASE:
-                       if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
-                               continue;
-                       break;
-               default:
-                       break;
-               }
+               if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+                       continue;
 
                if (j < i)
                        emulated_msrs[j] = emulated_msrs[i];

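[ Editor's note, not part of the patch, for context on the folded migration
  fixup: with MSR_AMD64_VIRT_SPEC_CTRL added to emulated_msrs and
  host-initiated accesses bypassing the guest CPUID check, userspace can save
  and restore the register across migration with the regular
  KVM_GET_MSRS/KVM_SET_MSRS ioctls.  A minimal read-side sketch follows; the
  helper name is made up and error handling is trimmed. ]

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f

/* Fetch the virtual spec-ctrl value from a vCPU fd, as a migration tool would. */
static uint64_t read_virt_spec_ctrl(int vcpu_fd)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} msrs = {
		.header.nmsrs = 1,
		.entry.index = MSR_AMD64_VIRT_SPEC_CTRL,
	};

	/* KVM_GET_MSRS returns the number of MSRs actually read. */
	if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 1)
		return 0;	/* MSR not reported; real code would check errno */

	return msrs.entry.data;
}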
