From: Paolo Bonzini <pbonz...@redhat.com>

Direct access to MSR_IA32_SPEC_CTRL is important for performance.
Allow the guest to load/store MSR_IA32_SPEC_CTRL, restore the guest
IBRS value on VM entry and restore the host value on VM exit.
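
For reference, a minimal sketch of what the two entry/exit helpers used
in svm_vcpu_run() below are assumed to do; their real implementations
are introduced elsewhere in this series, and the SPEC_CTRL_IBRS constant
name is an assumption (IBRS is bit 0 of the MSR), so the bodies here are
illustrative only:

  /* Illustrative only: the real helpers live in the x86 spec_ctrl code. */
  static inline void restore_indirect_branch_speculation(u64 guest_spec_ctrl)
  {
  	/* VM entry: load the guest's saved SPEC_CTRL value into the MSR. */
  	wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
  }

  static inline u64 stop_indirect_branch_speculation_and_save(void)
  {
  	u64 guest_spec_ctrl;

  	/* VM exit: save whatever the guest left in SPEC_CTRL ... */
  	rdmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
  	/* ... then turn IBRS back on for the host. */
  	wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);
  	return guest_spec_ctrl;
  }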

TBD: need to check whether the MSR can be passed through even if the
feature is not enumerated by the CPU.
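
One possible shape for that check, assuming the existing
set_msr_interception() and guest_cpuid_has() helpers and an AMD IBRS
feature bit (the X86_FEATURE_AMD_IBRS name is an assumption); purely a
sketch, not part of this patch:

  /* Sketch: open the SPEC_CTRL intercept only once the guest enumerates IBRS. */
  static void svm_enable_spec_ctrl_passthrough(struct kvm_vcpu *vcpu)
  {
  	struct vcpu_svm *svm = to_svm(vcpu);

  	if (guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
  		set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
  }

The direct-access table entry would then use .always = false so the
intercept stays in place until that check passes.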

[peterz: rebased, folded feedback from Tom]

Cc: Asit Mallick <asit.k.mall...@intel.com>
Cc: Arjan Van De Ven <arjan.van.de....@intel.com>
Cc: Dave Hansen <dave.han...@intel.com>
Cc: Andi Kleen <a...@linux.intel.com>
Cc: Andrea Arcangeli <aarca...@redhat.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Tim Chen <tim.c.c...@linux.intel.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Dan Williams <dan.j.willi...@intel.com>
Cc: Jun Nakajima <jun.nakaj...@intel.com>
Cc: David Woodhouse <d...@amazon.co.uk>
Cc: Greg KH <gre...@linuxfoundation.org>
Cc: Andy Lutomirski <l...@kernel.org>
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
Signed-off-by: Ashok Raj <ashok....@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 arch/x86/kvm/svm.c |   13 +++++++++++++
 1 file changed, 13 insertions(+)

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -184,6 +184,8 @@ struct vcpu_svm {
                u64 gs_base;
        } host;
 
+       u64 spec_ctrl;
+
        u32 *msrpm;
 
        ulong nmi_iret_rip;
@@ -249,6 +251,7 @@ static const struct svm_direct_access_ms
        { .index = MSR_CSTAR,                           .always = true  },
        { .index = MSR_SYSCALL_MASK,                    .always = true  },
 #endif
+       { .index = MSR_IA32_SPEC_CTRL,                  .always = true  },
        { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
@@ -3577,6 +3580,9 @@ static int svm_get_msr(struct kvm_vcpu *
        case MSR_VM_CR:
                msr_info->data = svm->nested.vm_cr_msr;
                break;
+       case MSR_IA32_SPEC_CTRL:
+               msr_info->data = svm->spec_ctrl;
+               break;
        case MSR_IA32_UCODE_REV:
                msr_info->data = 0x01000065;
                break;
@@ -3725,6 +3731,9 @@ static int svm_set_msr(struct kvm_vcpu *
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
+       case MSR_IA32_SPEC_CTRL:
+               svm->spec_ctrl = data;
+               break;
        case MSR_IA32_APICBASE:
                if (kvm_vcpu_apicv_active(vcpu))
                        avic_update_vapic_bar(to_svm(vcpu), data);
@@ -4911,6 +4920,8 @@ static void svm_vcpu_run(struct kvm_vcpu
 
        clgi();
 
+       restore_indirect_branch_speculation(svm->spec_ctrl);
+
        local_irq_enable();
 
        asm volatile (
@@ -4989,6 +5000,8 @@ static void svm_vcpu_run(struct kvm_vcpu
        /* Eliminate branch target predictions from guest mode */
        vmexit_fill_RSB();
 
+       svm->spec_ctrl = stop_indirect_branch_speculation_and_save();
+
 #ifdef CONFIG_X86_64
        wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else

