On 08/01/18 21:18, Jim Mattson wrote:
Guest usage of MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD should be
predicated on guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL).

On Mon, Jan 8, 2018 at 10:08 AM, Paolo Bonzini <pbonz...@redhat.com> wrote:
Direct access to MSR_IA32_SPEC_CTRL and MSR_IA32_PRED_CMD is important
for performance.  Allow load/store of MSR_IA32_SPEC_CTRL, restore guest
IBRS on VM entry and set it to 0 on VM exit (because Linux does not use
it yet).

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
  arch/x86/kvm/vmx.c | 32 ++++++++++++++++++++++++++++++++
  1 file changed, 32 insertions(+)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 669f5f74857d..d00bcad7336e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -120,6 +120,8 @@
  module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
  #endif

+static bool __read_mostly have_spec_ctrl;
+
  #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
  #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
  #define KVM_VM_CR0_ALWAYS_ON                                           \
@@ -609,6 +611,8 @@ struct vcpu_vmx {
         u64                   msr_host_kernel_gs_base;
         u64                   msr_guest_kernel_gs_base;
  #endif
+       u64                   spec_ctrl;
+
         u32 vm_entry_controls_shadow;
         u32 vm_exit_controls_shadow;
         u32 secondary_exec_control;
@@ -3361,6 +3365,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct 
msr_data *msr_info)
         case MSR_IA32_TSC:
                 msr_info->data = guest_read_tsc(vcpu);
                 break;
+       case MSR_IA32_SPEC_CTRL:
+               msr_info->data = to_vmx(vcpu)->spec_ctrl;
+               break;

I have:

if (!have_spec_ctrl ||
     (!msr_info->host_initiated &&
      !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
return 1;
msr_info->data = to_vmx(vcpu)->msr_ia32_spec_ctrl;
break;

         case MSR_IA32_SYSENTER_CS:
                 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
                 break;
@@ -3500,6 +3507,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct 
msr_data *msr_info)
         case MSR_IA32_TSC:
                 kvm_write_tsc(vcpu, msr_info);
                 break;
+       case MSR_IA32_SPEC_CTRL:
+               to_vmx(vcpu)->spec_ctrl = msr_info->data;
+               break;

I have:

if (!have_spec_ctrl ||
     (!msr_info->host_initiated &&
      !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL)))
return 1;
to_vmx(vcpu)->msr_ia32_spec_ctrl = data;
break;

         case MSR_IA32_CR_PAT:
                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
                         if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -7062,6 +7072,17 @@ static __init int hardware_setup(void)
                 goto out;
         }

+       /*
+        * FIXME: this is only needed until SPEC_CTRL is supported
+        * by upstream Linux in cpufeatures, then it can be replaced
+        * with static_cpu_has.
+        */
+       have_spec_ctrl = cpu_has_spec_ctrl();
+       if (have_spec_ctrl)
+               pr_info("kvm: SPEC_CTRL available\n");
+       else
+               pr_info("kvm: SPEC_CTRL not available\n");
+
         if (boot_cpu_has(X86_FEATURE_NX))
                 kvm_enable_efer_bits(EFER_NX);

@@ -7131,6 +7152,8 @@ static __init int hardware_setup(void)
         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+       vmx_disable_intercept_for_msr(MSR_IA32_SPEC_CTRL, false);
+       vmx_disable_intercept_for_msr(MSR_IA32_PRED_CMD, false);

I have a lot of changes to MSR permission bitmap handling, but these
intercepts should only be disabled when guest_cpuid_has(vcpu,
X86_FEATURE_SPEC_CTRL).

         memcpy(vmx_msr_bitmap_legacy_x2apic_apicv,
                         vmx_msr_bitmap_legacy, PAGE_SIZE);
@@ -9597,6 +9620,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)

         pt_guest_enter(vmx);

+       if (have_spec_ctrl && vmx->spec_ctrl != 0)
+               wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+

Here, I have:

/*
* If the guest is allowed to write to MSR_IA32_SPEC_CTRL,
* store it on VM-exit.
*/
if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
add_autostore_msr(vmx, MSR_IA32_SPEC_CTRL);
else
clear_autostore_msr(vmx, MSR_IA32_SPEC_CTRL);

/*
* If the guest's IA32_SPEC_CTRL MSR doesn't match the host's
* IA32_SPEC_CTRL MSR, then add the MSR to the atomic switch
* MSRs, so that the guest value will be loaded on VM-entry
* and the host value will be loaded on VM-exit.
*/
if (vmx->msr_ia32_spec_ctrl != spec_ctrl_enabled())
add_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL,
       vmx->msr_ia32_spec_ctrl,
       spec_ctrl_enabled());
else
clear_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL);


I totally agree with this.

This exactly solves the issue I mentioned before: the guest value of MSR_IA32_SPEC_CTRL must be restored with WRMSR before calling atomic_switch_perf_msrs(), which performs an indirect branch.

         atomic_switch_perf_msrs(vmx);

         vmx_arm_hv_timer(vcpu);
@@ -9707,6 +9733,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
  #endif
               );

+       if (have_spec_ctrl) {
+               rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+               if (vmx->spec_ctrl)
+                       wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+       }
+

I know the VM-exit MSR load and store lists are probably slower, but
I'm a little uncomfortable restoring the host's IA32_SPEC_CTRL MSR
late if the guest has it clear and the host has it set.

Again, I totally agree. This is a better approach for handling this.


         /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
         if (vmx->host_debugctlmsr)
                 update_debugctlmsr(vmx->host_debugctlmsr);
--
1.8.3.1


Reply via email to