On 09/25/2018 05:43 PM, Tim Chen wrote:
> From: Thomas Lendacky <[email protected]>
> 
> Extend the app-to-app Spectre v2 mitigation using STIBP to AMD
> CPUs. Updates of the SPEC_CTRL MSR need special handling on AMD
> CPUs to avoid writing the MSR twice when both SSBD and STIBP are
> updated.

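The gist, as a standalone sketch (placeholder helper names, not the
kernel's actual functions): resolve SSBD through an AMD-specific
method when one exists, so that at most one SPEC_CTRL write remains,
and that single write carries both the SSBD and STIBP bits.

#include <stdbool.h>
#include <stdio.h>

/* placeholder for the LS_CFG / VIRT_SSBD paths */
static void set_ssbd_via_amd_method(unsigned long tifn)
{
        printf("SSBD via AMD method, tifn=%#lx\n", tifn);
}

/* placeholder for the SPEC_CTRL MSR write */
static void write_spec_ctrl(unsigned long tifn)
{
        printf("one SPEC_CTRL write, tifn=%#lx\n", tifn);
}

static void update_mitigations(unsigned long tifn, bool ssbd_changed,
                               bool stibp_changed, bool amd_ssbd_method)
{
        bool need_msr = stibp_changed;

        if (ssbd_changed) {
                if (amd_ssbd_method)
                        set_ssbd_via_amd_method(tifn);
                else
                        need_msr = true;
        }

        if (need_msr)
                write_spec_ctrl(tifn);  /* at most one MSR write */
}

int main(void)
{
        /* both flags changed on an AMD CPU with an SSBD method:
         * SSBD handled separately, one MSR write covers STIBP */
        update_mitigations(0x18, true, true, true);
        return 0;
}
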
Tom, if this patch looks okay to you, can I add your sign off?

Tim

> 
> Originally-by: Thomas Lendacky <[email protected]>
> Signed-off-by: Tim Chen <[email protected]>
> ---
>  arch/x86/kernel/process.c | 48 +++++++++++++++++++++++++++++++++++++----------
>  1 file changed, 38 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
> index cb24014..4a3a672 100644
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -399,6 +399,10 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
>  {
>       u64 msr = x86_spec_ctrl_base;
>  
> +     /*
> +      * An AMD CPU may have used a different method to update SSBD, so
> +      * make sure the SSBD state is carried in the SPEC_CTRL MSR value.
> +      */
>       if (static_cpu_has(X86_FEATURE_SSBD))
>               msr |= ssbd_tif_to_spec_ctrl(tifn);
>  
> @@ -408,20 +412,45 @@ static __always_inline void set_spec_ctrl_state(unsigned long tifn)
>       wrmsrl(MSR_IA32_SPEC_CTRL, msr);
>  }
>  
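
Side note: ssbd_tif_to_spec_ctrl() translates the task's TIF_SSBD
flag into the corresponding SPEC_CTRL bit (the real helper lives in
asm/spec-ctrl.h). A minimal illustrative version of that kind of
flag-to-bit mapping, with made-up mask values rather than the
kernel's layout:

#include <assert.h>

static inline unsigned long long tif_to_msr_bit(unsigned long tifn,
                                                unsigned long tif_mask,
                                                unsigned long long msr_bit)
{
        return (tifn & tif_mask) ? msr_bit : 0;
}

int main(void)
{
        /* illustrative masks only */
        assert(tif_to_msr_bit(0x8, 0x8, 0x4) == 0x4);  /* flag set   */
        assert(tif_to_msr_bit(0x0, 0x8, 0x4) == 0);    /* flag clear */
        return 0;
}
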
> -static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
> +static __always_inline void __speculative_store_bypass_update(unsigned long tifp,
> +                                                           unsigned long tifn)
>  {
> -     if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
> -             amd_set_ssb_virt_state(tifn);
> -     else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
> -             amd_set_core_ssb_state(tifn);
> -     else
> -             set_spec_ctrl_state(tifn);
> +     bool stibp = !!((tifp ^ tifn) & _TIF_STIBP);
> +     bool ssbd = !!((tifp ^ tifn) & _TIF_SSBD);
> +
> +     if (!ssbd && !stibp)
> +             return;
> +
> +     if (ssbd) {
> +             /*
> +              * For AMD, try these methods first.  The ssbd variable will
> +              * reflect whether the SPEC_CTRL MSR method is still needed.
> +              */
> +             ssbd = false;
> +
> +             if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
> +                     amd_set_ssb_virt_state(tifn);
> +             else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
> +                     amd_set_core_ssb_state(tifn);
> +             else
> +                     ssbd = true;
> +     }
> +
> +     /* Avoid a possible extra MSR write, recheck the flags */
> +     if (!ssbd && !stibp)
> +             return;
> +
> +     set_spec_ctrl_state(tifn);
>  }
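
The tifp ^ tifn tests above pick out exactly the flag bits that
differ between the previous and next task. A standalone demo of the
idiom, with illustrative bit positions rather than the kernel's TIF
layout:

#include <assert.h>

#define DEMO_TIF_SSBD  (1UL << 3)
#define DEMO_TIF_STIBP (1UL << 4)

int main(void)
{
        unsigned long tifp = DEMO_TIF_SSBD;                  /* prev: SSBD only */
        unsigned long tifn = DEMO_TIF_SSBD | DEMO_TIF_STIBP; /* next: both set  */

        assert(!((tifp ^ tifn) & DEMO_TIF_SSBD));  /* SSBD did not change */
        assert((tifp ^ tifn) & DEMO_TIF_STIBP);    /* STIBP changed       */
        return 0;
}
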
>  
>  void speculative_store_bypass_update(unsigned long tif)
>  {
> +     /*
> +      * On this path we're forcing the update, so use ~tif as the
> +      * previous flags.
> +      */
>       preempt_disable();
> -     __speculative_store_bypass_update(tif);
> +     __speculative_store_bypass_update(~tif, tif);
>       preempt_enable();
>  }
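
Using ~tif as the "previous" flags works because tif ^ ~tif is all
ones, so every bit tested in __speculative_store_bypass_update()
appears changed and the update is never skipped. A quick standalone
check:

#include <assert.h>

int main(void)
{
        unsigned long tif = 0x9;            /* arbitrary flag word     */

        assert((tif ^ ~tif) == ~0UL);       /* every bit "changed"     */
        assert((tif ^ ~tif) & (1UL << 3));  /* so any mask test passes */
        return 0;
}
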
>  
> @@ -457,8 +486,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
>       if ((tifp ^ tifn) & _TIF_NOCPUID)
>               set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
>  
> -     if ((tifp ^ tifn) & (_TIF_SSBD | _TIF_STIBP))
> -             __speculative_store_bypass_update(tifn);
> +     __speculative_store_bypass_update(tifp, tifn);
>  }
>  
>  /*
> 
