On 09/20/2018 04:38 PM, Lendacky, Thomas wrote:
> On 09/19/2018 04:35 PM, Tim Chen wrote:
>> This patchset provides an option to apply IBPB and STIBP mitigation
>> to only non-dumpable processes.
>>
>> Jiri's patch to harden spectre_v2 makes IBPB and STIBP available for
>> general spectre v2 app to app mitigation.  IBPB will be issued when
>> switching to an app that's not ptraceable by the previous
>> app, and STIBP will always be turned on.
>>
>> However, leaving STIBP on all the time is expensive for certain
>> applications that have frequent indirect branches. One such application
>> is perlbench in the SpecInt Rate 2006 test suite which shows a
>> 21% reduction in throughput.  Other applications like bzip2 in
>> the same test suite with minimal indirect branches have
>> only a 0.7% reduction in throughput. IBPB will also impose
>> overhead during context switches.
>>
>> App-to-app exploits are in general difficult
>> due to address space layout randomization in apps and
>> the need to know an app's address space layout ahead of time.
>> Users may not wish to incur the app-to-app performance
>> overhead of IBPB and STIBP on general, non-security-sensitive apps,
>> and may prefer to use these mitigations only for non-dumpable apps.
>>
>> The first patch provides a lite option for spectre_v2 app to app
>> mitigation where IBPB is only issued for security sensitive
>> non-dumpable apps.  The second patch extends this option
>> so that STIBP is also only issued for non-dumpable apps.
>>
>> The changes apply to Intel CPUs affected by spectre_v2. Tom,
>> can you update the STIBP changes for AMD CPUs in
>> __speculative_store_bypass_update and x86_virt_spec_ctrl
>> to update the SPEC_CTRL MSR for AMD CPUs?
> 
> Hi Tim,
> 
> Let me think about this a bit, since it can get a bit tricky if
> I want to avoid multiple MSR writes when only one may have been
> needed (assuming SSBD is not using the SPEC_CTRL MSR).

I think something along the lines of the following would work and
prevent any extra MSR writes for AMD when SSBD is not using the
SPEC_CTRL MSR. Let me know what you think (tglx, especially, since
he was heavily involved in this part):

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 878301d..d093d85 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -397,26 +397,59 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
 
 static __always_inline void intel_set_spec_ctrl_state(unsigned long tifn)
 {
-       u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn)
-                                    | stibp_tif_to_spec_ctrl(tifn);
+       u64 msr = x86_spec_ctrl_base;
+
+       /*
+        * On AMD, a different method may have been used to update SSBD,
+        * so only set the SSBD bit here when the SPEC_CTRL MSR is the
+        * method in use for SSBD.
+        */
+       if (static_cpu_has(X86_FEATURE_SSBD))
+               msr |= ssbd_tif_to_spec_ctrl(tifn);
+
+       msr |= stibp_tif_to_spec_ctrl(tifn);
 
        wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+static __always_inline void __speculative_store_bypass_update(unsigned long tifp,
+                                                             unsigned long tifn)
 {
-       if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
-               amd_set_ssb_virt_state(tifn);
-       else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-               amd_set_core_ssb_state(tifn);
-       else
-               intel_set_spec_ctrl_state(tifn);
+       bool stibp = !!((tifp ^ tifn) & _TIF_STIBP);
+       bool ssbd = !!((tifp ^ tifn) & _TIF_SSBD);
+
+       if (!ssbd && !stibp)
+               return;
+
+       if (ssbd) {
+               /*
+                * For AMD, try these methods first.  The ssbd variable will
+                * reflect whether the SPEC_CTRL MSR method is needed.
+                */
+               ssbd = false;
+
+               if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+                       amd_set_ssb_virt_state(tifn);
+               else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+                       amd_set_core_ssb_state(tifn);
+               else
+                       ssbd = true;
+       }
+
+       /* Avoid a possible extra MSR write, recheck the flags */
+       if (!ssbd && !stibp)
+               return;
+
+       intel_set_spec_ctrl_state(tifn);
 }
 
 void speculative_store_bypass_update(unsigned long tif)
 {
+       /*
+        * On this path we're forcing the update, so use ~tif as the
+        * previous flags.
+        */
        preempt_disable();
-       __speculative_store_bypass_update(tif);
+       __speculative_store_bypass_update(~tif, tif);
        preempt_enable();
 }
 
@@ -452,8 +485,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 
-       if ((tifp ^ tifn) & (_TIF_SSBD | _TIF_STIBP))
-               __speculative_store_bypass_update(tifn);
+       __speculative_store_bypass_update(tifp, tifn);
 }
 
 /*

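For anyone following along, here's a minimal user-space sketch of the
change-detection idea in the hunks above. The _TIF_* bit values and the
printf calls are stand-ins for the real thread_info flags and the
wrmsrl, so this is illustrative only, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag bits; the real _TIF_SSBD/_TIF_STIBP live in
 * arch/x86/include/asm/thread_info.h and differ from these. */
#define _TIF_SSBD	(1UL << 0)
#define _TIF_STIBP	(1UL << 1)

/* Mirrors the tifp ^ tifn test above: only "write the MSR" when
 * at least one of the two flags actually changed. */
static void spec_ctrl_update(unsigned long tifp, unsigned long tifn)
{
	bool stibp = !!((tifp ^ tifn) & _TIF_STIBP);
	bool ssbd = !!((tifp ^ tifn) & _TIF_SSBD);

	if (!ssbd && !stibp) {
		printf("tifn=%lx: no change, MSR write skipped\n", tifn);
		return;
	}

	printf("tifn=%lx: MSR write (ssbd changed=%d, stibp changed=%d)\n",
	       tifn, ssbd, stibp);
}

int main(void)
{
	unsigned long tif = _TIF_STIBP;

	spec_ctrl_update(tif, tif);		/* nothing toggled: skipped */
	spec_ctrl_update(0, _TIF_STIBP);	/* STIBP toggled: one write */

	/*
	 * Forced update, as in speculative_store_bypass_update():
	 * ~tif differs from tif in every bit, so both change tests
	 * are guaranteed to fire.
	 */
	spec_ctrl_update(~tif, tif);

	return 0;
}

The ~tif trick keeps the forced-update path on the same code as the
context-switch path, without needing a separate "force" parameter.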
Thanks,
Tom

> 
> Thanks,
> Tom
> 
>>
>> Thanks.
>>
>> Tim
>>
>> Tim Chen (2):
>>   x86/speculation: Option to select app to app mitigation for spectre_v2
>>   x86/speculation: Provide application property based STIBP protection
>>
>>  Documentation/admin-guide/kernel-parameters.txt |  11 +++
>>  arch/x86/include/asm/msr-index.h                |   3 +-
>>  arch/x86/include/asm/nospec-branch.h            |   9 ++
>>  arch/x86/include/asm/spec-ctrl.h                |  12 +++
>>  arch/x86/include/asm/thread_info.h              |   4 +-
>>  arch/x86/kernel/cpu/bugs.c                      | 105 ++++++++++++++++++++++--
>>  arch/x86/kernel/process.c                       |   9 +-
>>  arch/x86/mm/tlb.c                               |  41 ++++++++-
>>  8 files changed, 179 insertions(+), 15 deletions(-)
>>
