Author: kib
Date: Wed May 27 18:23:14 2020
New Revision: 361557
URL: https://svnweb.freebsd.org/changeset/base/361557

Log:
  MFC r361302:
  amd64: Add a knob to flush RSB on context switches if machine has SMEP.

Modified:
  stable/12/sys/amd64/amd64/cpu_switch.S
  stable/12/sys/amd64/amd64/initcpu.c
  stable/12/sys/amd64/amd64/support.S
  stable/12/sys/i386/i386/support.s
  stable/12/sys/x86/include/x86_var.h
  stable/12/sys/x86/x86/cpu_machdep.c
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/amd64/amd64/cpu_switch.S
==============================================================================
--- stable/12/sys/amd64/amd64/cpu_switch.S	Wed May 27 18:17:40 2020	(r361556)
+++ stable/12/sys/amd64/amd64/cpu_switch.S	Wed May 27 18:23:14 2020	(r361557)
@@ -233,6 +233,8 @@ done_load_dr:
        movq    %rax,(%rsp)
        movq    PCPU(CURTHREAD),%rdi
        call    fpu_activate_sw
+       cmpb    $0,cpu_flush_rsb_ctxsw(%rip)
+       jne     rsb_flush
        ret
 
        /*

Modified: stable/12/sys/amd64/amd64/initcpu.c
==============================================================================
--- stable/12/sys/amd64/amd64/initcpu.c	Wed May 27 18:17:40 2020	(r361556)
+++ stable/12/sys/amd64/amd64/initcpu.c	Wed May 27 18:23:14 2020	(r361557)
@@ -238,12 +238,24 @@ initializecpu(void)
                cr4 |= CR4_PKE;
 
        /*
+        * If SMEP is present, we only need to flush RSB (by default)
+        * on context switches, to prevent cross-process ret2spec
+        * attacks.  Do it automatically if ibrs_disable is set, to
+        * complete the mitigation.
+        *
         * Postpone enabling the SMEP on the boot CPU until the page
         * tables are switched from the boot loader identity mapping
         * to the kernel tables.  The boot loader enables the U bit in
         * its tables.
         */
-       if (!IS_BSP()) {
+       if (IS_BSP()) {
+               if (cpu_stdext_feature & CPUID_STDEXT_SMEP &&
+                   !TUNABLE_INT_FETCH(
+                   "machdep.mitigations.cpu_flush_rsb_ctxsw",
+                   &cpu_flush_rsb_ctxsw) &&
+                   hw_ibrs_disable)
+                       cpu_flush_rsb_ctxsw = 1;
+       } else {
                if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
                        cr4 |= CR4_SMEP;
                if (cpu_stdext_feature & CPUID_STDEXT_SMAP)

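For clarity, the new BSP-only branch gives an explicit machdep.mitigations.cpu_flush_rsb_ctxsw loader tunable the final say and only auto-enables the flush when the tunable is absent, SMEP is present, and IBRS is disabled. Below is a small standalone C model of that precedence (not kernel code; has_smep, fetch_tunable() and choose_default() are hypothetical stand-ins for the CPUID bit, TUNABLE_INT_FETCH() and the branch above):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel state consulted in initializecpu(). */
static bool has_smep;			/* CPUID_STDEXT_SMEP present */
static int hw_ibrs_disable;		/* IBRS mitigation turned off */
static int cpu_flush_rsb_ctxsw;

/*
 * Models TUNABLE_INT_FETCH(): returns true, and stores the value, only
 * when the loader tunable was explicitly set.
 */
static bool
fetch_tunable(bool set, int value, int *out)
{
	if (set)
		*out = value;
	return (set);
}

static void
choose_default(bool tunable_set, int tunable_value)
{
	/* Mirrors the new IS_BSP() branch above. */
	if (has_smep &&
	    !fetch_tunable(tunable_set, tunable_value, &cpu_flush_rsb_ctxsw) &&
	    hw_ibrs_disable)
		cpu_flush_rsb_ctxsw = 1;
}

int
main(void)
{
	has_smep = true;
	hw_ibrs_disable = 1;

	choose_default(false, 0);	/* tunable unset: auto-enabled */
	printf("default: %d\n", cpu_flush_rsb_ctxsw);

	cpu_flush_rsb_ctxsw = 0;
	choose_default(true, 0);	/* explicit "0" overrides the default */
	printf("forced off: %d\n", cpu_flush_rsb_ctxsw);
	return (0);
}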
Modified: stable/12/sys/amd64/amd64/support.S
==============================================================================
--- stable/12/sys/amd64/amd64/support.S	Wed May 27 18:17:40 2020	(r361556)
+++ stable/12/sys/amd64/amd64/support.S	Wed May 27 18:23:14 2020	(r361557)
@@ -1528,23 +1528,27 @@ ENTRY(pmap_pti_pcid_invlrng)
        retq
 
        .altmacro
-       .macro  ibrs_seq_label l
-handle_ibrs_\l:
+       .macro  rsb_seq_label l
+rsb_seq_\l:
        .endm
-       .macro  ibrs_call_label l
-       call    handle_ibrs_\l
+       .macro  rsb_call_label l
+       call    rsb_seq_\l
        .endm
-       .macro  ibrs_seq count
+       .macro  rsb_seq count
        ll=1
        .rept   \count
-       ibrs_call_label %(ll)
+       rsb_call_label  %(ll)
        nop
-       ibrs_seq_label %(ll)
+       rsb_seq_label %(ll)
        addq    $8,%rsp
        ll=ll+1
        .endr
        .endm
 
+ENTRY(rsb_flush)
+       rsb_seq 32
+       ret
+
 /* all callers already saved %rax, %rdx, and %rcx */
 ENTRY(handle_ibrs_entry)
        cmpb    $0,hw_ibrs_ibpb_active(%rip)
@@ -1556,8 +1560,7 @@ ENTRY(handle_ibrs_entry)
        wrmsr
        movb    $1,PCPU(IBPB_SET)
        testl   $CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
-       jne     1f
-       ibrs_seq 32
+       je      rsb_flush
 1:     ret
 END(handle_ibrs_entry)
 

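The rsb_seq macro above expands to 32 repetitions of "call <next label>; nop; <label>: addq $8,%rsp": each call pushes a harmless return address into the Return Stack Buffer, and the addq immediately discards the architectural return address, so the predictor's entries end up pointing at kernel nops rather than at victim return sites (the i386 version below is identical except it pops 4 bytes with addl). A rough illustration of one repetition as GCC/Clang inline asm follows; this is illustration only, with a hypothetical wrapper function, not how the kernel does it (the kernel keeps this in pure assembly, as above):

#include <stdio.h>

/*
 * One rsb_seq step; repeated 32 times it overwrites every RSB entry.
 * noinline keeps the transient stack push away from any caller locals.
 */
static void __attribute__((noinline))
rsb_stuff_one(void)
{
	__asm__ __volatile__(
	    "call	1f\n\t"	/* push return address, fill one RSB slot */
	    "nop\n"		/* never executed: nothing returns here */
	    "1:\n\t"
	    "addq	$8, %%rsp"	/* drop the on-stack return address */
	    ::: "cc", "memory");
}

int
main(void)
{
	int i;

	for (i = 0; i < 32; i++)
		rsb_stuff_one();
	printf("stuffed 32 RSB entries\n");
	return (0);
}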
Modified: stable/12/sys/i386/i386/support.s
==============================================================================
--- stable/12/sys/i386/i386/support.s	Wed May 27 18:17:40 2020	(r361556)
+++ stable/12/sys/i386/i386/support.s	Wed May 27 18:23:14 2020	(r361557)
@@ -445,6 +445,28 @@ msr_onfault:
        movl    $EFAULT,%eax
        ret
 
+       .altmacro
+       .macro  rsb_seq_label l
+rsb_seq_\l:
+       .endm
+       .macro  rsb_call_label l
+       call    rsb_seq_\l
+       .endm
+       .macro  rsb_seq count
+       ll=1
+       .rept   \count
+       rsb_call_label  %(ll)
+       nop
+       rsb_seq_label %(ll)
+       addl    $4,%esp
+       ll=ll+1
+       .endr
+       .endm
+
+ENTRY(rsb_flush)
+       rsb_seq 32
+       ret
+
 ENTRY(handle_ibrs_entry)
        cmpb    $0,hw_ibrs_ibpb_active
        je      1f
@@ -455,10 +477,9 @@ ENTRY(handle_ibrs_entry)
        wrmsr
        movb    $1,PCPU(IBPB_SET)
        /*
-        * i386 does not implement SMEP, but the 4/4 split makes this not
-        * that important.
+        * i386 does not implement SMEP.
         */
-1:     ret
+1:     jmp     rsb_flush
 END(handle_ibrs_entry)
 
 ENTRY(handle_ibrs_exit)

Modified: stable/12/sys/x86/include/x86_var.h
==============================================================================
--- stable/12/sys/x86/include/x86_var.h	Wed May 27 18:17:40 2020	(r361556)
+++ stable/12/sys/x86/include/x86_var.h	Wed May 27 18:23:14 2020	(r361557)
@@ -88,6 +88,7 @@ extern        int     hw_ibrs_ibpb_active;
 extern int     hw_mds_disable;
 extern int     hw_ssb_active;
 extern int     x86_taa_enable;
+extern int     cpu_flush_rsb_ctxsw;
 
 struct pcb;
 struct thread;

Modified: stable/12/sys/x86/x86/cpu_machdep.c
==============================================================================
--- stable/12/sys/x86/x86/cpu_machdep.c	Wed May 27 18:17:40 2020	(r361556)
+++ stable/12/sys/x86/x86/cpu_machdep.c	Wed May 27 18:23:14 2020	(r361557)
@@ -1332,6 +1332,11 @@ SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
     sysctl_taa_state_handler, "A",
     "TAA Mitigation state");
 
+int __read_frequently cpu_flush_rsb_ctxsw;
+SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
+    CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
+    "Flush Return Stack Buffer on context switch");
+
 /*
  * Enable and restore kernel text write permissions.
  * Callers must ensure that disable_wp()/restore_wp() are executed
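Since the new OID is CTLFLAG_RW, the knob can also be flipped at runtime with sysctl(8); CTLFLAG_NOFETCH tells the sysctl layer not to fetch a tunable for this OID itself, as the tunable is consumed in initializecpu() above instead. A minimal userland sketch (not part of this commit) that reads the new value via sysctlbyname(3):

#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	int val;
	size_t len = sizeof(val);

	if (sysctlbyname("machdep.mitigations.flush_rsb_ctxsw", &val, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");	/* OID absent on older kernels */
		return (1);
	}
	printf("machdep.mitigations.flush_rsb_ctxsw: %d\n", val);
	return (0);
}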