Author: kib
Date: Wed May 27 18:55:24 2020
New Revision: 361561
URL: https://svnweb.freebsd.org/changeset/base/361561

Log:
  MFC r361302:
  amd64: Add a knob to flush RSB on context switches if machine has SMEP.

Modified:
  stable/11/sys/amd64/amd64/cpu_switch.S
  stable/11/sys/amd64/amd64/initcpu.c
  stable/11/sys/amd64/amd64/support.S
  stable/11/sys/i386/i386/support.s
  stable/11/sys/x86/include/x86_var.h
  stable/11/sys/x86/x86/cpu_machdep.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/amd64/amd64/cpu_switch.S
==============================================================================
--- stable/11/sys/amd64/amd64/cpu_switch.S      Wed May 27 18:32:12 2020        (r361560)
+++ stable/11/sys/amd64/amd64/cpu_switch.S      Wed May 27 18:55:24 2020        (r361561)
@@ -235,6 +235,8 @@ done_load_dr:
        movq    %rax,(%rsp)
        movq    PCPU(CURTHREAD),%rdi
        call    fpu_activate_sw
+       cmpb    $0,cpu_flush_rsb_ctxsw(%rip)
+       jne     rsb_flush
        ret
 
        /*

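The cpu_switch.S change hooks the flush into the return path of the context
switch: after fpu_activate_sw, the knob is tested and, when it is non-zero,
execution tail-jumps into rsb_flush, whose final ret then returns to
cpu_switch's caller with the RSB freshly overwritten. An annotated paraphrase
of that tail (comments added for illustration, not part of the commit):

        cmpb    $0,cpu_flush_rsb_ctxsw(%rip)    /* is the knob enabled? */
        jne     rsb_flush                       /* yes: tail-jump; rsb_flush ends in ret */
        ret                                     /* no: return directly */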
Modified: stable/11/sys/amd64/amd64/initcpu.c
==============================================================================
--- stable/11/sys/amd64/amd64/initcpu.c Wed May 27 18:32:12 2020        (r361560)
+++ stable/11/sys/amd64/amd64/initcpu.c Wed May 27 18:55:24 2020        (r361561)
@@ -232,13 +232,27 @@ initializecpu(void)
                cr4 |= CR4_FSGSBASE;
 
        /*
+        * If SMEP is present, we only need to flush RSB (by default)
+        * on context switches, to prevent cross-process ret2spec
+        * attacks.  Do it automatically if ibrs_disable is set, to
+        * complete the mitigation.
+        *
         * Postpone enabling the SMEP on the boot CPU until the page
         * tables are switched from the boot loader identity mapping
         * to the kernel tables.  The boot loader enables the U bit in
         * its tables.
         */
-       if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMEP))
-               cr4 |= CR4_SMEP;
+       if (IS_BSP()) {
+               if (cpu_stdext_feature & CPUID_STDEXT_SMEP &&
+                   !TUNABLE_INT_FETCH(
+                   "machdep.mitigations.cpu_flush_rsb_ctxsw",
+                   &cpu_flush_rsb_ctxsw) &&
+                   hw_ibrs_disable)
+                       cpu_flush_rsb_ctxsw = 1;
+       } else {
+               if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
+                       cr4 |= CR4_SMEP;
+       }
        load_cr4(cr4);
        if ((amd_feature & AMDID_NX) != 0) {
                msr = rdmsr(MSR_EFER) | EFER_NXE;

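A note on the initcpu.c policy above: TUNABLE_INT_FETCH() returns non-zero
when the loader tunable exists, so the automatic default only applies when the
administrator did not set machdep.mitigations.cpu_flush_rsb_ctxsw explicitly.
In that case the knob is turned on only if SMEP is available and IBRS is
disabled (hw_ibrs_disable), the situation the new comment describes. A rough C
restatement of the BSP branch, with that subtlety spelled out in a comment
(illustrative sketch, not a literal copy of the diff):

	if (IS_BSP()) {
		/*
		 * TUNABLE_INT_FETCH() != 0 means the admin already chose a
		 * value; only pick the default when it returns 0.
		 */
		if ((cpu_stdext_feature & CPUID_STDEXT_SMEP) != 0 &&
		    !TUNABLE_INT_FETCH("machdep.mitigations.cpu_flush_rsb_ctxsw",
		    &cpu_flush_rsb_ctxsw) && hw_ibrs_disable)
			cpu_flush_rsb_ctxsw = 1;
	}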
Modified: stable/11/sys/amd64/amd64/support.S
==============================================================================
--- stable/11/sys/amd64/amd64/support.S Wed May 27 18:32:12 2020        (r361560)
+++ stable/11/sys/amd64/amd64/support.S Wed May 27 18:55:24 2020        (r361561)
@@ -832,23 +832,27 @@ ENTRY(pmap_pti_pcid_invlrng)
        retq
 
        .altmacro
-       .macro  ibrs_seq_label l
-handle_ibrs_\l:
+       .macro  rsb_seq_label l
+rsb_seq_\l:
        .endm
-       .macro  ibrs_call_label l
-       call    handle_ibrs_\l
+       .macro  rsb_call_label l
+       call    rsb_seq_\l
        .endm
-       .macro  ibrs_seq count
+       .macro  rsb_seq count
        ll=1
        .rept   \count
-       ibrs_call_label %(ll)
+       rsb_call_label  %(ll)
        nop
-       ibrs_seq_label %(ll)
+       rsb_seq_label %(ll)
        addq    $8,%rsp
        ll=ll+1
        .endr
        .endm
 
+ENTRY(rsb_flush)
+       rsb_seq 32
+       ret
+
 /* all callers already saved %rax, %rdx, and %rcx */
 ENTRY(handle_ibrs_entry)
        cmpb    $0,hw_ibrs_ibpb_active(%rip)
@@ -860,8 +864,7 @@ ENTRY(handle_ibrs_entry)
        wrmsr
        movb    $1,PCPU(IBPB_SET)
        testl   $CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
-       jne     1f
-       ibrs_seq 32
+       je      rsb_flush
 1:     ret
 END(handle_ibrs_entry)
 

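For reference, rsb_seq 32 expands into 32 call/pop pairs. Each call pushes
the address of the following nop onto both the hardware Return Stack Buffer
and the software stack; the addq $8,%rsp then discards the stack copy without
executing a matching ret, so the RSB entry is never consumed and every slot
ends up pointing at a harmless in-kernel nop. A sketch of the first two of the
32 expanded iterations (derived by hand from the macro above):

	call	rsb_seq_1	/* RSB now holds the address of the nop below */
	nop			/* never executed architecturally */
rsb_seq_1:
	addq	$8,%rsp		/* drop the return address from the stack only */
	call	rsb_seq_2
	nop
rsb_seq_2:
	addq	$8,%rsp
	/* ...30 more iterations... */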
Modified: stable/11/sys/i386/i386/support.s
==============================================================================
--- stable/11/sys/i386/i386/support.s   Wed May 27 18:32:12 2020        (r361560)
+++ stable/11/sys/i386/i386/support.s   Wed May 27 18:55:24 2020        (r361561)
@@ -819,8 +819,30 @@ msr_onfault:
        movl    $EFAULT,%eax
        ret
 
-ENTRY(handle_ibrs_entry)
+       .altmacro
+       .macro  rsb_seq_label l
+rsb_seq_\l:
+       .endm
+       .macro  rsb_call_label l
+       call    rsb_seq_\l
+       .endm
+       .macro  rsb_seq count
+       ll=1
+       .rept   \count
+       rsb_call_label  %(ll)
+       nop
+       rsb_seq_label %(ll)
+       addl    $4,%esp
+       ll=ll+1
+       .endr
+       .endm
+
+ENTRY(rsb_flush)
+       rsb_seq 32
        ret
+
+ENTRY(handle_ibrs_entry)
+       jmp     rsb_flush
 END(handle_ibrs_entry)
 
 ENTRY(handle_ibrs_exit)

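The i386 version is the same sequence with 4-byte stack slots (addl $4,%esp).
Before this change the i386 handle_ibrs_entry was an empty stub (a lone ret);
it now jumps unconditionally into rsb_flush, and that pre-existing ret becomes
rsb_flush's return instruction. Reconstructed from the hunk above, the
resulting routines read roughly as:

	ENTRY(rsb_flush)
		rsb_seq	32
		ret

	ENTRY(handle_ibrs_entry)
		jmp	rsb_flush
	END(handle_ibrs_entry)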
Modified: stable/11/sys/x86/include/x86_var.h
==============================================================================
--- stable/11/sys/x86/include/x86_var.h Wed May 27 18:32:12 2020        (r361560)
+++ stable/11/sys/x86/include/x86_var.h Wed May 27 18:55:24 2020        (r361561)
@@ -86,6 +86,7 @@ extern        int     hw_ibrs_ibpb_active;
 extern int     hw_mds_disable;
 extern int     hw_ssb_active;
 extern int     x86_taa_enable;
+extern int     cpu_flush_rsb_ctxsw;
 
 struct pcb;
 struct thread;

Modified: stable/11/sys/x86/x86/cpu_machdep.c
==============================================================================
--- stable/11/sys/x86/x86/cpu_machdep.c Wed May 27 18:32:12 2020        (r361560)
+++ stable/11/sys/x86/x86/cpu_machdep.c Wed May 27 18:55:24 2020        (r361561)
@@ -1360,3 +1360,7 @@ SYSCTL_PROC(_machdep_mitigations_taa, OID_AUTO, state,
     sysctl_taa_state_handler, "A",
     "TAA Mitigation state");
 
+int __read_frequently cpu_flush_rsb_ctxsw;
+SYSCTL_INT(_machdep_mitigations, OID_AUTO, flush_rsb_ctxsw,
+    CTLFLAG_RW | CTLFLAG_NOFETCH, &cpu_flush_rsb_ctxsw, 0,
+    "Flush Return Stack Buffer on context switch");