Re: [PATCH v6 3/6] arch/x86/mm: Refactor cond_ibpb() to support other use cases

2020-05-13 Thread Thomas Gleixner
Balbir Singh  writes:
> @@ -550,8 +549,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
>* Avoid user/user BTB poisoning by flushing the branch
>* predictor when switching between processes. This stops
>* one process from doing Spectre-v2 attacks on another.
> +  * The hook can also be used for mitigations that rely
> +  * on switch_mm for hooks.

The new function name has absolutely nothing to do with IBPB and is
clearly talking about mitigations. So the IBPB comment wants to move and
that extra sentence you bolted on can go away. It's nonsensical word
salad anyway.
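
For completeness, a sketch of the direction (not necessarily the final
wording or placement): the extra sentence at the call site goes away and
the Spectre-v2 wording moves into cond_mitigation(), merged with the
comment that already sits above the switch_mm_cond_ibpb check:

	static void cond_mitigation(struct task_struct *next)
	{
		unsigned long prev_mm, next_mm;

		if (!next || !next->mm)
			return;

		next_mm = mm_mangle_tif_spec_bits(next);
		prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);

		/*
		 * Avoid user/user BTB poisoning by flushing the branch
		 * predictor when switching between processes. This stops
		 * one process from doing Spectre-v2 attacks on another.
		 *
		 * Both, the conditional and the always IBPB mode use the
		 * mm pointer to avoid the IBPB when switching between
		 * tasks of the same process.
		 */
		if (static_branch_likely(&switch_mm_cond_ibpb)) {
			/* unchanged from the patch below */
		}
		/* ... */
	}

The call site in switch_mm_irqs_off() is then a bare cond_mitigation(tsk)
with at most a one-line "apply speculation mitigations" note.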
>  
>   /* Reinitialize tlbstate. */
> - this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
> + this_cpu_write(cpu_tlbstate.last_user_mm_spec, LAST_USER_MM_IBPB);

There is still no comment explaining why this only needs LAST_USER_MM_IBPB. I'll
change this to LAST_USER_MM_INIT and put that define close to the others.
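
Concretely, that would look something like the below (a sketch; exact
comment wording may differ), i.e. a dedicated init define next to the
other LAST_USER_MM_* bits:

	/*
	 * Bits to mangle the TIF_SPEC_IB state into the mm pointer which is
	 * stored in cpu_tlbstate.last_user_mm_spec.
	 */
	#define LAST_USER_MM_IBPB	0x1UL
	#define LAST_USER_MM_SPEC_MASK	(LAST_USER_MM_IBPB)

	/* Bits to set when tlbstate and flush is (re)initialized */
	#define LAST_USER_MM_INIT	LAST_USER_MM_IBPB

and at the init site in initialize_tlbstate_and_flush():

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.last_user_mm_spec, LAST_USER_MM_INIT);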

Thanks,

tglx


[PATCH v6 3/6] arch/x86/mm: Refactor cond_ibpb() to support other use cases

2020-05-09 Thread Balbir Singh
cond_ibpb() already has the bits necessary to track the
previous mm in switch_mm_irqs_off(). This can be reused for
other use cases like L1D flushing (on context switch out).

Suggested-by: Thomas Gleixner 
Signed-off-by: Balbir Singh 
---
 arch/x86/include/asm/tlbflush.h |  2 +-
 arch/x86/mm/tlb.c   | 43 +
 2 files changed, 23 insertions(+), 22 deletions(-)
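
As an illustration of the "other use cases" mentioned above (not part of
this patch; the L1D names below are hypothetical placeholders for a later
patch in the series), a new mitigation would claim another low bit in the
mangled mm pointer, add it to the mask, and test it in cond_mitigation():

	#define LAST_USER_MM_IBPB	0x1UL
	#define LAST_USER_MM_L1D_FLUSH	0x2UL	/* hypothetical new user */
	#define LAST_USER_MM_SPEC_MASK	(LAST_USER_MM_IBPB | LAST_USER_MM_L1D_FLUSH)

	/* in cond_mitigation(), next to the IBPB checks: */
	if (prev_mm & LAST_USER_MM_L1D_FLUSH)
		arch_l1d_flush();	/* hypothetical flush helper */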

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 8c87a2e0b660..a927d40664df 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -83,7 +83,7 @@ struct tlb_state {
/* Last user mm for optimizing IBPB */
union {
struct mm_struct*last_user_mm;
-   unsigned long   last_user_mm_ibpb;
+   unsigned long   last_user_mm_spec;
};
 
u16 loaded_mm_asid;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index cf81902e6992..10056b8d8f01 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -43,10 +43,11 @@
  */
 
 /*
- * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
- * stored in cpu_tlb_state.last_user_mm_ibpb.
+ * Bits to mangle the TIF_SPEC_IB state into the mm pointer which is
+ * stored in cpu_tlb_state.last_user_mm_spec.
  */
 #define LAST_USER_MM_IBPB  0x1UL
+#define LAST_USER_MM_SPEC_MASK (LAST_USER_MM_IBPB)
 
 /*
  * The x86 feature is called PCID (Process Context IDentifier). It is similar
@@ -345,19 +346,24 @@ static void sync_current_stack_to_mm(struct mm_struct *mm)
}
 }
 
-static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+static inline unsigned long mm_mangle_tif_spec_bits(struct task_struct *next)
 {
unsigned long next_tif = task_thread_info(next)->flags;
-   unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
+   unsigned long spec_bits = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_SPEC_MASK;
 
-   return (unsigned long)next->mm | ibpb;
+   return (unsigned long)next->mm | spec_bits;
 }
 
-static void cond_ibpb(struct task_struct *next)
+static void cond_mitigation(struct task_struct *next)
 {
+   unsigned long prev_mm, next_mm;
+
if (!next || !next->mm)
return;
 
+   next_mm = mm_mangle_tif_spec_bits(next);
+   prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
+
/*
 * Both, the conditional and the always IBPB mode use the mm
 * pointer to avoid the IBPB when switching between tasks of the
@@ -368,8 +374,6 @@ static void cond_ibpb(struct task_struct *next)
 * exposed data is not really interesting.
 */
if (static_branch_likely(&switch_mm_cond_ibpb)) {
-   unsigned long prev_mm, next_mm;
-
/*
 * This is a bit more complex than the always mode because
 * it has to handle two cases:
@@ -399,20 +403,14 @@ static void cond_ibpb(struct task_struct *next)
 * Optimize this with reasonably small overhead for the
 * above cases. Mangle the TIF_SPEC_IB bit into the mm
 * pointer of the incoming task which is stored in
-* cpu_tlbstate.last_user_mm_ibpb for comparison.
-*/
-   next_mm = mm_mangle_tif_spec_ib(next);
-   prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
-
-   /*
+* cpu_tlbstate.last_user_mm_spec for comparison.
+*
 * Issue IBPB only if the mm's are different and one or
 * both have the IBPB bit set.
 */
if (next_mm != prev_mm &&
(next_mm | prev_mm) & LAST_USER_MM_IBPB)
indirect_branch_prediction_barrier();
-
-   this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
}
 
if (static_branch_unlikely(&switch_mm_always_ibpb)) {
@@ -421,11 +419,12 @@ static void cond_ibpb(struct task_struct *next)
 * different context than the user space task which ran
 * last on this CPU.
 */
-   if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
+   if ((prev_mm & ~LAST_USER_MM_SPEC_MASK) !=
+   (unsigned long)next->mm)
indirect_branch_prediction_barrier();
-   this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
-   }
}
+
+   this_cpu_write(cpu_tlbstate.last_user_mm_spec, next_mm);
 }
 
 #ifdef CONFIG_PERF_EVENTS
@@ -550,8 +549,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 * Avoid user/user BTB poisoning by flushing the branch
 * predictor when switching between processes. This stops
 * one process from doing Spectre-v2 attacks on another.
+*