[tip:x86/mm] x86/mm/tlb: Restructure switch_mm_irqs_off()

2018-10-09 Thread tip-bot for Rik van Riel
Commit-ID:  12c4d978fd170ccdd7260ec11f93b11e46904228
Gitweb: https://git.kernel.org/tip/12c4d978fd170ccdd7260ec11f93b11e46904228
Author: Rik van Riel 
AuthorDate: Tue, 25 Sep 2018 23:58:39 -0400
Committer:  Peter Zijlstra 
CommitDate: Tue, 9 Oct 2018 16:51:11 +0200

x86/mm/tlb: Restructure switch_mm_irqs_off()

Move some code that will be needed for the lazy -> !lazy state
transition when a lazy TLB CPU has gotten out of date.

No functional changes, since the if (real_prev == next) branch
always returns.

(cherry picked from commit 61d0beb5796ab11f7f3bf38cb2eccc6579aaa70b)
Cc: npig...@gmail.com
Cc: efa...@gmx.de
Cc: will.dea...@arm.com
Cc: Linus Torvalds 
Cc: Thomas Gleixner 
Cc: songliubrav...@fb.com
Cc: kernel-t...@fb.com
Cc: h...@zytor.com
Suggested-by: Andy Lutomirski 
Signed-off-by: Rik van Riel 
Acked-by: Dave Hansen 
Signed-off-by: Ingo Molnar 
Signed-off-by: Peter Zijlstra (Intel) 
Link: http://lkml.kernel.org/r/20180716190337.26133-4-r...@surriel.com
---
 arch/x86/mm/tlb.c | 66 +++
 1 file changed, 33 insertions(+), 33 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 54a5870190a6..9fb30d27854b 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -187,6 +187,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
unsigned cpu = smp_processor_id();
u64 next_tlb_gen;
+   bool need_flush;
+   u16 new_asid;
 
/*
 * NB: The scheduler will call us with prev == next when switching
@@ -252,8 +254,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
return;
} else {
-   u16 new_asid;
-   bool need_flush;
u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
 
/*
@@ -308,44 +308,44 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
/* Let nmi_uaccess_okay() know that we're changing CR3. */
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
barrier();
+   }
 
-   if (need_flush) {
-   this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, 
next->context.ctx_id);
-   this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, 
next_tlb_gen);
-   load_new_mm_cr3(next->pgd, new_asid, true);
-
-   /*
-* NB: This gets called via leave_mm() in the idle path
-* where RCU functions differently.  Tracing normally
-* uses RCU, so we need to use the _rcuidle variant.
-*
-* (There is no good reason for this.  The idle code 
should
-*  be rearranged to call this before rcu_idle_enter().)
-*/
-   trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 
TLB_FLUSH_ALL);
-   } else {
-   /* The new ASID is already up to date. */
-   load_new_mm_cr3(next->pgd, new_asid, false);
-
-   /* See above wrt _rcuidle. */
-   trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
-   }
+   if (need_flush) {
+   this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, 
next->context.ctx_id);
+   this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, 
next_tlb_gen);
+   load_new_mm_cr3(next->pgd, new_asid, true);
 
/*
-* Record last user mm's context id, so we can avoid
-* flushing branch buffer with IBPB if we switch back
-* to the same user.
+* NB: This gets called via leave_mm() in the idle path
+* where RCU functions differently.  Tracing normally
+* uses RCU, so we need to use the _rcuidle variant.
+*
+* (There is no good reason for this.  The idle code should
+*  be rearranged to call this before rcu_idle_enter().)
 */
-   if (next != &init_mm)
-   this_cpu_write(cpu_tlbstate.last_ctx_id, 
next->context.ctx_id);
-
-   /* Make sure we write CR3 before loaded_mm. */
-   barrier();
+   trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 
TLB_FLUSH_ALL);
+   } else {
+   /* The new ASID is already up to date. */
+   load_new_mm_cr3(next->pgd, new_asid, false);
 
-   this_cpu_write(cpu_tlbstate.loaded_mm, next);
-   this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+   /* See above wrt _rcuidle. */
+   trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
 
+   /*
+    * Record last user mm's context id, so we can avoid
+    * flushing branch buffer with IBPB if we switch back
+    * to the same user.
+    */
+   if (next != &init_mm)
+           this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
+   /* Make sure we write CR3 before loaded_mm. */
+   barrier();
+
+   this_cpu_write(cpu_tlbstate.loaded_mm, next);
+   this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);

[archive note: the remainder of this first message was truncated; the tail above is reconstructed from the moved lines shown in the same hunk]

[tip:x86/mm] x86/mm/tlb: Restructure switch_mm_irqs_off()

2018-07-17 Thread tip-bot for Rik van Riel
Commit-ID:  61d0beb5796ab11f7f3bf38cb2eccc6579aaa70b
Gitweb: https://git.kernel.org/tip/61d0beb5796ab11f7f3bf38cb2eccc6579aaa70b
Author: Rik van Riel 
AuthorDate: Mon, 16 Jul 2018 15:03:33 -0400
Committer:  Ingo Molnar 
CommitDate: Tue, 17 Jul 2018 09:35:32 +0200

x86/mm/tlb: Restructure switch_mm_irqs_off()

Move some code that will be needed for the lazy -> !lazy state
transition when a lazy TLB CPU has gotten out of date.

No functional changes, since the if (real_prev == next) branch
always returns.

Suggested-by: Andy Lutomirski 
Signed-off-by: Rik van Riel 
Acked-by: Dave Hansen 
Cc: Linus Torvalds 
Cc: Peter Zijlstra 
Cc: Thomas Gleixner 
Cc: efa...@gmx.de
Cc: kernel-t...@fb.com
Link: http://lkml.kernel.org/r/20180716190337.26133-4-r...@surriel.com
Signed-off-by: Ingo Molnar 
---
 arch/x86/mm/tlb.c | 60 +++
 1 file changed, 30 insertions(+), 30 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 9a893673c56b..4b73fe835c95 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -187,6 +187,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
unsigned cpu = smp_processor_id();
u64 next_tlb_gen;
+   bool need_flush;
+   u16 new_asid;
 
/*
 * NB: The scheduler will call us with prev == next when switching
@@ -252,8 +254,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
return;
} else {
-   u16 new_asid;
-   bool need_flush;
u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
 
/*
@@ -297,41 +297,41 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
next_tlb_gen = atomic64_read(&next->context.tlb_gen);
 
choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+   }
 
-   if (need_flush) {
-   this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, 
next->context.ctx_id);
-   this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, 
next_tlb_gen);
-   load_new_mm_cr3(next->pgd, new_asid, true);
-
-   /*
-* NB: This gets called via leave_mm() in the idle path
-* where RCU functions differently.  Tracing normally
-* uses RCU, so we need to use the _rcuidle variant.
-*
-* (There is no good reason for this.  The idle code 
should
-*  be rearranged to call this before rcu_idle_enter().)
-*/
-   trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 
TLB_FLUSH_ALL);
-   } else {
-   /* The new ASID is already up to date. */
-   load_new_mm_cr3(next->pgd, new_asid, false);
-
-   /* See above wrt _rcuidle. */
-   trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
-   }
+   if (need_flush) {
+   this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, 
next->context.ctx_id);
+   this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, 
next_tlb_gen);
+   load_new_mm_cr3(next->pgd, new_asid, true);
 
/*
-* Record last user mm's context id, so we can avoid
-* flushing branch buffer with IBPB if we switch back
-* to the same user.
+* NB: This gets called via leave_mm() in the idle path
+* where RCU functions differently.  Tracing normally
+* uses RCU, so we need to use the _rcuidle variant.
+*
+* (There is no good reason for this.  The idle code should
+*  be rearranged to call this before rcu_idle_enter().)
 */
-   if (next != &init_mm)
-   this_cpu_write(cpu_tlbstate.last_ctx_id, 
next->context.ctx_id);
+   trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 
TLB_FLUSH_ALL);
+   } else {
+   /* The new ASID is already up to date. */
+   load_new_mm_cr3(next->pgd, new_asid, false);
 
-   this_cpu_write(cpu_tlbstate.loaded_mm, next);
-   this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
+   /* See above wrt _rcuidle. */
+   trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
 
+   /*
+* Record last user mm's context id, so we can avoid
+* flushing branch buffer with IBPB if we switch back
+* to the same user.
+*/
+   if (next != &init_mm)
+   this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
+   this_cpu_write(cpu_tlbstate.loaded_mm, next);
+   this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);