trace_get_context_bit() and trace_recursive_lock() use the same logic,
but the latter reads the per-CPU preempt counter only once.

Use trace_recursive_lock()'s logic in trace_get_context_bit().
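
For reference, in_nmi(), in_irq(), and in_interrupt() each expand to a
fresh preempt_count() read, so the removed branch chain could read the
counter up to three times. A simplified sketch of the
include/linux/preempt.h definitions:

    #define in_nmi()        (preempt_count() & NMI_MASK)
    #define in_irq()        (preempt_count() & HARDIRQ_MASK)
    #define in_interrupt()  \
            (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))

Caching preempt_count() in a local variable, as trace_recursive_lock()
already does, performs the read once and tests the masks against that
value.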

Signed-off-by: Daniel Bristot de Oliveira <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Arnaldo Carvalho de Melo <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: "Joel Fernandes (Google)" <[email protected]>
Cc: Jiri Olsa <[email protected]>
Cc: Namhyung Kim <[email protected]>
Cc: Alexander Shishkin <[email protected]>
Cc: Tommaso Cucinotta <[email protected]>
Cc: Romulo Silva de Oliveira <[email protected]>
Cc: Clark Williams <[email protected]>
Cc: [email protected]
Cc: [email protected]
---
 kernel/trace/trace.h | 19 ++++++-------------
 1 file changed, 6 insertions(+), 13 deletions(-)

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dad2f0cd7208..09318748fab8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -635,20 +635,13 @@ enum {
 
 static __always_inline int trace_get_context_bit(void)
 {
-       int bit;
-
-       if (in_interrupt()) {
-               if (in_nmi())
-                       bit = TRACE_CTX_NMI;
+       unsigned long pc = preempt_count();
 
-               else if (in_irq())
-                       bit = TRACE_CTX_IRQ;
-               else
-                       bit = TRACE_CTX_SOFTIRQ;
-       } else
-               bit = TRACE_CTX_NORMAL;
-
-       return bit;
+       if (pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))
+               return pc & NMI_MASK ? TRACE_CTX_NMI :
+                       pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
+       else
+               return TRACE_CTX_NORMAL;
 }
 
 static __always_inline int trace_test_and_set_recursion(int start, int max)
-- 
2.20.1
