From: Steven Rostedt (VMware) <rost...@goodmis.org>

The use of task->trace_recursion for the logic used for the function
graph depth was a bit of an abuse of that variable. Now that per-stack
variables exist for registered graph tracers, use those instead.

Signed-off-by: Steven Rostedt (VMware) <rost...@goodmis.org>
Signed-off-by: Masami Hiramatsu (Google) <mhira...@kernel.org>
---
 include/linux/trace_recursion.h | 29 -----------------------------
 kernel/trace/trace.h            | 34 ++++++++++++++++++++++++++++++++--
 2 files changed, 32 insertions(+), 31 deletions(-)

diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index 2efd5ec46d7f..00e792bf148d 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -44,25 +44,6 @@ enum {
 	 */
 	TRACE_IRQ_BIT,
 
-	/*
-	 * In the very unlikely case that an interrupt came in
-	 * at a start of graph tracing, and we want to trace
-	 * the function in that interrupt, the depth can be greater
-	 * than zero, because of the preempted start of a previous
-	 * trace. In an even more unlikely case, depth could be 2
-	 * if a softirq interrupted the start of graph tracing,
-	 * followed by an interrupt preempting a start of graph
-	 * tracing in the softirq, and depth can even be 3
-	 * if an NMI came in at the start of an interrupt function
-	 * that preempted a softirq start of a function that
-	 * preempted normal context!!!! Luckily, it can't be
-	 * greater than 3, so the next two bits are a mask
-	 * of what the depth is when we set TRACE_GRAPH_FL
-	 */
-
-	TRACE_GRAPH_DEPTH_START_BIT,
-	TRACE_GRAPH_DEPTH_END_BIT,
-
 	/*
 	 * To implement set_graph_notrace, if this bit is set, we ignore
 	 * function graph tracing of called functions, until the return
@@ -78,16 +59,6 @@ enum {
 #define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
 #define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
 
-#define trace_recursion_depth() \
-	(((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
-#define trace_recursion_set_depth(depth) \
-	do {								\
-		current->trace_recursion &=				\
-			~(3 << TRACE_GRAPH_DEPTH_START_BIT);		\
-		current->trace_recursion |=				\
-			((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;	\
-	} while (0)
-
 #define TRACE_CONTEXT_BITS	4
 
 #define TRACE_FTRACE_START	TRACE_FTRACE_BIT
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d5501b1a10a6..d8cdcbb4e5a6 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -899,8 +899,38 @@ extern void free_fgraph_ops(struct trace_array *tr);
 
 enum {
 	TRACE_GRAPH_FL		= 1,
+
+	/*
+	 * In the very unlikely case that an interrupt came in
+	 * at a start of graph tracing, and we want to trace
+	 * the function in that interrupt, the depth can be greater
+	 * than zero, because of the preempted start of a previous
+	 * trace. In an even more unlikely case, depth could be 2
+	 * if a softirq interrupted the start of graph tracing,
+	 * followed by an interrupt preempting a start of graph
+	 * tracing in the softirq, and depth can even be 3
+	 * if an NMI came in at the start of an interrupt function
+	 * that preempted a softirq start of a function that
+	 * preempted normal context!!!! Luckily, it can't be
+	 * greater than 3, so the next two bits are a mask
+	 * of what the depth is when we set TRACE_GRAPH_FL
+	 */
+
+	TRACE_GRAPH_DEPTH_START_BIT,
+	TRACE_GRAPH_DEPTH_END_BIT,
 };
 
+static inline unsigned long ftrace_graph_depth(unsigned long *task_var)
+{
+	return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
+}
+
+static inline void ftrace_graph_set_depth(unsigned long *task_var, int depth)
+{
+	*task_var &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);
+	*task_var |= (depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern struct ftrace_hash __rcu *ftrace_graph_hash;
 extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash;
@@ -933,7 +963,7 @@ ftrace_graph_addr(unsigned long *task_var, struct ftrace_graph_ent *trace)
 		 * when the depth is zero.
 		 */
 		*task_var |= TRACE_GRAPH_FL;
-		trace_recursion_set_depth(trace->depth);
+		ftrace_graph_set_depth(task_var, trace->depth);
 
 		/*
 		 * If no irqs are to be traced, but a set_graph_function
@@ -958,7 +988,7 @@ ftrace_graph_addr_finish(struct fgraph_ops *gops, struct ftrace_graph_ret *trace
 	unsigned long *task_var = fgraph_get_task_var(gops);
 
 	if ((*task_var & TRACE_GRAPH_FL) &&
-	    trace->depth == trace_recursion_depth())
+	    trace->depth == ftrace_graph_depth(task_var))
 		*task_var &= ~TRACE_GRAPH_FL;
 }
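
Not part of the patch itself: for anyone reading along outside the kernel tree, here is a
minimal, self-contained user-space sketch of the bit packing the new accessors rely on.
The helper names graph_depth()/graph_set_depth() and the main() driver are illustrative
stand-ins only; in the kernel, ftrace_graph_depth()/ftrace_graph_set_depth() operate on
the per-task variable returned by fgraph_get_task_var().

/*
 * Standalone sketch (NOT kernel code): how TRACE_GRAPH_FL and the two
 * depth bits pack into a single per-task unsigned long.
 */
#include <stdio.h>

enum {
	TRACE_GRAPH_FL = 1,		/* flag value, occupies bit 0 */
	TRACE_GRAPH_DEPTH_START_BIT,	/* = 2, first of the two depth bits */
	TRACE_GRAPH_DEPTH_END_BIT,	/* = 3, second depth bit */
};

/* illustrative stand-in for ftrace_graph_depth() */
static unsigned long graph_depth(unsigned long *task_var)
{
	return (*task_var >> TRACE_GRAPH_DEPTH_START_BIT) & 3;
}

/* illustrative stand-in for ftrace_graph_set_depth() */
static void graph_set_depth(unsigned long *task_var, int depth)
{
	*task_var &= ~(3UL << TRACE_GRAPH_DEPTH_START_BIT);
	*task_var |= (unsigned long)(depth & 3) << TRACE_GRAPH_DEPTH_START_BIT;
}

int main(void)
{
	unsigned long task_var = 0;

	task_var |= TRACE_GRAPH_FL;	/* start of graph tracing seen */
	graph_set_depth(&task_var, 3);	/* worst case: NMI over irq over softirq over normal */

	/* prints "flag=1 depth=3" */
	printf("flag=%lu depth=%lu\n",
	       task_var & TRACE_GRAPH_FL, graph_depth(&task_var));
	return 0;
}

Since the depth can never exceed 3 (per the comment moved into trace.h), two bits are
enough, and the flag plus depth now live in the per-stack graph variable instead of
task->trace_recursion.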