The field is used to keep track of the consecutive (on the same CPU) calls
of a single function. This information is needed in order to consolidate
the function tracing record in the case when a single function is called
a number of times in a row.

Signed-off-by: Yordan Karadzhov (VMware) <y.kar...@gmail.com>
---
 kernel/trace/trace.c |  1 +
 kernel/trace/trace.h | 17 +++++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e295c413580e..5f5fa08c0644 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8895,6 +8895,7 @@ static int __remove_instance(struct trace_array *tr)
        ftrace_clear_pids(tr);
        ftrace_destroy_function_files(tr);
        tracefs_remove(tr->dir);
+       free_percpu(tr->last_func_repeats);
        free_trace_buffers(tr);
 
        for (i = 0; i < tr->nr_topts; i++) {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2be4a56879de..09bf12c038f4 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -262,6 +262,16 @@ struct cond_snapshot {
        cond_update_fn_t                update;
 };
 
+/*
+ * struct trace_func_repeats - used to keep track of the consecutive
+ * (on the same CPU) calls of a single function.
+ */
+struct trace_func_repeats {
+       unsigned long ip;
+       unsigned long parent_ip;
+       unsigned long count;
+};
+
 /*
  * The trace array - an array of per-CPU trace arrays. This is the
  * highest level data structure that individual tracers deal with.
@@ -358,8 +368,15 @@ struct trace_array {
 #ifdef CONFIG_TRACER_SNAPSHOT
        struct cond_snapshot    *cond_snapshot;
 #endif
+       struct trace_func_repeats       __percpu *last_func_repeats;
 };
 
+static inline struct trace_func_repeats *
+tracer_alloc_func_repeats(struct trace_array *tr)
+{
+       return tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
+}
+
 enum {
        TRACE_ARRAY_FL_GLOBAL   = (1 << 0)
 };
-- 
2.25.1

Reply via email to