This patch provides only the implementation of the method. Later we will use it in combination with a new option for function tracing.
Signed-off-by: Yordan Karadzhov (VMware) <y.kar...@gmail.com> --- kernel/trace/trace.c | 21 +++++++++++++++++++++ kernel/trace/trace.h | 4 ++++ 2 files changed, 25 insertions(+) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 5f5fa08c0644..5c62fda666af 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3109,6 +3109,27 @@ static void ftrace_trace_userstack(struct trace_array *tr, #endif /* CONFIG_STACKTRACE */ +void trace_last_func_repeats(struct trace_array *tr, + struct trace_func_repeats *last_info, + unsigned int trace_ctx) +{ + struct trace_buffer *buffer = tr->array_buffer.buffer; + struct func_repeats_entry *entry; + struct ring_buffer_event *event; + + event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS, + sizeof(*entry), trace_ctx); + if (!event) + return; + + entry = ring_buffer_event_data(event); + entry->ip = last_info->ip; + entry->pip = last_info->parent_ip; + entry->count = last_info->count; + + __buffer_unlock_commit(buffer, event); +} + /* created for use with alloc_percpu */ struct trace_buffer_struct { int nesting; diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 09bf12c038f4..0ef823bb9594 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -696,6 +696,10 @@ static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, } #endif /* CONFIG_STACKTRACE */ +void trace_last_func_repeats(struct trace_array *tr, + struct trace_func_repeats *last_info, + unsigned int trace_ctx); + extern u64 ftrace_now(int cpu); extern void trace_find_cmdline(int pid, char comm[]); -- 2.25.1