Re: [PATCH v2 5/5] tracing: Add "func_no_repeats" option for function tracing

2021-04-05 Thread Steven Rostedt
On Mon, 29 Mar 2021 16:05:33 +0300
"Yordan Karadzhov (VMware)"  wrote:

> If the option is activated the function tracing record gets
> consolidated in the cases when a single function is called a number
> of times consecutively. Instead of having an identical record for
> each call of the function, we will record only the first call,
> followed by an event showing the number of repeats.
> 
> Signed-off-by: Yordan Karadzhov (VMware) 
> 
> fix last

You seem to have left the above extra text "fix last" in both versions.

But the rest of the patch looks fine.

-- Steve


[PATCH v2 5/5] tracing: Add "func_no_repeats" option for function tracing

2021-03-29 Thread Yordan Karadzhov (VMware)
If the option is activated the function tracing record gets
consolidated in the cases when a single function is called a number
of times consecutively. Instead of having an identical record for
each call of the function, we will record only the first call,
followed by an event showing the number of repeats.

Signed-off-by: Yordan Karadzhov (VMware) 

fix last
---
 kernel/trace/trace_functions.c | 161 ++++++++++++++++++++++++++++++++-
 1 file changed, 158 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 6c912eb0508a..72d2e07dc103 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -27,16 +27,28 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+struct ftrace_ops *op,
+struct ftrace_regs *fregs);
 static ftrace_func_t select_trace_function(u32 flags_val);
 static struct tracer_flags func_flags;
 
 /* Our option */
 enum {
-   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
-   TRACE_FUNC_OPT_STACK= 0x1,
+
+   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
+   TRACE_FUNC_OPT_STACK= 0x1,
+   TRACE_FUNC_OPT_NO_REPEATS   = 0x2,
+
+   /* Update this to next highest bit. */
+   TRACE_FUNC_OPT_HIGHEST_BIT  = 0x4
 };
 
-#define TRACE_FUNC_OPT_MASK(TRACE_FUNC_OPT_STACK)
+#define TRACE_FUNC_OPT_MASK(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
 
 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 {
@@ -90,6 +102,17 @@ void ftrace_destroy_function_files(struct trace_array *tr)
ftrace_free_ftrace_ops(tr);
 }
 
+static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
+{
+   if (!tr->last_func_repeats &&
+   (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
+   if (!tracer_alloc_func_repeats(tr))
+   return false;
+   }
+
+   return true;
+}
+
 static int function_trace_init(struct trace_array *tr)
 {
ftrace_func_t func;
@@ -105,6 +128,9 @@ static int function_trace_init(struct trace_array *tr)
if (!func)
return -EINVAL;
 
+   if (!handle_func_repeats(tr, func_flags.val))
+   return -ENOMEM;
+
ftrace_init_array_ops(tr, func);
 
tr->array_buffer.cpu = raw_smp_processor_id();
@@ -206,6 +232,127 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
local_irq_restore(flags);
 }
 
+static inline bool is_repeat_check(struct trace_array *tr,
+  struct trace_func_repeats *last_info,
+  unsigned long ip, unsigned long parent_ip)
+{
+   if (last_info->ip == ip &&
+   last_info->parent_ip == parent_ip &&
+   last_info->count < U16_MAX) {
+   last_info->ts_last_call =
+   ring_buffer_time_stamp(tr->array_buffer.buffer);
+   last_info->count++;
+   return true;
+   }
+
+   return false;
+}
+
+static inline void process_repeats(struct trace_array *tr,
+  unsigned long ip, unsigned long parent_ip,
+  struct trace_func_repeats *last_info,
+  unsigned int trace_ctx)
+{
+   if (last_info->count) {
+   trace_last_func_repeats(tr, last_info, trace_ctx);
+   last_info->count = 0;
+   }
+
+   last_info->ip = ip;
+   last_info->parent_ip = parent_ip;
+}
+
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+  struct ftrace_ops *op,
+  struct ftrace_regs *fregs)
+{
+   struct trace_func_repeats *last_info;
+   struct trace_array *tr = op->private;
+   struct trace_array_cpu *data;
+   unsigned int trace_ctx;
+   unsigned long flags;
+   int bit;
+   int cpu;
+
+   if (unlikely(!tr->function_enabled))
+   return;
+
+   bit = ftrace_test_recursion_trylock(ip, parent_ip);
+   if (bit < 0)
+   return;
+
+   preempt_disable_notrace();
+
+   cpu = smp_processor_id();
+   data = per_cpu_ptr(tr->array_buffer.data, cpu);
+   if (atomic_read(&data->disabled))
+   goto out;
+
+   /*
+* An interrupt may happen at any place here. But as far as I can see,
+* the only damage that this can cause is to mess up the repetition
+* counter without valuable data being