[PATCH v4 0/6] Add "func_no_repeats" tracing option

2021-04-15 Thread Yordan Karadzhov (VMware)
The new option for function tracing aims to save space on the ring
buffer and to make it more readable in the case when a single function
is called a number of times consecutively:

while (cond)
do_func();

Instead of having an identical record for each call of the function,
we will record only the first call, followed by an event showing the
number of repeats.
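
For reference, the mechanism added later in the series (the per-CPU
state from patch 3/6 and the check condensed from is_repeat_check() in
patch 6/6) boils down to:

	/* Per-CPU record of the last function call seen (patch 3/6). */
	struct trace_func_repeats {
		unsigned long	ip;
		unsigned long	parent_ip;
		unsigned long	count;
		u64		ts_last_call;
	};

	/*
	 * Same callee and caller as the previous call on this CPU:
	 * just bump the counter instead of writing another record
	 * (condensed from is_repeat_check() in patch 6/6).
	 */
	if (last_info->ip == ip && last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->count++;
		return;
	}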

v4 changes:
  * "[PATCH 1/5] Define static void trace_print_time()" is added to
the patch-set. This patch simply applies the modification suggested
by Steven Rostedt in his review of v3. My contribution to the code
of this patch is really negligible.

  * FUNC_REPEATS_GET_DELTA_TS macro is improved.

  * The way we print the time of the last function repeat is improved.

  * "tracer_alloc_func_repeats(struct trace_array *tr)" is removed from
trace.h.

  * The FUNC_REPEATS_SET_DELTA_TS macro is replaced by a static inline
function.

v3 changes:
  * FUNC_REPEATS_SET_DELTA_TS macro has been optimised.

  * Fixed a bug in func_set_flag(): in the previous version the value
of the "new_flags" variable was not calculated properly, because
I had misinterpreted the meaning of the "bit" argument of the function.
The bug in v2 had no real effect, because for the moment we have
only two "function options", so the value of "new_flags" was still correct,
even though the way it was calculated was wrong.

v2 changes:
  * As suggested by Steven in his review, we now record not only the
repetition count, but also the time elapsed between the last
repetition of the function and the actual generation of the
"func_repeats" event. 16 bits are used to record the repetition
count. In the case of an overflow of the counter, a second pair of
"function" and "func_repeats" events will be generated. The time
interval is encoded using up to 48 (32 + 16) bits, as sketched below.
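
A minimal sketch of the encoding, matching the func_repeats_set_delta_ts()
helper added in patch 4/6 and the FUNC_REPEATS_GET_DELTA_TS macro from
patch 2/6:

	/* Store a delta of up to 48 bits in a u32 + u16 pair ... */
	entry->bottom_delta_ts = delta & U32_MAX;	/* lower 32 bits */
	entry->top_delta_ts = (delta >> 32);		/* upper 16 bits */

	/* ... and reassemble it when the event is printed. */
	delta = ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;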


Yordan Karadzhov (VMware) (6):
  tracing: Define static void trace_print_time()
  tracing: Define new ftrace event "func_repeats"
  tracing: Add "last_func_repeats" to struct trace_array
  tracing: Add method for recording "func_repeats" events
  tracing: Unify the logic for function tracing options
  tracing: Add "func_no_repeats" option for function tracing

 kernel/trace/trace.c   |  35 ++
 kernel/trace/trace.h   |  19 +++
 kernel/trace/trace_entries.h   |  22 
 kernel/trace/trace_functions.c | 223 -
 kernel/trace/trace_output.c|  74 +--
 5 files changed, 336 insertions(+), 37 deletions(-)

-- 
2.25.1



[PATCH v4 6/6] tracing: Add "func_no_repeats" option for function tracing

2021-04-15 Thread Yordan Karadzhov (VMware)
If the option is activated, the function tracing record gets
consolidated in the cases when a single function is called a number
of times consecutively. Instead of having an identical record for
each call of the function, we record only the first call, followed
by an event showing the number of repeats.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace_functions.c | 162 -
 1 file changed, 159 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index f37f73a9b1b8..1f0e63f5d1f9 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -27,15 +27,27 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+struct ftrace_ops *op,
+struct ftrace_regs *fregs);
 static struct tracer_flags func_flags;
 
 /* Our option */
 enum {
-   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
-   TRACE_FUNC_OPT_STACK= 0x1,
+
+   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
+   TRACE_FUNC_OPT_STACK= 0x1,
+   TRACE_FUNC_OPT_NO_REPEATS   = 0x2,
+
+   /* Update this to next highest bit. */
+   TRACE_FUNC_OPT_HIGHEST_BIT  = 0x4
 };
 
-#define TRACE_FUNC_OPT_MASK   (TRACE_FUNC_OPT_STACK)
+#define TRACE_FUNC_OPT_MASK   (TRACE_FUNC_OPT_HIGHEST_BIT - 1)
 
 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 {
@@ -96,11 +108,27 @@ static ftrace_func_t select_trace_function(u32 flags_val)
return function_trace_call;
case TRACE_FUNC_OPT_STACK:
return function_stack_trace_call;
+   case TRACE_FUNC_OPT_NO_REPEATS:
+   return function_no_repeats_trace_call;
+   case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
+   return function_stack_no_repeats_trace_call;
default:
return NULL;
}
 }
 
+static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
+{
+   if (!tr->last_func_repeats &&
+   (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
+   tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
+   if (!tr->last_func_repeats)
+   return false;
+   }
+
+   return true;
+}
+
 static int function_trace_init(struct trace_array *tr)
 {
ftrace_func_t func;
@@ -116,6 +144,9 @@ static int function_trace_init(struct trace_array *tr)
if (!func)
return -EINVAL;
 
+   if (!handle_func_repeats(tr, func_flags.val))
+   return -ENOMEM;
+
ftrace_init_array_ops(tr, func);
 
tr->array_buffer.cpu = raw_smp_processor_id();
@@ -217,10 +248,132 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
local_irq_restore(flags);
 }
 
+static inline bool is_repeat_check(struct trace_array *tr,
+  struct trace_func_repeats *last_info,
+  unsigned long ip, unsigned long parent_ip)
+{
+   if (last_info->ip == ip &&
+   last_info->parent_ip == parent_ip &&
+   last_info->count < U16_MAX) {
+   last_info->ts_last_call =
+   ring_buffer_time_stamp(tr->array_buffer.buffer);
+   last_info->count++;
+   return true;
+   }
+
+   return false;
+}
+
+static inline void process_repeats(struct trace_array *tr,
+  unsigned long ip, unsigned long parent_ip,
+  struct trace_func_repeats *last_info,
+  unsigned int trace_ctx)
+{
+   if (last_info->count) {
+   trace_last_func_repeats(tr, last_info, trace_ctx);
+   last_info->count = 0;
+   }
+
+   last_info->ip = ip;
+   last_info->parent_ip = parent_ip;
+}
+
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+  struct ftrace_ops *op,
+  struct ftrace_regs *fregs)
+{
+   struct trace_func_repeats *last_info;
+   struct trace_array *tr = op->private;
+   struct trace_array_cpu *data;
+   unsigned int trace_ctx;
+   unsigned long flags;
+   int bit;
+   int cpu;
+
+   if (unlikely(!tr->function_enabled))
+   return;
+
+   bit = ftrace_test_recursion_trylock(ip, parent_ip);

[PATCH v4 3/6] tracing: Add "last_func_repeats" to struct trace_array

2021-04-15 Thread Yordan Karadzhov (VMware)
The field is used to keep track of the consecutive (on the same CPU) calls
of a single function. This information is needed in order to consolidate
the function tracing record in the cases when a single function is called
a number of times consecutively.
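
The field is a per-CPU pointer. For illustration only (the allocation
matches handle_func_repeats() in patch 6/6; the access pattern shown here
is an assumption, not part of this patch):

	/* Allocated lazily, the first time the "func_no_repeats" option is used: */
	tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);

	/* Looked up from the tracing callback on the local CPU (hypothetical): */
	struct trace_func_repeats *last_info =
		this_cpu_ptr(tr->last_func_repeats);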

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.c |  1 +
 kernel/trace/trace.h | 12 
 2 files changed, 13 insertions(+)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 507a30bf26e4..82833be07c1e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9104,6 +9104,7 @@ static int __remove_instance(struct trace_array *tr)
ftrace_clear_pids(tr);
ftrace_destroy_function_files(tr);
tracefs_remove(tr->dir);
+   free_percpu(tr->last_func_repeats);
free_trace_buffers(tr);
 
for (i = 0; i < tr->nr_topts; i++) {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6a5b4c2a0fa7..a4f1b66049fd 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -262,6 +262,17 @@ struct cond_snapshot {
cond_update_fn_t		update;
 };
 
+/*
+ * struct trace_func_repeats - used to keep track of the consecutive
+ * (on the same CPU) calls of a single function.
+ */
+struct trace_func_repeats {
+   unsigned long   ip;
+   unsigned long   parent_ip;
+   unsigned long   count;
+   u64 ts_last_call;
+};
+
 /*
  * The trace array - an array of per-CPU trace arrays. This is the
  * highest level data structure that individual tracers deal with.
@@ -358,6 +369,7 @@ struct trace_array {
 #ifdef CONFIG_TRACER_SNAPSHOT
struct cond_snapshot*cond_snapshot;
 #endif
+   struct trace_func_repeats   __percpu *last_func_repeats;
 };
 
 enum {
-- 
2.25.1



[PATCH v4 4/6] tracing: Add method for recording "func_repeats" events

2021-04-15 Thread Yordan Karadzhov (VMware)
This patch only provides the implementation of the method.
Later we will use it in combination with a new option for
function tracing.
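
For context, this is roughly how the method gets called from the function
tracer later in the series (condensed from process_repeats() in patch 6/6):
when the chain of identical calls is broken, one summary event is emitted
and the counter is reset.

	if (last_info->count) {
		trace_last_func_repeats(tr, last_info, trace_ctx);
		last_info->count = 0;
	}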

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.c | 34 ++
 kernel/trace/trace.h |  4 
 2 files changed, 38 insertions(+)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82833be07c1e..66a4ad93b5e9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3117,6 +3117,40 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 
 #endif /* CONFIG_STACKTRACE */
 
+static inline void
+func_repeats_set_delta_ts(struct func_repeats_entry *entry,
+ unsigned long long delta)
+{
+   entry->bottom_delta_ts = delta & U32_MAX;
+   entry->top_delta_ts = (delta >> 32);
+}
+
+void trace_last_func_repeats(struct trace_array *tr,
+struct trace_func_repeats *last_info,
+unsigned int trace_ctx)
+{
+   struct trace_buffer *buffer = tr->array_buffer.buffer;
+   struct func_repeats_entry *entry;
+   struct ring_buffer_event *event;
+   u64 delta;
+
+   event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
+   sizeof(*entry), trace_ctx);
+   if (!event)
+   return;
+
+   delta = ring_buffer_event_time_stamp(buffer, event) -
+   last_info->ts_last_call;
+
+   entry = ring_buffer_event_data(event);
+   entry->ip = last_info->ip;
+   entry->parent_ip = last_info->parent_ip;
+   entry->count = last_info->count;
+   func_repeats_set_delta_ts(entry, delta);
+
+   __buffer_unlock_commit(buffer, event);
+}
+
 /* created for use with alloc_percpu */
 struct trace_buffer_struct {
int nesting;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a4f1b66049fd..cd80d046c7a5 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -695,6 +695,10 @@ static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
 }
 #endif /* CONFIG_STACKTRACE */
 
+void trace_last_func_repeats(struct trace_array *tr,
+struct trace_func_repeats *last_info,
+unsigned int trace_ctx);
+
 extern u64 ftrace_now(int cpu);
 
 extern void trace_find_cmdline(int pid, char comm[]);
-- 
2.25.1



[PATCH v4 1/6] tracing: Define static void trace_print_time()

2021-04-15 Thread Yordan Karadzhov (VMware)
The part of the code that prints the time of the trace record in
"int trace_print_context()" gets extracted into a static function. This
is done as a preparation for a following patch, in which we will define
a new ftrace event called "func_repeats". The new static method,
defined here, will be used by this new event to print the time of the
last repeat of a function that is called a number of times consecutively.
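
As a worked example of what the extracted helper prints (assuming
nanosecond timestamps, i.e. TRACE_FILE_TIME_IN_NS is set):

	/*
	 * ts = 5034000123 ns
	 * t = ns2usecs(ts)                    ->  t = 5034000 (us)
	 * usec_rem = do_div(t, USEC_PER_SEC)  ->  t = 5, usec_rem = 34000
	 * output: " 5.034000"
	 */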

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace_output.c | 26 +-
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index a0146e1fffdf..333233d45596 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -587,13 +587,26 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
return !trace_seq_has_overflowed(s);
 }
 
+static void trace_print_time(struct trace_seq *s, struct trace_iterator *iter,
+unsigned long long ts)
+{
+   unsigned long secs, usec_rem;
+   unsigned long long t;
+
+   if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
+   t = ns2usecs(ts);
+   usec_rem = do_div(t, USEC_PER_SEC);
+   secs = (unsigned long)t;
+   trace_seq_printf(s, " %5lu.%06lu", secs, usec_rem);
+   } else
+   trace_seq_printf(s, " %12llu", ts);
+}
+
 int trace_print_context(struct trace_iterator *iter)
 {
struct trace_array *tr = iter->tr;
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
-   unsigned long long t;
-   unsigned long secs, usec_rem;
char comm[TASK_COMM_LEN];
 
trace_find_cmdline(entry->pid, comm);
@@ -614,13 +627,8 @@ int trace_print_context(struct trace_iterator *iter)
if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
trace_print_lat_fmt(s, entry);
 
-   if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
-   t = ns2usecs(iter->ts);
-   usec_rem = do_div(t, USEC_PER_SEC);
-   secs = (unsigned long)t;
-   trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
-   } else
-   trace_seq_printf(s, " %12llu: ", iter->ts);
+   trace_print_time(s, iter, iter->ts);
+   trace_seq_puts(s, ": ");
 
return !trace_seq_has_overflowed(s);
 }
-- 
2.25.1



[PATCH v4 5/6] tracing: Unify the logic for function tracing options

2021-04-15 Thread Yordan Karadzhov (VMware)
Currently the logic for dealing with the options for function tracing
has two different implementations. One is used when we set the flags
(in "static int func_set_flag()") and another is used when we initialize
the tracer (in "static int function_trace_init()"). Those two
implementations are meant to do essentially the same thing, and neither
is very convenient for adding new options. In this patch
we add a helper function that provides a single implementation of
the logic for dealing with the options, and we make it such that new
options can be easily added.
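
To show what "easily added" means in practice: a new option only needs an
extra case in the helper's switch. This is exactly how the next patch in
the series extends it (together with the stack + no-repeats combination):

	static ftrace_func_t select_trace_function(u32 flags_val)
	{
		switch (flags_val & TRACE_FUNC_OPT_MASK) {
		case TRACE_FUNC_NO_OPTS:
			return function_trace_call;
		case TRACE_FUNC_OPT_STACK:
			return function_stack_trace_call;
		/* added by patch 6/6: */
		case TRACE_FUNC_OPT_NO_REPEATS:
			return function_no_repeats_trace_call;
		default:
			return NULL;
		}
	}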

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace_functions.c | 65 --
 1 file changed, 38 insertions(+), 27 deletions(-)

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index f93723ca66bc..f37f73a9b1b8 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -31,9 +31,12 @@ static struct tracer_flags func_flags;
 
 /* Our option */
 enum {
+   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
TRACE_FUNC_OPT_STACK= 0x1,
 };
 
+#define TRACE_FUNC_OPT_MASK   (TRACE_FUNC_OPT_STACK)
+
 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 {
struct ftrace_ops *ops;
@@ -86,6 +89,18 @@ void ftrace_destroy_function_files(struct trace_array *tr)
ftrace_free_ftrace_ops(tr);
 }
 
+static ftrace_func_t select_trace_function(u32 flags_val)
+{
+   switch (flags_val & TRACE_FUNC_OPT_MASK) {
+   case TRACE_FUNC_NO_OPTS:
+   return function_trace_call;
+   case TRACE_FUNC_OPT_STACK:
+   return function_stack_trace_call;
+   default:
+   return NULL;
+   }
+}
+
 static int function_trace_init(struct trace_array *tr)
 {
ftrace_func_t func;
@@ -97,12 +112,9 @@ static int function_trace_init(struct trace_array *tr)
if (!tr->ops)
return -ENOMEM;
 
-   /* Currently only the global instance can do stack tracing */
-   if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
-   func_flags.val & TRACE_FUNC_OPT_STACK)
-   func = function_stack_trace_call;
-   else
-   func = function_trace_call;
+   func = select_trace_function(func_flags.val);
+   if (!func)
+   return -EINVAL;
 
ftrace_init_array_ops(tr, func);
 
@@ -213,7 +225,7 @@ static struct tracer_opt func_opts[] = {
 };
 
 static struct tracer_flags func_flags = {
-   .val = 0, /* By default: all flags disabled */
+   .val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
.opts = func_opts
 };
 
@@ -235,30 +247,29 @@ static struct tracer function_trace;
 static int
 func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
-   switch (bit) {
-   case TRACE_FUNC_OPT_STACK:
-   /* do nothing if already set */
-   if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
-   break;
-
-   /* We can change this flag when not running. */
-   if (tr->current_trace != &function_trace)
-   break;
+   ftrace_func_t func;
+   u32 new_flags;
 
-   unregister_ftrace_function(tr->ops);
+   /* Do nothing if already set. */
+   if (!!set == !!(func_flags.val & bit))
+   return 0;
 
-   if (set) {
-   tr->ops->func = function_stack_trace_call;
-   register_ftrace_function(tr->ops);
-   } else {
-   tr->ops->func = function_trace_call;
-   register_ftrace_function(tr->ops);
-   }
+   /* We can change this flag only when not running. */
+   if (tr->current_trace != &function_trace)
+   return 0;
 
-   break;
-   default:
+   new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
+   func = select_trace_function(new_flags);
+   if (!func)
return -EINVAL;
-   }
+
+   /* Check if there's anything to change. */
+   if (tr->ops->func == func)
+   return 0;
+
+   unregister_ftrace_function(tr->ops);
+   tr->ops->func = func;
+   register_ftrace_function(tr->ops);
 
return 0;
 }
-- 
2.25.1



[PATCH v4 2/6] tracing: Define new ftrace event "func_repeats"

2021-04-15 Thread Yordan Karadzhov (VMware)
The event aims to consolidate the function tracing record in the cases
when a single function is called a number of times consecutively.

while (cond)
do_func();

This may happen in various scenarios (busy waiting, for example).
The new ftrace event can be used to show repeated function events with
a single event and save space on the ring buffer.
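
The entry generated by the FTRACE_ENTRY() definition below packs the
repeat count and the timestamp delta into 16 + 48 bits; its C layout is
roughly (the macro also prepends the common struct trace_entry header):

	struct func_repeats_entry {
		struct trace_entry	ent;
		unsigned long		ip;
		unsigned long		parent_ip;
		u16			count;		/* number of repeats	  */
		u16			top_delta_ts;	/* upper 16 bits of delta */
		u32			bottom_delta_ts;/* lower 32 bits of delta */
	};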

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.h |  3 +++
 kernel/trace/trace_entries.h | 22 +
 kernel/trace/trace_output.c  | 48 
 3 files changed, 73 insertions(+)

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5506424eae2a..6a5b4c2a0fa7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -45,6 +45,7 @@ enum trace_type {
TRACE_BPUTS,
TRACE_HWLAT,
TRACE_RAW_DATA,
+   TRACE_FUNC_REPEATS,
 
__TRACE_LAST_TYPE,
 };
@@ -442,6 +443,8 @@ extern void __ftrace_bad_type(void);
  TRACE_GRAPH_ENT); \
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,  \
  TRACE_GRAPH_RET); \
+   IF_ASSIGN(var, ent, struct func_repeats_entry,  \
+ TRACE_FUNC_REPEATS);  \
__ftrace_bad_type();\
} while (0)
 
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 4547ac59da61..251c819cf0c5 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -338,3 +338,25 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
 __entry->nmi_total_ts,
 __entry->nmi_count)
 );
+
+#define FUNC_REPEATS_GET_DELTA_TS(entry)   \
+   (((u64)(entry)->top_delta_ts << 32) | (entry)->bottom_delta_ts) \
+
+FTRACE_ENTRY(func_repeats, func_repeats_entry,
+
+   TRACE_FUNC_REPEATS,
+
+   F_STRUCT(
+   __field(unsigned long,  ip  )
+   __field(unsigned long,  parent_ip   )
+   __field(u16 ,   count   )
+   __field(u16 ,   top_delta_ts)
+   __field(u32 ,   bottom_delta_ts )
+   ),
+
+   F_printk(" %ps <-%ps\t(repeats:%u  delta: -%llu)",
+(void *)__entry->ip,
+(void *)__entry->parent_ip,
+__entry->count,
+FUNC_REPEATS_GET_DELTA_TS(__entry))
+);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 333233d45596..3037f0c88f90 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1381,6 +1381,53 @@ static struct trace_event trace_raw_data_event = {
.funcs  = &trace_raw_data_funcs,
 };
 
+static enum print_line_t
+trace_func_repeats_raw(struct trace_iterator *iter, int flags,
+struct trace_event *event)
+{
+   struct func_repeats_entry *field;
+   struct trace_seq *s = &iter->seq;
+
+   trace_assign_type(field, iter->ent);
+
+   trace_seq_printf(s, "%lu %lu %u %llu\n",
+field->ip,
+field->parent_ip,
+field->count,
+FUNC_REPEATS_GET_DELTA_TS(field));
+
+   return trace_handle_return(s);
+}
+
+static enum print_line_t
+trace_func_repeats_print(struct trace_iterator *iter, int flags,
+struct trace_event *event)
+{
+   struct func_repeats_entry *field;
+   struct trace_seq *s = &iter->seq;
+
+   trace_assign_type(field, iter->ent);
+
+   seq_print_ip_sym(s, field->ip, flags);
+   trace_seq_puts(s, " <-");
+   seq_print_ip_sym(s, field->parent_ip, flags);
+   trace_seq_printf(s, " (repeats: %u, last_ts:", field->count);
+   trace_print_time(s, iter,
+iter->ts - FUNC_REPEATS_GET_DELTA_TS(field));
+   trace_seq_puts(s, ")\n");
+
+   return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_func_repeats_funcs = {
+   .trace  = trace_func_repeats_print,
+   .raw= trace_func_repeats_raw,
+};
+
+static struct trace_event trace_func_repeats_event = {
+   .type   = TRACE_FUNC_REPEATS,
+   .funcs  = &trace_func_repeats_funcs,
+};
 
 static struct trace_event *events[] __initdata = {
&trace_fn_event,
@@ -1393,6 +1440,7 @@ static struct trace_event *events[] __initdata = {
&trace_print_event,
&trace_hwlat_event,
&trace_raw_data_event,
+   &trace_func_repeats_event,
NULL
 };
 
-- 
2.25.1



[PATCH v3 2/5] tracing: Add "last_func_repeats" to struct trace_array

2021-04-09 Thread Yordan Karadzhov (VMware)
The field is used to keep track of the consecutive (on the same CPU) calls
of a single function. This information is needed in order to consolidate
the function tracing record in the cases when a single function is called
a number of times consecutively.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.c |  1 +
 kernel/trace/trace.h | 18 ++
 2 files changed, 19 insertions(+)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 507a30bf26e4..82833be07c1e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9104,6 +9104,7 @@ static int __remove_instance(struct trace_array *tr)
ftrace_clear_pids(tr);
ftrace_destroy_function_files(tr);
tracefs_remove(tr->dir);
+   free_percpu(tr->last_func_repeats);
free_trace_buffers(tr);
 
for (i = 0; i < tr->nr_topts; i++) {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6a5b4c2a0fa7..1cd4da7ba769 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -262,6 +262,17 @@ struct cond_snapshot {
cond_update_fn_t		update;
 };
 
+/*
+ * struct trace_func_repeats - used to keep track of the consecutive
+ * (on the same CPU) calls of a single function.
+ */
+struct trace_func_repeats {
+   unsigned long   ip;
+   unsigned long   parent_ip;
+   unsigned long   count;
+   u64 ts_last_call;
+};
+
 /*
  * The trace array - an array of per-CPU trace arrays. This is the
  * highest level data structure that individual tracers deal with.
@@ -358,8 +369,15 @@ struct trace_array {
 #ifdef CONFIG_TRACER_SNAPSHOT
struct cond_snapshot*cond_snapshot;
 #endif
+   struct trace_func_repeats   __percpu *last_func_repeats;
 };
 
+static inline struct trace_func_repeats __percpu *
+tracer_alloc_func_repeats(struct trace_array *tr)
+{
+   return tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
+}
+
 enum {
TRACE_ARRAY_FL_GLOBAL   = (1 << 0)
 };
-- 
2.25.1



[PATCH v3 3/5] tracing: Add method for recording "func_repeats" events

2021-04-09 Thread Yordan Karadzhov (VMware)
This patch only provides the implementation of the method.
Later we will use it in combination with a new option for
function tracing.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.c | 26 ++
 kernel/trace/trace.h |  4 
 kernel/trace/trace_entries.h |  6 ++
 3 files changed, 36 insertions(+)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82833be07c1e..bbc57cf3bda4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3117,6 +3117,32 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 
 #endif /* CONFIG_STACKTRACE */
 
+void trace_last_func_repeats(struct trace_array *tr,
+struct trace_func_repeats *last_info,
+unsigned int trace_ctx)
+{
+   struct trace_buffer *buffer = tr->array_buffer.buffer;
+   struct func_repeats_entry *entry;
+   struct ring_buffer_event *event;
+   u64 delta;
+
+   event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
+   sizeof(*entry), trace_ctx);
+   if (!event)
+   return;
+
+   delta = ring_buffer_event_time_stamp(buffer, event) -
+   last_info->ts_last_call;
+
+   entry = ring_buffer_event_data(event);
+   entry->ip = last_info->ip;
+   entry->parent_ip = last_info->parent_ip;
+   entry->count = last_info->count;
+   FUNC_REPEATS_SET_DELTA_TS(entry, delta)
+
+   __buffer_unlock_commit(buffer, event);
+}
+
 /* created for use with alloc_percpu */
 struct trace_buffer_struct {
int nesting;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1cd4da7ba769..e1f34119c036 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -701,6 +701,10 @@ static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
 }
 #endif /* CONFIG_STACKTRACE */
 
+void trace_last_func_repeats(struct trace_array *tr,
+struct trace_func_repeats *last_info,
+unsigned int trace_ctx);
+
 extern u64 ftrace_now(int cpu);
 
 extern void trace_find_cmdline(int pid, char comm[]);
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index fdd022a7aecf..5e9dc56af4b1 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -342,6 +342,12 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
 #define FUNC_REPEATS_GET_DELTA_TS(entry)   \
 (((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts)\
 
+#define FUNC_REPEATS_SET_DELTA_TS(entry, delta)\
+   do {\
+   entry->bottom_delta_ts = delta & U32_MAX;   \
+   entry->top_delta_ts = (delta >> 32);\
+   } while (0);\
+
 FTRACE_ENTRY(func_repeats, func_repeats_entry,
 
TRACE_FUNC_REPEATS,
-- 
2.25.1



[PATCH v3 1/5] tracing: Define new ftrace event "func_repeats"

2021-04-09 Thread Yordan Karadzhov (VMware)
The event aims to consolidate the function tracing record in the cases
when a single function is called a number of times consecutively.

while (cond)
do_func();

This may happen in various scenarios (busy waiting, for example).
The new ftrace event can be used to show repeated function events with
a single event and save space on the ring buffer.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.h |  3 +++
 kernel/trace/trace_entries.h | 22 +
 kernel/trace/trace_output.c  | 47 
 3 files changed, 72 insertions(+)

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5506424eae2a..6a5b4c2a0fa7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -45,6 +45,7 @@ enum trace_type {
TRACE_BPUTS,
TRACE_HWLAT,
TRACE_RAW_DATA,
+   TRACE_FUNC_REPEATS,
 
__TRACE_LAST_TYPE,
 };
@@ -442,6 +443,8 @@ extern void __ftrace_bad_type(void);
  TRACE_GRAPH_ENT); \
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,  \
  TRACE_GRAPH_RET); \
+   IF_ASSIGN(var, ent, struct func_repeats_entry,  \
+ TRACE_FUNC_REPEATS);  \
__ftrace_bad_type();\
} while (0)
 
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 4547ac59da61..fdd022a7aecf 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -338,3 +338,25 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
 __entry->nmi_total_ts,
 __entry->nmi_count)
 );
+
+#define FUNC_REPEATS_GET_DELTA_TS(entry)   \
+(((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts)\
+
+FTRACE_ENTRY(func_repeats, func_repeats_entry,
+
+   TRACE_FUNC_REPEATS,
+
+   F_STRUCT(
+   __field(unsigned long,  ip  )
+   __field(unsigned long,  parent_ip   )
+   __field(u16 ,   count   )
+   __field(u16 ,   top_delta_ts)
+   __field(u32 ,   bottom_delta_ts )
+   ),
+
+   F_printk(" %ps <-%ps\t(repeats:%u  delta_ts: -%llu)",
+(void *)__entry->ip,
+(void *)__entry->parent_ip,
+__entry->count,
+FUNC_REPEATS_GET_DELTA_TS(__entry))
+);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index a0146e1fffdf..55b08e146afc 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1373,6 +1373,52 @@ static struct trace_event trace_raw_data_event = {
.funcs  = &trace_raw_data_funcs,
 };
 
+static enum print_line_t
+trace_func_repeats_raw(struct trace_iterator *iter, int flags,
+struct trace_event *event)
+{
+   struct func_repeats_entry *field;
+   struct trace_seq *s = &iter->seq;
+
+   trace_assign_type(field, iter->ent);
+
+   trace_seq_printf(s, "%lu %lu %u %llu\n",
+field->ip,
+field->parent_ip,
+field->count,
+FUNC_REPEATS_GET_DELTA_TS(field));
+
+   return trace_handle_return(s);
+}
+
+static enum print_line_t
+trace_func_repeats_print(struct trace_iterator *iter, int flags,
+struct trace_event *event)
+{
+   struct func_repeats_entry *field;
+   struct trace_seq *s = &iter->seq;
+
+   trace_assign_type(field, iter->ent);
+
+   seq_print_ip_sym(s, field->ip, flags);
+   trace_seq_puts(s, " <-");
+   seq_print_ip_sym(s, field->parent_ip, flags);
+   trace_seq_printf(s, " (repeats: %u, delta_ts: -%llu)\n",
+field->count,
+FUNC_REPEATS_GET_DELTA_TS(field));
+
+   return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_func_repeats_funcs = {
+   .trace  = trace_func_repeats_print,
+   .raw= trace_func_repeats_raw,
+};
+
+static struct trace_event trace_func_repeats_event = {
+   .type   = TRACE_FUNC_REPEATS,
+   .funcs  = &trace_func_repeats_funcs,
+};
 
 static struct trace_event *events[] __initdata = {
&trace_fn_event,
@@ -1385,6 +1431,7 @@ static struct trace_event *events[] __initdata = {
&trace_print_event,
&trace_hwlat_event,
&trace_raw_data_event,
+   &trace_func_repeats_event,
NULL
 };
 
-- 
2.25.1



[PATCH v3 5/5] tracing: Add "func_no_repeats" option for function tracing

2021-04-09 Thread Yordan Karadzhov (VMware)
If the option is activated, the function tracing record gets
consolidated in the cases when a single function is called a number
of times consecutively. Instead of having an identical record for
each call of the function, we record only the first call, followed
by an event showing the number of repeats.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace_functions.c | 161 -
 1 file changed, 158 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index f37f73a9b1b8..9a3cbdbfd1f7 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -27,15 +27,27 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+struct ftrace_ops *op,
+struct ftrace_regs *fregs);
 static struct tracer_flags func_flags;
 
 /* Our option */
 enum {
-   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
-   TRACE_FUNC_OPT_STACK= 0x1,
+
+   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
+   TRACE_FUNC_OPT_STACK= 0x1,
+   TRACE_FUNC_OPT_NO_REPEATS   = 0x2,
+
+   /* Update this to next highest bit. */
+   TRACE_FUNC_OPT_HIGHEST_BIT  = 0x4
 };
 
-#define TRACE_FUNC_OPT_MASK   (TRACE_FUNC_OPT_STACK)
+#define TRACE_FUNC_OPT_MASK   (TRACE_FUNC_OPT_HIGHEST_BIT - 1)
 
 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 {
@@ -96,11 +108,26 @@ static ftrace_func_t select_trace_function(u32 flags_val)
return function_trace_call;
case TRACE_FUNC_OPT_STACK:
return function_stack_trace_call;
+   case TRACE_FUNC_OPT_NO_REPEATS:
+   return function_no_repeats_trace_call;
+   case TRACE_FUNC_OPT_STACK | TRACE_FUNC_OPT_NO_REPEATS:
+   return function_stack_no_repeats_trace_call;
default:
return NULL;
}
 }
 
+static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
+{
+   if (!tr->last_func_repeats &&
+   (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
+   if (!tracer_alloc_func_repeats(tr))
+   return false;
+   }
+
+   return true;
+}
+
 static int function_trace_init(struct trace_array *tr)
 {
ftrace_func_t func;
@@ -116,6 +143,9 @@ static int function_trace_init(struct trace_array *tr)
if (!func)
return -EINVAL;
 
+   if (!handle_func_repeats(tr, func_flags.val))
+   return -ENOMEM;
+
ftrace_init_array_ops(tr, func);
 
tr->array_buffer.cpu = raw_smp_processor_id();
@@ -217,10 +247,132 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
local_irq_restore(flags);
 }
 
+static inline bool is_repeat_check(struct trace_array *tr,
+  struct trace_func_repeats *last_info,
+  unsigned long ip, unsigned long parent_ip)
+{
+   if (last_info->ip == ip &&
+   last_info->parent_ip == parent_ip &&
+   last_info->count < U16_MAX) {
+   last_info->ts_last_call =
+   ring_buffer_time_stamp(tr->array_buffer.buffer);
+   last_info->count++;
+   return true;
+   }
+
+   return false;
+}
+
+static inline void process_repeats(struct trace_array *tr,
+  unsigned long ip, unsigned long parent_ip,
+  struct trace_func_repeats *last_info,
+  unsigned int trace_ctx)
+{
+   if (last_info->count) {
+   trace_last_func_repeats(tr, last_info, trace_ctx);
+   last_info->count = 0;
+   }
+
+   last_info->ip = ip;
+   last_info->parent_ip = parent_ip;
+}
+
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+  struct ftrace_ops *op,
+  struct ftrace_regs *fregs)
+{
+   struct trace_func_repeats *last_info;
+   struct trace_array *tr = op->private;
+   struct trace_array_cpu *data;
+   unsigned int trace_ctx;
+   unsigned long flags;
+   int bit;
+   int cpu;
+
+   if (unlikely(!tr->function_enabled))
+   return;
+
+   bit = ftrace_test_recursion_trylock(ip, parent_ip);
+   if (bit < 0)
+   return;
+
+   preempt_disable_

[PATCH v3 4/5] tracing: Unify the logic for function tracing options

2021-04-09 Thread Yordan Karadzhov (VMware)
Currently the logic for dealing with the options for function tracing
has two different implementations. One is used when we set the flags
(in "static int func_set_flag()") and another is used when we initialize
the tracer (in "static int function_trace_init()"). Those two
implementations are meant to do essentially the same thing, and neither
is very convenient for adding new options. In this patch
we add a helper function that provides a single implementation of
the logic for dealing with the options, and we make it such that new
options can be easily added.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace_functions.c | 65 --
 1 file changed, 38 insertions(+), 27 deletions(-)

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index f93723ca66bc..f37f73a9b1b8 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -31,9 +31,12 @@ static struct tracer_flags func_flags;
 
 /* Our option */
 enum {
+   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
TRACE_FUNC_OPT_STACK= 0x1,
 };
 
+#define TRACE_FUNC_OPT_MASK   (TRACE_FUNC_OPT_STACK)
+
 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 {
struct ftrace_ops *ops;
@@ -86,6 +89,18 @@ void ftrace_destroy_function_files(struct trace_array *tr)
ftrace_free_ftrace_ops(tr);
 }
 
+static ftrace_func_t select_trace_function(u32 flags_val)
+{
+   switch (flags_val & TRACE_FUNC_OPT_MASK) {
+   case TRACE_FUNC_NO_OPTS:
+   return function_trace_call;
+   case TRACE_FUNC_OPT_STACK:
+   return function_stack_trace_call;
+   default:
+   return NULL;
+   }
+}
+
 static int function_trace_init(struct trace_array *tr)
 {
ftrace_func_t func;
@@ -97,12 +112,9 @@ static int function_trace_init(struct trace_array *tr)
if (!tr->ops)
return -ENOMEM;
 
-   /* Currently only the global instance can do stack tracing */
-   if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
-   func_flags.val & TRACE_FUNC_OPT_STACK)
-   func = function_stack_trace_call;
-   else
-   func = function_trace_call;
+   func = select_trace_function(func_flags.val);
+   if (!func)
+   return -EINVAL;
 
ftrace_init_array_ops(tr, func);
 
@@ -213,7 +225,7 @@ static struct tracer_opt func_opts[] = {
 };
 
 static struct tracer_flags func_flags = {
-   .val = 0, /* By default: all flags disabled */
+   .val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
.opts = func_opts
 };
 
@@ -235,30 +247,29 @@ static struct tracer function_trace;
 static int
 func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
-   switch (bit) {
-   case TRACE_FUNC_OPT_STACK:
-   /* do nothing if already set */
-   if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
-   break;
-
-   /* We can change this flag when not running. */
-   if (tr->current_trace != &function_trace)
-   break;
+   ftrace_func_t func;
+   u32 new_flags;
 
-   unregister_ftrace_function(tr->ops);
+   /* Do nothing if already set. */
+   if (!!set == !!(func_flags.val & bit))
+   return 0;
 
-   if (set) {
-   tr->ops->func = function_stack_trace_call;
-   register_ftrace_function(tr->ops);
-   } else {
-   tr->ops->func = function_trace_call;
-   register_ftrace_function(tr->ops);
-   }
+   /* We can change this flag only when not running. */
+   if (tr->current_trace != &function_trace)
+   return 0;
 
-   break;
-   default:
+   new_flags = (func_flags.val & ~bit) | (set ? bit : 0);
+   func = select_trace_function(new_flags);
+   if (!func)
return -EINVAL;
-   }
+
+   /* Check if there's anything to change. */
+   if (tr->ops->func == func)
+   return 0;
+
+   unregister_ftrace_function(tr->ops);
+   tr->ops->func = func;
+   register_ftrace_function(tr->ops);
 
return 0;
 }
-- 
2.25.1



[PATCH v3 0/5] Add "func_no_repeats" tracing option

2021-04-09 Thread Yordan Karadzhov (VMware)
The new option for function tracing aims to save space on the ring
buffer and to make it more readable in the case when a single function
is called a number of times consecutively:

while (cond)
do_func();

Instead of having an identical record for each call of the function,
we will record only the first call, followed by an event showing the
number of repeats.

v3 changes:
  * FUNC_REPEATS_SET_DELTA_TS macro has been optimised.

  * Fixed a bug in func_set_flag(): in the previous version the value
of the "new_flags" variable was not calculated properly, because
I had misinterpreted the meaning of the "bit" argument of the function.
The bug in v2 had no real effect, because for the moment we have
only two "function options", so the value of "new_flags" was still correct,
even though the way it was calculated was wrong.

v2 changes:
  * As suggested by Steven in his review, we now record not only the
repetition count, but also the time elapsed between the last
repetition of the function and the actual generation of the
"func_repeats" event. 16 bits are used to record the repetition
count. In the case of an overflow of the counter, a second pair of
"function" and "func_repeats" events will be generated. The time
interval is encoded using up to 48 (32 + 16) bits.


Yordan Karadzhov (VMware) (5):
  tracing: Define new ftrace event "func_repeats"
  tracing: Add "last_func_repeats" to struct trace_array
  tracing: Add method for recording "func_repeats" events
  tracing: Unify the logic for function tracing options
  tracing: Add "func_no_repeats" option for function tracing

 kernel/trace/trace.c   |  27 
 kernel/trace/trace.h   |  25 
 kernel/trace/trace_entries.h   |  28 +
 kernel/trace/trace_functions.c | 222 -
 kernel/trace/trace_output.c|  47 +++
 5 files changed, 321 insertions(+), 28 deletions(-)

-- 
2.25.1



Re: [PATCH v2 4/5] tracing: Unify the logic for function tracing options

2021-04-07 Thread Yordan Karadzhov (VMware)

Hi Steven,

Hi Steven,

On 6.04.21 г. 1:15, Steven Rostedt wrote:
  
@@ -235,30 +248,31 @@ static struct tracer function_trace;

  static int
  func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
  {
-   switch (bit) {
-   case TRACE_FUNC_OPT_STACK:
-   /* do nothing if already set */
-   if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
-   break;
+   ftrace_func_t func;
+   u32 new_flags_val;

Nit, but the variable should just be "new_flags", which is consistent with
old_flags. In the kernel we don't need the variable names to be so
verbose.

  
-		/* We can change this flag when not running. */

-   if (tr->current_trace != &function_trace)
-   break;
+   /* Do nothing if already set. */
+   if (!!set == !!(func_flags.val & bit))
+   return 0;
  
-		unregister_ftrace_function(tr->ops);

+   /* We can change this flag only when not running. */
+   if (tr->current_trace != &function_trace)
+   return 0;
  
-		if (set) {

-   tr->ops->func = function_stack_trace_call;
-   register_ftrace_function(tr->ops);
-   } else {
-   tr->ops->func = function_trace_call;
-   register_ftrace_function(tr->ops);
-   }
+   new_flags_val = (func_flags.val & ~(1UL << (bit - 1)));
+   new_flags_val |= (set << (bit - 1));

bit is already the mask, no need to shift it, nor is there any reason for the
extra set of parentheses. And the above can be done in one line.

new_flags = (func_flags.val & ~bit) | (set ? bit : 0);



OK, I totally misinterpreted the meaning of the "bit" argument of the
function. I did not realize it is a mask. I was thinking the argument
gives only the position of the bit that changes (like 5 for the 5th bit
inside the mask).
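
In other words (illustrative only), with "bit" being the mask of the option
that changed, the whole update is a single masked assignment:

	/* bit is the option's mask (e.g. TRACE_FUNC_OPT_STACK == 0x1), not a position */
	new_flags = (func_flags.val & ~bit) | (set ? bit : 0);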


Thanks!
Yordan




[PATCH v2 4/5] tracing: Unify the logic for function tracing options

2021-03-29 Thread Yordan Karadzhov (VMware)
Currently the logic for dealing with the options for function tracing
has two different implementations. One is used when we set the flags
(in "static int func_set_flag()") and another is used when we initialize
the tracer (in "static int function_trace_init()"). Those two
implementations are meant to do essentially the same thing, and neither
is very convenient for adding new options. In this patch
we add a helper function that provides a single implementation of
the logic for dealing with the options, and we make it such that new
options can be easily added.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace_functions.c | 66 --
 1 file changed, 40 insertions(+), 26 deletions(-)

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index f93723ca66bc..6c912eb0508a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -27,13 +27,17 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static ftrace_func_t select_trace_function(u32 flags_val);
 static struct tracer_flags func_flags;
 
 /* Our option */
 enum {
+   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
TRACE_FUNC_OPT_STACK= 0x1,
 };
 
+#define TRACE_FUNC_OPT_MASK   (TRACE_FUNC_OPT_STACK)
+
 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 {
struct ftrace_ops *ops;
@@ -97,12 +101,9 @@ static int function_trace_init(struct trace_array *tr)
if (!tr->ops)
return -ENOMEM;
 
-   /* Currently only the global instance can do stack tracing */
-   if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
-   func_flags.val & TRACE_FUNC_OPT_STACK)
-   func = function_stack_trace_call;
-   else
-   func = function_trace_call;
+   func = select_trace_function(func_flags.val);
+   if (!func)
+   return -EINVAL;
 
ftrace_init_array_ops(tr, func);
 
@@ -205,6 +206,18 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
local_irq_restore(flags);
 }
 
+static ftrace_func_t select_trace_function(u32 flags_val)
+{
+   switch (flags_val & TRACE_FUNC_OPT_MASK) {
+   case TRACE_FUNC_NO_OPTS:
+   return function_trace_call;
+   case TRACE_FUNC_OPT_STACK:
+   return function_stack_trace_call;
+   default:
+   return NULL;
+   }
+}
+
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
@@ -213,7 +226,7 @@ static struct tracer_opt func_opts[] = {
 };
 
 static struct tracer_flags func_flags = {
-   .val = 0, /* By default: all flags disabled */
+   .val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
.opts = func_opts
 };
 
@@ -235,30 +248,31 @@ static struct tracer function_trace;
 static int
 func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
-   switch (bit) {
-   case TRACE_FUNC_OPT_STACK:
-   /* do nothing if already set */
-   if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
-   break;
+   ftrace_func_t func;
+   u32 new_flags_val;
 
-   /* We can change this flag when not running. */
-   if (tr->current_trace != &function_trace)
-   break;
+   /* Do nothing if already set. */
+   if (!!set == !!(func_flags.val & bit))
+   return 0;
 
-   unregister_ftrace_function(tr->ops);
+   /* We can change this flag only when not running. */
+   if (tr->current_trace != &function_trace)
+   return 0;
 
-   if (set) {
-   tr->ops->func = function_stack_trace_call;
-   register_ftrace_function(tr->ops);
-   } else {
-   tr->ops->func = function_trace_call;
-   register_ftrace_function(tr->ops);
-   }
+   new_flags_val = (func_flags.val & ~(1UL << (bit - 1)));
+   new_flags_val |= (set << (bit - 1));
 
-   break;
-   default:
+   func = select_trace_function(new_flags_val);
+   if (!func)
return -EINVAL;
-   }
+
+   /* Check if there's anything to change. */
+   if (tr->ops->func == func)
+   return 0;
+
+   unregister_ftrace_function(tr->ops);
+   tr->ops->func = func;
+   register_ftrace_function(tr->ops);
 
return 0;
 }
-- 
2.25.1



[PATCH v2 2/5] tracing: Add "last_func_repeats" to struct trace_array

2021-03-29 Thread Yordan Karadzhov (VMware)
The field is used to keep track of the consecutive (on the same CPU) calls
of a single function. This information is needed in order to consolidate
the function tracing record in the cases when a single function is called
a number of times consecutively.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.c |  1 +
 kernel/trace/trace.h | 18 ++
 2 files changed, 19 insertions(+)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3c605957bb5c..6fcc159c34a8 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9103,6 +9103,7 @@ static int __remove_instance(struct trace_array *tr)
ftrace_clear_pids(tr);
ftrace_destroy_function_files(tr);
tracefs_remove(tr->dir);
+   free_percpu(tr->last_func_repeats);
free_trace_buffers(tr);
 
for (i = 0; i < tr->nr_topts; i++) {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 6a5b4c2a0fa7..1cd4da7ba769 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -262,6 +262,17 @@ struct cond_snapshot {
cond_update_fn_t		update;
 };
 
+/*
+ * struct trace_func_repeats - used to keep track of the consecutive
+ * (on the same CPU) calls of a single function.
+ */
+struct trace_func_repeats {
+   unsigned long   ip;
+   unsigned long   parent_ip;
+   unsigned long   count;
+   u64 ts_last_call;
+};
+
 /*
  * The trace array - an array of per-CPU trace arrays. This is the
  * highest level data structure that individual tracers deal with.
@@ -358,8 +369,15 @@ struct trace_array {
 #ifdef CONFIG_TRACER_SNAPSHOT
struct cond_snapshot*cond_snapshot;
 #endif
+   struct trace_func_repeats   __percpu *last_func_repeats;
 };
 
+static inline struct trace_func_repeats __percpu *
+tracer_alloc_func_repeats(struct trace_array *tr)
+{
+   return tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
+}
+
 enum {
TRACE_ARRAY_FL_GLOBAL   = (1 << 0)
 };
-- 
2.25.1



[PATCH v2 5/5] tracing: Add "func_no_repeats" option for function tracing

2021-03-29 Thread Yordan Karadzhov (VMware)
If the option is activated, the function tracing record gets
consolidated in the cases when a single function is called a number
of times consecutively. Instead of having an identical record for
each call of the function, we record only the first call, followed
by an event showing the number of repeats.

Signed-off-by: Yordan Karadzhov (VMware) 

---
 kernel/trace/trace_functions.c | 161 -
 1 file changed, 158 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 6c912eb0508a..72d2e07dc103 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -27,16 +27,28 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+struct ftrace_ops *op,
+struct ftrace_regs *fregs);
 static ftrace_func_t select_trace_function(u32 flags_val);
 static struct tracer_flags func_flags;
 
 /* Our option */
 enum {
-   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
-   TRACE_FUNC_OPT_STACK= 0x1,
+
+   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
+   TRACE_FUNC_OPT_STACK= 0x1,
+   TRACE_FUNC_OPT_NO_REPEATS   = 0x2,
+
+   /* Update this to next highest bit. */
+   TRACE_FUNC_OPT_HIGHEST_BIT  = 0x4
 };
 
-#define TRACE_FUNC_OPT_MASK   (TRACE_FUNC_OPT_STACK)
+#define TRACE_FUNC_OPT_MASK   (TRACE_FUNC_OPT_HIGHEST_BIT - 1)
 
 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 {
@@ -90,6 +102,17 @@ void ftrace_destroy_function_files(struct trace_array *tr)
ftrace_free_ftrace_ops(tr);
 }
 
+static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
+{
+   if (!tr->last_func_repeats &&
+   (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
+   if (!tracer_alloc_func_repeats(tr))
+   return false;
+   }
+
+   return true;
+}
+
 static int function_trace_init(struct trace_array *tr)
 {
ftrace_func_t func;
@@ -105,6 +128,9 @@ static int function_trace_init(struct trace_array *tr)
if (!func)
return -EINVAL;
 
+   if (!handle_func_repeats(tr, func_flags.val))
+   return -ENOMEM;
+
ftrace_init_array_ops(tr, func);
 
tr->array_buffer.cpu = raw_smp_processor_id();
@@ -206,6 +232,127 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
local_irq_restore(flags);
 }
 
+static inline bool is_repeat_check(struct trace_array *tr,
+  struct trace_func_repeats *last_info,
+  unsigned long ip, unsigned long parent_ip)
+{
+   if (last_info->ip == ip &&
+   last_info->parent_ip == parent_ip &&
+   last_info->count < U16_MAX) {
+   last_info->ts_last_call =
+   ring_buffer_time_stamp(tr->array_buffer.buffer);
+   last_info->count++;
+   return true;
+   }
+
+   return false;
+}
+
+static inline void process_repeats(struct trace_array *tr,
+  unsigned long ip, unsigned long parent_ip,
+  struct trace_func_repeats *last_info,
+  unsigned int trace_ctx)
+{
+   if (last_info->count) {
+   trace_last_func_repeats(tr, last_info, trace_ctx);
+   last_info->count = 0;
+   }
+
+   last_info->ip = ip;
+   last_info->parent_ip = parent_ip;
+}
+
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+  struct ftrace_ops *op,
+  struct ftrace_regs *fregs)
+{
+   struct trace_func_repeats *last_info;
+   struct trace_array *tr = op->private;
+   struct trace_array_cpu *data;
+   unsigned int trace_ctx;
+   unsigned long flags;
+   int bit;
+   int cpu;
+
+   if (unlikely(!tr->function_enabled))
+   return;
+
+   bit = ftrace_test_recursion_trylock(ip, parent_ip);
+   if (bit < 0)
+   return;
+
+   preempt_disable_notrace();
+
+   cpu = smp_processor_id();
+   data = per_cpu_ptr(tr->array_buffer.data, cpu);
+   if (atomic_read(&data->disabled))
+   goto out;
+
+   /*
+* An interrupt may happen at any place here. But as far as I can see,
+* the only damage tha

[PATCH v2 1/5] tracing: Define new ftrace event "func_repeats"

2021-03-29 Thread Yordan Karadzhov (VMware)
The event aims to consolidate the function tracing record in the cases
when a single function is called a number of times consecutively.

while (cond)
do_func();

This may happen in various scenarios (busy waiting, for example).
The new ftrace event can be used to show repeated function events with
a single event and save space on the ring buffer.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.h |  3 +++
 kernel/trace/trace_entries.h | 39 ++
 kernel/trace/trace_output.c  | 47 
 3 files changed, 89 insertions(+)

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5506424eae2a..6a5b4c2a0fa7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -45,6 +45,7 @@ enum trace_type {
TRACE_BPUTS,
TRACE_HWLAT,
TRACE_RAW_DATA,
+   TRACE_FUNC_REPEATS,
 
__TRACE_LAST_TYPE,
 };
@@ -442,6 +443,8 @@ extern void __ftrace_bad_type(void);
  TRACE_GRAPH_ENT); \
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,  \
  TRACE_GRAPH_RET); \
+   IF_ASSIGN(var, ent, struct func_repeats_entry,  \
+ TRACE_FUNC_REPEATS);  \
__ftrace_bad_type();\
} while (0)
 
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 4547ac59da61..6f98c3b4e4fa 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -338,3 +338,42 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
 __entry->nmi_total_ts,
 __entry->nmi_count)
 );
+
+#define FUNC_REPEATS_GET_DELTA_TS(entry)				\
+	(((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts)	\
+
+#define FUNC_REPEATS_SET_DELTA_TS(entry, delta)			\
+	do {								\
+		if (likely(!((u64)delta >> 32))) {			\
+			entry->bottom_delta_ts = delta;			\
+			entry->top_delta_ts = 0;			\
+		} else {						\
+			if (likely(!((u64)delta >> 48))) {		\
+				entry->bottom_delta_ts = delta & U32_MAX; \
+				entry->top_delta_ts = (delta >> 32);	\
+			} else {					\
+				/* Timestamp overflow. Set to max. */	\
+				entry->bottom_delta_ts = U32_MAX;	\
+				entry->top_delta_ts = U16_MAX;		\
+			}						\
+		}							\
+	} while (0);							\
+
+FTRACE_ENTRY(func_repeats, func_repeats_entry,
+
+   TRACE_FUNC_REPEATS,
+
+   F_STRUCT(
+   __field(unsigned long,  ip  )
+   __field(unsigned long,  parent_ip   )
+   __field(u16 ,   count   )
+   __field(u16 ,   top_delta_ts)
+   __field(u32 ,   bottom_delta_ts )
+   ),
+
+   F_printk(" %ps <-%ps\t(repeats:%u  delta_ts: -%llu)",
+(void *)__entry->ip,
+(void *)__entry->parent_ip,
+__entry->count,
+FUNC_REPEATS_GET_DELTA_TS(__entry))
+);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index a0146e1fffdf..55b08e146afc 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1373,6 +1373,52 @@ static struct trace_event trace_raw_data_event = {
.funcs  = &trace_raw_data_funcs,
 };
 
+static enum print_line_t
+trace_func_repeats_raw(struct trace_iterator *iter, int flags,
+struct trace_event *event)
+{
+   struct func_repeats_entry *field;
+   struct trace_seq *s = &iter->seq;
+
+   trace_assign_type(field, iter->ent);
+
+   trace_seq_printf(s, "%lu %lu %u %llu\n",
+field->ip,
+field->parent_ip,
+field->count,
+FUNC_REPEATS_GET_DELTA_TS(field));
+
+   return trace_handle_return(s);
+}
+
+static enum print_line_t
+trace_func_repeats_print(struct trace_iterator 

[PATCH v2 3/5] tracing: Add method for recording "func_repeats" events

2021-03-29 Thread Yordan Karadzhov (VMware)
This patch only provides the implementation of the method.
Later we will use it in combination with a new option for
function tracing.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.c | 26 ++
 kernel/trace/trace.h |  4 
 2 files changed, 30 insertions(+)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6fcc159c34a8..0d3d14112f50 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3116,6 +3116,32 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 
 #endif /* CONFIG_STACKTRACE */
 
+void trace_last_func_repeats(struct trace_array *tr,
+struct trace_func_repeats *last_info,
+unsigned int trace_ctx)
+{
+   struct trace_buffer *buffer = tr->array_buffer.buffer;
+   struct func_repeats_entry *entry;
+   struct ring_buffer_event *event;
+   u64 delta;
+
+   event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
+   sizeof(*entry), trace_ctx);
+   if (!event)
+   return;
+
+   delta = ring_buffer_event_time_stamp(buffer, event) -
+   last_info->ts_last_call;
+
+   entry = ring_buffer_event_data(event);
+   entry->ip = last_info->ip;
+   entry->parent_ip = last_info->parent_ip;
+   entry->count = last_info->count;
+   FUNC_REPEATS_SET_DELTA_TS(entry, delta)
+
+   __buffer_unlock_commit(buffer, event);
+}
+
 /* created for use with alloc_percpu */
 struct trace_buffer_struct {
int nesting;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1cd4da7ba769..e1f34119c036 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -701,6 +701,10 @@ static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
 }
 #endif /* CONFIG_STACKTRACE */
 
+void trace_last_func_repeats(struct trace_array *tr,
+struct trace_func_repeats *last_info,
+unsigned int trace_ctx);
+
 extern u64 ftrace_now(int cpu);
 
 extern void trace_find_cmdline(int pid, char comm[]);
-- 
2.25.1



[PATCH v2 0/5] Add "func_no_repeats" tracing option

2021-03-29 Thread Yordan Karadzhov (VMware)
The new option for function tracing aims to save space on the ring
buffer and to make it more readable in the case when a single function
is called a number of times consecutively:

while (cond)
do_func();

Instead of having an identical record for each call of the function,
we will record only the first call, followed by an event showing the
number of repeats.

v2 changes:
  * As suggested by Steven in his review, we now record not only the
repetition count, but also the time elapsed between the last
repetition of the function and the actual generation of the
"func_repeats" event. 16 bits are used to record the repetition
count. In the case of an overflow of the counter a second pair of
"function" and "func_repeats" events will be generated. The time
interval gets coded by using up to 48 (32 + 16) bits.
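
A minimal sketch of how such a 16 + 32 bit delta could be packed and
unpacked (illustrative only: the helper names below approximate the
FUNC_REPEATS_SET_DELTA_TS/FUNC_REPEATS_GET_DELTA_TS helpers and the
top_delta_ts/bottom_delta_ts entry fields used by the series, whose
exact definitions may differ):

	/* Sketch: deltas of 2^48 ns or more are simply truncated. */
	static inline void func_repeats_set_delta_ts(struct func_repeats_entry *entry,
						     u64 delta)
	{
		entry->bottom_delta_ts = delta & U32_MAX;
		entry->top_delta_ts = (delta >> 32) & U16_MAX;
	}

	static inline u64 func_repeats_get_delta_ts(struct func_repeats_entry *entry)
	{
		return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
	}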

Yordan Karadzhov (VMware) (5):
  tracing: Define new ftrace event "func_repeats"
  tracing: Add "last_func_repeats" to struct trace_array
  tracing: Add method for recording "func_repeats" events
  tracing: Unify the logic for function tracing options
  tracing: Add "func_no_repeats" option for function tracing

 kernel/trace/trace.c   |  29 +
 kernel/trace/trace.h   |  25 
 kernel/trace/trace_entries.h   |  28 +
 kernel/trace/trace_functions.c | 222 +
 kernel/trace/trace_output.c|  47 +++
 5 files changed, 324 insertions(+), 27 deletions(-)

-- 
2.25.1



[PATCH] tracing: Remove unused argument from "ring_buffer_time_stamp()

2021-03-29 Thread Yordan Karadzhov (VMware)
The "cpu" parameter is not being used by the function.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 include/linux/ring_buffer.h | 2 +-
 kernel/trace/ring_buffer.c  | 2 +-
 kernel/trace/trace.c| 8 
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 057b7ed4fe24..dac53fd3afea 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -181,7 +181,7 @@ unsigned long ring_buffer_commit_overrun_cpu(struct 
trace_buffer *buffer, int cp
 unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int 
cpu);
 unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int 
cpu);
 
-u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
 void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
  int cpu, u64 *ts);
 void ring_buffer_set_clock(struct trace_buffer *buffer,
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index f4216df58e31..2c0ee6484990 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1080,7 +1080,7 @@ static inline u64 rb_time_stamp(struct trace_buffer 
*buffer)
return ts << DEBUG_SHIFT;
 }
 
-u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu)
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
 {
u64 time;
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c8e54b674d3e..3c605957bb5c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -771,7 +771,7 @@ static u64 buffer_ftrace_now(struct array_buffer *buf, int 
cpu)
if (!buf->buffer)
return trace_clock_local();
 
-   ts = ring_buffer_time_stamp(buf->buffer, cpu);
+   ts = ring_buffer_time_stamp(buf->buffer);
ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
 
return ts;
@@ -7173,7 +7173,7 @@ static int tracing_time_stamp_mode_open(struct inode 
*inode, struct file *file)
 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct 
ring_buffer_event *rbe)
 {
if (rbe == this_cpu_read(trace_buffered_event))
-   return ring_buffer_time_stamp(buffer, smp_processor_id());
+   return ring_buffer_time_stamp(buffer);
 
return ring_buffer_event_time_stamp(buffer, rbe);
 }
@@ -8087,7 +8087,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
t, usec_rem);
 
-   t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
+   t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
} else {
@@ -8096,7 +8096,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
ring_buffer_oldest_event_ts(trace_buf->buffer, 
cpu));
 
trace_seq_printf(s, "now ts: %llu\n",
-   ring_buffer_time_stamp(trace_buf->buffer, cpu));
+   ring_buffer_time_stamp(trace_buf->buffer));
}
 
cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
-- 
2.25.1



Re: [RFC PATCH 1/5] tracing: Define new ftrace event "func_repeats"

2021-03-08 Thread Yordan Karadzhov (VMware)



On 4.03.21 at 18:38, Steven Rostedt wrote:

On Thu,  4 Mar 2021 11:01:37 +0200
"Yordan Karadzhov (VMware)"  wrote:

Thanks Yordan for doing this!

I have some comments below.



Hi Steven,

Thank you very much for looking into this!

Your suggestion makes perfect sense. I only have one clarifying question 
below.



diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 4547ac59da61..8007f9b6417f 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -338,3 +338,19 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
 __entry->nmi_total_ts,
 __entry->nmi_count)
  );
+
+FTRACE_ENTRY(func_repeats, func_repeats_entry,
+
+   TRACE_FUNC_REPEATS,
+
+   F_STRUCT(
+   __field(unsigned long,  ip  )
+   __field(unsigned long,  pip )
+   __field(unsigned long,  count   )
+   ),
+
+   F_printk(" %ps <-%ps\t(repeats:%lu)",
+(void *)__entry->ip,
+(void *)__entry->pip,
+__entry->count)


After playing with this a little, I realized that we should also store the
last timestamp. I think that would be interesting information.

<...>-37  [004] ...1  2022.303820: gc_worker <-process_one_work
<...>-37  [004] ...1  2022.303820: ___might_sleep <-gc_worker
<...>-37  [004] ...1  2022.303831: ___might_sleep <-gc_worker (repeats: 127)
<...>-37  [004] ...1  2022.303831: queue_delayed_work_on <-process_one_work

The above shows that __might_sleep() was called 128 times, but what I don't
get from the above is when that last call was made. You'll see that the
timestamp for the repeat output is the same as the next function shown
(queue_delayed_work_on()). But the timestamp for the last call to
__might_sleep() is lost, and the repeat event ends up being written when
it is detected that there are no more repeats.

If we had:

<...>-37  [004] ...1  2022.303820: gc_worker <-process_one_work
<...>-37  [004] ...1  2022.303820: ___might_sleep <-gc_worker
<...>-37  [004] ...1  2022.303831: ___might_sleep <-gc_worker (last ts: 2022.303828 repeats: 127)
<...>-37  [004] ...1  2022.303831: queue_delayed_work_on <-process_one_work

We would know the last time __might_sleep was called.

That is, not only should we save the ip and pip in the trace_func_repeats
structure, but we should also be storing the last time stamp of the last
function event that repeated. Otherwise, the above makes it look like the
last __might_sleep() call happened when queue_delayed_work_on() did, which
may not be the case.
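
A rough sketch of where such a timestamp could be captured in the
no-repeats callback (the ts_last_call field and the use of
ring_buffer_time_stamp() as the clock are assumptions about how a later
version might do this, not code from this series):

	/* Sketch only: when a repeat is detected, remember when it happened. */
	if (last_info->ip == ip && last_info->parent_ip == parent_ip &&
	    last_info->count < U16_MAX) {
		last_info->ts_last_call =
			ring_buffer_time_stamp(tr->array_buffer.buffer, cpu);
		last_info->count++;
		goto out;
	}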


If we store the last timestamp, we will need an additional 64 bits on the
buffer every time we record the "func_repeats" event. This looks like
overkill to me.
Can we store only the duration of the repeats (the difference between the
timestamps)? This way we use less memory, at the price of one extra
arithmetic operation.
An alternative approach would be to store only the least-significant bits
of the timestamp.
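
As a back-of-envelope check (assuming the trace clock has nanosecond
resolution): a 16-bit counter caps at 65535 repeats, 32 bits of
nanoseconds cover about 4.29 seconds, and a 48-bit delta covers roughly
78 hours, so a 32 + 16 bit delta would only saturate on extremely long
gaps between repeats.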


What do you think?

Best regards,
Yordan



-- Steve




[PATCH] tracing: Remove duplicate declaration from trace.h

2021-03-04 Thread Yordan Karadzhov (VMware)
A declaration of the function "int trace_empty(struct trace_iterator *iter)"
appears twice in the header file kernel/trace/trace.h.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dec13ff66077..a6446c03cfbc 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -605,7 +605,6 @@ void trace_graph_function(struct trace_array *tr,
 void trace_latency_header(struct seq_file *m);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
-int trace_empty(struct trace_iterator *iter);
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
-- 
2.25.1



[RFC PATCH 5/5] tracing: Add "func_no_repeats" option for function tracing

2021-03-04 Thread Yordan Karadzhov (VMware)
If the option is activated, the function tracing record gets
consolidated in the cases when a single function is called a number
of times consecutively. Instead of having an identical record for
each call of the function, we will record only the first call,
followed by an event showing the number of repeats.

Signed-off-by: Yordan Karadzhov (VMware) 

fix last
---
 kernel/trace/trace_functions.c | 157 -
 1 file changed, 154 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 6c912eb0508a..fbf60ff93ffb 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -27,16 +27,28 @@ function_trace_call(unsigned long ip, unsigned long 
parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+  struct ftrace_ops *op, struct ftrace_regs 
*fregs);
+static void
+function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+struct ftrace_ops *op,
+struct ftrace_regs *fregs);
 static ftrace_func_t select_trace_function(u32 flags_val);
 static struct tracer_flags func_flags;
 
 /* Our option */
 enum {
-   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
-   TRACE_FUNC_OPT_STACK= 0x1,
+
+   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
+   TRACE_FUNC_OPT_STACK= 0x1,
+   TRACE_FUNC_OPT_NO_REPEATS   = 0x2,
+
+   /* Update this to next highest bit. */
+   TRACE_FUNC_OPT_HIGHEST_BIT  = 0x4
 };
 
-#define TRACE_FUNC_OPT_MASK(TRACE_FUNC_OPT_STACK)
+#define TRACE_FUNC_OPT_MASK(TRACE_FUNC_OPT_HIGHEST_BIT - 1)
 
 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 {
@@ -90,6 +102,17 @@ void ftrace_destroy_function_files(struct trace_array *tr)
ftrace_free_ftrace_ops(tr);
 }
 
+static bool handle_func_repeats(struct trace_array *tr, u32 flags_val)
+{
+   if (!tr->last_func_repeats &&
+   (flags_val & TRACE_FUNC_OPT_NO_REPEATS)) {
+   if (!tracer_alloc_func_repeats(tr))
+   return false;
+   }
+
+   return true;
+}
+
 static int function_trace_init(struct trace_array *tr)
 {
ftrace_func_t func;
@@ -105,6 +128,9 @@ static int function_trace_init(struct trace_array *tr)
if (!func)
return -EINVAL;
 
+   if (!handle_func_repeats(tr, func_flags.val))
+   return -ENOMEM;
+
ftrace_init_array_ops(tr, func);
 
tr->array_buffer.cpu = raw_smp_processor_id();
@@ -206,6 +232,123 @@ function_stack_trace_call(unsigned long ip, unsigned long 
parent_ip,
local_irq_restore(flags);
 }
 
+static inline bool is_repeat(struct trace_func_repeats *last_info,
+unsigned long ip, unsigned long parent_ip)
+{
+   if (last_info->ip == ip &&
+   last_info->parent_ip == parent_ip) {
+   last_info->count++;
+   return true;
+   }
+
+   return false;
+}
+
+static inline void process_repeats(struct trace_array *tr,
+  unsigned long ip, unsigned long parent_ip,
+  struct trace_func_repeats *last_info,
+  unsigned int trace_ctx)
+{
+   if (last_info->count) {
+   trace_last_func_repeats(tr, last_info, trace_ctx);
+   last_info->count = 0;
+   }
+
+   last_info->ip = ip;
+   last_info->parent_ip = parent_ip;
+}
+
+static void
+function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
+  struct ftrace_ops *op,
+  struct ftrace_regs *fregs)
+{
+   struct trace_func_repeats *last_info;
+   struct trace_array *tr = op->private;
+   struct trace_array_cpu *data;
+   unsigned int trace_ctx;
+   unsigned long flags;
+   int bit;
+   int cpu;
+
+   if (unlikely(!tr->function_enabled))
+   return;
+
+   bit = ftrace_test_recursion_trylock(ip, parent_ip);
+   if (bit < 0)
+   return;
+
+   preempt_disable_notrace();
+
+   cpu = smp_processor_id();
+   data = per_cpu_ptr(tr->array_buffer.data, cpu);
+   if (atomic_read(&data->disabled))
+   goto out;
+
+   /*
+* An interrupt may happen at any place here. But as far as I can see,
+* the only damage that this can cause is to mess up the repetition
+* counter without valuable data being lost.
+* TODO: think about a solution that is better than just hoping to be
+* lucky.
+*/
+   last_info = per_cpu_ptr(tr->last_func_repeats, cpu);

[RFC PATCH 3/5] tracing: Add method for recording "func_repeats" events

2021-03-04 Thread Yordan Karadzhov (VMware)
This patch only provides the implementation of the method.
Later we will use it in combination with a new option for
function tracing.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.c | 21 +
 kernel/trace/trace.h |  4 
 2 files changed, 25 insertions(+)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 5f5fa08c0644..5c62fda666af 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3109,6 +3109,27 @@ static void ftrace_trace_userstack(struct trace_array 
*tr,
 
 #endif /* CONFIG_STACKTRACE */
 
+void trace_last_func_repeats(struct trace_array *tr,
+struct trace_func_repeats *last_info,
+unsigned int trace_ctx)
+{
+   struct trace_buffer *buffer = tr->array_buffer.buffer;
+   struct func_repeats_entry *entry;
+   struct ring_buffer_event *event;
+
+   event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
+   sizeof(*entry), trace_ctx);
+   if (!event)
+   return;
+
+   entry = ring_buffer_event_data(event);
+   entry->ip = last_info->ip;
+   entry->pip = last_info->parent_ip;
+   entry->count = last_info->count;
+
+   __buffer_unlock_commit(buffer, event);
+}
+
 /* created for use with alloc_percpu */
 struct trace_buffer_struct {
int nesting;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 09bf12c038f4..0ef823bb9594 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -696,6 +696,10 @@ static inline void __trace_stack(struct trace_array *tr, 
unsigned int trace_ctx,
 }
 #endif /* CONFIG_STACKTRACE */
 
+void trace_last_func_repeats(struct trace_array *tr,
+struct trace_func_repeats *last_info,
+unsigned int trace_ctx);
+
 extern u64 ftrace_now(int cpu);
 
 extern void trace_find_cmdline(int pid, char comm[]);
-- 
2.25.1



[RFC PATCH 4/5] tracing: Unify the logic for function tracing options

2021-03-04 Thread Yordan Karadzhov (VMware)
Currently the logic for dealing with the options for function tracing
has two different implementations. One is used when we set the flags
(in "static int func_set_flag()") and another used when we initialize
the tracer (in "static int function_trace_init()"). Those two
implementations are meant to do essentially the same thing and they
are both not very convenient for adding new options. In this patch
we add a helper function that provides a single implementation of
the logic for dealing with the options and we make it such that new
options can be easily added.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace_functions.c | 66 --
 1 file changed, 40 insertions(+), 26 deletions(-)

diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index f93723ca66bc..6c912eb0508a 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -27,13 +27,17 @@ function_trace_call(unsigned long ip, unsigned long 
parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
  struct ftrace_ops *op, struct ftrace_regs *fregs);
+static ftrace_func_t select_trace_function(u32 flags_val);
 static struct tracer_flags func_flags;
 
 /* Our option */
 enum {
+   TRACE_FUNC_NO_OPTS  = 0x0, /* No flags set. */
TRACE_FUNC_OPT_STACK= 0x1,
 };
 
+#define TRACE_FUNC_OPT_MASK(TRACE_FUNC_OPT_STACK)
+
 int ftrace_allocate_ftrace_ops(struct trace_array *tr)
 {
struct ftrace_ops *ops;
@@ -97,12 +101,9 @@ static int function_trace_init(struct trace_array *tr)
if (!tr->ops)
return -ENOMEM;
 
-   /* Currently only the global instance can do stack tracing */
-   if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
-   func_flags.val & TRACE_FUNC_OPT_STACK)
-   func = function_stack_trace_call;
-   else
-   func = function_trace_call;
+   func = select_trace_function(func_flags.val);
+   if (!func)
+   return -EINVAL;
 
ftrace_init_array_ops(tr, func);
 
@@ -205,6 +206,18 @@ function_stack_trace_call(unsigned long ip, unsigned long 
parent_ip,
local_irq_restore(flags);
 }
 
+static ftrace_func_t select_trace_function(u32 flags_val)
+{
+   switch (flags_val & TRACE_FUNC_OPT_MASK) {
+   case TRACE_FUNC_NO_OPTS:
+   return function_trace_call;
+   case TRACE_FUNC_OPT_STACK:
+   return function_stack_trace_call;
+   default:
+   return NULL;
+   }
+}
+
 static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
@@ -213,7 +226,7 @@ static struct tracer_opt func_opts[] = {
 };
 
 static struct tracer_flags func_flags = {
-   .val = 0, /* By default: all flags disabled */
+   .val = TRACE_FUNC_NO_OPTS, /* By default: all flags disabled */
.opts = func_opts
 };
 
@@ -235,30 +248,31 @@ static struct tracer function_trace;
 static int
 func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 {
-   switch (bit) {
-   case TRACE_FUNC_OPT_STACK:
-   /* do nothing if already set */
-   if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
-   break;
+   ftrace_func_t func;
+   u32 new_flags_val;
 
-   /* We can change this flag when not running. */
-   if (tr->current_trace != &function_trace)
-   break;
+   /* Do nothing if already set. */
+   if (!!set == !!(func_flags.val & bit))
+   return 0;
 
-   unregister_ftrace_function(tr->ops);
+   /* We can change this flag only when not running. */
+   if (tr->current_trace != &function_trace)
+   return 0;
 
-   if (set) {
-   tr->ops->func = function_stack_trace_call;
-   register_ftrace_function(tr->ops);
-   } else {
-   tr->ops->func = function_trace_call;
-   register_ftrace_function(tr->ops);
-   }
+   new_flags_val = (func_flags.val & ~(1UL << (bit - 1)));
+   new_flags_val |= (set << (bit - 1));
 
-   break;
-   default:
+   func = select_trace_function(new_flags_val);
+   if (!func)
return -EINVAL;
-   }
+
+   /* Check if there's anything to change. */
+   if (tr->ops->func == func)
+   return 0;
+
+   unregister_ftrace_function(tr->ops);
+   tr->ops->func = func;
+   register_ftrace_function(tr->ops);
 
return 0;
 }
-- 
2.25.1



[RFC PATCH 2/5] tracing: Add "last_func_repeats" to struct trace_array

2021-03-04 Thread Yordan Karadzhov (VMware)
The field is used to keep track of the consecutive (on the same CPU) calls
of a single function. This information is needed in order to consolidate
the function tracing record in the cases when a single function is called
a number of times in a row.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.c |  1 +
 kernel/trace/trace.h | 17 +
 2 files changed, 18 insertions(+)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e295c413580e..5f5fa08c0644 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8895,6 +8895,7 @@ static int __remove_instance(struct trace_array *tr)
ftrace_clear_pids(tr);
ftrace_destroy_function_files(tr);
tracefs_remove(tr->dir);
+   free_percpu(tr->last_func_repeats);
free_trace_buffers(tr);
 
for (i = 0; i < tr->nr_topts; i++) {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 2be4a56879de..09bf12c038f4 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -262,6 +262,16 @@ struct cond_snapshot {
cond_update_fn_tupdate;
 };
 
+/*
+ * struct trace_func_repeats - used to keep track of the consecutive
+ * (on the same CPU) calls of a single function.
+ */
+struct trace_func_repeats {
+   unsigned long ip;
+   unsigned long parent_ip;
+   unsigned long count;
+};
+
 /*
  * The trace array - an array of per-CPU trace arrays. This is the
  * highest level data structure that individual tracers deal with.
@@ -358,8 +368,15 @@ struct trace_array {
 #ifdef CONFIG_TRACER_SNAPSHOT
struct cond_snapshot*cond_snapshot;
 #endif
+   struct trace_func_repeats   __percpu *last_func_repeats;
 };
 
+static inline struct trace_func_repeats *
+tracer_alloc_func_repeats(struct trace_array *tr)
+{
+   return tr->last_func_repeats = alloc_percpu(struct trace_func_repeats);
+}
+
 enum {
TRACE_ARRAY_FL_GLOBAL   = (1 << 0)
 };
-- 
2.25.1



[RFC PATCH 1/5] tracing: Define new ftrace event "func_repeats"

2021-03-04 Thread Yordan Karadzhov (VMware)
The event aims to consolidate the function tracing record in the cases
when a single function is called a number of times consecutively.

while (cond)
do_func();

This may happen in various scenarios (busy waiting for example).
The new ftrace event can be used to show repeated function events with
a single event and save space on the ring buffer.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel/trace/trace.h |  3 +++
 kernel/trace/trace_entries.h | 16 +
 kernel/trace/trace_output.c  | 44 
 3 files changed, 63 insertions(+)

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a6446c03cfbc..2be4a56879de 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -45,6 +45,7 @@ enum trace_type {
TRACE_BPUTS,
TRACE_HWLAT,
TRACE_RAW_DATA,
+   TRACE_FUNC_REPEATS,
 
__TRACE_LAST_TYPE,
 };
@@ -441,6 +442,8 @@ extern void __ftrace_bad_type(void);
  TRACE_GRAPH_ENT); \
IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,  \
  TRACE_GRAPH_RET); \
+   IF_ASSIGN(var, ent, struct func_repeats_entry,  \
+ TRACE_FUNC_REPEATS);  \
__ftrace_bad_type();\
} while (0)
 
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 4547ac59da61..8007f9b6417f 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -338,3 +338,19 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
 __entry->nmi_total_ts,
 __entry->nmi_count)
 );
+
+FTRACE_ENTRY(func_repeats, func_repeats_entry,
+
+   TRACE_FUNC_REPEATS,
+
+   F_STRUCT(
+   __field(unsigned long,  ip  )
+   __field(unsigned long,  pip )
+   __field(unsigned long,  count   )
+   ),
+
+   F_printk(" %ps <-%ps\t(repeats:%lu)",
+(void *)__entry->ip,
+(void *)__entry->pip,
+__entry->count)
+);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 61255bad7e01..af6b066972e9 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1373,6 +1373,49 @@ static struct trace_event trace_raw_data_event = {
.funcs  = &trace_raw_data_funcs,
 };
 
+static enum print_line_t
+trace_func_repeats_raw(struct trace_iterator *iter, int flags,
+struct trace_event *event)
+{
+   struct func_repeats_entry *field;
+   struct trace_seq *s = &iter->seq;
+
+   trace_assign_type(field, iter->ent);
+
+   trace_seq_printf(s, "%lu %lu %li\n",
+field->pip,
+field->ip,
+field->count);
+
+   return trace_handle_return(s);
+}
+
+static enum print_line_t
+trace_func_repeats_print(struct trace_iterator *iter, int flags,
+struct trace_event *event)
+{
+   struct func_repeats_entry *field;
+   struct trace_seq *s = &iter->seq;
+
+   trace_assign_type(field, iter->ent);
+
+   seq_print_ip_sym(s, field->ip, flags);
+   trace_seq_puts(s, " <-");
+   seq_print_ip_sym(s, field->pip, flags);
+   trace_seq_printf(s, " (repeats: %li)\n", field->count);
+
+   return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_func_repeats_funcs = {
+   .trace  = trace_func_repeats_print,
+   .raw= trace_func_repeats_raw,
+};
+
+static struct trace_event trace_func_repeats_event = {
+   .type   = TRACE_FUNC_REPEATS,
+   .funcs  = &trace_func_repeats_funcs,
+};
 
 static struct trace_event *events[] __initdata = {
&trace_fn_event,
@@ -1385,6 +1428,7 @@ static struct trace_event *events[] __initdata = {
&trace_print_event,
&trace_hwlat_event,
&trace_raw_data_event,
+   &trace_func_repeats_event,
NULL
 };
 
-- 
2.25.1



[RFC PATCH 0/5] Add "func_no_repete" tracing option

2021-03-04 Thread Yordan Karadzhov (VMware)
The new option for function tracing aims to save space on the ring
buffer and to make it more readable in the case when a single function
is called a number of times consecutively:

while (cond)
do_func();

Instead of having identical records for each call of the function,
we will record only the first call, followed by an event showing the
number of repeats.

Yordan Karadzhov (VMware) (5):
  tracing: Define new ftrace event "func_repeats"
  tracing: Add "last_func_repeats" to struct trace_array
  tracing: Add method for recording "func_repeats" events
  tracing: Unify the logic for function tracing options
  tracing: Add "func_no_repeats" option for function tracing

 kernel/trace/trace.c   |  22 
 kernel/trace/trace.h   |  24 
 kernel/trace/trace_entries.h   |  16 +++
 kernel/trace/trace_functions.c | 219 +
 kernel/trace/trace_output.c|  44 +++
 5 files changed, 298 insertions(+), 27 deletions(-)

-- 
2.25.1



[PATCH v2 3/4] kernelshark: Adding gui_event_handlers for View and Graph

2017-11-10 Thread Yordan Karadzhov (VMware)
gui_event_handlers are added to trace_view_store and graph_info.
The handlers are used to execute plugin-specific actions
during the processing of the trace data.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kshark-plugin.h|  3 +++
 trace-graph.c  | 41 +
 trace-graph.h  |  7 +++
 trace-plot-task.c  | 17 +
 trace-view-store.c | 16 
 trace-view-store.h |  5 +
 6 files changed, 85 insertions(+), 4 deletions(-)

diff --git a/kshark-plugin.h b/kshark-plugin.h
index d65ee02..42fb40c 100644
--- a/kshark-plugin.h
+++ b/kshark-plugin.h
@@ -48,6 +48,9 @@ enum gui_plugin_actions {
KSHARK_PLUGIN_LOAD,
KSHARK_PLUGIN_RELOAD,
KSHARK_PLUGIN_UNLOAD,
+   KSHARK_PLUGIN_GET_PID,
+   KSHARK_PLUGIN_GET_PREV_STATE,
+   KSHARK_PLUGIN_GET_COMMAND,
 };
 
 enum gui_event_types {
diff --git a/trace-graph.c b/trace-graph.c
index 1db342f..a79aacf 100644
--- a/trace-graph.c
+++ b/trace-graph.c
@@ -129,6 +129,12 @@ static void free_task_hash(struct graph_info *ginfo)
}
 }
 
+void trace_graph_register_gui_handler(struct graph_info *info,
+ struct gui_event_handler *handler) {
+   handler->next = info->event_handlers;
+   info->event_handlers = handler;
+}
+
 /**
  * trace_graph_task_list - return an allocated list of all found tasks
  * @ginfo: The graph info structure
@@ -1021,6 +1027,7 @@ int trace_graph_check_sched_wakeup(struct graph_info 
*ginfo,
   gint *pid)
 {
struct event_format *event;
+   struct gui_event_handler *handler;
unsigned long long val;
gboolean found;
gint id;
@@ -1079,6 +1086,20 @@ int trace_graph_check_sched_wakeup(struct graph_info 
*ginfo,
return 1;
}
 
+   handler = find_gui_event_handler(ginfo->event_handlers, id);
+
+   if (handler) {
+   if (handler->type == KSHARK_PLUGIN_WAKEUP_EVENT) {
+   if (!handler->event_func(record, KSHARK_PLUGIN_GET_PID, 
&val))
+   return 0;
+
+   if (pid)
+   *pid = val;
+
+   return 1;
+   }
+   }
+
return 0;
 }
 
@@ -1088,6 +1109,7 @@ int trace_graph_check_sched_switch(struct graph_info 
*ginfo,
 {
unsigned long long val;
struct event_format *event;
+   struct gui_event_handler *handler;
gint this_pid;
gint id;
int ret = 1;
@@ -1118,7 +1140,9 @@ int trace_graph_check_sched_switch(struct graph_info 
*ginfo,
}
}
 
+
id = pevent_data_type(ginfo->pevent, record);
+
if (id == ginfo->event_sched_switch_id) {
pevent_read_number_field(ginfo->event_pid_field, record->data, 
&val);
if (comm)
@@ -1139,6 +1163,20 @@ int trace_graph_check_sched_switch(struct graph_info 
*ginfo,
goto out;
}
 
+   handler = find_gui_event_handler(ginfo->event_handlers, id);
+
+   if (handler) {
+   if (handler->type == KSHARK_PLUGIN_SWITCH_EVENT) {
+   if (handler->event_func(record, KSHARK_PLUGIN_GET_PID, 
&val)) {
+   if (pid)
+   *pid = val;
+   if(comm)
+   handler->event_func(record, 
KSHARK_PLUGIN_GET_COMMAND, &comm);
+   goto out;
+   }
+   }
+   }
+
ret = 0;
  out:
if (ret && comm && ginfo->read_comms) {
@@ -1285,6 +1323,7 @@ static void draw_info_box(struct graph_info *ginfo, const 
gchar *buffer,
 
view_start = gtk_adjustment_get_value(ginfo->hadj);
view_width = gtk_adjustment_get_page_size(ginfo->hadj);
+
if (x > view_start + width)
x -= width;
 
@@ -2781,6 +2820,8 @@ trace_graph_create_with_callbacks(struct tracecmd_input 
*handle,
 
ginfo->callbacks = cbs;
 
+   ginfo->event_handlers = NULL;
+
ginfo->task_filter = filter_task_hash_alloc();
ginfo->hide_tasks = filter_task_hash_alloc();
 
diff --git a/trace-graph.h b/trace-graph.h
index 7e66838..21e8feb 100644
--- a/trace-graph.h
+++ b/trace-graph.h
@@ -24,6 +24,7 @@
 #include "trace-cmd.h"
 #include "trace-filter-hash.h"
 #include "trace-xml.h"
+#include "kshark-plugin.h"
 
 struct graph_info;
 
@@ -258,6 +259,8 @@ struct graph_info {
gintplot_data_y;
gintplot_data_w;
gintplot_data_h;
+
+   struct gui_event_handler *event_handlers;
 };
 
 
@@ -266,6 +269,10 @@ trace_graph_create(struct tracecmd_input *handle);
 struct graph_info *

[PATCH v2 4/4] kernelshark: Adding a GUI plugin for xenomai events

2017-11-10 Thread Yordan Karadzhov (VMware)
A plugin for processing xenomai events "cobalt_switch_context"
and "cobalt_thread_resume" is added.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 Makefile |   1 +
 kshark-plugin.h  |  10 ++
 plugin_xenomai_gui.c | 300 +++
 3 files changed, 311 insertions(+)
 create mode 100644 plugin_xenomai_gui.c

diff --git a/Makefile b/Makefile
index 992e6b0..e4d280b 100644
--- a/Makefile
+++ b/Makefile
@@ -384,6 +384,7 @@ PLUGIN_OBJS += plugin_tlb.o
 PLUGINS := $(PLUGIN_OBJS:.o=.so)
 
 GUI_PLUGIN_OBJS =
+GUI_PLUGIN_OBJS += plugin_xenomai_gui.o
 
 GUI_PLUGINS := $(GUI_PLUGIN_OBJS:.o=.so)
 
diff --git a/kshark-plugin.h b/kshark-plugin.h
index 42fb40c..1166781 100644
--- a/kshark-plugin.h
+++ b/kshark-plugin.h
@@ -53,6 +53,16 @@ enum gui_plugin_actions {
KSHARK_PLUGIN_GET_COMMAND,
 };
 
+enum gui_plugin_ctx_updates {
+   KSHARK_PLUGIN_UPDATE_SWITCH_EVENT   = (1 << 0),
+   KSHARK_PLUGIN_UPDATE_WAKEUP_EVENT   = (1 << 1),
+   KSHARK_PLUGIN_UPDATE_WAKEUP_PID = (1 << 2),
+   KSHARK_PLUGIN_UPDATE_SWITCH_PID = (1 << 3),
+   KSHARK_PLUGIN_UPDATE_PREV_STATE = (1 << 4),
+   KSHARK_PLUGIN_UPDATE_NEXT_NAME  = (1 << 5),
+   KSHARK_PLUGIN_UPDATE_ALL= INT_MAX
+};
+
 enum gui_event_types {
KSHARK_PLUGIN_SWITCH_EVENT,
KSHARK_PLUGIN_WAKEUP_EVENT,
diff --git a/plugin_xenomai_gui.c b/plugin_xenomai_gui.c
new file mode 100644
index 000..c786cc8
--- /dev/null
+++ b/plugin_xenomai_gui.c
@@ -0,0 +1,300 @@
+/*
+ *  Copyright (C) 2017 VMware Inc, Yordan Karadzhov 
+ *
+ * ~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
+ *
+ * ~~
+ */
+
+#include 
+#include 
+
+#include "event-parse.h"
+#include "kernel-shark.h"
+#include "kshark-plugin.h"
+
+struct xenomai_context {
+   struct pevent   *pevent;
+
+   struct event_format *cobalt_switch_event;
+   struct format_field *cobalt_switch_next_pid_field;
+   struct format_field *cobalt_switch_prev_state_field;
+   struct format_field *cobalt_switch_next_name_field;
+
+   struct event_format *cobalt_wakeup_event;
+   struct format_field *cobalt_wakeup_pid_field;
+};
+
+struct xenomai_context   *xenomai_context_handler = NULL;
+struct gui_event_handler *switch_handler = NULL;
+struct gui_event_handler *wakeup_handler = NULL;
+
+#define COBALT_PREV_STATE_BIT  (1 << 3)
+#define COBALT_COMM_OFFSET_MASK0x
+
+static gboolean xenomai_update_context(struct pevent *pevent, int task_id)
+{
+   struct event_format *event;
+   struct xenomai_context *ctx = xenomai_context_handler;
+
+   if (!ctx)
+   return FALSE;
+
+   if (task_id & KSHARK_PLUGIN_UPDATE_SWITCH_EVENT) {
+   event = pevent_find_event_by_name(pevent,
+ "cobalt_core",
+ "cobalt_switch_context");
+   if (!event)
+   return FALSE;
+
+   ctx->cobalt_switch_event = event;
+   }
+
+   if (task_id & KSHARK_PLUGIN_UPDATE_WAKEUP_EVENT) {
+   event = pevent_find_event_by_name(pevent,
+ "cobalt_core",
+ "cobalt_thread_resume");
+   if (!event)
+   return FALSE;
+
+   ctx->cobalt_wakeup_event = event;
+   }
+
+   if (task_id & KSHARK_PLUGIN_UPDATE_SWITCH_PID) {
+   ctx->cobalt_switch_next_pid_field =
+   pevent_find_field(ctx->cobalt_switch_event, "next_pid");
+   }
+
+   if (task_id & KSHARK_PLUGIN_UPDATE_PREV_STATE) {
+   ctx->cobalt_switch_prev_state_field =
+   pevent_find_field(ctx->cobalt_switch_event, 
"prev_state");
+   }
+
+   if (task_id & KSHARK_PLUGIN_UPDATE_NEXT_NAME) {
+   ctx->cobalt_switch_next_name_field

[PATCH v2 1/4] kernelshark: Adding infrastructure for GUI plugins

2017-11-10 Thread Yordan Karadzhov (VMware)
The Makefile is modified in order to support building GUI plugins.

kshark_plugin_loader and kshark_plugin_unloader are modified.

kshark_plugin_reloader is defined and is called when the user
loads a new trace data file.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 Makefile|  31 ++--
 kernel-shark.c  | 150 +---
 kshark-plugin.h |  22 -
 3 files changed, 146 insertions(+), 57 deletions(-)

diff --git a/Makefile b/Makefile
index 5c35143..8d0f16f 100644
--- a/Makefile
+++ b/Makefile
@@ -293,6 +293,8 @@ else
   print_shared_lib_compile =   echo '  $(GUI)COMPILE SHARED LIB '$(GOBJ);
   print_plugin_obj_compile =   echo '  $(GUI)COMPILE PLUGIN OBJ '$(GOBJ);
   print_plugin_build = echo '  $(GUI)BUILD PLUGIN   '$(GOBJ);
+  print_gui_plugin_obj_compile =   echo '  $(GUI)COMPILE GUI_PLUGIN OBJ 
'$(GOBJ);
+  print_gui_plugin_build = echo '  $(GUI)BUILD GUI_PLUGIN   
'$(GOBJ);
   print_static_lib_build = echo '  $(GUI)BUILD STATIC LIB   '$(GOBJ);
   print_install =  echo '  $(GUI)INSTALL '$(GSPACE)$1' to  
$(DESTDIR_SQ)$2';
 endif
@@ -317,6 +319,14 @@ do_plugin_build =  \
($(print_plugin_build)  \
$(CC) $(CFLAGS) $(LDFLAGS) -shared -nostartfiles -o $@ $<)
 
+do_compile_gui_plugin_obj =\
+   ($(print_gui_plugin_obj_compile)\
+   $(CC) -c $(CPPFLAGS) $(CFLAGS) -fPIC -o $@ $<)
+
+do_gui_plugin_build =  \
+   ($(print_gui_plugin_build)  \
+   $(CC) $(CFLAGS) $(LDFLAGS) -shared -nostartfiles -o $@ $<)
+
 do_build_static_lib =  \
($(print_static_lib_build)  \
$(RM) $@;  $(AR) rcs $@ $^)
@@ -373,13 +383,17 @@ PLUGIN_OBJS += plugin_tlb.o
 
 PLUGINS := $(PLUGIN_OBJS:.o=.so)
 
+GUI_PLUGIN_OBJS =
+
+GUI_PLUGINS := $(GUI_PLUGIN_OBJS:.o=.so)
+
 ALL_OBJS = $(TRACE_CMD_OBJS) $(KERNEL_SHARK_OBJS) $(TRACE_VIEW_MAIN_OBJS) \
-   $(TRACE_GRAPH_MAIN_OBJS) $(TCMD_LIB_OBJS) $(PLUGIN_OBJS)
+   $(TRACE_GRAPH_MAIN_OBJS) $(TCMD_LIB_OBJS) $(PLUGIN_OBJS) 
$(GUI_PLUGIN_OBJS)
 
 CMD_TARGETS = trace_plugin_dir trace_python_dir tc_version.h libparsevent.a 
$(LIB_FILE) \
trace-cmd  $(PLUGINS) $(BUILD_PYTHON)
 
-GUI_TARGETS = ks_version.h trace-graph trace-view kernelshark
+GUI_TARGETS = ks_version.h trace-graph trace-view kernelshark $(GUI_PLUGINS)
 
 TARGETS = $(CMD_TARGETS) $(GUI_TARGETS)
 
@@ -401,7 +415,7 @@ gui: $(CMD_TARGETS)
 
 all_gui: $(GUI_TARGETS) show_gui_done
 
-GUI_OBJS = $(KERNEL_SHARK_OBJS) $(TRACE_VIEW_MAIN_OBJS) 
$(TRACE_GRAPH_MAIN_OBJS)
+GUI_OBJS = $(KERNEL_SHARK_OBJS) $(TRACE_VIEW_MAIN_OBJS) 
$(TRACE_GRAPH_MAIN_OBJS) $(GUI_PLUGIN_OBJS)
 
 gui_objs := $(sort $(GUI_OBJS))
 
@@ -447,6 +461,12 @@ $(PLUGIN_OBJS): %.o : $(src)/%.c
 $(PLUGINS): %.so: %.o
$(Q)$(do_plugin_build)
 
+$(GUI_PLUGIN_OBJS): %.o : $(src)/%.c
+   $(Q)$(do_compile_gui_plugin_obj)
+
+$(GUI_PLUGINS): %.so: %.o
+   $(Q)$(do_gui_plugin_build)
+
 define make_version.h
(echo '/* This file is automatically generated. Do not modify. */'; 
\
echo \#define VERSION_CODE $(shell  
\
@@ -549,7 +569,10 @@ cscope: force
$(RM) cscope*
find . -name '*.[ch]' | cscope -b -q
 
-PLUGINS_INSTALL = $(subst .so,.install,$(PLUGINS)) $(subst 
.so,.install,$(PYTHON_PLUGINS))
+PLUGINS_INSTALL =
+PLUGINS_INSTALL += $(subst .so,.install,$(PLUGINS))
+PLUGINS_INSTALL += $(subst .so,.install,$(GUI_PLUGINS))
+PLUGINS_INSTALL += $(subst .so,.install,$(PYTHON_PLUGINS))
 
 define do_install
$(print_install)\
diff --git a/kernel-shark.c b/kernel-shark.c
index 89723c3..79b387a 100644
--- a/kernel-shark.c
+++ b/kernel-shark.c
@@ -339,6 +339,96 @@ int kernelshark_load_file(struct shark_info *info, const 
char *file)
return 0;
 }
 
+static struct plugin_list {
+   struct plugin_list  *next;
+   const char  *file;
+} *plugins;
+static struct plugin_list **plugin_next = &plugins;
+
+static void add_plugin(const char *file)
+{
+   struct stat st;
+   int ret;
+
+   ret = stat(file, &st);
+   if (ret < 0) {
+   warning("plugin %s not found", file);
+   return;
+   }
+
+   *plugin_next = calloc(sizeof(struct plugin_list), 1);
+   if (!*plugin_next) {
+   warning("failed to allocat memory for plugin");
+   return;
+   }
+
+   (*plugin_next)->file = file;
+   plugin_next = &(*plugin_next)->next;
+}
+
+static void handle_plugins(struct shark_info *info,
+  struct trace_view_store *store,
+

[PATCH v2 2/4] kernelshark: Adding a GUI event handler

2017-11-10 Thread Yordan Karadzhov (VMware)
gui_event_handler is defined. Utility functions for dealing
with this handler are added.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 Makefile|  6 ++---
 kshark-plugin.c | 69 +
 kshark-plugin.h | 22 ++
 3 files changed, 94 insertions(+), 3 deletions(-)
 create mode 100644 kshark-plugin.c

diff --git a/Makefile b/Makefile
index 8d0f16f..992e6b0 100644
--- a/Makefile
+++ b/Makefile
@@ -348,17 +348,17 @@ $(obj)/%.o: $(src)/%.c
$(Q)$(call check_gui)
 
 TRACE_GUI_OBJS = trace-filter.o trace-compat.o trace-filter-hash.o 
trace-dialog.o \
-   trace-xml.o
+   trace-xml.o kshark-plugin.o
 TRACE_CMD_OBJS = trace-cmd.o trace-record.o trace-read.o trace-split.o 
trace-listen.o \
 trace-stack.o trace-hist.o trace-mem.o trace-snapshot.o trace-stat.o \
 trace-hash.o trace-profile.o trace-stream.o trace-record.o 
trace-restore.o \
 trace-check-events.o trace-show.o trace-list.o
-TRACE_VIEW_OBJS = trace-view.o trace-view-store.o
+TRACE_VIEW_OBJS = trace-view.o trace-view-store.o kshark-plugin.o
 TRACE_GRAPH_OBJS = trace-graph.o trace-plot.o trace-plot-cpu.o 
trace-plot-task.o
 TRACE_VIEW_MAIN_OBJS = trace-view-main.o $(TRACE_VIEW_OBJS) $(TRACE_GUI_OBJS)
 TRACE_GRAPH_MAIN_OBJS = trace-graph-main.o $(TRACE_GRAPH_OBJS) 
$(TRACE_GUI_OBJS)
 KERNEL_SHARK_OBJS = $(TRACE_VIEW_OBJS) $(TRACE_GRAPH_OBJS) $(TRACE_GUI_OBJS) \
-   trace-capture.o kernel-shark.o
+   trace-capture.o kernel-shark.o kshark-plugin.o
 
 PEVENT_LIB_OBJS = event-parse.o trace-seq.o parse-filter.o parse-utils.o 
str_error_r.o
 TCMD_LIB_OBJS = $(PEVENT_LIB_OBJS) trace-util.o trace-input.o trace-ftrace.o \
diff --git a/kshark-plugin.c b/kshark-plugin.c
new file mode 100644
index 000..6532512
--- /dev/null
+++ b/kshark-plugin.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2017 VMware Inc, Yordan Karadzhov 
+ *
+ * ~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not,  see <http://www.gnu.org/licenses>
+ *
+ * ~~
+ */
+
+#include 
+
+#include "kshark-plugin.h"
+
+struct gui_event_handler *make_gui_event_handler(int event_id, int type,
+
kshark_plugin_event_handler_func evt_func,
+
kshark_plugin_context_update_func ctx_func)
+{
+   struct gui_event_handler *handler = malloc(sizeof(struct 
gui_event_handler));
+
+   if (!handler)
+   return NULL;
+
+   handler->next = NULL;
+   handler->id = event_id;
+   handler->type = type;
+   handler->event_func = evt_func;
+   handler->context_func = ctx_func;
+
+   return handler;
+}
+
+struct gui_event_handler *find_gui_event_handler(struct gui_event_handler 
*handlers,
+int event_id)
+{
+   while (handlers) {
+   if (handlers->id == event_id)
+   return handlers;
+
+   handlers = handlers->next;
+   }
+
+   return NULL;
+}
+
+void unregister_gui_event_handler(struct gui_event_handler **handlers, int 
event_id)
+{
+   struct gui_event_handler **last = handlers;
+   struct gui_event_handler *list;
+
+   for (list = *handlers; list; list = list->next) {
+   if (list->id == event_id) {
+   *last = list->next;
+   return;
+   }
+
+   last = &list->next;
+   }
+}
diff --git a/kshark-plugin.h b/kshark-plugin.h
index 81c09b5..d65ee02 100644
--- a/kshark-plugin.h
+++ b/kshark-plugin.h
@@ -50,4 +50,26 @@ enum gui_plugin_actions {
KSHARK_PLUGIN_UNLOAD,
 };
 
+enum gui_event_types {
+   KSHARK_PLUGIN_SWITCH_EVENT,
+   KSHARK_PLUGIN_WAKEUP_EVENT,
+};
+
+struct gui_event_handler {
+   struct gui_event_handler*next;
+   int id;
+   int type;
+   kshark_plugin_event_handler_funcevent_func;
+   kshark_plugin_context_update_func   context_func;
+};
+
+struct gui_event_handler *make_gui_event_handler(int event_id, int type,
+ 

[PATCH 2/2] kernelshark: Adding a GUI plugin for xenomai events

2017-11-01 Thread Yordan Karadzhov (VMware)
The Makefile is modified in order to support building GUI plugins.

Function handlers for processing of plugin-specific events (xenomai
events "cobalt_switch_context" and "cobalt_thread_resume" in this case)
are added to trace_view_store and graph_info.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 Makefile |  35 +--
 kernel-shark.c   |   8 +-
 kshark-plugin.c  |  65 +
 kshark-plugin.h  |  49 +-
 plugin_xenomai_gui.c | 257 +++
 trace-graph.c|  48 ++
 trace-graph.h|   7 ++
 trace-plot-task.c|  17 +++-
 trace-view-store.c   |  24 +
 trace-view-store.h   |   5 +
 10 files changed, 500 insertions(+), 15 deletions(-)
 create mode 100644 kshark-plugin.c
 create mode 100644 plugin_xenomai_gui.c

diff --git a/Makefile b/Makefile
index 5c35143..96f30e2 100644
--- a/Makefile
+++ b/Makefile
@@ -293,6 +293,8 @@ else
   print_shared_lib_compile =   echo '  $(GUI)COMPILE SHARED LIB '$(GOBJ);
   print_plugin_obj_compile =   echo '  $(GUI)COMPILE PLUGIN OBJ '$(GOBJ);
   print_plugin_build = echo '  $(GUI)BUILD PLUGIN   '$(GOBJ);
+  print_gui_plugin_obj_compile =   echo '  $(GUI)COMPILE GUI_PLUGIN OBJ 
'$(GOBJ);
+  print_gui_plugin_build = echo '  $(GUI)BUILD GUI_PLUGIN   
'$(GOBJ);
   print_static_lib_build = echo '  $(GUI)BUILD STATIC LIB   '$(GOBJ);
   print_install =  echo '  $(GUI)INSTALL '$(GSPACE)$1' to  
$(DESTDIR_SQ)$2';
 endif
@@ -317,6 +319,14 @@ do_plugin_build =  \
($(print_plugin_build)  \
$(CC) $(CFLAGS) $(LDFLAGS) -shared -nostartfiles -o $@ $<)
 
+do_compile_gui_plugin_obj =\
+   ($(print_gui_plugin_obj_compile)\
+   $(CC) -c $(CPPFLAGS) $(CFLAGS) -fPIC -o $@ $<)
+
+do_gui_plugin_build =  \
+   ($(print_gui_plugin_build)  \
+   $(CC) $(CFLAGS) $(LDFLAGS) -shared -nostartfiles -o $@ $<)
+
 do_build_static_lib =  \
($(print_static_lib_build)  \
$(RM) $@;  $(AR) rcs $@ $^)
@@ -338,17 +348,17 @@ $(obj)/%.o: $(src)/%.c
$(Q)$(call check_gui)
 
 TRACE_GUI_OBJS = trace-filter.o trace-compat.o trace-filter-hash.o 
trace-dialog.o \
-   trace-xml.o
+   trace-xml.o kshark-plugin.o
 TRACE_CMD_OBJS = trace-cmd.o trace-record.o trace-read.o trace-split.o 
trace-listen.o \
 trace-stack.o trace-hist.o trace-mem.o trace-snapshot.o trace-stat.o \
 trace-hash.o trace-profile.o trace-stream.o trace-record.o 
trace-restore.o \
 trace-check-events.o trace-show.o trace-list.o
-TRACE_VIEW_OBJS = trace-view.o trace-view-store.o
+TRACE_VIEW_OBJS = trace-view.o trace-view-store.o kshark-plugin.o
 TRACE_GRAPH_OBJS = trace-graph.o trace-plot.o trace-plot-cpu.o 
trace-plot-task.o
 TRACE_VIEW_MAIN_OBJS = trace-view-main.o $(TRACE_VIEW_OBJS) $(TRACE_GUI_OBJS)
 TRACE_GRAPH_MAIN_OBJS = trace-graph-main.o $(TRACE_GRAPH_OBJS) 
$(TRACE_GUI_OBJS)
 KERNEL_SHARK_OBJS = $(TRACE_VIEW_OBJS) $(TRACE_GRAPH_OBJS) $(TRACE_GUI_OBJS) \
-   trace-capture.o kernel-shark.o
+   trace-capture.o kernel-shark.o kshark-plugin.o
 
 PEVENT_LIB_OBJS = event-parse.o trace-seq.o parse-filter.o parse-utils.o 
str_error_r.o
 TCMD_LIB_OBJS = $(PEVENT_LIB_OBJS) trace-util.o trace-input.o trace-ftrace.o \
@@ -373,13 +383,19 @@ PLUGIN_OBJS += plugin_tlb.o
 
 PLUGINS := $(PLUGIN_OBJS:.o=.so)
 
+
+GUI_PLUGIN_OBJS =
+GUI_PLUGIN_OBJS += plugin_xenomai_gui.o
+
+GUI_PLUGINS := $(GUI_PLUGIN_OBJS:.o=.so)
+
 ALL_OBJS = $(TRACE_CMD_OBJS) $(KERNEL_SHARK_OBJS) $(TRACE_VIEW_MAIN_OBJS) \
-   $(TRACE_GRAPH_MAIN_OBJS) $(TCMD_LIB_OBJS) $(PLUGIN_OBJS)
+   $(TRACE_GRAPH_MAIN_OBJS) $(TCMD_LIB_OBJS) $(PLUGIN_OBJS) 
$(GUI_PLUGIN_OBJS)
 
 CMD_TARGETS = trace_plugin_dir trace_python_dir tc_version.h libparsevent.a 
$(LIB_FILE) \
trace-cmd  $(PLUGINS) $(BUILD_PYTHON)
 
-GUI_TARGETS = ks_version.h trace-graph trace-view kernelshark
+GUI_TARGETS = ks_version.h trace-graph trace-view kernelshark $(GUI_PLUGINS)
 
 TARGETS = $(CMD_TARGETS) $(GUI_TARGETS)
 
@@ -401,7 +417,7 @@ gui: $(CMD_TARGETS)
 
 all_gui: $(GUI_TARGETS) show_gui_done
 
-GUI_OBJS = $(KERNEL_SHARK_OBJS) $(TRACE_VIEW_MAIN_OBJS) 
$(TRACE_GRAPH_MAIN_OBJS)
+GUI_OBJS = $(KERNEL_SHARK_OBJS) $(TRACE_VIEW_MAIN_OBJS) 
$(TRACE_GRAPH_MAIN_OBJS) $(GUI_PLUGIN_OBJS)
 
 gui_objs := $(sort $(GUI_OBJS))
 
@@ -447,6 +463,13 @@ $(PLUGIN_OBJS): %.o : $(src)/%.c
 $(PLUGINS): %.so: %.o
$(Q)$(do_plugin_build)
 
+
+$(GUI_PLUGIN_OBJS): %.o : $(src)/%.c
+   $(Q)$(do_compile_gui_plugin_obj)
+
+$(GUI_PLUGINS): %.so: %.o
+   $(Q)$(do_gui_plugin_build)
+
 define make_version.h
(echo '/* This file is automatically generated. Do not modify. */'; 
\
 

[PATCH 1/2] kernelshark: Fix the GUI plugin loading

2017-11-01 Thread Yordan Karadzhov (VMware)
In add_plugin(const char *), the wrong file name was used when loading a plugin.

Signed-off-by: Yordan Karadzhov (VMware) 
---
 kernel-shark.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel-shark.c b/kernel-shark.c
index c952302..89723c3 100644
--- a/kernel-shark.c
+++ b/kernel-shark.c
@@ -1816,7 +1816,7 @@ static void add_plugin(const char *file)
struct stat st;
int ret;
 
-   ret = stat(default_input_file, &st);
+   ret = stat(file, &st);
if (ret < 0) {
warning("plugin %s not found", file);
return;
-- 
2.15.0.rc0