[tip:perf/core] perf trace: Add ordered processing

2018-12-18 - tip-bot for Jiri Olsa
Commit-ID:  028713aa8389d960cb1935a9954327bdaa163cf8
Gitweb: https://git.kernel.org/tip/028713aa8389d960cb1935a9954327bdaa163cf8
Author: Jiri Olsa 
AuthorDate: Wed, 5 Dec 2018 17:05:09 +0100
Committer:  Arnaldo Carvalho de Melo 
CommitDate: Mon, 17 Dec 2018 15:21:17 -0300

perf trace: Add ordered processing

Sort events before delivering them, so that the output reflects the
precise order in which they happened, just as is done in 'perf report'
and 'perf top'.
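
The mechanics are the usual tools/perf ordered_events ones, visible in
the diff below: each event is queued together with its sample timestamp,
and whenever the newest timestamp seen advances, everything more than
one second older than it is flushed out in sorted order. The one-second
window leaves room for events from other per-CPU ring buffers that
arrive late but carry older timestamps. A minimal standalone sketch of
the idea (illustrative only, not the perf code; all names below are
made up):

/*
 * Toy model of the queue-then-flush scheme: buffer timestamped events
 * and deliver, in timestamp order, only those that have fallen more
 * than one second behind the newest timestamp observed.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define MAX_QUEUED   1024

struct demo_event { uint64_t timestamp; int id; };

static struct demo_event queued[MAX_QUEUED];
static int nr_queued;
static uint64_t last_timestamp;

static int cmp_timestamp(const void *a, const void *b)
{
	const struct demo_event *ea = a, *eb = b;

	return (ea->timestamp > eb->timestamp) - (ea->timestamp < eb->timestamp);
}

static void deliver(struct demo_event *ev)
{
	printf("deliver id=%d ts=%llu\n", ev->id,
	       (unsigned long long)ev->timestamp);
}

/* Deliver everything older than 'flush', in order; keep the rest queued. */
static void flush_older_than(uint64_t flush)
{
	int i, kept = 0;

	qsort(queued, nr_queued, sizeof(queued[0]), cmp_timestamp);
	for (i = 0; i < nr_queued; i++) {
		if (queued[i].timestamp < flush)
			deliver(&queued[i]);
		else
			queued[kept++] = queued[i];
	}
	nr_queued = kept;
}

static void queue_event(uint64_t ts, int id)
{
	if (nr_queued < MAX_QUEUED)
		queued[nr_queued++] = (struct demo_event){ ts, id };
	if (ts > last_timestamp)
		last_timestamp = ts;
	if (last_timestamp > NSEC_PER_SEC)	/* avoid u64 underflow */
		flush_older_than(last_timestamp - NSEC_PER_SEC);
}

int main(void)
{
	queue_event(5 * NSEC_PER_SEC, 1);
	queue_event(4 * NSEC_PER_SEC, 2); /* older, but arrives later */
	queue_event(7 * NSEC_PER_SEC, 3); /* pushes 4s and 5s past the window */
	flush_older_than(UINT64_MAX);     /* final flush, cf. OE_FLUSH__FINAL */
	return 0;
}

Run, this prints id=2 before id=1 even though id=1 arrived first - the
reordering that 'perf trace' gains from this patch.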

Signed-off-by: Jiri Olsa 
Tested-by: Arnaldo Carvalho de Melo 
Cc: Alexander Shishkin 
Cc: Dmitry Levin 
Cc: Eugene Syromiatnikov 
Cc: Frederic Weisbecker 
Cc: Luis Cláudio Gonçalves 
Cc: Namhyung Kim 
Cc: Peter Zijlstra 
Cc: Steven Rostedt (VMware) 
Cc: Thomas Gleixner 
Link: http://lkml.kernel.org/r/20181205160509.1168-9-jo...@kernel.org
[ split from a larger patch, added trace__ prefixes to new 'struct trace' methods ]
Signed-off-by: Arnaldo Carvalho de Melo 
---
 tools/perf/builtin-trace.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 51 insertions(+), 1 deletion(-)

diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 3b6b1fecf2bb..366ec3c8f580 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -127,6 +127,10 @@ struct trace {
 	bool			force;
 	bool			vfs_getname;
 	int			trace_pgfaults;
+	struct {
+		struct ordered_events	data;
+		u64			last;
+	} oe;
 };
 
 struct tp_field {
@@ -2652,6 +2656,42 @@ static int trace__deliver_event(struct trace *trace, union perf_event *event)
return 0;
 }
 
+static int trace__flush_ordered_events(struct trace *trace)
+{
+   u64 first = ordered_events__first_time(&trace->oe.data);
+   u64 flush = trace->oe.last - NSEC_PER_SEC;
+
+   /* Is there something to flush? */
+   if (first && first < flush)
+   return ordered_events__flush_time(&trace->oe.data, flush);
+
+   return 0;
+}
+
+static int trace__deliver_ordered_event(struct trace *trace, union perf_event *event)
+{
+   struct perf_evlist *evlist = trace->evlist;
+   int err;
+
+   err = perf_evlist__parse_sample_timestamp(evlist, event, &trace->oe.last);
+   if (err && err != -1)
+   return err;
+
+   err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
+   if (err)
+   return err;
+
+   return trace__flush_ordered_events(trace);
+}
+
+static int ordered_events__deliver_event(struct ordered_events *oe,
+                                        struct ordered_event *event)
+{
+   struct trace *trace = container_of(oe, struct trace, oe.data);
+
+   return trace__deliver_event(trace, event->event);
+}
+
 static int trace__run(struct trace *trace, int argc, const char **argv)
 {
struct perf_evlist *evlist = trace->evlist;
@@ -2819,7 +2859,9 @@ again:
while ((event = perf_mmap__read_event(md)) != NULL) {
++trace->nr_events;
 
-   trace__deliver_event(trace, event);
+   err = trace__deliver_ordered_event(trace, event);
+   if (err)
+   goto out_disable;
 
perf_mmap__consume(md);
 
@@ -2842,6 +2884,9 @@ again:
draining = true;
 
goto again;
+   } else {
+   if (trace__flush_ordered_events(trace))
+   goto out_disable;
}
} else {
goto again;
@@ -2852,6 +2897,8 @@ out_disable:
 
perf_evlist__disable(evlist);
 
+   ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
+
if (!err) {
if (trace->summary)
trace__fprintf_thread_summary(trace, trace->output);
@@ -3562,6 +3609,9 @@ int cmd_trace(int argc, const char **argv)
}
}
 
+   ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
+   ordered_events__set_copy_on_queue(&trace.oe.data, true);
+
/*
 * If we are augmenting syscalls, then combine what we put in the
 * __augmented_syscalls__ BPF map with what is in the
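
A note on the callback wiring: ordered_events__init() registers
ordered_events__deliver_event() as the delivery function, but that
callback only receives the 'struct ordered_events' pointer, so it
recovers the enclosing 'struct trace' with container_of() - which is
why 'oe.data' is embedded by value in 'struct trace' rather than held
through a pointer. A small self-contained illustration of the idiom
(simplified macro and stand-in struct members, not the real perf
definitions):

#include <stddef.h>
#include <stdio.h>

/* Kernel-style container_of, without the type-checking extras. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ordered_events { int dummy; };

struct trace {
	const char *name;
	struct {
		struct ordered_events data;
		unsigned long long last;
	} oe;
};

static void deliver_cb(struct ordered_events *oe)
{
	/* &trace->oe.data was registered, so walk back to the outer object. */
	struct trace *trace = container_of(oe, struct trace, oe.data);

	printf("callback reached trace '%s'\n", trace->name);
}

int main(void)
{
	struct trace t = { .name = "demo" };

	deliver_cb(&t.oe.data);
	return 0;
}

Also worth noting: ordered_events__set_copy_on_queue(..., true) is
needed because perf_mmap__consume() recycles the ring-buffer memory
right after each event is queued, so the queue must hold its own copies.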

