These two new functions, declared in trace.h for use outside trace.c, will
be used in a future patch by kdb_ftdump() to quickly skip all but the last
few trace entries.

Suggested-by: Steven Rostedt <rost...@goodmis.org>
Signed-off-by: Douglas Anderson <diand...@chromium.org>
Acked-by: Daniel Thompson <daniel.thomp...@linaro.org>
---

Changes in v6:
- Add Daniel Thompson's Ack.

Changes in v5: None
Changes in v4:
- trace_total_entries() / trace_total_entries_cpu() new for v4

Changes in v3: None

 kernel/trace/trace.c | 65 ++++++++++++++++++++++++++++++++++----------
 kernel/trace/trace.h |  3 ++
 2 files changed, 53 insertions(+), 15 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ccd759eaad79..7afc90f82e53 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3490,34 +3490,69 @@ static void s_stop(struct seq_file *m, void *p)
        trace_event_read_unlock();
 }
 
+static void
+get_total_entries_cpu(struct trace_buffer *buf, unsigned long *total,
+                     unsigned long *entries, int cpu)
+{
+       unsigned long count;
+
+       count = ring_buffer_entries_cpu(buf->buffer, cpu);
+       /*
+        * If this buffer has skipped entries, then we hold all
+        * entries for the trace and we need to ignore the
+        * ones before the time stamp.
+        */
+       if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
+               count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
+               /* total is the same as the entries */
+               *total = count;
+       } else
+               *total = count +
+                       ring_buffer_overrun_cpu(buf->buffer, cpu);
+       *entries = count;
+}
+
 static void
 get_total_entries(struct trace_buffer *buf,
                  unsigned long *total, unsigned long *entries)
 {
-       unsigned long count;
+       unsigned long t, e;
        int cpu;
 
        *total = 0;
        *entries = 0;
 
        for_each_tracing_cpu(cpu) {
-               count = ring_buffer_entries_cpu(buf->buffer, cpu);
-               /*
-                * If this buffer has skipped entries, then we hold all
-                * entries for the trace and we need to ignore the
-                * ones before the time stamp.
-                */
-               if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
-                       count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
-                       /* total is the same as the entries */
-                       *total += count;
-               } else
-                       *total += count +
-                               ring_buffer_overrun_cpu(buf->buffer, cpu);
-               *entries += count;
+               get_total_entries_cpu(buf, &t, &e, cpu);
+               *total += t;
+               *entries += e;
        }
 }
 
+unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
+{
+       unsigned long total, entries;
+
+       if (!tr)
+               tr = &global_trace;
+
+       get_total_entries_cpu(&tr->trace_buffer, &total, &entries, cpu);
+
+       return entries;
+}
+
+unsigned long trace_total_entries(struct trace_array *tr)
+{
+       unsigned long total, entries;
+
+       if (!tr)
+               tr = &global_trace;
+
+       get_total_entries(&tr->trace_buffer, &total, &entries);
+
+       return entries;
+}
+
 static void print_lat_help_header(struct seq_file *m)
 {
        seq_puts(m, "#                  _------=> CPU#            \n"
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d80cee49e0eb..4562b30eda9d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -719,6 +719,9 @@ void trace_init_global_iter(struct trace_iterator *iter);
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
+unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu);
+unsigned long trace_total_entries(struct trace_array *tr);
+
 void trace_function(struct trace_array *tr,
                    unsigned long ip,
                    unsigned long parent_ip,
-- 
2.21.0.225.g810b269d1ac-goog

Reply via email to