From: Kan Liang <[email protected]>

Show frequency, CPU Utilization and percent performance for each symbol
in perf report by --perf-freq

In a sampling group, only the group leader does sampling. So we only need
to print the group leader's freq in --group.

--perf-freq option also implies --group.

Only save and calculate freq/CPU%/CORE_BUSY when the --perf-freq option is
set (replacing dump_trace).

Here is an example.

$ perf report --stdio --perf-freq

                                 Overhead   FREQ MHz   CPU%  CORE_BUSY%
Command      Shared Object     Symbol
 ........................................  .........  .....  ..........
...........  ................  ......................

    99.54%  99.54%  99.53%  99.53%  99.53%       2301     96         99
tchain_edit  tchain_edit       [.] f3
     0.20%   0.20%   0.20%   0.20%   0.20%       2301     98         99
tchain_edit  tchain_edit       [.] f2
     0.05%   0.05%   0.05%   0.05%   0.05%       2300     98         99
tchain_edit  [kernel.vmlinux]  [k] read_tsc

Signed-off-by: Kan Liang <[email protected]>
---
 tools/perf/Documentation/perf-report.txt | 12 ++++++++++++
 tools/perf/builtin-report.c              | 23 ++++++++++++++++++++++-
 tools/perf/ui/hist.c                     | 14 ++++++++++++++
 tools/perf/util/session.c                |  2 +-
 tools/perf/util/symbol.h                 | 12 +++++++++++-
 5 files changed, 60 insertions(+), 3 deletions(-)

diff --git a/tools/perf/Documentation/perf-report.txt 
b/tools/perf/Documentation/perf-report.txt
index b941d5e..671bac0 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -353,6 +353,18 @@ include::itrace.txt[]
 --socket-filter::
        Only report the samples on the processor socket that match with this 
filter
 
+--perf-freq::
+       Show CPU frequency and performance result from sample read.
+       To generate the frequency and performance output, the perf.data file
+       must have been obtained by group read and using special events cycles,
+       ref-cycles, msr/tsc/, msr/aperf/ or msr/mperf/ (Use --perf-freq in 
record)
+       Freq MHz: The frequency during the sample interval. Needs cycles and
+                 ref-cycles event.
+       CPU%: CPU utilization during the sample interval. Needs ref-cycles and
+             msr/tsc/ events.
+       CORE_BUSY%: actual percent performance (APERF/MPERF%) during the
+                   sample interval. Needs msr/aperf/ and msr/mperf/ events.
+
 include::callchain-overhead-calculation.txt[]
 
 SEE ALSO
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index e2a2fdd..98abe6a 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -116,7 +116,7 @@ static int hist_iter__report_callback(struct 
hist_entry_iter *iter,
        struct mem_info *mi;
        struct branch_info *bi;
 
-       if (dump_trace &&
+       if (symbol_conf.perf_freq &&
            (iter->ops == &hist_iter_normal) &&
            perf_evsel__is_group_leader(evsel))
                set_he_perf_freq(rep->session, iter);
@@ -778,12 +778,16 @@ int cmd_report(int argc, const char **argv, const char 
*prefix __maybe_unused)
                    "Show callgraph from reference event"),
        OPT_INTEGER(0, "socket-filter", &report.socket_filter,
                    "only show processor socket that match with this filter"),
+       OPT_BOOLEAN(0, "perf-freq", &symbol_conf.perf_freq,
+                   "show CPU freqency and performance info"),
        OPT_END()
        };
        struct perf_data_file file = {
                .mode  = PERF_DATA_MODE_READ,
        };
+       struct perf_evsel *pos;
        int ret = hists__init();
+       perf_freq_t freq_data_status = { 0 };
 
        if (ret < 0)
                return ret;
@@ -868,6 +872,23 @@ repeat:
                symbol_conf.cumulate_callchain = false;
        }
 
+       if (symbol_conf.perf_freq) {
+               symbol_conf.perf_freq_type = 0;
+               evlist__for_each(session->evlist, pos) {
+                       perf_freq__init(session->header.env.msr_pmu_type,
+                                       pos, freq_data_status, 1);
+               }
+               if (perf_freq__has_freq(freq_data_status))
+                       symbol_conf.perf_freq_type |= 1U << DISPLAY_FREQ;
+               if (perf_freq__has_cpu_util(freq_data_status))
+                       symbol_conf.perf_freq_type |= 1U << DISPLAY_CPU_UTIL;
+               if (perf_freq__has_core_busy(freq_data_status))
+                       symbol_conf.perf_freq_type |= 1U << DISPLAY_CORE_BUSY;
+
+               /* --perf-freq option imply --group */
+               symbol_conf.event_group = true;
+       }
+
        if (setup_sorting() < 0) {
                if (sort_order)
                        parse_options_usage(report_usage, options, "s", 1);
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index ba95ee2..d9082df 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -111,6 +111,11 @@ int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp 
*hpp,
        int len = fmt->user_len ?: fmt->len;
        bool leader_only = false;
 
+       if (((fmt == &perf_hpp__format[PERF_HPP__FREQ]) ||
+            (fmt == &perf_hpp__format[PERF_HPP__CPU_UTIL]) ||
+            (fmt == &perf_hpp__format[PERF_HPP__CORE_BUSY])))
+               leader_only = true;
+
        if (symbol_conf.field_sep) {
                return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
                                  print_fn, fmt_percent, leader_only);
@@ -522,6 +527,15 @@ void perf_hpp__init(void)
        if (symbol_conf.show_total_period)
                perf_hpp__column_enable(PERF_HPP__PERIOD);
 
+       if (symbol_conf.perf_freq) {
+               if (symbol_conf.perf_freq_type & (1U << DISPLAY_FREQ))
+                       perf_hpp__column_enable(PERF_HPP__FREQ);
+               if (symbol_conf.perf_freq_type & (1U << DISPLAY_CPU_UTIL))
+                       perf_hpp__column_enable(PERF_HPP__CPU_UTIL);
+               if (symbol_conf.perf_freq_type & (1U << DISPLAY_CORE_BUSY))
+                       perf_hpp__column_enable(PERF_HPP__CORE_BUSY);
+       }
+
        /* prepend overhead field for backward compatiblity.  */
        list = &perf_hpp__format[PERF_HPP__OVERHEAD].sort_list;
        if (list_empty(list))
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 6ba3a68..a2f06cf 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1101,7 +1101,7 @@ static int machines__deliver_event(struct machines 
*machines,
                        ++evlist->stats.nr_unknown_id;
                        return 0;
                }
-               if (dump_trace &&
+               if (symbol_conf.perf_freq &&
                    (evsel->attr.sample_type & PERF_SAMPLE_READ) &&
                    (evsel->attr.read_format & PERF_FORMAT_GROUP))
                        perf_caculate_freq(sample, evsel);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 40073c6..95bc239 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -80,6 +80,14 @@ static inline size_t symbol__size(const struct symbol *sym)
 struct strlist;
 struct intlist;
 
+enum perf_freq_display {
+       DISPLAY_FREQ            = 0,
+       DISPLAY_CPU_UTIL,
+       DISPLAY_CORE_BUSY,
+
+       DISPLAY_MAX
+};
+
 struct symbol_conf {
        unsigned short  priv_size;
        unsigned short  nr_events;
@@ -107,7 +115,8 @@ struct symbol_conf {
                        show_hist_headers,
                        branch_callstack,
                        has_filter,
-                       show_ref_callgraph;
+                       show_ref_callgraph,
+                       perf_freq;
        const char      *vmlinux_name,
                        *kallsyms_name,
                        *source_prefix,
@@ -132,6 +141,7 @@ struct symbol_conf {
        struct intlist  *pid_list,
                        *tid_list;
        const char      *symfs;
+       u64             perf_freq_type;
 };
 
 extern struct symbol_conf symbol_conf;
-- 
1.8.3.1

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to