From: Kan Liang <kan.li...@intel.com>

Multiplexing overhead is one of the key overheads when the number of
events exceeds the number of available counters.

Add PERF_CORE_MUX_OVERHEAD as a common overhead type, since
multiplexing applies to all PMUs. The overhead is measured as the time
spent rotating the contexts in perf_rotate_context() and is accumulated
per CPU context, but only when the PMU has overhead statistics enabled
(pmu->stat).
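
For reference, the accounting is a plain count/total-time pair per
overhead type. Below is a minimal user-space sketch of that pattern;
the struct, helper, and variable names are illustrative only and are
not part of this patch or of perf's API:

/*
 * User-space sketch of the nr/time accounting pattern used in
 * perf_rotate_context() below. Names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct overhead_entry {
	uint64_t nr;	/* number of rotations observed */
	uint64_t time;	/* total rotation time, in ns */
};

static uint64_t clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	struct overhead_entry mux = { 0, 0 };
	uint64_t start, end;

	start = clock_ns();
	/* ... rotate events here ... */
	end = clock_ns();

	/* one more rotation; add its elapsed time to the running total */
	mux.nr++;
	mux.time += end - start;

	printf("rotations=%llu total=%lluns avg=%lluns\n",
	       (unsigned long long)mux.nr,
	       (unsigned long long)mux.time,
	       (unsigned long long)(mux.nr ? mux.time / mux.nr : 0));
	return 0;
}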

Signed-off-by: Kan Liang <kan.li...@intel.com>
---
 include/uapi/linux/perf_event.h | 1 +
 kernel/events/core.c            | 9 +++++++++
 2 files changed, 10 insertions(+)

diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 23b7963..c488336 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -1001,6 +1001,7 @@ struct perf_branch_entry {
  */
 enum perf_record_overhead_type {
        /* common overhead */
+       PERF_CORE_MUX_OVERHEAD  = 0,
        /* PMU specific */
        PERF_OVERHEAD_MAX,
 };
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dbde193..28468ae 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3326,6 +3326,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 static int perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
        struct perf_event_context *ctx = NULL;
+       u64 start_clock, end_clock;
        int rotate = 0;
 
        if (cpuctx->ctx.nr_events) {
@@ -3342,6 +3343,7 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
        if (!rotate)
                goto done;
 
+       start_clock = perf_clock();
        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
        perf_pmu_disable(cpuctx->ctx.pmu);
 
@@ -3357,6 +3359,13 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
 
        perf_pmu_enable(cpuctx->ctx.pmu);
        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
+
+       /* calculate multiplexing overhead */
+       if (cpuctx->ctx.pmu->stat) {
+               end_clock = perf_clock();
+               cpuctx->overhead[PERF_CORE_MUX_OVERHEAD].nr++;
+               cpuctx->overhead[PERF_CORE_MUX_OVERHEAD].time += end_clock - start_clock;
+       }
 done:
 
        return rotate;
-- 
2.4.3
