From: Andi Kleen <ak@linux.intel.com>

For TopDown metrics it is useful to have a remove transaction when
the counter is unscheduled, so that the value can be saved correctly.
Add a remove transaction to the perf core.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
 arch/x86/events/core.c     | 3 +--
 include/linux/perf_event.h | 1 +
 kernel/events/core.c       | 5 +++++
 3 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index b684f0294f35..2b2328a528df 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1902,8 +1902,7 @@ static inline void x86_pmu_read(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  *
- * We only support PERF_PMU_TXN_ADD transactions. Save the
- * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
+ * Save the transaction flags and ignore non-PERF_PMU_TXN_ADD
  * transactions.
  */
 static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index bd3d6a89ccd4..088358be55ff 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -233,6 +233,7 @@ struct perf_event;
  */
 #define PERF_PMU_TXN_ADD  0x1          /* txn to add/schedule event on PMU */
 #define PERF_PMU_TXN_READ 0x2          /* txn to read event group from PMU */
+#define PERF_PMU_TXN_REMOVE 0x4                /* txn to remove event on PMU */
 
 /**
  * pmu::capabilities flags
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 560ac237b8be..bc07cd5a302d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2032,6 +2032,7 @@ group_sched_out(struct perf_event *group_event,
                struct perf_cpu_context *cpuctx,
                struct perf_event_context *ctx)
 {
+       struct pmu *pmu = ctx->pmu;
        struct perf_event *event;
 
        if (group_event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2039,6 +2040,8 @@ group_sched_out(struct perf_event *group_event,
 
        perf_pmu_disable(ctx->pmu);
 
+       pmu->start_txn(pmu, PERF_PMU_TXN_REMOVE);
+
        event_sched_out(group_event, cpuctx, ctx);
 
        /*
@@ -2051,6 +2054,8 @@ group_sched_out(struct perf_event *group_event,
 
        if (group_event->attr.exclusive)
                cpuctx->exclusive = 0;
+
+       pmu->commit_txn(pmu);
 }
 
 #define DETACH_GROUP   0x01UL
-- 
2.17.1

Reply via email to