[ Upstream commit 871a93b0aad65a7f44ee25f2d17932ef6d559850 ]

Kan reported that n_metric gets corrupted for cancelled transactions;
a similar issue exists with n_pair for AMD's Large Increment per Cycle
events.
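
The underlying pattern can be illustrated with a minimal userspace
sketch; the names hw_events, start_txn, collect_pair_event and
cancel_txn below are simplified stand-ins for cpu_hw_events and the
x86 PMU transaction callbacks, not the actual kernel code. The idea is
that a per-transaction shadow counter lets a cancelled transaction
subtract exactly the paired events it had added:

  #include <stdio.h>

  struct hw_events {
          int n_pair;     /* paired (Large Increment) events accounted */
          int n_txn_pair; /* paired events added by the in-flight txn  */
  };

  static void start_txn(struct hw_events *c)
  {
          c->n_txn_pair = 0;          /* nothing collected in this txn yet */
  }

  static void collect_pair_event(struct hw_events *c)
  {
          c->n_pair++;                /* global count ...                  */
          c->n_txn_pair++;            /* ... and its txn-local shadow      */
  }

  static void cancel_txn(struct hw_events *c)
  {
          c->n_pair -= c->n_txn_pair; /* undo exactly what this txn added  */
  }

  int main(void)
  {
          struct hw_events c = { .n_pair = 1 }; /* one pair already in use */

          start_txn(&c);
          collect_pair_event(&c);     /* txn adds another pair ...         */
          cancel_txn(&c);             /* ... but then gets cancelled       */

          /* without the n_txn_pair shadow, n_pair would stay stuck at 2 */
          printf("n_pair after cancelled txn: %d\n", c.n_pair);
          return 0;
  }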

The problem was confirmed, and the fix verified, by Kim using:

  sudo perf stat -e "{cycles,cycles,cycles,cycles}:D" -a sleep 10 &

  # should succeed:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload

  # should fail:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all,fp_ret_sse_avx_ops.all,cycles}:D" 
-a workload

  # previously failed, now succeeds with this patch:
  sudo perf stat -e "{fp_ret_sse_avx_ops.all}:D" -a workload

Fixes: 5738891229a2 ("perf/x86/amd: Add support for Large Increment per Cycle Events")
Reported-by: Kan Liang <kan.li...@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Tested-by: Kim Phillips <kim.phill...@amd.com>
Link: https://lkml.kernel.org/r/20201005082516.gg2...@hirez.programming.kicks-ass.net
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 arch/x86/events/core.c       | 6 +++++-
 arch/x86/events/perf_event.h | 1 +
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 1cbf57dc2ac89..11bbc6590f904 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1087,8 +1087,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
                cpuc->event_list[n] = event;
                n++;
-               if (is_counter_pair(&event->hw))
+               if (is_counter_pair(&event->hw)) {
                        cpuc->n_pair++;
+                       cpuc->n_txn_pair++;
+               }
        }
        return n;
 }
@@ -1962,6 +1964,7 @@ static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
 
        perf_pmu_disable(pmu);
        __this_cpu_write(cpu_hw_events.n_txn, 0);
+       __this_cpu_write(cpu_hw_events.n_txn_pair, 0);
 }
 
 /*
@@ -1987,6 +1990,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
         */
        __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
        __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
+       __this_cpu_sub(cpu_hw_events.n_pair, __this_cpu_read(cpu_hw_events.n_txn_pair));
        perf_pmu_enable(pmu);
 }
 
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 7b68ab5f19e76..0e74235cdac9e 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -210,6 +210,7 @@ struct cpu_hw_events {
                                             they've never been enabled yet */
        int                     n_txn;    /* the # last events in the below arrays;
                                             added in the current transaction */
+       int                     n_txn_pair;
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];
 
-- 
2.25.1