Not all cores prevent the simultaneous use of Intel PT and LBRs, although as of today most of them still do. Add an opt-in flag that such cores can use to lift the mutual exclusivity between PT and LBR, and set it for Goldmont.
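For reviewers unfamiliar with the exclusivity scheme, below is a minimal userspace sketch of what x86_add_exclusive()/x86_del_exclusive() enforce and how the new flag bypasses it. The enum constants and the lbr_pt_coexist name mirror the kernel's; the standalone harness and the simplified counting (no mutex slow path, plain -1 instead of -EBUSY) are illustrative assumptions, not the kernel implementation:

	/*
	 * Illustrative sketch only -- compiles in userspace with C11.
	 * Models the per-facility reference counters that make PT, LBR
	 * and BTS mutually exclusive, and the opt-in bypass this patch
	 * introduces.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum { x86_lbr_exclusive_lbr, x86_lbr_exclusive_bts,
	       x86_lbr_exclusive_pt, x86_lbr_exclusive_max };

	static atomic_int lbr_exclusive[x86_lbr_exclusive_max];
	static bool lbr_pt_coexist;	/* set on cores where PT and LBR can mix */

	/* Take a reference on @what; refuse if a conflicting user exists. */
	static int x86_add_exclusive(int what)
	{
		int i;

		if (lbr_pt_coexist)	/* opt-in bypass added by this patch */
			return 0;

		for (i = 0; i < x86_lbr_exclusive_max; i++)
			if (i != what && atomic_load(&lbr_exclusive[i]))
				return -1;	/* the kernel returns -EBUSY */

		atomic_fetch_add(&lbr_exclusive[what], 1);
		return 0;
	}

	static void x86_del_exclusive(int what)
	{
		if (lbr_pt_coexist)
			return;
		atomic_fetch_sub(&lbr_exclusive[what], 1);
	}

	int main(void)
	{
		/* Default behaviour: PT holds the facility, LBR is refused. */
		x86_add_exclusive(x86_lbr_exclusive_pt);
		printf("LBR while PT active: %d\n",
		       x86_add_exclusive(x86_lbr_exclusive_lbr));	/* -1 */
		x86_del_exclusive(x86_lbr_exclusive_pt);

		/* With the flag (e.g. Goldmont), both users succeed. */
		lbr_pt_coexist = true;
		printf("PT: %d, LBR: %d\n",
		       x86_add_exclusive(x86_lbr_exclusive_pt),
		       x86_add_exclusive(x86_lbr_exclusive_lbr));	/* 0, 0 */
		return 0;
	}

The intended user-visible effect is that, on cores with the flag set, a PT session and an LBR-using event can be active at the same time instead of the second one failing with EBUSY.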
Signed-off-by: Alexander Shishkin <[email protected]>
---
 arch/x86/events/core.c       | 6 ++++++
 arch/x86/events/intel/core.c | 1 +
 arch/x86/events/perf_event.h | 1 +
 3 files changed, 8 insertions(+)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 41d93d0e97..5e5e76a52f 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -360,6 +360,9 @@ int x86_add_exclusive(unsigned int what)
 {
 	int i;
 
+	if (x86_pmu.lbr_pt_coexist)
+		return 0;
+
 	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
 		mutex_lock(&pmc_reserve_mutex);
 		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
@@ -380,6 +383,9 @@ fail_unlock:
 
 void x86_del_exclusive(unsigned int what)
 {
+	if (x86_pmu.lbr_pt_coexist)
+		return;
+
 	atomic_dec(&x86_pmu.lbr_exclusive[what]);
 	atomic_dec(&active_events);
 }
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 79b59437f5..e36422c687 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3609,6 +3609,7 @@ __init int intel_pmu_init(void)
 		 */
 		x86_pmu.pebs_aliases = NULL;
 		x86_pmu.pebs_prec_dist = true;
+		x86_pmu.lbr_pt_coexist = true;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		pr_cont("Goldmont events, ");
 		break;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 7d62a02f49..8bd764df81 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -601,6 +601,7 @@ struct x86_pmu {
 	u64		lbr_sel_mask;		/* LBR_SELECT valid bits */
 	const int	*lbr_sel_map;		/* lbr_select mappings */
 	bool		lbr_double_abort;	/* duplicated lbr aborts */
+	bool		lbr_pt_coexist;		/* LBR may coexist with PT */
 
 	/*
 	 * Intel PT/LBR/BTS are exclusive
-- 
2.8.0.rc3

