From: Kan Liang <kan.li...@linux.intel.com>

The hardware cache events are different among hybrid PMUs. Each hybrid
PMU should have its own hw cache event table.

The hw_cache_extra_regs is not part of struct x86_pmu, so hybrid()
cannot be applied here.

Reviewed-by: Andi Kleen <a...@linux.intel.com>
Signed-off-by: Kan Liang <kan.li...@linux.intel.com>
---
 arch/x86/events/core.c       | 11 +++++++++--
 arch/x86/events/perf_event.h |  9 +++++++++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 0bd9554..d71ca69 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -356,6 +356,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 {
 	struct perf_event_attr *attr = &event->attr;
 	unsigned int cache_type, cache_op, cache_result;
+	struct x86_hybrid_pmu *pmu = is_hybrid() ? hybrid_pmu(event->pmu) : NULL;
 	u64 config, val;
 
 	config = attr->config;
@@ -375,7 +376,10 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 		return -EINVAL;
 	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
 
-	val = hw_cache_event_ids[cache_type][cache_op][cache_result];
+	if (pmu)
+		val = pmu->hw_cache_event_ids[cache_type][cache_op][cache_result];
+	else
+		val = hw_cache_event_ids[cache_type][cache_op][cache_result];
 
 	if (val == 0)
 		return -ENOENT;
@@ -384,7 +388,10 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 		return -EINVAL;
 
 	hwc->config |= val;
-	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
+	if (pmu)
+		attr->config1 = pmu->hw_cache_extra_regs[cache_type][cache_op][cache_result];
+	else
+		attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
 	return x86_pmu_extra_regs(val, event);
 }
 
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index cfb2da0..203c165 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -640,6 +640,15 @@ struct x86_hybrid_pmu {
 	int				num_counters;
 	int				num_counters_fixed;
 	struct event_constraint		unconstrained;
+
+	u64				hw_cache_event_ids
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX];
+	u64				hw_cache_extra_regs
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX];
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
-- 
2.7.4
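For readers following along outside the kernel tree, below is a minimal,
standalone C sketch of the lookup this patch adds to set_ext_hw_attr(). It
is not kernel code: the hybrid_pmu_model type, the shrunken table sizes and
the 0x1111/0x2222 encodings are illustrative stand-ins of my own, and a
NULL-vs-non-NULL pmu pointer plays the role of the is_hybrid() check.

	/*
	 * Standalone model of the per-PMU cache event lookup: on a hybrid
	 * system the table comes from the event's PMU, otherwise from the
	 * single global copy.  All names and values are made-up stand-ins.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define CACHE_MAX	2	/* stand-in for PERF_COUNT_HW_CACHE_MAX */
	#define OP_MAX		2	/* stand-in for PERF_COUNT_HW_CACHE_OP_MAX */
	#define RESULT_MAX	2	/* stand-in for PERF_COUNT_HW_CACHE_RESULT_MAX */

	struct hybrid_pmu_model {
		uint64_t hw_cache_event_ids[CACHE_MAX][OP_MAX][RESULT_MAX];
		uint64_t hw_cache_extra_regs[CACHE_MAX][OP_MAX][RESULT_MAX];
	};

	/* Global table, used when the system is not hybrid. */
	static uint64_t hw_cache_event_ids[CACHE_MAX][OP_MAX][RESULT_MAX] = {
		[0][0][0] = 0x1111,	/* arbitrary illustrative encoding */
	};

	static uint64_t lookup_event_id(const struct hybrid_pmu_model *pmu,
					int type, int op, int result)
	{
		/* Mirror of the patch: prefer the per-PMU copy on hybrid parts. */
		if (pmu)
			return pmu->hw_cache_event_ids[type][op][result];
		return hw_cache_event_ids[type][op][result];
	}

	int main(void)
	{
		/* A "big core" PMU whose encoding differs from the global table. */
		struct hybrid_pmu_model core_pmu = {
			.hw_cache_event_ids = { [0][0][0] = 0x2222 },
		};

		printf("non-hybrid: %#llx\n",
		       (unsigned long long)lookup_event_id(NULL, 0, 0, 0));
		printf("hybrid:     %#llx\n",
		       (unsigned long long)lookup_event_id(&core_pmu, 0, 0, 0));
		return 0;
	}

The point is only the dispatch: the per-PMU copy wins on hybrid parts, while
non-hybrid systems keep reading the existing global tables unchanged.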