For the Knights Landing processor we need to filter OFFCORE_RESPONSE
events by the config1 parameter to make sure they end up on an
appropriate PMC, as required by the specification.

On Knights Landing:
MSR_OFFCORE_RSP_1 bits 8, 11, 14 can be used only on PMC1
MSR_OFFCORE_RSP_0 bit 38 can be used only on PMC0

This patch introduces INTEL_EEVENT_CONSTRAINT, whose third parameter
specifies the extended config bits that are allowed only on the given
PMCs.

This patch depends on "Change offcore response masks for Knights Landing".
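
For example, INTEL_EEVENT_CONSTRAINT(0x01b7, 1, 1ULL << 38) constrains
OFFCORE_RESPONSE_0 events that set bit 38 in config1 to PMC0. As a
sketch, the macro expands to an initializer equivalent to the following
(the variable name is illustrative):

	static struct event_constraint ocr0_bit38 = {	/* name illustrative */
		{ .idxmsk64 = 0x1 },	/* allowed counters: PMC0 only */
		.code   = 0x01b7,	/* OFFCORE_RESPONSE_0 event/umask */
		.cmask  = INTEL_ARCH_EVENT_MASK, /* match event code + umask */
		.weight = 1,		/* HWEIGHT(0x1) */
		.emask  = 1ULL << 38,	/* extended config1 bits to filter on */
	};

The constraint is applied only when the event's config1 actually uses
one of the emask bits; otherwise the scan of the constraint table
skips this entry.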

Reported-by: Andi Kleen <a...@linux.intel.com>
Acked-by: Andi Kleen <a...@linux.intel.com>
Signed-off-by: Lukasz Odzioba <lukasz.odzi...@intel.com>
---
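With this change, an OFFCORE_RESPONSE_1 request that uses one of the
filtered config1 bits, e.g. (values illustrative, taken from the
constraint table above; bit 8 == 0x100):

	perf stat -e cpu/event=0xb7,umask=0x02,config1=0x100/ -a sleep 1

should be scheduled on PMC1 only, while the same event without bits
8/11/14 set in config1 remains unconstrained.
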
 arch/x86/events/core.c         |  3 ++-
 arch/x86/events/intel/core.c   | 17 ++++++++++++++---
 arch/x86/events/intel/uncore.c |  2 +-
 arch/x86/events/perf_event.h   | 41 ++++++++++++++++++++++++-----------------
 4 files changed, 41 insertions(+), 22 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 33787ee..a4be71c 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -122,6 +122,7 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;
+
                /* Check if the extra msrs can be safely accessed*/
                if (!er->extra_msr_access)
                        return -ENXIO;
@@ -1736,7 +1737,7 @@ static int __init init_hw_perf_events(void)
 
        unconstrained = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
-                                  0, x86_pmu.num_counters, 0, 0);
+                                  0, x86_pmu.num_counters, 0, 0, 0);
 
        x86_pmu_format_group.attrs = x86_pmu.format_attrs;
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 7c66695..794f5c8 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -177,6 +177,17 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
        EVENT_CONSTRAINT_END
 };
 
+static struct event_constraint intel_knl_event_constraints[] __read_mostly = {
+       FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+       FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+       FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
+       /* MSR_OFFCORE_RSP_1 bits 8, 11, 14 can be used only on PMC1 */
+       INTEL_EEVENT_CONSTRAINT(0x02b7, 2, 0x4900),
+       /* MSR_OFFCORE_RSP_0 bit 38 can be used only on PMC0 */
+       INTEL_EEVENT_CONSTRAINT(0x01b7, 1, 1ULL << 38),
+       EVENT_CONSTRAINT_END
+};
+
 struct event_constraint intel_skl_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
@@ -2284,16 +2295,16 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
 {
        struct event_constraint *c;
-
        if (x86_pmu.event_constraints) {
                for_each_event_constraint(c, x86_pmu.event_constraints) {
                        if ((event->hw.config & c->cmask) == c->code) {
+                               if (c->emask && !(c->emask & event->attr.config1))
+                                       continue;
                                event->hw.flags |= c->flags;
                                return c;
                        }
                }
        }
-
        return &unconstrained;
 }
 
@@ -3784,7 +3795,7 @@ __init int intel_pmu_init(void)
                       knl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
                intel_pmu_lbr_init_knl();
 
-               x86_pmu.event_constraints = intel_slm_event_constraints;
+               x86_pmu.event_constraints = intel_knl_event_constraints;
                x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
                x86_pmu.extra_regs = intel_knl_extra_regs;
 
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index fce7406..fc5b866 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -839,7 +839,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid)
        type->pmus = pmus;
        type->unconstrainted = (struct event_constraint)
                __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
-                               0, type->num_counters, 0, 0);
+                               0, type->num_counters, 0, 0, 0);
 
        if (type->event_descs) {
                for (i = 0; type->event_descs[i].attr.attr.name; i++);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 8bd764d..47241ed5 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -52,6 +52,7 @@ struct event_constraint {
        int     weight;
        int     overlap;
        int     flags;
+       u64     emask;
 };
 /*
  * struct hw_perf_event.flags flags
@@ -239,21 +240,22 @@ struct cpu_hw_events {
        void                            *kfree_on_online[X86_PERF_KFREE_MAX];
 };
 
-#define __EVENT_CONSTRAINT(c, n, m, w, o, f) {\
+#define __EVENT_CONSTRAINT(c, n, m, w, o, f, e) {\
        { .idxmsk64 = (n) },            \
        .code = (c),                    \
        .cmask = (m),                   \
        .weight = (w),                  \
        .overlap = (o),                 \
-       .flags = f,                     \
+       .flags = (f),                   \
+       .emask = (e),                   \
 }
 
 #define EVENT_CONSTRAINT(c, n, m)      \
-       __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0)
+       __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 0, 0, 0)
 
 #define INTEL_EXCLEVT_CONSTRAINT(c, n) \
        __EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT, HWEIGHT(n),\
-                          0, PERF_X86_EVENT_EXCL)
+                          0, PERF_X86_EVENT_EXCL, 0)
 
 /*
  * The overlap flag marks event constraints with overlapping counter
@@ -277,7 +279,7 @@ struct cpu_hw_events {
  * and its counter masks must be kept at a minimum.
  */
 #define EVENT_CONSTRAINT_OVERLAP(c, n, m)      \
-       __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0)
+       __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n), 1, 0, 0)
 
 /*
  * Constraint on the Event code.
@@ -286,6 +288,12 @@ struct cpu_hw_events {
        EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
 
 /*
+ * Constraint on the Extended Event code
+ */
+#define INTEL_EEVENT_CONSTRAINT(c, n, e) \
+       __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, HWEIGHT(n), 0, 0, e)
+
+/*
  * Constraint on the Event code + UMask + fixed-mask
  *
  * filter mask to validate fixed counter events.
@@ -318,15 +326,15 @@ struct cpu_hw_events {
 
 #define INTEL_EXCLUEVT_CONSTRAINT(c, n)        \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
-                          HWEIGHT(n), 0, PERF_X86_EVENT_EXCL)
+                          HWEIGHT(n), 0, PERF_X86_EVENT_EXCL, 0)
 
 #define INTEL_PLD_CONSTRAINT(c, n)     \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
-                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT)
+                          HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LDLAT, 0)
 
 #define INTEL_PST_CONSTRAINT(c, n)     \
        __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
-                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST)
+                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST, 0)
 
 /* Event constraint, but match on all event flags too. */
 #define INTEL_FLAGS_EVENT_CONSTRAINT(c, n) \
@@ -340,50 +348,49 @@ struct cpu_hw_events {
 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
-                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
+                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW, 0)
 
 /* Check flags and event code, and set the HSW load flag */
 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
-                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW, 0)
 
 #define INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          ARCH_PERFMON_EVENTSEL_EVENT|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
-                         PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
+                         PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL, 0)
 
 /* Check flags and event code/umask, and set the HSW store flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
-                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
+                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW, 0)
 
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
-                         PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL)
+                         PERF_X86_EVENT_PEBS_ST_HSW|PERF_X86_EVENT_EXCL, 0)
 
 /* Check flags and event code/umask, and set the HSW load flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
-                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW)
+                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_LD_HSW, 0)
 
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
                          HWEIGHT(n), 0, \
-                         PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL)
+                         PERF_X86_EVENT_PEBS_LD_HSW|PERF_X86_EVENT_EXCL, 0)
 
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
        __EVENT_CONSTRAINT(code, n,                     \
                          INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
-                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
-
+                         HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW, 0)
 
 /*
  * We define the end marker as having a weight of -1
-- 
1.8.3.1
