There are event code updates for some of the generic events and cache
events for power10. In order to keep the current event codes working on
DD1 as well, create new arrays of generic_events, cache_events and
pmu_attr_groups with the suffix _dd1, for example
power10_events_attr_dd1, so that further event code updates can be made
in the original lists, i.e. power10_events_attr. Update the power10 PMU
init code to pick the _dd1 lists when registering the power PMU, based
on the PVR (Processor Version Register) value.
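For reference, below is a minimal sketch (not part of the patch) of how
the DD1 check is derived from the PVR. It assumes the pvr value is read
from SPRN_PVR inside init_power10_pmu(), and it uses the PVR_CFG()
accessor from arch/powerpc/include/asm/reg.h; the power10_is_dd1()
helper name is hypothetical and used only for illustration.

	#include <linux/types.h>	/* bool */
	#include <asm/reg.h>		/* mfspr(), SPRN_PVR, PVR_CFG() */

	static bool power10_is_dd1(void)
	{
		/* Read the Processor Version Register. */
		unsigned int pvr = mfspr(SPRN_PVR);

		/*
		 * PVR_CFG() extracts the configuration field of the PVR.
		 * A value of 1 is taken to indicate a DD1 part, which is
		 * the condition this patch uses to select the _dd1 event
		 * tables before registering the PMU.
		 */
		return PVR_CFG(pvr) == 1;
	}

With such a check, init_power10_pmu() can point the generic_events,
cache_events and attr_groups members of power10_pmu at the _dd1
variants before calling register_power_pmu(), which is what the final
hunk of this patch does.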

Signed-off-by: Athira Rajeev <atraj...@linux.vnet.ibm.com>
---
 arch/powerpc/perf/power10-pmu.c | 152 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 152 insertions(+)

diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index 88c5430..bc3d4dd 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -129,6 +129,31 @@ static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
 CACHE_EVENT_ATTR(dTLB-load-misses,             PM_DTLB_MISS);
 CACHE_EVENT_ATTR(iTLB-load-misses,             PM_ITLB_MISS);
 
+static struct attribute *power10_events_attr_dd1[] = {
+       GENERIC_EVENT_PTR(PM_RUN_CYC),
+       GENERIC_EVENT_PTR(PM_RUN_INST_CMPL),
+       GENERIC_EVENT_PTR(PM_BR_CMPL),
+       GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
+       GENERIC_EVENT_PTR(PM_LD_REF_L1),
+       GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+       GENERIC_EVENT_PTR(MEM_LOADS),
+       GENERIC_EVENT_PTR(MEM_STORES),
+       CACHE_EVENT_PTR(PM_LD_MISS_L1),
+       CACHE_EVENT_PTR(PM_LD_REF_L1),
+       CACHE_EVENT_PTR(PM_LD_PREFETCH_CACHE_LINE_MISS),
+       CACHE_EVENT_PTR(PM_ST_MISS_L1),
+       CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
+       CACHE_EVENT_PTR(PM_INST_FROM_L1),
+       CACHE_EVENT_PTR(PM_IC_PREF_REQ),
+       CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
+       CACHE_EVENT_PTR(PM_DATA_FROM_L3),
+       CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
+       CACHE_EVENT_PTR(PM_BR_CMPL),
+       CACHE_EVENT_PTR(PM_DTLB_MISS),
+       CACHE_EVENT_PTR(PM_ITLB_MISS),
+       NULL
+};
+
 static struct attribute *power10_events_attr[] = {
        GENERIC_EVENT_PTR(PM_RUN_CYC),
        GENERIC_EVENT_PTR(PM_RUN_INST_CMPL),
@@ -154,6 +179,11 @@ static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
        NULL
 };
 
+static struct attribute_group power10_pmu_events_group_dd1 = {
+       .name = "events",
+       .attrs = power10_events_attr_dd1,
+};
+
 static struct attribute_group power10_pmu_events_group = {
        .name = "events",
        .attrs = power10_events_attr,
@@ -205,12 +235,27 @@ static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
        .attrs = power10_pmu_format_attr,
 };
 
+static const struct attribute_group *power10_pmu_attr_groups_dd1[] = {
+       &power10_pmu_format_group,
+       &power10_pmu_events_group_dd1,
+       NULL,
+};
+
 static const struct attribute_group *power10_pmu_attr_groups[] = {
        &power10_pmu_format_group,
        &power10_pmu_events_group,
        NULL,
 };
 
+static int power10_generic_events_dd1[] = {
+       [PERF_COUNT_HW_CPU_CYCLES] =                    PM_RUN_CYC,
+       [PERF_COUNT_HW_INSTRUCTIONS] =                  PM_RUN_INST_CMPL,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =           PM_BR_CMPL,
+       [PERF_COUNT_HW_BRANCH_MISSES] =                 PM_BR_MPRED_CMPL,
+       [PERF_COUNT_HW_CACHE_REFERENCES] =              PM_LD_REF_L1,
+       [PERF_COUNT_HW_CACHE_MISSES] =                  PM_LD_MISS_L1,
+};
+
 static int power10_generic_events[] = {
        [PERF_COUNT_HW_CPU_CYCLES] =                    PM_RUN_CYC,
        [PERF_COUNT_HW_INSTRUCTIONS] =                  PM_RUN_INST_CMPL,
@@ -276,6 +321,107 @@ static void power10_config_bhrb(u64 pmu_bhrb_filter)
  * 0 means not supported, -1 means nonsensical, other values
  * are event codes.
  */
+static u64 power10_cache_events_dd1[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+       [C(L1D)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = PM_LD_REF_L1,
+                       [C(RESULT_MISS)] = PM_LD_MISS_L1,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = 0,
+                       [C(RESULT_MISS)] = PM_ST_MISS_L1,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = PM_LD_PREFETCH_CACHE_LINE_MISS,
+                       [C(RESULT_MISS)] = 0,
+               },
+       },
+       [C(L1I)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = PM_INST_FROM_L1,
+                       [C(RESULT_MISS)] = PM_L1_ICACHE_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = PM_INST_FROM_L1MISS,
+                       [C(RESULT_MISS)] = -1,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = PM_IC_PREF_REQ,
+                       [C(RESULT_MISS)] = 0,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = PM_DATA_FROM_L3,
+                       [C(RESULT_MISS)] = PM_DATA_FROM_L3MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = -1,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = 0,
+               },
+       },
+       [C(DTLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = 0,
+                       [C(RESULT_MISS)] = PM_DTLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = -1,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = -1,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = 0,
+                       [C(RESULT_MISS)] = PM_ITLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = -1,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = -1,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = PM_BR_CMPL,
+                       [C(RESULT_MISS)] = PM_BR_MPRED_CMPL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = -1,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = -1,
+               },
+       },
+       [C(NODE)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = -1,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = -1,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)] = -1,
+                       [C(RESULT_MISS)] = -1,
+               },
+       },
+};
+
 static u64 power10_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
@@ -422,6 +568,12 @@ int init_power10_pmu(void)
        /* Set the PERF_REG_EXTENDED_MASK here */
        PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;
 
+       if (PVR_CFG(pvr) == 1) {
+               power10_pmu.generic_events = power10_generic_events_dd1;
+               power10_pmu.attr_groups = power10_pmu_attr_groups_dd1;
+               power10_pmu.cache_events = &power10_cache_events_dd1;
+       }
+
        rc = register_power_pmu(&power10_pmu);
        if (rc)
                return rc;
-- 
1.8.3.1
