From: David Mercado <david.merc...@windriver.com> Add PMU support to the Axxia AXM55xx platform. Note that on this platform all PMU IRQ lines are OR'ed together into a single IRQ; this implementation therefore uses a rotating IRQ affinity scheme to handle it.
Signed-off-by: David Mercado <david.merc...@windriver.com> --- arch/arm/kernel/perf_event_v7.c | 21 +++++++++++---------- arch/arm/mach-axxia/axxia-gic.c | 25 +++++++++++++++++++++++-- arch/arm/mach-axxia/axxia.c | 2 +- 3 files changed, 35 insertions(+), 13 deletions(-) diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 039cffb..13e1a5c 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -93,6 +93,7 @@ enum armv7_a5_perf_types { /* ARMv7 Cortex-A15 specific event types */ enum armv7_a15_perf_types { + ARMV7_A15_PERFCTR_CPU_CYCLES = 0x11, ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40, ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41, ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42, @@ -487,7 +488,7 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] * Cortex-A15 HW events mapping */ static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { - [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_A15_PERFCTR_CPU_CYCLES, [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, @@ -917,34 +918,34 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu) u32 val; unsigned int cnt; - printk(KERN_INFO "PMNC registers dump:\n"); + pr_info("PMNC registers dump:\n"); asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); - printk(KERN_INFO "PMNC =0x%08x\n", val); + pr_info("PMNC =0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); - printk(KERN_INFO "CNTENS=0x%08x\n", val); + pr_info("CNTENS=0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); - printk(KERN_INFO "INTENS=0x%08x\n", val); + pr_info("INTENS=0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); - printk(KERN_INFO "FLAGS =0x%08x\n", val); + pr_info("FLAGS =0x%08x\n", val); asm 
volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); - printk(KERN_INFO "SELECT=0x%08x\n", val); + pr_info("SELECT=0x%08x\n", val); asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); - printk(KERN_INFO "CCNT =0x%08x\n", val); + pr_info("CCNT =0x%08x\n", val); for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) { armv7_pmnc_select_counter(cnt); asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); - printk(KERN_INFO "CNT[%d] count =0x%08x\n", + pr_info("CNT[%d] count =0x%08x\n", ARMV7_IDX_TO_COUNTER(cnt), val); asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); - printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", + pr_info("CNT[%d] evtsel=0x%08x\n", ARMV7_IDX_TO_COUNTER(cnt), val); } } diff --git a/arch/arm/mach-axxia/axxia-gic.c b/arch/arm/mach-axxia/axxia-gic.c index d2edcef..5aefa42 100644 --- a/arch/arm/mach-axxia/axxia-gic.c +++ b/arch/arm/mach-axxia/axxia-gic.c @@ -283,6 +283,10 @@ static void gic_mask_irq(struct irq_data *d) if ((irqid >= IPI0_CPU0) && (irqid < MAX_AXM_IPI_NUM)) return; + /* Don't mess with the PMU IRQ either. */ + if (irqid == IRQ_PMU) + return; + /* Deal with PPI interrupts directly. */ if ((irqid > 16) && (irqid < 32)) { _gic_mask_irq(d); @@ -327,6 +331,10 @@ static void gic_unmask_irq(struct irq_data *d) if ((irqid >= IPI0_CPU0) && (irqid < MAX_AXM_IPI_NUM)) return; + /* Don't mess with the PMU IRQ either. */ + if (irqid == IRQ_PMU) + return; + /* Deal with PPI interrupts directly. */ if ((irqid > 15) && (irqid < 32)) { _gic_unmask_irq(d); @@ -565,8 +573,8 @@ static int gic_set_affinity(struct irq_data *d, * different than the prior cluster, remove the IRQ affinity * on the old cluster. 
*/ - if ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) != - (irq_cpuid[irqid] / CORES_PER_CLUSTER)) { + if ((irqid != IRQ_PMU) && ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) != + (irq_cpuid[irqid] / CORES_PER_CLUSTER))) { /* * If old cpu assignment falls within the same cluster as * the cpu we're currently running on, set the IRQ affinity @@ -776,6 +784,11 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic) } /* + * Set the PMU IRQ to the first cpu in this cluster. + */ + writeb_relaxed(0x01, base + GIC_DIST_TARGET + IRQ_PMU); + + /* * Set Axxia IPI interrupts to be edge triggered. */ for (i = IPI0_CPU0; i < MAX_AXM_IPI_NUM; i++) { @@ -798,6 +811,14 @@ static void __cpuinit gic_dist_init(struct gic_chip_data *gic) base + GIC_DIST_ENABLE_SET + enableoff); } + /* + * Do the initial enable of the PMU IRQ here. + */ + enablemask = 1 << (IRQ_PMU % 32); + enableoff = (IRQ_PMU / 32) * 4; + writel_relaxed(enablemask, + base + GIC_DIST_ENABLE_SET + enableoff); + writel_relaxed(1, base + GIC_DIST_CTRL); } diff --git a/arch/arm/mach-axxia/axxia.c b/arch/arm/mach-axxia/axxia.c index 421b050..9646672 100644 --- a/arch/arm/mach-axxia/axxia.c +++ b/arch/arm/mach-axxia/axxia.c @@ -161,7 +161,7 @@ static struct resource axxia_pmu_resources[] = { }; /* - * The PMU IRQ lines of two cores are wired together into a single interrupt. + * The PMU IRQ lines of four cores are wired together into a single interrupt. * Bounce the interrupt to other cores if it's not ours. */ #define CORES_PER_CLUSTER 4 -- 1.7.9.5 _______________________________________________ linux-yocto mailing list linux-yocto@yoctoproject.org https://lists.yoctoproject.org/listinfo/linux-yocto