Re: [kvm-unit-tests PATCH v4 07/12] arm: pmu: Basic event counter Tests

2022-09-20 Thread Zenghui Yu via

Hi Eric,

On 2022/9/20 17:23, Eric Auger wrote:

Hi Zenghui,

On 9/19/22 16:30, Zenghui Yu wrote:

Hi Eric,

A few comments when looking through the PMU test code (2 years after
the series was merged).


Thank you for reviewing even after this time! Do you want to address the
issues yourself and send a patch series or do you prefer I proceed?


It'd be great if you could help to proceed. I'm afraid that I don't
have enough time to deal with it in the next few days.

Thanks,
Zenghui



Re: [kvm-unit-tests PATCH v4 07/12] arm: pmu: Basic event counter Tests

2022-09-20 Thread Eric Auger
Hi Zenghui,

On 9/19/22 16:30, Zenghui Yu wrote:
> Hi Eric,
>
> A few comments when looking through the PMU test code (2 years after
> the series was merged).

Thank you for reviewing even after this time! Do you want to address the
issues yourself and send a patch series or do you prefer I proceed?

Thanks

Eric
> On 2020/4/3 15:13, Eric Auger wrote:
>> Adds the following tests:
>> - event-counter-config: test event counter configuration
>> - basic-event-count:
>>   - programs counters #0 and #1 to count 2 required events
>>   (resp. CPU_CYCLES and INST_RETIRED). Counter #0 is preset
>>   to a value close enough to the 32b
>>   overflow limit so that we check the overflow bit is set
>>   after the execution of the asm loop.
>> - mem-access: counts MEM_ACCESS event on counters #0 and #1
>>   with and without 32-bit overflow.
>>
>> Signed-off-by: Eric Auger 
>> Reviewed-by: Andre Przywara 
>
> [...]
>
>> diff --git a/arm/pmu.c b/arm/pmu.c
>> index 8c49e50..45dccf7 100644
>> --- a/arm/pmu.c
>> +++ b/arm/pmu.c
>> @@ -38,6 +43,7 @@
>>  #define SW_INCR    0x0
>>  #define INST_RETIRED    0x8
>>  #define CPU_CYCLES    0x11
>> +#define MEM_ACCESS    0x13
>>  #define INST_PREC    0x1B
>
> The spec spells event 0x001B as INST_SPEC.
>
>>  #define STALL_FRONTEND    0x23
>>  #define STALL_BACKEND    0x24
>
> [...]
>
>> @@ -175,6 +188,11 @@ static inline void precise_instrs_loop(int loop,
>> uint32_t pmcr)
>>  }
>>  
>>  #define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
>> +#define PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
>> +#define PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
>> +
>> +#define PMEVTYPER_EXCLUDE_EL1 BIT(31)
>
> Unused macro.
>
>> +#define PMEVTYPER_EXCLUDE_EL0 BIT(30)
>>  
>>  static bool is_event_supported(uint32_t n, bool warn)
>>  {
>> @@ -228,6 +246,224 @@ static void test_event_introspection(void)
>>  report(required_events, "Check required events are implemented");
>>  }
>>  
>> +/*
>> + * Extra instructions inserted by the compiler would be difficult to
>> compensate
>> + * for, so hand assemble everything between, and including, the PMCR
>> accesses
>> + * to start and stop counting. isb instructions are inserted to make
>> sure
>> + * pmccntr read after this function returns the exact instructions
>> executed
>> + * in the controlled block. Loads @loop times the data at @address
>> into x9.
>> + */
>> +static void mem_access_loop(void *addr, int loop, uint32_t pmcr)
>> +{
>> +asm volatile(
>> +    "   msr pmcr_el0, %[pmcr]\n"
>> +    "   isb\n"
>> +    "   mov x10, %[loop]\n"
>> +    "1: sub x10, x10, #1\n"
>> +    "   ldr    x9, [%[addr]]\n"
>> +    "   cmp x10, #0x0\n"
>> +    "   b.gt    1b\n"
>> +    "   msr pmcr_el0, xzr\n"
>> +    "   isb\n"
>> +    :
>> +    : [addr] "r" (addr), [pmcr] "r" (pmcr), [loop] "r" (loop)
>> +    : "x9", "x10", "cc");
>> +}
>> +
>> +static void pmu_reset(void)
>> +{
>> +    /* reset all counters, counting disabled at PMCR level*/
>> +    set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
>> +    /* Disable all counters */
>> +    write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
>> +    /* clear overflow reg */
>> +    write_sysreg(ALL_SET, pmovsclr_el0);
>> +    /* disable overflow interrupts on all counters */
>> +    write_sysreg(ALL_SET, pmintenclr_el1);
>> +    isb();
>> +}
>> +
>> +static void test_event_counter_config(void)
>> +{
>> +    int i;
>> +
>> +    if (!pmu.nb_implemented_counters) {
>> +    report_skip("No event counter, skip ...");
>> +    return;
>> +    }
>> +
>> +    pmu_reset();
>> +
>> +    /*
>> + * Test setting through PMESELR/PMXEVTYPER and PMEVTYPERn read,
>
> s/PMESELR/PMSELR/ ?
>
>> + * select counter 0
>> + */
>> +    write_sysreg(1, PMSELR_EL0);
>> +    /* program this counter to count unsupported event */
>> +    write_sysreg(0xEA, PMXEVTYPER_EL0);
>> +    write_sysreg(0xdeadbeef, PMXEVCNTR_EL0);
>> +    report((read_regn_el0(pmevtyper, 1) & 0xFFF) == 0xEA,
>> +    "PMESELR/PMXEVTYPER/PMEVTYPERn");
>
> Ditto
>
>> +    report((read_regn_el0(pmevcntr, 1) == 0xdeadbeef),
>> +    "PMESELR/PMXEVCNTR/PMEVCNTRn");
>
> Ditto
>
>> +
>> +    /* try to configure an unsupported event within the range [0x0,
>> 0x3F] */
>> +    for (i = 0; i <= 0x3F; i++) {
>> +    if (!is_event_supported(i, false))
>> +    break;
>> +    }
>> +    if (i > 0x3F) {
>> +    report_skip("pmevtyper: all events within [0x0, 0x3F] are
>> supported");
>> +    return;
>> +    }
>> +
>> +    /* select counter 0 */
>> +    write_sysreg(0, PMSELR_EL0);
>> +    /* program this counter to count unsupported event */
>
> We get the unsupported event number *i* but don't program it into
> PMXEVTYPER_EL0 (or PMEVTYPER0_EL0). Is it intentional?
>
>> +    write_sysreg(i, PMXEVCNTR_EL0);
>> +    /* read the counter value */
>> +    read_sysreg(PMXEVCNTR_EL0);
>> +    report(read_sysreg(PMXEVCNTR_EL0) == i,
>> +    "read of a counter programmed with 

Re: [kvm-unit-tests PATCH v4 07/12] arm: pmu: Basic event counter Tests

2022-09-19 Thread Andrew Jones
On Mon, Sep 19, 2022 at 10:30:01PM +0800, Zenghui Yu wrote:
> Hi Eric,
> 
> A few comments when looking through the PMU test code (2 years after
> the series was merged).

Yes, these patches were merged long ago. Now you need to send patches,
not comments.

Thanks,
drew



Re: [kvm-unit-tests PATCH v4 07/12] arm: pmu: Basic event counter Tests

2022-09-19 Thread Zenghui Yu via

Hi Eric,

A few comments when looking through the PMU test code (2 years after
the series was merged).

On 2020/4/3 15:13, Eric Auger wrote:

Adds the following tests:
- event-counter-config: test event counter configuration
- basic-event-count:
  - programs counters #0 and #1 to count 2 required events
  (resp. CPU_CYCLES and INST_RETIRED). Counter #0 is preset
  to a value close enough to the 32b
  overflow limit so that we check the overflow bit is set
  after the execution of the asm loop.
- mem-access: counts MEM_ACCESS event on counters #0 and #1
  with and without 32-bit overflow.

Signed-off-by: Eric Auger 
Reviewed-by: Andre Przywara 


[...]


diff --git a/arm/pmu.c b/arm/pmu.c
index 8c49e50..45dccf7 100644
--- a/arm/pmu.c
+++ b/arm/pmu.c
@@ -38,6 +43,7 @@
 #define SW_INCR        0x0
 #define INST_RETIRED   0x8
 #define CPU_CYCLES 0x11
+#define MEM_ACCESS 0x13
 #define INST_PREC  0x1B


The spec spells event 0x001B as INST_SPEC.


 #define STALL_FRONTEND 0x23
 #define STALL_BACKEND  0x24


[...]


@@ -175,6 +188,11 @@ static inline void precise_instrs_loop(int loop, uint32_t 
pmcr)
 }
 
 #define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)

+#define PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
+#define PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
+
+#define PMEVTYPER_EXCLUDE_EL1 BIT(31)


Unused macro.


+#define PMEVTYPER_EXCLUDE_EL0 BIT(30)
 
 static bool is_event_supported(uint32_t n, bool warn)

 {
@@ -228,6 +246,224 @@ static void test_event_introspection(void)
report(required_events, "Check required events are implemented");
 }
 
+/*

+ * Extra instructions inserted by the compiler would be difficult to compensate
+ * for, so hand assemble everything between, and including, the PMCR accesses
+ * to start and stop counting. isb instructions are inserted to make sure
+ * pmccntr read after this function returns the exact instructions executed
+ * in the controlled block. Loads @loop times the data at @address into x9.
+ */
+static void mem_access_loop(void *addr, int loop, uint32_t pmcr)
+{
+asm volatile(
+   "   msr pmcr_el0, %[pmcr]\n"
+   "   isb\n"
+   "   mov x10, %[loop]\n"
+   "1: sub x10, x10, #1\n"
+   "   ldr x9, [%[addr]]\n"
+   "   cmp x10, #0x0\n"
+   "   b.gt 1b\n"
+   "   msr pmcr_el0, xzr\n"
+   "   isb\n"
+   :
+   : [addr] "r" (addr), [pmcr] "r" (pmcr), [loop] "r" (loop)
+   : "x9", "x10", "cc");
+}
+
+static void pmu_reset(void)
+{
+   /* reset all counters, counting disabled at PMCR level*/
+   set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
+   /* Disable all counters */
+   write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
+   /* clear overflow reg */
+   write_sysreg(ALL_SET, pmovsclr_el0);
+   /* disable overflow interrupts on all counters */
+   write_sysreg(ALL_SET, pmintenclr_el1);
+   isb();
+}
+
+static void test_event_counter_config(void)
+{
+   int i;
+
+   if (!pmu.nb_implemented_counters) {
+   report_skip("No event counter, skip ...");
+   return;
+   }
+
+   pmu_reset();
+
+   /*
+* Test setting through PMESELR/PMXEVTYPER and PMEVTYPERn read,


s/PMESELR/PMSELR/ ?


+* select counter 0
+*/
+   write_sysreg(1, PMSELR_EL0);
+   /* program this counter to count unsupported event */
+   write_sysreg(0xEA, PMXEVTYPER_EL0);
+   write_sysreg(0xdeadbeef, PMXEVCNTR_EL0);
+   report((read_regn_el0(pmevtyper, 1) & 0xFFF) == 0xEA,
+   "PMESELR/PMXEVTYPER/PMEVTYPERn");


Ditto


+   report((read_regn_el0(pmevcntr, 1) == 0xdeadbeef),
+   "PMESELR/PMXEVCNTR/PMEVCNTRn");


Ditto


+
+   /* try to configure an unsupported event within the range [0x0, 0x3F] */
+   for (i = 0; i <= 0x3F; i++) {
+   if (!is_event_supported(i, false))
+   break;
+   }
+   if (i > 0x3F) {
+   report_skip("pmevtyper: all events within [0x0, 0x3F] are 
supported");
+   return;
+   }
+
+   /* select counter 0 */
+   write_sysreg(0, PMSELR_EL0);
+   /* program this counter to count unsupported event */


We get the unsupported event number *i* but don't program it into
PMXEVTYPER_EL0 (or PMEVTYPER0_EL0). Is it intentional?


+   write_sysreg(i, PMXEVCNTR_EL0);
+   /* read the counter value */
+   read_sysreg(PMXEVCNTR_EL0);
+   report(read_sysreg(PMXEVCNTR_EL0) == i,
+   "read of a counter programmed with unsupported event");
+
+}


[...]


+
+static void test_mem_access(void)
+{
+   void *addr = malloc(PAGE_SIZE);


*addr* isn't freed at the end of test_mem_access(). The same in
test_chain_promotion() and test_overflow_interrupt().

Zenghui



[kvm-unit-tests PATCH v4 07/12] arm: pmu: Basic event counter Tests

2020-04-03 Thread Eric Auger
Adds the following tests:
- event-counter-config: test event counter configuration
- basic-event-count:
  - programs counters #0 and #1 to count 2 required events
  (resp. CPU_CYCLES and INST_RETIRED). Counter #0 is preset
  to a value close enough to the 32b
  overflow limit so that we check the overflow bit is set
  after the execution of the asm loop.
- mem-access: counts MEM_ACCESS event on counters #0 and #1
  with and without 32-bit overflow.

Signed-off-by: Eric Auger 
Reviewed-by: Andre Przywara 

---

v2 -> v3:
- used BIT() for PMEVTYPER_EXCLUDE_EL1/0
- print the event id in hexa (Peter)
- added prefix pop
- s/write_regn/write_regn_el0
- s/read_regn/read_regn_el0
- remove print_pmevtyper
- moved write/read_regn_el0 into lib/arm64/asm/sysreg.h
- added pmu prefix to the test names
- Kept Andre's R-b as functional code has not changed

v1 -> v2:
- fix PMCNTENSET_EL0 and PMCNTENCLR_EL0 op0
- print PMEVTYPER SH
- properly clobber used regs and add "cc"
- simplify mem_access_loop
---
 arm/pmu.c  | 248 +
 arm/unittests.cfg  |  18 +++
 lib/arm64/asm/sysreg.h |   6 +
 3 files changed, 272 insertions(+)

diff --git a/arm/pmu.c b/arm/pmu.c
index 8c49e50..45dccf7 100644
--- a/arm/pmu.c
+++ b/arm/pmu.c
@@ -19,9 +19,14 @@
 #include "asm/sysreg.h"
 #include "asm/processor.h"
 #include 
+#include 
 
 #define PMU_PMCR_E (1 << 0)
+#define PMU_PMCR_P (1 << 1)
 #define PMU_PMCR_C (1 << 2)
+#define PMU_PMCR_D (1 << 3)
+#define PMU_PMCR_X (1 << 4)
+#define PMU_PMCR_DP    (1 << 5)
 #define PMU_PMCR_LC    (1 << 6)
 #define PMU_PMCR_N_SHIFT   11
 #define PMU_PMCR_N_MASK    0x1f
@@ -38,6 +43,7 @@
 #define SW_INCR0x0
 #define INST_RETIRED   0x8
 #define CPU_CYCLES 0x11
+#define MEM_ACCESS 0x13
 #define INST_PREC  0x1B
 #define STALL_FRONTEND 0x23
 #define STALL_BACKEND  0x24
@@ -47,6 +53,10 @@
 #define EXT_COMMON_EVENTS_LOW  0x4000
 #define EXT_COMMON_EVENTS_HIGH 0x403F
 
+#define ALL_SET        0xFFFFFFFF
+#define ALL_CLEAR      0x0
+#define PRE_OVERFLOW   0xFFFFFFF0
+
 struct pmu {
unsigned int version;
unsigned int nb_implemented_counters;
@@ -127,6 +137,9 @@ static inline void precise_instrs_loop(int loop, uint32_t 
pmcr)
 
 /* event counter tests only implemented for aarch64 */
 static void test_event_introspection(void) {}
+static void test_event_counter_config(void) {}
+static void test_basic_event_count(void) {}
+static void test_mem_access(void) {}
 
 #elif defined(__aarch64__)
 #define ID_AA64DFR0_PERFMON_SHIFT 8
@@ -175,6 +188,11 @@ static inline void precise_instrs_loop(int loop, uint32_t 
pmcr)
 }
 
 #define PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
+#define PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
+#define PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
+
+#define PMEVTYPER_EXCLUDE_EL1 BIT(31)
+#define PMEVTYPER_EXCLUDE_EL0 BIT(30)
 
 static bool is_event_supported(uint32_t n, bool warn)
 {
@@ -228,6 +246,224 @@ static void test_event_introspection(void)
report(required_events, "Check required events are implemented");
 }
 
+/*
+ * Extra instructions inserted by the compiler would be difficult to compensate
+ * for, so hand assemble everything between, and including, the PMCR accesses
+ * to start and stop counting. isb instructions are inserted to make sure
+ * pmccntr read after this function returns the exact instructions executed
+ * in the controlled block. Loads @loop times the data at @address into x9.
+ */
+static void mem_access_loop(void *addr, int loop, uint32_t pmcr)
+{
+asm volatile(
+   "   msr pmcr_el0, %[pmcr]\n"
+   "   isb\n"
+   "   mov x10, %[loop]\n"
+   "1: sub x10, x10, #1\n"
+   "   ldr x9, [%[addr]]\n"
+   "   cmp x10, #0x0\n"
+   "   b.gt 1b\n"
+   "   msr pmcr_el0, xzr\n"
+   "   isb\n"
+   :
+   : [addr] "r" (addr), [pmcr] "r" (pmcr), [loop] "r" (loop)
+   : "x9", "x10", "cc");
+}
+
+static void pmu_reset(void)
+{
+   /* reset all counters, counting disabled at PMCR level*/
+   set_pmcr(pmu.pmcr_ro | PMU_PMCR_LC | PMU_PMCR_C | PMU_PMCR_P);
+   /* Disable all counters */
+   write_sysreg_s(ALL_SET, PMCNTENCLR_EL0);
+   /* clear overflow reg */
+   write_sysreg(ALL_SET, pmovsclr_el0);
+   /* disable overflow interrupts on all counters */
+   write_sysreg(ALL_SET, pmintenclr_el1);
+   isb();
+}
+
+static void test_event_counter_config(void)
+{
+   int i;
+
+   if (!pmu.nb_implemented_counters) {
+   report_skip("No event counter, skip ...");
+   return;
+   }
+
+   pmu_reset();
+
+   /*
+* Test setting through PMESELR/PMXEVTYPER and PMEVTYPERn read,
+* select counter 0
+*/
+   write_sysreg(1, PMSELR_EL0);
+   /* program this counter to count un