Hi Andrew,

On 05/21/2019 04:52 PM, Andrew Murray wrote:
> ARMv8 provides support for chained PMU counters: when an event type of
> 0x001E is set for an odd-numbered counter, that counter increments by
> one for each overflow of the preceding even-numbered counter. Let's
> emulate this in KVM by creating a 64-bit perf counter when a user
> chains two emulated counters together.
> 
> For chained events we only support generating an overflow interrupt
> on the high counter. We use the attributes of the low counter to
> determine the attributes of the perf event.
> 
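(Side note for readers less familiar with chaining: the guest-visible
programming model being emulated is roughly the sketch below. The helper
names are only shorthand for the corresponding system register writes,
not real APIs, and the counter numbers are arbitrary.)

	/* write_pmevtyper()/write_pmcntenset(): shorthand, not real helpers */
	write_pmevtyper(2, 0x11);          /* even counter: count CPU_CYCLES */
	write_pmevtyper(3, 0x1E);          /* odd counter: the CHAIN event */
	write_pmcntenset(BIT(2) | BIT(3)); /* enable the pair */
	/*
	 * Counter 3 now advances by one on each overflow of counter 2, so the
	 * pair behaves as a single 64-bit counter; this series backs it with
	 * one 64-bit perf event on the low (even) counter, with the overflow
	 * interrupt raised on the high counter only.
	 */
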
> Suggested-by: Marc Zyngier <marc.zyng...@arm.com>
> Signed-off-by: Andrew Murray <andrew.mur...@arm.com>
> ---
>  include/kvm/arm_pmu.h |   2 +
>  virt/kvm/arm/pmu.c    | 246 ++++++++++++++++++++++++++++++++++++------
>  2 files changed, 215 insertions(+), 33 deletions(-)
> 
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index b73f31baca52..8b434745500a 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -22,6 +22,7 @@
>  #include <asm/perf_event.h>
>  
>  #define ARMV8_PMU_CYCLE_IDX          (ARMV8_PMU_MAX_COUNTERS - 1)
> +#define ARMV8_PMU_MAX_COUNTER_PAIRS  ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
>  
>  #ifdef CONFIG_KVM_ARM_PMU
>  
> @@ -34,6 +35,7 @@ struct kvm_pmc {
>  struct kvm_pmu {
>       int irq_num;
>       struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
> +     DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
>       bool ready;
>       bool created;
>       bool irq_level;
> diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
> index ae1e886d4a1a..4b0981c402c6 100644
> --- a/virt/kvm/arm/pmu.c
> +++ b/virt/kvm/arm/pmu.c
> @@ -25,28 +25,128 @@
>  #include <kvm/arm_vgic.h>
>  
>  static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
> +
> +#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1
> +
> +static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
> +{
> +     struct kvm_pmu *pmu;
> +     struct kvm_vcpu_arch *vcpu_arch;
> +
> +     pmc -= pmc->idx;
> +     pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
> +     vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
> +     return container_of(vcpu_arch, struct kvm_vcpu, arch);
> +}
> +
>  /**
> - * kvm_pmu_get_counter_value - get PMU counter value
> + * kvm_pmu_pmc_is_chained - determine if the pmc is chained
> + * @pmc: The PMU counter pointer
> + */
> +static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
> +{
> +     struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
> +
> +     return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
> +}
> +
> +/**
> + * kvm_pmu_pmc_is_high_counter - determine if select_idx is a high/low counter
> + * @select_idx: The counter index
> + */
> +static bool kvm_pmu_pmc_is_high_counter(u64 select_idx)
> +{
> +     return select_idx & 0x1;
> +}
> +
> +/**
> + * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
> + * @pmc: The PMU counter pointer
> + *
> + * When a pair of PMCs are chained together we use the low counter (canonical)
> + * to hold the underlying perf event.
> + */
> +static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
> +{
> +     if (kvm_pmu_pmc_is_chained(pmc) &&
> +         kvm_pmu_pmc_is_high_counter(pmc->idx))
> +             return pmc - 1;
> +
> +     return pmc;
> +}
> +
> +/**
> + * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
>   * @vcpu: The vcpu pointer
>   * @select_idx: The counter index
>   */
> -u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
> +static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
>  {
> -     u64 counter, reg, enabled, running;
> -     struct kvm_pmu *pmu = &vcpu->arch.pmu;
> -     struct kvm_pmc *pmc = &pmu->pmc[select_idx];
> +     u64 eventsel, reg;
>  
> -     reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
> -           ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
> -     counter = __vcpu_sys_reg(vcpu, reg);
> +     select_idx |= 0x1;
> +
> +     if (select_idx == ARMV8_PMU_CYCLE_IDX)
> +             return false;
> +
> +     reg = PMEVTYPER0_EL0 + select_idx;
> +     eventsel = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_EVENT;
> +
> +     return armv8pmu_evtype_is_chain(eventsel);
> +}
> +
> +/**
> + * kvm_pmu_get_pair_counter_value - get PMU counter value
> + * @vcpu: The vcpu pointer
> + * @pmc: The PMU counter pointer
> + */
> +static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
> +                                       struct kvm_pmc *pmc)
> +{
> +     u64 counter, counter_high, reg, enabled, running;
> +
> +     if (kvm_pmu_pmc_is_chained(pmc)) {
> +             pmc = kvm_pmu_get_canonical_pmc(pmc);
> +             reg = PMEVCNTR0_EL0 + pmc->idx;
> +
> +             counter = __vcpu_sys_reg(vcpu, reg);
> +             counter_high = __vcpu_sys_reg(vcpu, reg + 1);
> +
> +             counter = lower_32_bits(counter) | (counter_high << 32);
> +     } else {
> +             reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
> +                   ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
> +             counter = __vcpu_sys_reg(vcpu, reg);
> +     }
>  
> -     /* The real counter value is equal to the value of counter register plus
> +     /*
> +      * The real counter value is equal to the value of counter register plus
>        * the value perf event counts.
>        */
>       if (pmc->perf_event)
>               counter += perf_event_read_value(pmc->perf_event, &enabled,
>                                                &running);
>  
> +     return counter;
> +}
> +
> +/**
> + * kvm_pmu_get_counter_value - get PMU counter value
> + * @vcpu: The vcpu pointer
> + * @select_idx: The counter index
> + */
> +u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
> +{
> +     u64 counter;
> +     struct kvm_pmu *pmu = &vcpu->arch.pmu;
> +     struct kvm_pmc *pmc = &pmu->pmc[select_idx];
> +
> +     counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
> +
> +     if (kvm_pmu_pmc_is_chained(pmc) &&
> +         kvm_pmu_pmc_is_high_counter(select_idx))
> +             counter >>= 32;
> +
>       return counter & pmc->bitmask;
>  }
>  
> @@ -74,6 +174,7 @@ void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
>   */
>  static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
>  {
> +     pmc = kvm_pmu_get_canonical_pmc(pmc);
>       if (pmc->perf_event) {
>               perf_event_disable(pmc->perf_event);
>               perf_event_release_kernel(pmc->perf_event);
> @@ -91,13 +192,24 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
>  {
>       u64 counter, reg;
>  
> -     if (pmc->perf_event) {
> +     pmc = kvm_pmu_get_canonical_pmc(pmc);
> +     if (!pmc->perf_event)
> +             return;
> +
> +     if (kvm_pmu_pmc_is_chained(pmc)) {
> +             counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
> +
> +             reg = PMEVCNTR0_EL0 + pmc->idx;
> +             __vcpu_sys_reg(vcpu, reg) = counter & pmc->bitmask;
> +             __vcpu_sys_reg(vcpu, reg + 1) = (counter >> 32) & pmc->bitmask;
> +     } else {
>               counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
>               reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
>                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
>               __vcpu_sys_reg(vcpu, reg) = counter;
> -             kvm_pmu_release_perf_event(pmc);
>       }
> +
> +     kvm_pmu_release_perf_event(pmc);
>  }
>  
>  /**
> @@ -115,6 +227,8 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
>               pmu->pmc[i].idx = i;
>               pmu->pmc[i].bitmask = 0xffffffffUL;
>       }
> +
> +     bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
>  }
>  
>  /**
> @@ -154,6 +268,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
>       int i;
>       struct kvm_pmu *pmu = &vcpu->arch.pmu;
>       struct kvm_pmc *pmc;
> +     struct perf_event *perf_event;
>  
>       if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
>               return;
> @@ -163,9 +278,21 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
>                       continue;
>  
>               pmc = &pmu->pmc[i];
> +
> +             /*
> +              * For high counters of chained events we must recreate the
> +              * perf event with the long (64bit) attribute set.
> +              */
> +             if (kvm_pmu_pmc_is_chained(pmc) &&
> +                 kvm_pmu_pmc_is_high_counter(i)) {
> +                     kvm_pmu_create_perf_event(vcpu, i);
> +                     continue;
> +             }
> +
> +             pmc = kvm_pmu_get_canonical_pmc(pmc);

But pmc is already canonical here, so we don't need to call
kvm_pmu_get_canonical_pmc(). The condition above is the same as the one
used in kvm_pmu_get_canonical_pmc(), so no "non canonical" pmc ever
reaches this point. A comment clarifying that fact would be enough.
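For instance (untested; everything except the comment is taken from the
hunk above):

		if (kvm_pmu_pmc_is_chained(pmc) &&
		    kvm_pmu_pmc_is_high_counter(i)) {
			kvm_pmu_create_perf_event(vcpu, i);
			continue;
		}

		/*
		 * pmc is necessarily the canonical (low) counter here: the
		 * high counter of a chained pair was already handled (and
		 * continue'd) above.
		 */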

>               if (pmc->perf_event) {
>                       perf_event_enable(pmc->perf_event);
> -                     if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
> +                     if (perf_event->state != PERF_EVENT_STATE_ACTIVE)

You forgot to assign perf_event before using it, so this reads an
uninitialized local (see the suggestion below this hunk).

>                               kvm_debug("fail to enable perf event\n");
>               }
>       }
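
Either assign it first (perf_event = pmc->perf_event) or, simpler, drop
the local and keep dereferencing pmc->perf_event as the current code
does, e.g. (untested):

		if (pmc->perf_event) {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("fail to enable perf event\n");
		}
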
> @@ -192,6 +319,18 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
>                       continue;
>  
>               pmc = &pmu->pmc[i];
> +
> +             /*
> +              * For high counters of chained events we must recreate the
> +              * perf event with the long (64bit) attribute unset.
> +              */
> +             if (kvm_pmu_pmc_is_chained(pmc) &&
> +                 kvm_pmu_pmc_is_high_counter(i)) {
> +                     kvm_pmu_create_perf_event(vcpu, i);
> +                     continue;
> +             }
> +
> +             pmc = kvm_pmu_get_canonical_pmc(pmc);

Same as in the enable case: pmc is already canonical at this point, so
there is no need to call the function. The same kind of clarifying
comment as suggested above would work here too.

Thanks,

-- 
Julien Thierry