Make sure reads and writes to PMCR_EL0 conform to additional
constraints imposed when the PMU is partitioned.

Signed-off-by: Colton Lewis <[email protected]>
---
 arch/arm64/kvm/pmu.c      | 2 +-
 arch/arm64/kvm/sys_regs.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 1fd012f8ff4a9..48b39f096fa12 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -877,7 +877,7 @@ u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
 u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
 {
        u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
-       u64 n = vcpu->kvm->arch.nr_pmu_counters;
+       u64 n = kvm_pmu_guest_num_counters(vcpu);
 
        if (vcpu_has_nv(vcpu) && !vcpu_is_el2(vcpu))
                n = FIELD_GET(MDCR_EL2_HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 70104087b6c7b..f2ae761625a66 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1360,7 +1360,7 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
         */
        if (!kvm_vm_has_ran_once(kvm) &&
            !vcpu_has_nv(vcpu)        &&
-           new_n <= kvm_arm_pmu_get_max_counters(kvm))
+           new_n <= kvm_pmu_hpmn(vcpu))
                kvm->arch.nr_pmu_counters = new_n;
 
        mutex_unlock(&kvm->arch.config_lock);
-- 
2.52.0.239.gd5f0c6e74e-goog


Reply via email to