Hi Marc,

On 7/13/21 2:58 PM, Marc Zyngier wrote:
> We always sanitise our PMU sysreg on the write side, so there
> is no need to do it on the read side as well.
>
> Drop the unnecessary masking.

Checked all the remaining uses of kvm_pmu_valid_counter_mask in sys_regs.c
and in pmu-emul.c, and nothing stands out:

Reviewed-by: Alexandru Elisei <alexandru.eli...@arm.com>

Thanks,

Alex

>
> Signed-off-by: Marc Zyngier <m...@kernel.org>
> ---
>  arch/arm64/kvm/pmu-emul.c | 3 +--
>  arch/arm64/kvm/sys_regs.c | 6 +++---
>  2 files changed, 4 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> index f33825c995cb..fae4e95b586c 100644
> --- a/arch/arm64/kvm/pmu-emul.c
> +++ b/arch/arm64/kvm/pmu-emul.c
> @@ -373,7 +373,6 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
>               reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
>               reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
>               reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
> -             reg &= kvm_pmu_valid_counter_mask(vcpu);
>       }
>  
>       return reg;
> @@ -569,7 +568,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
>  
>       if (val & ARMV8_PMU_PMCR_E) {
>               kvm_pmu_enable_counter_mask(vcpu,
> -                    __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
> +                    __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
>       } else {
>               kvm_pmu_disable_counter_mask(vcpu, mask);
>       }
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 95ccb8f45409..7ead93a8d67f 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -883,7 +883,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct 
> sys_reg_params *p,
>                       kvm_pmu_disable_counter_mask(vcpu, val);
>               }
>       } else {
> -             p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
> +             p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
>       }
>  
>       return true;
> @@ -907,7 +907,7 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct 
> sys_reg_params *p,
>                       /* accessing PMINTENCLR_EL1 */
>                       __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
>       } else {
> -             p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
> +             p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
>       }
>  
>       return true;
> @@ -929,7 +929,7 @@ static bool access_pmovs(struct kvm_vcpu *vcpu, struct 
> sys_reg_params *p,
>                       /* accessing PMOVSCLR_EL0 */
>                       __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & 
> mask);
>       } else {
> -             p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
> +             p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
>       }
>  
>       return true;
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to