I would just make small changes to the validity checks for these MSRs.

On 08/05/20 10:32, Like Xu wrote:
>               return 0;
> +     case MSR_IA32_PERF_CAPABILITIES:
> +             *data = vcpu->arch.perf_capabilities;
> +             return 0;

Before returning the value, this should check:

                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
                        return 1;
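
For reference, a sketch of the complete read case with that check folded in
(it assumes the value lives in vcpu->arch.perf_capabilities, as in the
quoted hunk):

                case MSR_IA32_PERF_CAPABILITIES:
                        /* The MSR is visible only when the guest has PDCM,
                         * or when the host itself accesses it. */
                        if (!msr_info->host_initiated &&
                            !guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
                                return 1;
                        *data = vcpu->arch.perf_capabilities;
                        return 0;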

>       default:
> -             if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
> +             if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
> +                     (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
>                       u64 val = pmc_read_counter(pmc);
>                       *data = val & pmu->counter_bitmask[KVM_PMC_GP];
>                       return 0;
> @@ -258,9 +277,21 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>                       return 0;
>               }
>               break;
> +     case MSR_IA32_PERF_CAPABILITIES:
> +             if (msr_info->host_initiated &&
> +                     !(data & ~vmx_get_perf_capabilities())) {

Likewise:

                if (!msr_info->host_initiated)
                        return 1;
                if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) ?
                    (data & ~vmx_get_perf_capabilities()) : data)
                        return 1;
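
Put together, the write path would then look roughly like this (the final
store into vcpu->arch.perf_capabilities is only my guess at what the elided
body of the quoted hunk does, mirroring the read path):

                case MSR_IA32_PERF_CAPABILITIES:
                        /* Only the host may write this MSR. */
                        if (!msr_info->host_initiated)
                                return 1;
                        /* With PDCM, reject bits KVM does not support;
                         * without PDCM, reject any nonzero value. */
                        if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM) ?
                            (data & ~vmx_get_perf_capabilities()) : data)
                                return 1;
                        /* Assumed store, mirroring the read path. */
                        vcpu->arch.perf_capabilities = data;
                        return 0;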

Otherwise looks good, I'm going to queue this.

Paolo
