Add support for AMD Core Performance Extension counters in the guest.
The extension introduces new base event select and counter MSRs and
provides 2 extra counters, for a total of 6 counters available for
performance measurements.

With the new MSRs, the logic that maps them to gp_counters[] changes:
the counter MSRs are interleaved with the event select MSRs, so they no
longer map one-to-one by offset from a base. New functions are
introduced to pick the right base MSR and to validate the MSRs passed
to get/set.
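
For example, with the extension enabled the MSRs are interleaved:

  MSR_F15H_PERF_CTL0  0xc0010200
  MSR_F15H_PERF_CTR0  0xc0010201
  MSR_F15H_PERF_CTL1  0xc0010202
  MSR_F15H_PERF_CTR1  0xc0010203

so MSR_F15H_PERF_CTR1 maps to gp_counters[(0xc0010203 - 0xc0010201) / 2],
i.e. gp_counters[1].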

If the guest vCPU family is not 15h or 17h, or the PERFCTR_CORE CPUID
bit is not set, KVM falls back to the K7 MSRs and the number of
counters the guest can access is limited to 4.

Signed-off-by: Janakarajan Natarajan <[email protected]>
---
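Not for commit: a minimal guest-side sanity check, assuming the guest
runs as root with the msr module loaded and CPUID advertising
PERFCTR_CORE. The MSR value and loop bound of 6 mirror the constants
used in this patch; nothing below is part of the patch itself.

#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_F15H_PERF_CTR 0xc0010201u	/* first Core Ext counter MSR */

int main(void)
{
	int fd = open("/dev/cpu/0/msr", O_RDONLY);
	unsigned int i;
	uint64_t val;

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	for (i = 0; i < 6; i++) {
		/* Counter MSRs are interleaved with event selects: 2 apart. */
		uint32_t msr = MSR_F15H_PERF_CTR + 2 * i;

		if (pread(fd, &val, sizeof(val), msr) != sizeof(val)) {
			perror("rdmsr");
			return 1;
		}
		printf("PERF_CTR%u (0x%x) = 0x%016" PRIx64 "\n", i, msr, val);
	}
	close(fd);
	return 0;
}
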
 arch/x86/kvm/pmu_amd.c | 133 +++++++++++++++++++++++++++++++++++++++++++------
 arch/x86/kvm/x86.c     |   1 +
 2 files changed, 120 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index cd94443..2c694446 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -19,6 +19,11 @@
 #include "lapic.h"
 #include "pmu.h"
 
+enum pmu_type {
+       PMU_TYPE_COUNTER = 0,
+       PMU_TYPE_EVNTSEL,
+};
+
 /* duplicated from amd_perfmon_event_map, K7 and above should work. */
 static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
        [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
@@ -31,6 +36,86 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
        [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
 };
 
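+/*
+ * Pick the base counter/event select MSR for this guest: the Core Perf
+ * Extension MSRs for family 15h/17h guests that advertise PERFCTR_CORE,
+ * the legacy K7 MSRs otherwise.
+ */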
+static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
+{
+       struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+       int family;
+       bool has_perf_ext;
+
+       family = guest_cpuid_family(vcpu);
+       has_perf_ext = guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
+
+       switch (family) {
+       case 0x15:
+       case 0x17:
+               if (has_perf_ext) {
+                       if (type == PMU_TYPE_COUNTER)
+                               return MSR_F15H_PERF_CTR;
+                       else
+                               return MSR_F15H_PERF_CTL;
+               }
+               /*
+                * Fall through: without the Core Perf Extension the
+                * K7 MSRs are backwards compatible.
+                */
+       default:
+               if (type == PMU_TYPE_COUNTER)
+                       return MSR_K7_PERFCTR0;
+               else
+                       return MSR_K7_EVNTSEL0;
+       }
+}
+
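+/*
+ * Map an MSR to its kvm_pmc, handling both the interleaved Core Perf
+ * Extension layout and the contiguous legacy K7 layout.
+ */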
+static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
+                                            enum pmu_type type)
+{
+       unsigned int base = get_msr_base(pmu, type);
+
+       if (base == MSR_F15H_PERF_CTL) {
+               switch (msr) {
+               case MSR_F15H_PERF_CTL0:
+               case MSR_F15H_PERF_CTL1:
+               case MSR_F15H_PERF_CTL2:
+               case MSR_F15H_PERF_CTL3:
+               case MSR_F15H_PERF_CTL4:
+               case MSR_F15H_PERF_CTL5:
+                       /*
+                        * AMD Perf Extension MSRs are not contiguous.
+                        *
+                        * E.g. MSR_F15H_PERF_CTR0 -> 0xc0010201
+                        *      MSR_F15H_PERF_CTR1 -> 0xc0010203
+                        *
+                        * These are mapped to work with gp_counters[].
+                        * The index into the array is calculated by
+                        * dividing the difference between the requested
+                        * msr and the msr base by 2.
+                        *
+                        * E.g. MSR_F15H_PERF_CTR1 uses
+                        *      ->gp_counters[(0xc0010203-0xc0010201)/2]
+                        *      ->gp_counters[1]
+                        */
+                       return &pmu->gp_counters[(msr - base) >> 1];
+               default:
+                       return NULL;
+               }
+       } else if (base == MSR_F15H_PERF_CTR) {
+               switch (msr) {
+               case MSR_F15H_PERF_CTR0:
+               case MSR_F15H_PERF_CTR1:
+               case MSR_F15H_PERF_CTR2:
+               case MSR_F15H_PERF_CTR3:
+               case MSR_F15H_PERF_CTR4:
+               case MSR_F15H_PERF_CTR5:
+                       return &pmu->gp_counters[(msr - base) >> 1];
+               default:
+                       return NULL;
+               }
+       } else {
+               return get_gp_pmc(pmu, msr, base);
+       }
+}
+
 static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
                                    u8 event_select,
                                    u8 unit_mask)
@@ -64,7 +149,20 @@ static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
 
 static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 {
-       return get_gp_pmc(pmu, MSR_K7_EVNTSEL0 + pmc_idx, MSR_K7_EVNTSEL0);
+       unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
+
+       if (base == MSR_F15H_PERF_CTR) {
+               /*
+                * The idx is contiguous, but the Core Perf Extension
+                * counter MSRs are interleaved with the event select
+                * MSRs, so each index maps to every other MSR. Checking
+                * the base (not the family) keeps family 15h/17h guests
+                * without PERFCTR_CORE on the contiguous K7 layout.
+                */
+               pmc_idx *= 2;
+       }
+
+       return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
 }
 
 /* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
@@ -96,8 +194,8 @@ static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int ret = false;
 
-       ret = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0) ||
-               get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
+       ret = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
+               get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
 
        return ret;
 }
@@ -107,14 +205,14 @@ static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
 
-       /* MSR_K7_PERFCTRn */
-       pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
+       /* MSR_PERFCTRn */
+       pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                *data = pmc_read_counter(pmc);
                return 0;
        }
-       /* MSR_K7_EVNTSELn */
-       pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
+       /* MSR_EVNTSELn */
+       pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                *data = pmc->eventsel;
                return 0;
@@ -130,14 +228,14 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        u32 msr = msr_info->index;
        u64 data = msr_info->data;
 
-       /* MSR_K7_PERFCTRn */
-       pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
+       /* MSR_PERFCTRn */
+       pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc->counter += data - pmc_read_counter(pmc);
                return 0;
        }
-       /* MSR_K7_EVNTSELn */
-       pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
+       /* MSR_EVNTSELn */
+       pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                if (data == pmc->eventsel)
                        return 0;
@@ -153,8 +251,15 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 {
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       int family, nr_counters;
+
+       family = guest_cpuid_family(vcpu);
+       if ((family == 0x15 || family == 0x17) &&
+           guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+               nr_counters = AMD64_NUM_COUNTERS_CORE;
+       else
+               nr_counters = AMD64_NUM_COUNTERS;
 
-       pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
+       pmu->nr_arch_gp_counters = nr_counters;
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
        pmu->reserved_bits = 0xffffffff00200000ull;
        /* not applicable to AMD; but clean them to prevent any fall out */
@@ -169,7 +274,7 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;
 
-       for (i = 0; i < AMD64_NUM_COUNTERS ; i++) {
+       for (i = 0; i < AMD64_NUM_COUNTERS_CORE ; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
@@ -181,7 +286,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;
 
-       for (i = 0; i < AMD64_NUM_COUNTERS; i++) {
+       for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];
 
                pmc_stop_counter(pmc);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 03869eb..5a6ad6f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2433,6 +2433,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_AMD64_DC_CFG:
                msr_info->data = 0;
                break;
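+       /*
+        * The F15h CTL/CTR MSRs are interleaved, so this single range
+        * covers all 12 of them.
+        */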
+       case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
        case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
-- 
2.7.4
