Add pmc_hostonly and pmc_guestonly bitmaps to struct kvm_pmu to track
which guest-enabled performance counters have exactly one of the
Host-Only and Guest-Only event selector bits set. PMCs that are
disabled, or that have neither or both HG_ONLY bits set, are not
tracked, because they require no special handling at vCPU state
transitions.

Update the bitmaps when the guest writes to an event selector MSR.
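
Purely for illustration (not part of this patch), the classification
rule reduces to the stand-alone sketch below. The AMD64_EVENTSEL_* and
ARCH_PERFMON_EVENTSEL_ENABLE values mirror the kernel's
<asm/perf_event.h> definitions; the hg_class enum and classify()
helper are hypothetical names invented for this example:

  #include <stdint.h>
  #include <stdio.h>

  #define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)
  #define AMD64_EVENTSEL_GUESTONLY     (1ULL << 40)
  #define AMD64_EVENTSEL_HOSTONLY      (1ULL << 41)
  #define AMD64_EVENTSEL_HG_ONLY \
          (AMD64_EVENTSEL_GUESTONLY | AMD64_EVENTSEL_HOSTONLY)

  enum hg_class { HG_NONE, HG_HOST_ONLY, HG_GUEST_ONLY };

  /* A PMC is tracked only when it is enabled and exactly one of
   * the HostOnly/GuestOnly bits is set. */
  static enum hg_class classify(uint64_t eventsel)
  {
          if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE))
                  return HG_NONE;

          switch (eventsel & AMD64_EVENTSEL_HG_ONLY) {
          case AMD64_EVENTSEL_HOSTONLY:
                  return HG_HOST_ONLY;
          case AMD64_EVENTSEL_GUESTONLY:
                  return HG_GUEST_ONLY;
          default:        /* neither or both: not tracked */
                  return HG_NONE;
          }
  }

  int main(void)
  {
          /* Enabled counter with only the GuestOnly bit set. */
          uint64_t sel = ARCH_PERFMON_EVENTSEL_ENABLE |
                         AMD64_EVENTSEL_GUESTONLY;

          printf("%d\n", classify(sel));  /* prints 2 (HG_GUEST_ONLY) */
          return 0;
  }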

Signed-off-by: Jim Mattson <[email protected]>
---
 arch/x86/include/asm/kvm_host.h |  4 ++++
 arch/x86/kvm/pmu.c              |  2 ++
 arch/x86/kvm/svm/pmu.c          | 28 ++++++++++++++++++++++++++++
 3 files changed, 34 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ecd4019b84b7..92050f76f84b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -593,6 +593,10 @@ struct kvm_pmu {
        DECLARE_BITMAP(pmc_counting_instructions, X86_PMC_IDX_MAX);
        DECLARE_BITMAP(pmc_counting_branches, X86_PMC_IDX_MAX);
 
+       /* AMD only: track enabled PMCs with exactly one HG_ONLY bit set */
+       DECLARE_BITMAP(pmc_hostonly, X86_PMC_IDX_MAX);
+       DECLARE_BITMAP(pmc_guestonly, X86_PMC_IDX_MAX);
+
        u64 ds_area;
        u64 pebs_enable;
        u64 pebs_enable_rsvd;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index bd6b785cf261..833ee2ecd43f 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -921,6 +921,8 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
        pmu->need_cleanup = false;
 
        bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
+       bitmap_zero(pmu->pmc_hostonly, X86_PMC_IDX_MAX);
+       bitmap_zero(pmu->pmc_guestonly, X86_PMC_IDX_MAX);
 
        kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
                pmc_stop_counter(pmc);
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index f619417557f9..c06013e2b4b1 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -147,6 +147,33 @@ static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        return 1;
 }
 
+static void amd_pmu_update_hg_bitmaps(struct kvm_pmc *pmc)
+{
+       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+       u64 eventsel = pmc->eventsel;
+
+       if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE)) {
+               bitmap_clear(pmu->pmc_hostonly, pmc->idx, 1);
+               bitmap_clear(pmu->pmc_guestonly, pmc->idx, 1);
+               return;
+       }
+
+       switch (eventsel & AMD64_EVENTSEL_HG_ONLY) {
+       case AMD64_EVENTSEL_HOSTONLY:
+               bitmap_set(pmu->pmc_hostonly, pmc->idx, 1);
+               bitmap_clear(pmu->pmc_guestonly, pmc->idx, 1);
+               break;
+       case AMD64_EVENTSEL_GUESTONLY:
+               bitmap_clear(pmu->pmc_hostonly, pmc->idx, 1);
+               bitmap_set(pmu->pmc_guestonly, pmc->idx, 1);
+               break;
+       default:
+               bitmap_clear(pmu->pmc_hostonly, pmc->idx, 1);
+               bitmap_clear(pmu->pmc_guestonly, pmc->idx, 1);
+               break;
+       }
+}
+
 static bool amd_pmu_dormant_hg_event(struct kvm_pmc *pmc)
 {
        u64 hg_only = pmc->eventsel & AMD64_EVENTSEL_HG_ONLY;
@@ -196,6 +223,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (data != pmc->eventsel) {
                        pmc->eventsel = data;
                        amd_pmu_set_eventsel_hw(pmc);
+                       amd_pmu_update_hg_bitmaps(pmc);
                        kvm_pmu_request_counter_reprogram(pmc);
                }
                return 0;
-- 
2.52.0.457.g6b5491de43-goog

