Rather than guest/host save/restore functions, implement context switch
functions that take care of details like the VPA update for nested.

The reason to split these kinds of helpers into explicit save/load
functions is mainly to schedule SPR access nicely, but PMU is a special
case where the load requires mtSPR (to stop counters) and other
difficulties, so there's less possibility to schedule those nicely. The
SPR accesses also have side-effects if the PMU is running, and in later
changes we keep the host PMU running as long as possible so this code
can be better profiled, which also complicates scheduling.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 51 ++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 38d8afa16839..13b8389b0479 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3690,7 +3690,8 @@ static void freeze_pmu(unsigned long mmcr0, unsigned long 
mmcra)
        isync();
 }
 
-static void save_p9_host_pmu(struct p9_host_os_sprs *host_os_sprs)
+static void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
+                               struct p9_host_os_sprs *host_os_sprs)
 {
        if (ppc_get_pmu_inuse()) {
                /*
@@ -3724,10 +3725,19 @@ static void save_p9_host_pmu(struct p9_host_os_sprs 
*host_os_sprs)
                        host_os_sprs->sier3 = mfspr(SPRN_SIER3);
                }
        }
-}
 
-static void load_p9_guest_pmu(struct kvm_vcpu *vcpu)
-{
+#ifdef CONFIG_PPC_PSERIES
+       if (kvmhv_on_pseries()) {
+               if (vcpu->arch.vpa.pinned_addr) {
+                       struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
+                       get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
+               } else {
+                       get_lppaca()->pmcregs_in_use = 1;
+               }
+       }
+#endif
+
+       /* load guest */
        mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
        mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
        mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
@@ -3752,7 +3762,8 @@ static void load_p9_guest_pmu(struct kvm_vcpu *vcpu)
        /* No isync necessary because we're starting counters */
 }
 
-static void save_p9_guest_pmu(struct kvm_vcpu *vcpu)
+static void switch_pmu_to_host(struct kvm_vcpu *vcpu,
+                               struct p9_host_os_sprs *host_os_sprs)
 {
        struct lppaca *lp;
        int save_pmu = 1;
@@ -3787,10 +3798,12 @@ static void save_p9_guest_pmu(struct kvm_vcpu *vcpu)
        } else {
                freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
        }
-}
 
-static void load_p9_host_pmu(struct p9_host_os_sprs *host_os_sprs)
-{
+#ifdef CONFIG_PPC_PSERIES
+       if (kvmhv_on_pseries())
+               get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
+#endif
+
        if (ppc_get_pmu_inuse()) {
                mtspr(SPRN_PMC1, host_os_sprs->pmc1);
                mtspr(SPRN_PMC2, host_os_sprs->pmc2);
@@ -3929,8 +3942,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, 
u64 time_limit,
 
        save_p9_host_os_sprs(&host_os_sprs);
 
-       save_p9_host_pmu(&host_os_sprs);
-
        kvmppc_subcore_enter_guest();
 
        vc->entry_exit_map = 1;
@@ -3942,17 +3953,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, 
u64 time_limit,
            cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
                kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
 
-#ifdef CONFIG_PPC_PSERIES
-       if (kvmhv_on_pseries()) {
-               if (vcpu->arch.vpa.pinned_addr) {
-                       struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
-                       get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
-               } else {
-                       get_lppaca()->pmcregs_in_use = 1;
-               }
-       }
-#endif
-       load_p9_guest_pmu(vcpu);
+       switch_pmu_to_guest(vcpu, &host_os_sprs);
 
        msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
        load_fp_state(&vcpu->arch.fp);
@@ -4076,11 +4077,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, 
u64 time_limit,
 
        vcpu_vpa_increment_dispatch(vcpu);
 
-       save_p9_guest_pmu(vcpu);
-#ifdef CONFIG_PPC_PSERIES
-       if (kvmhv_on_pseries())
-               get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
-#endif
+       switch_pmu_to_host(vcpu, &host_os_sprs);
 
        vc->entry_exit_map = 0x101;
        vc->in_guest = 0;
@@ -4089,8 +4086,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, 
u64 time_limit,
 
        mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
 
-       load_p9_host_pmu(&host_os_sprs);
-
        kvmppc_subcore_exit_guest();
 
        return trap;
-- 
2.23.0

Reply via email to