This juggles SPR switching on the entry and exit sides to make the two
more symmetric, which makes the next refactoring patch possible. No
functional change.
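
For reference, the reordering visible in the hunks below moves
switch_pmu_to_guest() after the SPR and FP/vector loads on the entry
side, and switch_pmu_to_host() before the SPR and FP/vector stores on
the exit side. A sketch of the resulting ordering (only the calls this
diff touches, not the full entry/exit sequence):

    /* entry side */
    load_spr_state(vcpu, &host_os_sprs);
    load_fp_state(&vcpu->arch.fp);            /* + Altivec, VRSAVE */
    switch_pmu_to_guest(vcpu, &host_os_sprs);

    /* ... guest execution ... */

    /* exit side */
    switch_pmu_to_host(vcpu, &host_os_sprs);
    store_spr_state(vcpu);
    store_fp_state(&vcpu->arch.fp);           /* + Altivec, VRSAVE */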

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 612b70216e75..a780a9b9effd 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4069,7 +4069,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
            cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
                kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
 
-       switch_pmu_to_guest(vcpu, &host_os_sprs);
+       load_spr_state(vcpu, &host_os_sprs);
 
        load_fp_state(&vcpu->arch.fp);
 #ifdef CONFIG_ALTIVEC
@@ -4077,7 +4077,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 #endif
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
 
-       load_spr_state(vcpu, &host_os_sprs);
+       switch_pmu_to_guest(vcpu, &host_os_sprs);
 
        if (kvmhv_on_pseries()) {
                /*
@@ -4177,6 +4177,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                        vcpu->arch.slb_max = 0;
        }
 
+       switch_pmu_to_host(vcpu, &host_os_sprs);
+
        store_spr_state(vcpu);
 
        store_fp_state(&vcpu->arch.fp);
@@ -4191,8 +4193,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 
        vcpu_vpa_increment_dispatch(vcpu);
 
-       switch_pmu_to_host(vcpu, &host_os_sprs);
-
        timer_rearm_host_dec(*tb);
 
        restore_p9_host_os_sprs(vcpu, &host_os_sprs);
-- 
2.23.0
