This also moves the PSSCR update in nested entry to avoid a SPR scoreboard stall.
-45 cycles (6276) POWER9 virt-mode NULL hcall

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c          |  7 +++++--
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 26 +++++++++++++++++++-------
 2 files changed, 24 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c0a04ce39e00..a37ab798eb7c 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3856,7 +3856,9 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
 		msr = mfmsr(); /* TM restore can update msr */
 
-	mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
+	if (vcpu->arch.psscr != host_psscr)
+		mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
+
 	kvmhv_save_hv_regs(vcpu, &hvregs);
 	hvregs.lpcr = lpcr;
 	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
@@ -3897,7 +3899,6 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
 	vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
 	vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
-	mtspr(SPRN_PSSCR_PR, host_psscr);
 
 	store_vcpu_state(vcpu);
 
@@ -3910,6 +3911,8 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	timer_rearm_host_dec(*tb);
 
 	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
+	if (vcpu->arch.psscr != host_psscr)
+		mtspr(SPRN_PSSCR_PR, host_psscr);
 
 	return trap;
 }
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 976687c3709a..52690af66ca9 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -639,6 +639,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	unsigned long host_dawr0;
 	unsigned long host_dawrx0;
 	unsigned long host_psscr;
+	unsigned long host_hpsscr;
 	unsigned long host_pidr;
 	unsigned long host_dawr1;
 	unsigned long host_dawrx1;
@@ -656,7 +657,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
 	host_hfscr = mfspr(SPRN_HFSCR);
 	host_ciabr = mfspr(SPRN_CIABR);
-	host_psscr = mfspr(SPRN_PSSCR);
+	host_psscr = mfspr(SPRN_PSSCR_PR);
+	if (cpu_has_feature(CPU_FTRS_POWER9_DD2_2))
+		host_hpsscr = mfspr(SPRN_PSSCR);
 	host_pidr = mfspr(SPRN_PID);
 
 	if (dawr_enabled()) {
@@ -740,8 +743,14 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
 	if (vcpu->arch.ciabr != host_ciabr)
 		mtspr(SPRN_CIABR, vcpu->arch.ciabr);
-	mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
-	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+
+	if (cpu_has_feature(CPU_FTRS_POWER9_DD2_2)) {
+		mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
+		      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+	} else {
+		if (vcpu->arch.psscr != host_psscr)
+			mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
+	}
 
 	mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
 
@@ -947,7 +956,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
 	vcpu->arch.ic = mfspr(SPRN_IC);
 	vcpu->arch.pid = mfspr(SPRN_PID);
-	vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+	vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
 
 	vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
 	vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
@@ -993,9 +1002,12 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr);
 	mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr);
 
-	/* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
-	mtspr(SPRN_PSSCR, host_psscr |
-	      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+	if (cpu_has_feature(CPU_FTRS_POWER9_DD2_2)) {
+		/* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
+		mtspr(SPRN_PSSCR, host_hpsscr |
+		      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+	}
+
 	mtspr(SPRN_HFSCR, host_hfscr);
 	if (vcpu->arch.ciabr != host_ciabr)
 		mtspr(SPRN_CIABR, host_ciabr);
-- 
2.23.0
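
For illustration only (not part of the patch): a minimal userspace sketch of the pattern the patch applies to PSSCR_PR, i.e. read the host value once on entry, skip the SPR write when the guest value already matches, and write the host value back on exit only if the guest left a different value behind. spr_read()/spr_write() are hypothetical stand-ins for mfspr()/mtspr(); the real code compares vcpu->arch.psscr against the cached host_psscr.

/* Hypothetical stand-ins for mfspr()/mtspr() on a single SPR. */
#include <stdio.h>

static unsigned long fake_spr;		/* stands in for PSSCR_PR */
static unsigned long spr_writes;	/* counts mtspr-equivalents issued */

static unsigned long spr_read(void)
{
	return fake_spr;
}

static void spr_write(unsigned long val)
{
	fake_spr = val;
	spr_writes++;
}

/* Entry/exit around a (pretend) guest run, mirroring the conditional writes. */
static void run_guest(unsigned long guest_val)
{
	unsigned long host_val = spr_read();	/* cache host value once on entry */

	if (guest_val != host_val)		/* skip the write when it is a no-op */
		spr_write(guest_val);

	/* ... guest executes here and may leave a different value behind ... */

	guest_val = spr_read();			/* save the guest's value on exit */
	if (guest_val != host_val)		/* restore only if it now differs */
		spr_write(host_val);
}

int main(void)
{
	run_guest(0);	/* guest value matches host (0): zero writes */
	printf("writes after matching guest value: %lu\n", spr_writes);

	run_guest(42);	/* differing value: one write in, one restore out */
	printf("writes after differing guest value: %lu\n", spr_writes);
	return 0;
}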