Rearrange the MSR saving on entry so it does not follow the mtmsrd that disables interrupts, avoiding a possible read-after-write (RAW) scoreboard stall.
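As a simplified sketch (not part of the patch), the old entry sequence
read the MSR right after writing it:

	hard_irq_disable();		/* mtmsrd to clear MSR[EE] */
	...
	msr = msr_check_and_set(msr);	/* begins with mfmsr(): reads the
					 * MSR the mtmsrd just wrote */

The new sequence hoists the read to the top of entry, before any MSR
write:

	msr = mfmsr() & ~MSR_EE;	/* save host MSR first */
	...
	msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
					/* single mtmsrd, no mfmsr after
					 * it on the common path */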
Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/kvm_book3s_64.h |  2 +
 arch/powerpc/kvm/book3s_hv.c             | 18 ++-----
 arch/powerpc/kvm/book3s_hv_p9_entry.c    | 66 +++++++++++++++---------
 3 files changed, 47 insertions(+), 39 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 52e2b7a352c7..4b0753e03731 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -154,6 +154,8 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
 	return radix;
 }
 
+unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr);
+
 int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);
 
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index b95e0c5e5557..ee4e38cf5df4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3858,6 +3858,8 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	s64 dec;
 	int trap;
 
+	msr = mfmsr();
+
 	save_p9_host_os_sprs(&host_os_sprs);
 
 	/*
@@ -3868,24 +3870,10 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
 	 */
 	host_psscr = mfspr(SPRN_PSSCR_PR);
 
-	hard_irq_disable();
+	kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
 	if (lazy_irq_pending())
 		return 0;
 
-	/* MSR bits may have been cleared by context switch */
-	msr = 0;
-	if (IS_ENABLED(CONFIG_PPC_FPU))
-		msr |= MSR_FP;
-	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		msr |= MSR_VEC;
-	if (cpu_has_feature(CPU_FTR_VSX))
-		msr |= MSR_VSX;
-	if ((cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
-			(vcpu->arch.hfscr & HFSCR_TM))
-		msr |= MSR_TM;
-	msr = msr_check_and_set(msr);
-
 	if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
 		msr = mfmsr(); /* TM restore can update msr */
 
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 1bb81be09d4f..1287dac918a0 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -622,6 +622,44 @@ static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
 	}
 }
 
+unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr)
+{
+	unsigned long msr_needed = 0;
+
+	msr &= ~MSR_EE;
+
+	/* MSR bits may have been cleared by context switch so must recheck */
+	if (IS_ENABLED(CONFIG_PPC_FPU))
+		msr_needed |= MSR_FP;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr_needed |= MSR_VEC;
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr_needed |= MSR_VSX;
+	if ((cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+			(vcpu->arch.hfscr & HFSCR_TM))
+		msr_needed |= MSR_TM;
+
+	/*
+	 * This could be combined with MSR[RI] clearing, but that expands
+	 * the unrecoverable window. It would be better to cover unrecoverable
+	 * with KVM bad interrupt handling rather than use MSR[RI] at all.
+	 *
+	 * Much more difficult and less worthwhile to combine with IR/DR
+	 * disable.
+	 */
+	if ((msr & msr_needed) != msr_needed) {
+		msr |= msr_needed;
+		__mtmsrd(msr, 0);
+	} else {
+		__hard_irq_disable();
+	}
+	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+	return msr;
+}
+EXPORT_SYMBOL_GPL(kvmppc_msr_hard_disable_set_facilities);
+
 int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
 {
 	struct p9_host_os_sprs host_os_sprs;
@@ -655,6 +693,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
 	vcpu->arch.ceded = 0;
 
+	/* Save MSR for restore, with EE clear. */
+	msr = mfmsr() & ~MSR_EE;
+
 	host_hfscr = mfspr(SPRN_HFSCR);
 	host_ciabr = mfspr(SPRN_CIABR);
 	host_psscr = mfspr(SPRN_PSSCR_PR);
@@ -676,35 +717,12 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
 	save_p9_host_os_sprs(&host_os_sprs);
 
-	/*
-	 * This could be combined with MSR[RI] clearing, but that expands
-	 * the unrecoverable window. It would be better to cover unrecoverable
-	 * with KVM bad interrupt handling rather than use MSR[RI] at all.
-	 *
-	 * Much more difficult and less worthwhile to combine with IR/DR
-	 * disable.
-	 */
-	hard_irq_disable();
+	msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
 	if (lazy_irq_pending()) {
 		trap = 0;
 		goto out;
 	}
 
-	/* MSR bits may have been cleared by context switch */
-	msr = 0;
-	if (IS_ENABLED(CONFIG_PPC_FPU))
-		msr |= MSR_FP;
-	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-		msr |= MSR_VEC;
-	if (cpu_has_feature(CPU_FTR_VSX))
-		msr |= MSR_VSX;
-	if ((cpu_has_feature(CPU_FTR_TM) ||
-	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
-			(vcpu->arch.hfscr & HFSCR_TM))
-		msr |= MSR_TM;
-	msr = msr_check_and_set(msr);
-
-	/* Save MSR for restore. This is after hard disable, so EE is clear. */
 	if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
 		msr = mfmsr(); /* MSR may have been updated */
 
-- 
2.23.0
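
(Not part of the patch: a condensed view of the caller pattern the new
helper assumes, taken from the two call sites above. When the needed
facility bits are already set in the MSR, only the lighter EE-only
clear in __hard_irq_disable() is issued instead of a full MSR write.)

	msr = mfmsr() & ~MSR_EE;	/* host MSR saved early for restore */
	...
	msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
	if (lazy_irq_pending())
		return 0;		/* or: trap = 0; goto out; */

	if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
		msr = mfmsr();		/* TM restore can update msr */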