Use HFSCR facility disabling to implement demand faulting for EBB, with a hysteresis counter similar to the load_fp etc. counters in context switching that implement the equivalent demand faulting for userspace facilities.
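For context, the hysteresis counter works on the same principle as the load_fp/load_vec counters in the context-switch code: enable the facility when the guest demand-faults on it, then let a small wrapping counter disable it again once the guest appears to have stopped using it. Below is a minimal, standalone sketch of that pattern, not the kernel's actual code; the names struct facility_state, handle_facility_fault and facility_switch are hypothetical stand-ins for the load_ebb/HFSCR handling in the diff.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-vCPU state for one lazily managed facility. */
struct facility_state {
        bool    enabled;    /* facility currently granted to the guest */
        uint8_t use_count;  /* hysteresis counter, bumped each switch  */
};

/*
 * Guest touched the facility while it was disabled: the facility-
 * unavailable interrupt enables it, so subsequent uses run at full
 * speed at the cost of save/restore on every switch from now on.
 */
static void handle_facility_fault(struct facility_state *f)
{
        f->enabled = true;
        f->use_count = 1;
}

/*
 * Called on every partition switch.  Returns true when the registers
 * must be saved/restored.  Incrementing the u8 counter until it wraps
 * back to zero gives roughly 256 switches of grace; after that the
 * facility is disabled again, so a guest that has stopped using it
 * also stops paying the save/restore cost.
 */
static bool facility_switch(struct facility_state *f)
{
        if (!f->enabled)
                return false;

        if (++f->use_count == 0)        /* wrapped: disable until next fault */
                f->enabled = false;

        return true;
}

int main(void)
{
        struct facility_state ebb = { 0 };
        unsigned int saves = 0;

        handle_facility_fault(&ebb);            /* guest starts using EBB  */
        for (int i = 0; i < 1000; i++)          /* guest then goes idle... */
                saves += facility_switch(&ebb);

        /* ...so only ~255 of the 1000 switches had to touch the registers. */
        printf("switches with save/restore: %u, enabled now: %d\n",
               saves, ebb.enabled);
        return 0;
}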
This speeds up guest entry/exit by avoiding the register save/restore when a guest is not frequently using the EBB registers. When a guest does use them often, there is some additional demand-fault overhead, but EBB is not a commonly used facility.

Reviewed-by: Fabiano Rosas <faro...@linux.ibm.com>
Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/kvm_host.h   |  1 +
 arch/powerpc/kvm/book3s_hv.c          | 16 +++++++++++++--
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 28 +++++++++++++++++++++------
 3 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index f105eaeb4521..1c00c4a565f5 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -580,6 +580,7 @@ struct kvm_vcpu_arch {
 	ulong cfar;
 	ulong ppr;
 	u32 pspb;
+	u8 load_ebb;
 	ulong fscr;
 	ulong shadow_fscr;
 	ulong ebbhr;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 47ccea5ffba2..dd8199a423cf 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1441,6 +1441,16 @@ static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
 	return RESUME_GUEST;
 }
 
+static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
+		return EMULATE_FAIL;
+
+	vcpu->arch.hfscr |= HFSCR_EBB;
+
+	return RESUME_GUEST;
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
 				 struct task_struct *tsk)
 {
@@ -1735,6 +1745,8 @@ XXX benchmark guest exits
 			r = kvmppc_emulate_doorbell_instr(vcpu);
 		if (cause == FSCR_PM_LG)
 			r = kvmppc_pmu_unavailable(vcpu);
+		if (cause == FSCR_EBB_LG)
+			r = kvmppc_ebb_unavailable(vcpu);
 	}
 	if (r == EMULATE_FAIL) {
 		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -2751,9 +2763,9 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
 	vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
 
 	/*
-	 * PM is demand-faulted so start with it clear.
+	 * PM, EBB is demand-faulted so start with it clear.
 	 */
-	vcpu->arch.hfscr &= ~HFSCR_PM;
+	vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB);
 
 	kvmppc_mmu_book3s_hv_init(vcpu);
 
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index c4e93167d120..f68a3d107d04 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -224,9 +224,12 @@ static void load_spr_state(struct kvm_vcpu *vcpu,
 			   struct p9_host_os_sprs *host_os_sprs)
 {
 	mtspr(SPRN_TAR, vcpu->arch.tar);
-	mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
-	mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
-	mtspr(SPRN_BESCR, vcpu->arch.bescr);
+
+	if (vcpu->arch.hfscr & HFSCR_EBB) {
+		mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+		mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+		mtspr(SPRN_BESCR, vcpu->arch.bescr);
+	}
 
 	if (!cpu_has_feature(CPU_FTR_ARCH_31))
 		mtspr(SPRN_TIDR, vcpu->arch.tid);
@@ -257,9 +260,22 @@ static void load_spr_state(struct kvm_vcpu *vcpu,
 static void store_spr_state(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.tar = mfspr(SPRN_TAR);
-	vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
-	vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
-	vcpu->arch.bescr = mfspr(SPRN_BESCR);
+
+	if (vcpu->arch.hfscr & HFSCR_EBB) {
+		vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+		vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+		vcpu->arch.bescr = mfspr(SPRN_BESCR);
+		/*
+		 * This is like load_fp in context switching, turn off the
+		 * facility after it wraps the u8 to try avoiding saving
+		 * and restoring the registers each partition switch.
+		 */
+		if (!vcpu->arch.nested) {
+			vcpu->arch.load_ebb++;
+			if (!vcpu->arch.load_ebb)
+				vcpu->arch.hfscr &= ~HFSCR_EBB;
+		}
+	}
 
 	if (!cpu_has_feature(CPU_FTR_ARCH_31))
 		vcpu->arch.tid = mfspr(SPRN_TIDR);
-- 
2.23.0