Use HFSCR facility disabling to implement demand faulting for TM, with
a hysteresis counter similar to the load_fp etc counters used by the
context switching code to implement the equivalent demand faulting for
userspace facilities.
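
The shape of the scheme, as a rough self-contained sketch (userspace C
for illustration only: struct vcpu_sketch, demand_fault_tm(),
tm_idle_exit(), main() and the bit value are made up; only hfscr,
hfscr_permitted, load_tm and HFSCR_TM correspond to names touched by
this patch, and the real code additionally skips the counter for
nested vCPUs):

  #include <stdint.h>
  #include <stdio.h>

  #define HFSCR_TM        (1UL << 5)      /* facility bit; value illustrative */

  struct vcpu_sketch {
          unsigned long hfscr;            /* facilities currently enabled */
          unsigned long hfscr_permitted;  /* facilities the guest may use */
          uint8_t load_tm;                /* hysteresis counter */
  };

  /* Facility Unavailable interrupt: guest used TM while HFSCR_TM was clear. */
  static int demand_fault_tm(struct vcpu_sketch *v)
  {
          if (!(v->hfscr_permitted & HFSCR_TM))
                  return -1;              /* not permitted: would inject a fault */
          v->hfscr |= HFSCR_TM;           /* enable; TM state saved/restored again */
          return 0;
  }

  /* Guest exit where TM was enabled but no transaction was in progress. */
  static void tm_idle_exit(struct vcpu_sketch *v)
  {
          if (!(v->hfscr & HFSCR_TM))
                  return;
          if (++v->load_tm == 0)          /* u8 wraps after 256 idle exits */
                  v->hfscr &= ~HFSCR_TM;  /* drop it; next TM use faults again */
  }

  int main(void)
  {
          struct vcpu_sketch v = { .hfscr_permitted = HFSCR_TM };
          int i;

          demand_fault_tm(&v);            /* first TM use enables the facility */
          for (i = 0; i < 256; i++)       /* exits with no active transaction */
                  tm_idle_exit(&v);
          printf("TM %s\n", (v.hfscr & HFSCR_TM) ? "still enabled" : "dropped");
          return 0;
  }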

This speeds up guest entry/exit by avoiding the TM register
save/restore when a guest is not frequently using the facility. When a
guest does use it often, there will be some additional demand fault
overhead, but TM is not a commonly used facility.

-304 cycles (6681) POWER9 virt-mode NULL hcall with the previous patch

Reviewed-by: Fabiano Rosas <faro...@linux.ibm.com>
Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/kvm_host.h   |  1 +
 arch/powerpc/kvm/book3s_hv.c          | 26 ++++++++++++++++++++------
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 25 +++++++++++++++++--------
 3 files changed, 38 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1c00c4a565f5..74ee3a5b110e 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -581,6 +581,7 @@ struct kvm_vcpu_arch {
        ulong ppr;
        u32 pspb;
        u8 load_ebb;
+       u8 load_tm;
        ulong fscr;
        ulong shadow_fscr;
        ulong ebbhr;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index dd8199a423cf..5b2114c00c43 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1451,6 +1451,16 @@ static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
        return RESUME_GUEST;
 }
 
+static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
+{
+       if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
+               return EMULATE_FAIL;
+
+       vcpu->arch.hfscr |= HFSCR_TM;
+
+       return RESUME_GUEST;
+}
+
 static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
                                 struct task_struct *tsk)
 {
@@ -1747,6 +1757,8 @@ XXX benchmark guest exits
                                r = kvmppc_pmu_unavailable(vcpu);
                        if (cause == FSCR_EBB_LG)
                                r = kvmppc_ebb_unavailable(vcpu);
+                       if (cause == FSCR_TM_LG)
+                               r = kvmppc_tm_unavailable(vcpu);
                }
                if (r == EMULATE_FAIL) {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -2763,9 +2775,9 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
        vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
 
        /*
-        * PM, EBB is demand-faulted so start with it clear.
+        * PM, EBB, TM are demand-faulted so start with them clear.
         */
-       vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB);
+       vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
 
        kvmppc_mmu_book3s_hv_init(vcpu);
 
@@ -3835,8 +3847,9 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
                msr |= MSR_VEC;
        if (cpu_has_feature(CPU_FTR_VSX))
                msr |= MSR_VSX;
-       if (cpu_has_feature(CPU_FTR_TM) ||
-           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+       if ((cpu_has_feature(CPU_FTR_TM) ||
+           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+                       (vcpu->arch.hfscr & HFSCR_TM))
                msr |= MSR_TM;
        msr = msr_check_and_set(msr);
 
@@ -4552,8 +4565,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
                msr |= MSR_VEC;
        if (cpu_has_feature(CPU_FTR_VSX))
                msr |= MSR_VSX;
-       if (cpu_has_feature(CPU_FTR_TM) ||
-           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+       if ((cpu_has_feature(CPU_FTR_TM) ||
+           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+                       (vcpu->arch.hfscr & HFSCR_TM))
                msr |= MSR_TM;
        msr = msr_check_and_set(msr);
 
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index f68a3d107d04..db5eb83e26d1 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -295,10 +295,11 @@ bool load_vcpu_state(struct kvm_vcpu *vcpu,
 {
        bool ret = false;
 
-       if (cpu_has_feature(CPU_FTR_TM) ||
-           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+       if ((cpu_has_feature(CPU_FTR_TM) ||
+           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+                      (vcpu->arch.hfscr & HFSCR_TM)) {
                unsigned long guest_msr = vcpu->arch.shregs.msr;
-               if (MSR_TM_ACTIVE(guest_msr)) {
+       if (MSR_TM_ACTIVE(guest_msr) || local_paca->kvm_hstate.fake_suspend) {
                        kvmppc_restore_tm_hv(vcpu, guest_msr, true);
                        ret = true;
                } else {
@@ -330,15 +331,22 @@ void store_vcpu_state(struct kvm_vcpu *vcpu)
 #endif
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
 
-       if (cpu_has_feature(CPU_FTR_TM) ||
-           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+       if ((cpu_has_feature(CPU_FTR_TM) ||
+           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+                      (vcpu->arch.hfscr & HFSCR_TM)) {
                unsigned long guest_msr = vcpu->arch.shregs.msr;
-               if (MSR_TM_ACTIVE(guest_msr)) {
+               if (MSR_TM_ACTIVE(guest_msr) || local_paca->kvm_hstate.fake_suspend) {
                        kvmppc_save_tm_hv(vcpu, guest_msr, true);
                } else {
                        vcpu->arch.texasr = mfspr(SPRN_TEXASR);
                        vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
                        vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
+
+                       if (!vcpu->arch.nested) {
+                               vcpu->arch.load_tm++; /* see load_ebb comment */
+                               if (!vcpu->arch.load_tm)
+                                       vcpu->arch.hfscr &= ~HFSCR_TM;
+                       }
                }
        }
 }
@@ -629,8 +637,9 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
                msr |= MSR_VEC;
        if (cpu_has_feature(CPU_FTR_VSX))
                msr |= MSR_VSX;
-       if (cpu_has_feature(CPU_FTR_TM) ||
-           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+       if ((cpu_has_feature(CPU_FTR_TM) ||
+           cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+                       (vcpu->arch.hfscr & HFSCR_TM))
                msr |= MSR_TM;
        msr = msr_check_and_set(msr);
        /* Save MSR for restore. This is after hard disable, so EE is clear. */
-- 
2.23.0
