Move the timebase (TB) updates to between the saving and loading of guest
and host SPRs, to improve scheduling by keeping issue-NTC (next-to-complete)
operations together as much as possible.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
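For context (not part of the patch): a user-space sketch of the TBU40 update
pattern these hunks move. Writing SPRN_TBU40 replaces only the upper 40 bits
of the timebase while the low 24 bits keep running, and the KVM tb_offset is
aligned to 2^24, so new_tb and the live TB share their low 24 bits; if those
low bits wrap between computing new_tb and the readback, the guest TB ends up
2^24 behind, and the comparison bumps the upper bits to apply the missed
carry. The fake_tb/tbu40_write names below are illustrative assumptions, not
kernel symbols.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t fake_tb;	/* software model of the running timebase */

/* Model of mtspr(SPRN_TBU40, val): bits 63:24 are written, the low
 * 24 bits of the running timebase keep ticking unchanged. */
static void tbu40_write(uint64_t val)
{
	fake_tb = (val & ~0xffffffULL) | (fake_tb & 0xffffffULL);
}

int main(void)
{
	uint64_t tb, new_tb;
	uint64_t offset = 0x123456000000ULL;	/* low 24 bits clear, as KVM aligns tb_offset */

	fake_tb = 0xfffff0;			/* low 24 bits about to wrap */
	tb = fake_tb;				/* mftb() */
	new_tb = tb + offset;

	fake_tb += 0x20;			/* time passes; the low 24 bits wrap */
	tbu40_write(new_tb);			/* mtspr(SPRN_TBU40, new_tb) */

	tb = fake_tb;				/* mftb() readback */
	if ((tb & 0xffffff) < (new_tb & 0xffffff))
		tbu40_write(new_tb + 0x1000000);	/* account for the missed carry */

	/* Prints 0x123457000010: the old tb plus offset, carry applied */
	printf("guest tb = %#" PRIx64 "\n", fake_tb);
	return 0;
}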
 arch/powerpc/kvm/book3s_hv_p9_entry.c | 36 +++++++++++++--------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 814b0dfd590f..e7793bb806eb 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -215,15 +215,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
        vcpu->arch.ceded = 0;
 
-       if (vc->tb_offset) {
-               u64 new_tb = tb + vc->tb_offset;
-               mtspr(SPRN_TBU40, new_tb);
-               tb = mftb();
-               if ((tb & 0xffffff) < (new_tb & 0xffffff))
-                       mtspr(SPRN_TBU40, new_tb + 0x1000000);
-               vc->tb_offset_applied = vc->tb_offset;
-       }
-
        /* Could avoid mfmsr by passing around, but probably no big deal */
        msr = mfmsr();
 
@@ -238,6 +229,15 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
                host_dawrx1 = mfspr(SPRN_DAWRX1);
        }
 
+       if (vc->tb_offset) {
+               u64 new_tb = tb + vc->tb_offset;
+               mtspr(SPRN_TBU40, new_tb);
+               tb = mftb();
+               if ((tb & 0xffffff) < (new_tb & 0xffffff))
+                       mtspr(SPRN_TBU40, new_tb + 0x1000000);
+               vc->tb_offset_applied = vc->tb_offset;
+       }
+
        if (vc->pcr)
                mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
        mtspr(SPRN_DPDES, vc->dpdes);
@@ -469,6 +469,15 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
        tb = mftb();
        vcpu->arch.dec_expires = dec + tb;
 
+       if (vc->tb_offset_applied) {
+               u64 new_tb = tb - vc->tb_offset_applied;
+               mtspr(SPRN_TBU40, new_tb);
+               tb = mftb();
+               if ((tb & 0xffffff) < (new_tb & 0xffffff))
+                       mtspr(SPRN_TBU40, new_tb + 0x1000000);
+               vc->tb_offset_applied = 0;
+       }
+
        /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
        mtspr(SPRN_PSSCR, host_psscr |
              (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
@@ -503,15 +512,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
        if (vc->pcr)
                mtspr(SPRN_PCR, PCR_MASK);
 
-       if (vc->tb_offset_applied) {
-               u64 new_tb = mftb() - vc->tb_offset_applied;
-               mtspr(SPRN_TBU40, new_tb);
-               tb = mftb();
-               if ((tb & 0xffffff) < (new_tb & 0xffffff))
-                       mtspr(SPRN_TBU40, new_tb + 0x1000000);
-               vc->tb_offset_applied = 0;
-       }
-
        /* HDEC must be at least as large as DEC, so decrementer_max fits */
        mtspr(SPRN_HDEC, decrementer_max);
 
-- 
2.23.0
