From: Wanpeng Li <wanpen...@tencent.com>

All the checks in lapic_timer_int_injected() and __kvm_wait_lapic_expire(),
as well as the calls into them, waste CPU cycles when the timer mode is not
tscdeadline. We observe ~1.3% world-switch time overhead with the
kvm-unit-tests/vmexit.flat vmcall test on an AMD server. This patch reduces
the world-switch latency caused by the timer_advance_ns feature when the
timer mode is not tscdeadline by simply moving the check against
apic->lapic_timer.expired_tscdeadline much earlier.
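
For reference, the consolidated guard as it reads with this patch applied
is shown below. For timer modes other than tscdeadline,
expired_tscdeadline remains 0, so the short-circuiting && chain bails out
after a cheap load instead of reaching the costlier checks in
lapic_timer_int_injected():

    void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
    {
            /*
             * expired_tscdeadline is left at 0 for timer modes other
             * than tscdeadline, so the second clause fails immediately
             * and lapic_timer_int_injected() is never evaluated.
             */
            if (lapic_in_kernel(vcpu) &&
                vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
                vcpu->arch.apic->lapic_timer.timer_advance_ns &&
                lapic_timer_int_injected(vcpu))
                    __kvm_wait_lapic_expire(vcpu);
    }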

Signed-off-by: Wanpeng Li <wanpen...@tencent.com>
---
v1 -> v2:
 * move the check against apic->lapic_timer.expired_tscdeadline much earlier
   instead of resetting timer_advance_ns

 arch/x86/kvm/lapic.c   | 11 +++++------
 arch/x86/kvm/svm/svm.c |  4 +---
 arch/x86/kvm/vmx/vmx.c |  4 +---
 3 files changed, 7 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3b32d3b..51ed4f0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1582,9 +1582,6 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
        struct kvm_lapic *apic = vcpu->arch.apic;
        u64 guest_tsc, tsc_deadline;
 
-       if (apic->lapic_timer.expired_tscdeadline == 0)
-               return;
-
        tsc_deadline = apic->lapic_timer.expired_tscdeadline;
        apic->lapic_timer.expired_tscdeadline = 0;
        guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
@@ -1599,7 +1596,10 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
 
 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
 {
-       if (lapic_timer_int_injected(vcpu))
+       if (lapic_in_kernel(vcpu) &&
+           vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
+           vcpu->arch.apic->lapic_timer.timer_advance_ns &&
+           lapic_timer_int_injected(vcpu))
                __kvm_wait_lapic_expire(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
@@ -1635,8 +1635,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
        }
 
        if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
-               if (apic->lapic_timer.timer_advance_ns)
-                       __kvm_wait_lapic_expire(vcpu);
+               kvm_wait_lapic_expire(vcpu);
                kvm_apic_inject_pending_timer_irqs(apic);
                return;
        }
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0194336..19e622a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3456,9 +3456,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
        clgi();
        kvm_load_guest_xsave_state(vcpu);
 
-       if (lapic_in_kernel(vcpu) &&
-               vcpu->arch.apic->lapic_timer.timer_advance_ns)
-               kvm_wait_lapic_expire(vcpu);
+       kvm_wait_lapic_expire(vcpu);
 
        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index a544351..d6e1656 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6800,9 +6800,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (enable_preemption_timer)
                vmx_update_hv_timer(vcpu);
 
-       if (lapic_in_kernel(vcpu) &&
-               vcpu->arch.apic->lapic_timer.timer_advance_ns)
-               kvm_wait_lapic_expire(vcpu);
+       kvm_wait_lapic_expire(vcpu);
 
        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-- 
2.7.4
