From: Sean Christopherson <sean.j.christopher...@intel.com>

Add an option to skip the IRR check in kvm_wait_lapic_expire().  This
will be used by TDX to force the wait when there is an outstanding
notification for a TD, i.e. when a virtual interrupt is being triggered
via posted-interrupt processing.  KVM doesn't emulate PI processing for
a TD, i.e. a bit will never be set in the IRR/ISR, so the default APICv
behavior of querying the IRR doesn't work as intended.

Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
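[Illustration only, not part of this patch: a sketch of how a TDX entry
path might use the new parameter.  tdx_vcpu_run(), tdx_enter_guest() and
pi_has_pending_interrupt() are hypothetical stand-ins for whatever the
TDX series actually provides.]

        /* Hypothetical TDX entry path, for illustration only. */
        static fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu)
        {
                /*
                 * KVM never sets a bit in a TD's IRR/ISR, so force the
                 * wait whenever a posted-interrupt notification is
                 * outstanding instead of relying on
                 * lapic_timer_int_injected().
                 */
                kvm_wait_lapic_expire(vcpu, pi_has_pending_interrupt(vcpu));

                return tdx_enter_guest(vcpu);
        }
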
 arch/x86/kvm/lapic.c   | 6 +++---
 arch/x86/kvm/lapic.h   | 2 +-
 arch/x86/kvm/svm/svm.c | 2 +-
 arch/x86/kvm/vmx/vmx.c | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e6c0aaf4044e..41dce91f5df0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1601,12 +1601,12 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
                adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
 }
 
-void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
+void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu, bool force_wait)
 {
        if (lapic_in_kernel(vcpu) &&
            vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
            vcpu->arch.apic->lapic_timer.timer_advance_ns &&
-           lapic_timer_int_injected(vcpu))
+           (force_wait || lapic_timer_int_injected(vcpu)))
                __kvm_wait_lapic_expire(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
@@ -1642,7 +1642,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
        }
 
        if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
-               kvm_wait_lapic_expire(vcpu);
+               kvm_wait_lapic_expire(vcpu, false);
                kvm_apic_inject_pending_timer_irqs(apic);
                return;
        }
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 4fb86e3a9dd3..30f036678f5c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -237,7 +237,7 @@ static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
 
 bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
 
-void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu);
+void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu, bool force_wait);
 
 void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
                              unsigned long *vcpu_bitmap);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 15836446a9b8..8be23240c74f 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3580,7 +3580,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
        clgi();
        kvm_load_guest_xsave_state(vcpu);
 
-       kvm_wait_lapic_expire(vcpu);
+       kvm_wait_lapic_expire(vcpu, false);
 
        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 3559b51f566d..deeec105e963 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6720,7 +6720,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (enable_preemption_timer)
                vmx_update_hv_timer(vcpu);
 
-       kvm_wait_lapic_expire(vcpu);
+       kvm_wait_lapic_expire(vcpu, false);
 
        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
-- 
2.17.1
