Add a return value to __kvm_apic_update_irr/kvm_apic_update_irr: the highest
vector pending in the IRR after the PIR bits have been merged in, or -1 if
the IRR is empty.  While at it, replace fls(*reg) - 1 in find_highest_vector
with the equivalent __fls(*reg), since the word is known to be non-zero there.

Also move vmx_sync_pir_to_irr below vmx_hwapic_irr_update; the function body
is unchanged.
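
For illustration only, a standalone userspace sketch (not part of the patch)
of the new return-value semantics: merge the PIR bits into the IRR and report
the highest pending vector, or -1 when the IRR is empty.  The update_irr and
fls32 names are made up for the example, and a plain store stands in for the
kernel's atomic xchg() on the PIR:

#include <stdint.h>
#include <stdio.h>

#define APIC_VECTORS_PER_REG 32

/* Userspace stand-in for the kernel's __fls() on a non-zero 32-bit word. */
static int fls32(uint32_t val)
{
        return 31 - __builtin_clz(val);
}

/* Merge pir[] into irr[] and return the highest pending vector, or -1. */
static int update_irr(uint32_t pir[8], uint32_t irr[8])
{
        uint32_t i, vec, pir_val, irr_val;
        int max_irr = -1;

        for (i = vec = 0; i <= 7; i++, vec += APIC_VECTORS_PER_REG) {
                pir_val = pir[i];
                irr_val = irr[i];
                if (pir_val) {
                        pir[i] = 0;      /* the real code uses xchg() here */
                        irr_val |= pir_val;
                        irr[i] = irr_val;
                }
                if (irr_val)
                        max_irr = fls32(irr_val) + vec;
        }

        return max_irr;
}

int main(void)
{
        uint32_t pir[8] = { 0 }, irr[8] = { 0 };

        irr[0] = 1u << 9;       /* vector 9 already pending in the IRR  */
        pir[1] = 1u << 3;       /* vector 35 posted, not yet in the IRR */

        printf("max_irr = %d\n", update_irr(pir, irr));  /* prints 35 */
        return 0;
}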

Signed-off-by: Paolo Bonzini <[email protected]>
---
 arch/x86/kvm/lapic.c | 25 +++++++++++++++++--------
 arch/x86/kvm/lapic.h |  4 ++--
 arch/x86/kvm/vmx.c   | 32 ++++++++++++++++----------------
 3 files changed, 35 insertions(+), 26 deletions(-)

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5fa546e27b7e..4c76c602576e 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -341,7 +341,7 @@ static int find_highest_vector(void *bitmap)
             vec >= 0; vec -= APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                if (*reg)
-                       return fls(*reg) - 1 + vec;
+                       return __fls(*reg) + vec;
        }
 
        return -1;
@@ -361,27 +361,36 @@ static u8 count_vectors(void *bitmap)
        return count;
 }
 
-void __kvm_apic_update_irr(u32 *pir, void *regs)
+int __kvm_apic_update_irr(u32 *pir, void *regs)
 {
-       u32 i, pir_val;
+       u32 i, vec;
+       u32 pir_val, irr_val;
+       int max_irr = -1;
 
-       for (i = 0; i <= 7; i++) {
+       for (i = vec = 0; i <= 7; i++, vec += 32) {
                pir_val = READ_ONCE(pir[i]);
+               irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
                if (pir_val) {
-                       pir_val = xchg(&pir[i], 0);
-                       *((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
+                       irr_val |= xchg(&pir[i], 0);
+                       *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
                }
+               if (irr_val)
+                       max_irr = __fls(irr_val) + vec;
        }
+
+       return max_irr;
 }
 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
 
-void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
+       int max_irr;
 
-       __kvm_apic_update_irr(pir, apic->regs);
+       max_irr = __kvm_apic_update_irr(pir, apic->regs);
 
        kvm_make_request(KVM_REQ_EVENT, vcpu);
+       return max_irr;
 }
 EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
 
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 5b5b1ba644cb..8aa54fdc43d1 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -71,8 +71,8 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                           int short_hand, unsigned int dest, int dest_mode);
 
-void __kvm_apic_update_irr(u32 *pir, void *regs);
-void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
+int __kvm_apic_update_irr(u32 *pir, void *regs);
+int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
 void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
                     struct dest_map *dest_map);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 661956caf162..eab8ab023705 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5068,22 +5068,6 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
                kvm_vcpu_kick(vcpu);
 }
 
-static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
-{
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-       if (!pi_test_on(&vmx->pi_desc))
-               return;
-
-       pi_clear_on(&vmx->pi_desc);
-       /*
-        * IOMMU can write to PIR.ON, so the barrier matters even on UP.
-        * But on x86 this is just a compiler barrier anyway.
-        */
-       smp_mb__after_atomic();
-       kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
-}
-
 /*
  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
  * will not change in the lifetime of the guest.
@@ -8750,6 +8734,22 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
        }
 }
 
+static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (!pi_test_on(&vmx->pi_desc))
+               return;
+
+       pi_clear_on(&vmx->pi_desc);
+       /*
+        * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+        * But on x86 this is just a compiler barrier anyway.
+        */
+       smp_mb__after_atomic();
+       kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
        if (!kvm_vcpu_apicv_active(vcpu))
-- 
1.8.3.1

