The code can be simplified a little by factoring the IS_ERR_OR_NULL()
check out of the platform-specific handle_irq() implementations into
do_IRQ(), and by inlining the remaining call to
generic_handle_irq_desc() on 64-bit systems.

Signed-off-by: Heiner Kallweit <[email protected]>
---
v2:
- add "likely" to the if clause and reorder it
- for 64-bit, remove handle_irq() and inline the call to generic_handle_irq_desc()
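
For reviewers, the resulting dispatch in do_IRQ() boils down to roughly
the following sketch (taken from the second hunk below; the
spurious-vector error path is elided):

	desc = __this_cpu_read(vector_irq[vector]);
	if (likely(!IS_ERR_OR_NULL(desc))) {
#ifdef CONFIG_X86_32
		handle_irq(desc, regs);		/* may run the handler on the irq stack */
#else
		generic_handle_irq_desc(desc);	/* call the flow handler directly */
#endif
	} else {
		/* spurious vector: ack + warn, unchanged, see diff */
	}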
---
 arch/x86/include/asm/irq.h | 2 +-
 arch/x86/kernel/irq.c      | 9 +++++++--
 arch/x86/kernel/irq_32.c   | 7 +------
 arch/x86/kernel/irq_64.c   | 9 ---------
 4 files changed, 9 insertions(+), 18 deletions(-)

diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 8f95686ec..a176f6165 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -34,7 +34,7 @@ extern __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs);
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 
-extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
+extern void handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
 
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 4215653f8..f1c8f350d 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -243,8 +243,13 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
        desc = __this_cpu_read(vector_irq[vector]);
-
-       if (!handle_irq(desc, regs)) {
+       if (likely(!IS_ERR_OR_NULL(desc))) {
+#ifdef CONFIG_X86_32
+               handle_irq(desc, regs);
+#else
+               generic_handle_irq_desc(desc);
+#endif
+       } else {
                ack_APIC_irq();
 
                if (desc != VECTOR_RETRIGGERED && desc != VECTOR_SHUTDOWN) {
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index fc34816c6..a759ca97c 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -148,18 +148,13 @@ void do_softirq_own_stack(void)
        call_on_stack(__do_softirq, isp);
 }
 
-bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+void handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
        int overflow = check_stack_overflow();
 
-       if (IS_ERR_OR_NULL(desc))
-               return false;
-
        if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                generic_handle_irq_desc(desc);
        }
-
-       return true;
 }
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 6bf6517a0..12df3a4ab 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -26,15 +26,6 @@
 DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
 DECLARE_INIT_PER_CPU(irq_stack_backing_store);
 
-bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
-{
-       if (IS_ERR_OR_NULL(desc))
-               return false;
-
-       generic_handle_irq_desc(desc);
-       return true;
-}
-
 #ifdef CONFIG_VMAP_STACK
 /*
  * VMAP the backing store with guard pages
-- 
2.22.1

