From: Thomas Gleixner <t...@linutronix.de>

The recursion protection for hard interrupt stacks is an unsigned int
per-CPU variable named irq_count, initialized to -1.

The irq stack switching is only done when this variable is -1, which
creates worse code than just checking for 0. When the stack switch happens,
the code uses this_cpu_add/sub(1), but there is no reason to do so: plain
writes are sufficient. This is a historical leftover from the low-level ASM
code, which used inc and jz to make the decision.
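
As an illustration only, here is a stand-alone sketch of the before/after
pattern. It uses plain static variables instead of the kernel's per-CPU
accessors, folds the caller's "already on the irq stack?" check into one
function, and calls func() directly instead of switching stacks, so apart
from the irq_count/hardirq_stack_inuse names everything below is made up:

  #include <stdbool.h>
  #include <stdio.h>

  /* Old scheme: unsigned counter where -1 means "irq stack is free". */
  static unsigned int irq_count = -1;
  /* New scheme: bool where false means "irq stack is free". */
  static bool hardirq_stack_inuse;

  static void payload(void)
  {
          puts("running on the irq stack");
  }

  static void run_old(void (*func)(void))
  {
          if (irq_count != -1) {          /* 32-bit compare against -1 */
                  func();                 /* already on the irq stack */
                  return;
          }
          irq_count += 1;                 /* this_cpu_add(irq_count, 1) */
          func();                         /* real code switches stacks here */
          irq_count -= 1;                 /* this_cpu_sub(irq_count, 1) */
  }

  static void run_new(void (*func)(void))
  {
          if (hardirq_stack_inuse) {      /* plain test against zero */
                  func();
                  return;
          }
          /*
           * The guard only ever toggles between the two states, so a
           * plain store is sufficient and no add/sub is needed.
           */
          hardirq_stack_inuse = true;
          func();
          hardirq_stack_inuse = false;
  }

  int main(void)
  {
          run_old(payload);
          run_new(payload);
          return 0;
  }

The bool variant lets the compiler emit a byte store and a test against
zero instead of the larger compare against -1, which is the code-size
argument above.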

Rename it to hardirq_stack_inuse, make it a bool and use plain stores.

Signed-off-by: Thomas Gleixner <t...@linutronix.de>
Reviewed-by: Kees Cook <keesc...@chromium.org>

---
 arch/x86/include/asm/irq_stack.h |   14 +++++++-------
 arch/x86/include/asm/processor.h |    2 +-
 arch/x86/kernel/cpu/common.c     |    2 +-
 arch/x86/kernel/process_64.c     |    2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -9,7 +9,7 @@
 #ifdef CONFIG_X86_64
 static __always_inline bool irqstack_active(void)
 {
-       return __this_cpu_read(irq_count) != -1;
+       return __this_cpu_read(hardirq_stack_inuse);
 }
 
 void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
@@ -22,9 +22,9 @@ static __always_inline void __run_on_irq
 {
        void *tos = __this_cpu_read(hardirq_stack_ptr);
 
-       __this_cpu_add(irq_count, 1);
+       __this_cpu_write(hardirq_stack_inuse, true);
        asm_call_on_stack(tos - 8, func, NULL);
-       __this_cpu_sub(irq_count, 1);
+       __this_cpu_write(hardirq_stack_inuse, false);
 }
 
 static __always_inline void
@@ -33,9 +33,9 @@ static __always_inline void
 {
        void *tos = __this_cpu_read(hardirq_stack_ptr);
 
-       __this_cpu_add(irq_count, 1);
+       __this_cpu_write(hardirq_stack_inuse, true);
        asm_call_sysvec_on_stack(tos - 8, func, regs);
-       __this_cpu_sub(irq_count, 1);
+       __this_cpu_write(hardirq_stack_inuse, false);
 }
 
 static __always_inline void
@@ -44,9 +44,9 @@ static __always_inline void
 {
        void *tos = __this_cpu_read(hardirq_stack_ptr);
 
-       __this_cpu_add(irq_count, 1);
+       __this_cpu_write(hardirq_stack_inuse, true);
        asm_call_irq_on_stack(tos - 8, func, desc);
-       __this_cpu_sub(irq_count, 1);
+       __this_cpu_write(hardirq_stack_inuse, false);
 }
 
 #else /* CONFIG_X86_64 */
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -454,7 +454,7 @@ static inline unsigned long cpu_kernelmo
        return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
 }
 
-DECLARE_PER_CPU(unsigned int, irq_count);
+DECLARE_PER_CPU(bool, hardirq_stack_inuse);
 extern asmlinkage void ignore_sysret(void);
 
 /* Save actual FS/GS selectors and bases to current->thread */
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1740,7 +1740,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
 EXPORT_PER_CPU_SYMBOL(current_task);
 
 DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
+DEFINE_PER_CPU(bool, hardirq_stack_inuse);
 
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -539,7 +539,7 @@ void compat_start_thread(struct pt_regs
        int cpu = smp_processor_id();
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
-                    this_cpu_read(irq_count) != -1);
+                    this_cpu_read(hardirq_stack_inuse));
 
        if (!test_thread_flag(TIF_NEED_FPU_LOAD))
                switch_fpu_prepare(prev_fpu, cpu);

