For reasons unknown, the x86_64 irq stack starts at an offset 64 bytes
from the end of the page.  At least make that explicit.

FIXME: Can we just remove the 64 byte gap?  If not, at least document
why.

Signed-off-by: Josh Poimboeuf <jpoim...@redhat.com>
---
 arch/x86/include/asm/page_64_types.h | 19 +++++++++++--------
 arch/x86/kernel/cpu/common.c         |  2 +-
 arch/x86/kernel/dumpstack_64.c       |  8 +++-----
 arch/x86/kernel/setup_percpu.c       |  2 +-
 4 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 9215e05..6256baf 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -12,17 +12,20 @@
 #endif
 
 #define THREAD_SIZE_ORDER      (2 + KASAN_STACK_ORDER)
-#define THREAD_SIZE  (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define CURRENT_MASK (~(THREAD_SIZE - 1))
+#define THREAD_SIZE            (PAGE_SIZE << THREAD_SIZE_ORDER)
+#define CURRENT_MASK           (~(THREAD_SIZE - 1))
 
-#define EXCEPTION_STACK_ORDER (0 + KASAN_STACK_ORDER)
-#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+#define EXCEPTION_STACK_ORDER  (0 + KASAN_STACK_ORDER)
+#define EXCEPTION_STKSZ                (PAGE_SIZE << EXCEPTION_STACK_ORDER)
 
-#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
-#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
+#define DEBUG_STACK_ORDER      (EXCEPTION_STACK_ORDER + 1)
+#define DEBUG_STKSZ            (PAGE_SIZE << DEBUG_STACK_ORDER)
 
-#define IRQ_STACK_ORDER (2 + KASAN_STACK_ORDER)
-#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+#define IRQ_STACK_ORDER                (2 + KASAN_STACK_ORDER)
+#define IRQ_STACK_SIZE         (PAGE_SIZE << IRQ_STACK_ORDER)
+
+/* FIXME: why? */
+#define IRQ_USABLE_STACK_SIZE  (IRQ_STACK_SIZE - 64)
 
 #define DOUBLEFAULT_STACK 1
 #define NMI_STACK 2
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 809eda0..8f3f7a4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1281,7 +1281,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
 EXPORT_PER_CPU_SYMBOL(current_task);
 
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
-       init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
+       init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_USABLE_STACK_SIZE;
 
 DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 55cc88f..6a2d14e 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -103,9 +103,6 @@ in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
        return (stack >= irq_stack && stack < irq_stack_end);
 }
 
-static const unsigned long irq_stack_size =
-       (IRQ_STACK_SIZE - 64) / sizeof(unsigned long);
-
 enum stack_type {
        STACK_IS_UNKNOWN,
        STACK_IS_NORMAL,
@@ -133,7 +130,7 @@ analyze_stack(int cpu, struct task_struct *task, unsigned long *stack,
                return STACK_IS_NORMAL;
 
        *stack_end = irq_stack;
-       irq_stack = irq_stack - irq_stack_size;
+       irq_stack -= (IRQ_USABLE_STACK_SIZE / sizeof(long));
 
        if (in_irq_stack(stack, irq_stack, *stack_end))
                return STACK_IS_IRQ;
@@ -246,7 +243,8 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
        cpu = smp_processor_id();
 
        irq_stack_end   = (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
-       irq_stack       = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
+       irq_stack       = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) -
+                         IRQ_USABLE_STACK_SIZE);
 
        sp = sp ? : get_stack_pointer(task, regs);
 
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index e4fcb87..043454f 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -244,7 +244,7 @@ void __init setup_per_cpu_areas(void)
 #ifdef CONFIG_X86_64
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
-                       IRQ_STACK_SIZE - 64;
+                       IRQ_USABLE_STACK_SIZE;
 #endif
 #ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
-- 
2.7.4

Reply via email to