call_do_irq() and call_do_softirq() are simple enough to be
worth inlining.

Inlining them avoids an mflr/mtlr pair plus a save/reload on stack.
It also allows GCC to keep the saved ksp_limit in a nonvolatile reg.

This is inspired by the s390 arch. Several other arches do more or
less the same. The way the sparc arch does it seems odd, though.

Signed-off-by: Christophe Leroy <christophe.le...@c-s.fr>
Reviewed-by: Segher Boessenkool <seg...@kernel.crashing.org>

---
v2: no change.
v3: no change.
v4:
- comment reminding the purpose of the inline asm block.
- added r2 as clobbered reg
v5:
- Limiting the change to PPC32 for now.
- removed r2 from the clobbered regs list (on PPC32 r2 points to current all 
the time)
- Removed patch 1 and merged ksp_limit handling in here.
v6:
- rebased after removal of ksp_limit
---
 arch/powerpc/include/asm/irq.h |  2 ++
 arch/powerpc/kernel/irq.c      | 34 ++++++++++++++++++++++++++++++++++
 arch/powerpc/kernel/misc_32.S  | 25 -------------------------
 3 files changed, 36 insertions(+), 25 deletions(-)

diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 814dfab7e392..e4a92f0b4ad4 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -56,8 +56,10 @@ extern void *mcheckirq_ctx[NR_CPUS];
 extern void *hardirq_ctx[NR_CPUS];
 extern void *softirq_ctx[NR_CPUS];
 
+#ifdef CONFIG_PPC64
 void call_do_softirq(void *sp);
 void call_do_irq(struct pt_regs *regs, void *sp);
+#endif
 extern void do_IRQ(struct pt_regs *regs);
 extern void __init init_IRQ(void);
 extern void __do_irq(struct pt_regs *regs);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index add67498c126..4690e5270806 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -611,6 +611,40 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
+#ifdef CONFIG_PPC32
+static inline void call_do_softirq(const void *sp)
+{
+       register unsigned long ret asm("r3");
+
+       /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
+       asm volatile(
+               "       "PPC_STLU"      1, %2(%1);\n"
+               "       mr              1, %1;\n"
+               "       bl              %3;\n"
+               "       "PPC_LL"        1, 0(1);\n" :
+               "=r"(ret) :
+               "b"(sp), "i"(THREAD_SIZE - STACK_FRAME_OVERHEAD), 
"i"(__do_softirq) :
+               "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6", "cr7",
+               "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+}
+
+static inline void call_do_irq(struct pt_regs *regs, void *sp)
+{
+       register unsigned long r3 asm("r3") = (unsigned long)regs;
+
+       /* Temporarily switch r1 to sp, call __do_irq() then restore r1 */
+       asm volatile(
+               "       "PPC_STLU"      1, %2(%1);\n"
+               "       mr              1, %1;\n"
+               "       bl              %3;\n"
+               "       "PPC_LL"        1, 0(1);\n" :
+               "+r"(r3) :
+               "b"(sp), "i"(THREAD_SIZE - STACK_FRAME_OVERHEAD), "i"(__do_irq) 
:
+               "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6", "cr7",
+               "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12");
+}
+#endif
+
 void __do_irq(struct pt_regs *regs)
 {
        unsigned int irq;
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index bb5995fa6884..341a3cd199cb 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -27,31 +27,6 @@
 
        .text
 
-_GLOBAL(call_do_softirq)
-       mflr    r0
-       stw     r0,4(r1)
-       stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
-       mr      r1,r3
-       bl      __do_softirq
-       lwz     r1,0(r1)
-       lwz     r0,4(r1)
-       mtlr    r0
-       blr
-
-/*
- * void call_do_irq(struct pt_regs *regs, void *sp);
- */
-_GLOBAL(call_do_irq)
-       mflr    r0
-       stw     r0,4(r1)
-       stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
-       mr      r1,r4
-       bl      __do_irq
-       lwz     r1,0(r1)
-       lwz     r0,4(r1)
-       mtlr    r0
-       blr
-
 /*
  * This returns the high 64 bits of the product of two 64-bit numbers.
  */
-- 
2.13.3

Reply via email to