The scv instruction causes an interrupt which can enter the kernel with
MSR[EE]=1, thus allowing interrupts to hit at any time. These must not
be taken as normal interrupts, because they come from MSR[PR]=0 context,
and yet the kernel stack is not yet set up and r13 is not set to the
PACA.

Treat this as a soft-masked interrupt regardless of the soft-masked
state. This does not affect behaviour yet, because currently all
interrupts are taken with MSR[EE]=0.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/kernel/exceptions-64s.S | 34 +++++++++++++++++++++++++---
 1 file changed, 31 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64s.S 
b/arch/powerpc/kernel/exceptions-64s.S
index 1bccc869ebd3..07e00f690730 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -492,10 +492,33 @@ DEFINE_FIXED_SYMBOL(\name\()_common_virt)
        .endif /* IVIRT */
 .endm
 
+#define LOAD_IMM(reg, expr)                    \
+       lis     reg,(expr)@highest;             \
+       ori     reg,reg,(expr)@higher;          \
+       rldicr  reg,reg,32,31;                  \
+       oris    reg,reg,(expr)@__AS_ATHIGH;     \
+       ori     reg,reg,(expr)@l
+
 .macro __GEN_COMMON_BODY name
        .if IMASK
+               .if ! ISTACK
+               .error "No support for masked interrupt to use custom stack"
+               .endif
+
+               /* If coming from user, skip soft-mask tests. */
+               andi.   r10,r12,MSR_PR
+               bne     2f
+
+               /* Kernel code running below __end_interrupts is implicitly
+                * soft-masked */
+               LOAD_HANDLER(r10, __end_interrupts)
+               cmpd    r11,r10
+               li      r10,IMASK
+               blt-    1f
+
+               /* Test the soft mask state against our interrupt's bit */
                lbz     r10,PACAIRQSOFTMASK(r13)
-               andi.   r10,r10,IMASK
+1:             andi.   r10,r10,IMASK
                /* Associate vector numbers with bits in paca->irq_happened */
                .if IVEC == 0x500 || IVEC == 0xea0
                li      r10,PACA_IRQ_EE
@@ -526,7 +549,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_virt)
 
        .if ISTACK
        andi.   r10,r12,MSR_PR          /* See if coming from user      */
-       mr      r10,r1                  /* Save r1                      */
+2:     mr      r10,r1                  /* Save r1                      */
        subi    r1,r1,INT_FRAME_SIZE    /* alloc frame on kernel stack  */
        beq-    100f
        ld      r1,PACAKSAVE(r13)       /* kernel stack to use          */
@@ -2771,7 +2794,8 @@ masked_interrupt:
        ld      r10,PACA_EXGEN+EX_R10(r13)
        ld      r11,PACA_EXGEN+EX_R11(r13)
        ld      r12,PACA_EXGEN+EX_R12(r13)
-       /* returns to kernel where r13 must be set up, so don't restore it */
+       ld      r13,PACA_EXGEN+EX_R13(r13)
+       /* May return to masked low address where r13 is not set up */
        .if \hsrr
        HRFI_TO_KERNEL
        .else
@@ -2930,6 +2954,10 @@ EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
 
 USE_FIXED_SECTION(virt_trampolines)
        /*
+        * All code below __end_interrupts is treated as soft-masked. If
+        * any code runs here with MSR[EE]=1, it must then cope with pending
+        * soft interrupt being raised (i.e., by ensuring it is replayed).
+        *
         * The __end_interrupts marker must be past the out-of-line (OOL)
         * handlers, so that they are copied to real address 0x100 when running
         * a relocatable kernel. This ensures they can be reached from the short
-- 
2.23.0

Reply via email to