Re: [PATCH v2 10/12] powerpc/entry32: Blacklist exception entry points for kprobe.

2020-03-31, Naveen N. Rao

Christophe Leroy wrote:

kprobe does not handle events happening in real mode.

As exception entry points are running with MMU disabled,
blacklist them.

The handling of TLF_NAPPING and TLF_SLEEPING is moved before the
CONFIG_TRACE_IRQFLAGS section which contains 'reenable_mmu', because
from there on kprobes become possible as the kernel then runs with the
MMU enabled.
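
As a side note (not from this patch): for C code, the same blacklisting
is done with the NOKPROBE_SYMBOL() macro from <linux/kprobes.h>. A
minimal, hypothetical sketch (the function name below is made up):

#include <linux/kprobes.h>

/* Hypothetical helper that may run in real mode (MMU disabled). */
static void example_real_mode_helper(void)
{
	/* must not be probed: kprobes cannot handle real-mode events */
}
/*
 * Record the function in the kprobe blacklist, much like
 * _ASM_NOKPROBE_SYMBOL() does for the assembly entry points here.
 */
NOKPROBE_SYMBOL(example_real_mode_helper);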

Signed-off-by: Christophe Leroy 
Acked-by: Naveen N. Rao 
---
v2: Moved TLF_NAPPING and TLF_SLEEPING handling
---
 arch/powerpc/kernel/entry_32.S | 37 ++++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 94f78c03cb79..215aa3a6d4f7 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -51,6 +51,7 @@ mcheck_transfer_to_handler:
mfspr   r0,SPRN_DSRR1
stw r0,_DSRR1(r11)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)

.globl  debug_transfer_to_handler
 debug_transfer_to_handler:
@@ -59,6 +60,7 @@ debug_transfer_to_handler:
mfspr   r0,SPRN_CSRR1
stw r0,_CSRR1(r11)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)

.globl  crit_transfer_to_handler
 crit_transfer_to_handler:
@@ -94,6 +96,7 @@ crit_transfer_to_handler:
rlwinm  r0,r1,0,0,(31 - THREAD_SHIFT)
stw r0,KSP_LIMIT(r8)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
 #endif

 #ifdef CONFIG_40x
@@ -115,6 +118,7 @@ crit_transfer_to_handler:
rlwinm  r0,r1,0,0,(31 - THREAD_SHIFT)
stw r0,KSP_LIMIT(r8)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
 #endif

 /*
@@ -127,6 +131,7 @@ crit_transfer_to_handler:
.globl  transfer_to_handler_full
 transfer_to_handler_full:
SAVE_NVGPRS(r11)
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
/* fall through */

.globl  transfer_to_handler
@@ -227,6 +232,23 @@ transfer_to_handler_cont:
SYNC
RFI /* jump to handler, enable MMU */

+#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
+4: rlwinm  r12,r12,0,~_TLF_NAPPING
+   stw r12,TI_LOCAL_FLAGS(r2)
+   b   power_save_ppc32_restore
+
+7: rlwinm  r12,r12,0,~_TLF_SLEEPING
+   stw r12,TI_LOCAL_FLAGS(r2)
+   lwz r9,_MSR(r11)/* if sleeping, clear MSR.EE */
+   rlwinm  r9,r9,0,~MSR_EE
+   lwz r12,_LINK(r11)  /* and return to address in LR */
+   kuap_restore r11, r2, r3, r4, r5
+   lwz r2, GPR2(r11)
+   b   fast_exception_return
+#endif
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
+


A very minor nit is that the above NOKPROBE annotation actually covers 
the block of code below, from the label '1:' down to 'reenable_mmu', but 
that isn't obvious from the code. Splitting off 'reenable_mmu' would have 
made that clear.


You don't have to fix that, though -- a kprobe still won't be allowed 
there, and anyone interested should be able to look up this mail thread.
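
To make that concrete (paraphrasing the generic kprobes code, not
quoting it): _ASM_NOKPROBE_SYMBOL() only records the address of the
annotated symbol in the _kprobe_blacklist section; the kprobes core then
expands each recorded address to the whole containing symbol via
kallsyms, so the entry for transfer_to_handler_cont runs up to the next
symbol (reenable_mmu here), local labels like '1:' included. Roughly:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Simplified stand-in for the kprobes core's blacklist entries. */
struct blacklist_range {
	struct list_head list;
	unsigned long start_addr;
	unsigned long end_addr;
};

static LIST_HEAD(blacklist_ranges);

/* Expand one address from _kprobe_blacklist to a full symbol range. */
static int __init add_blacklisted_symbol(unsigned long addr)
{
	struct blacklist_range *ent;
	unsigned long size = 0, offset = 0;

	if (!kallsyms_lookup_size_offset(addr, &size, &offset))
		return -EINVAL;

	ent = kzalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;

	ent->start_addr = addr - offset;	/* start of containing symbol */
	ent->end_addr = addr - offset + size;	/* runs up to the next symbol */
	list_add_tail(&ent->list, &blacklist_ranges);
	return 0;
}

/* Reject any probe whose address falls inside a blacklisted range. */
static bool addr_is_blacklisted(unsigned long addr)
{
	struct blacklist_range *ent;

	list_for_each_entry(ent, &blacklist_ranges, list)
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	return false;
}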



- Naveen



[PATCH v2 10/12] powerpc/entry32: Blacklist exception entry points for kprobe.

2020-03-31, Christophe Leroy
kprobe does not handle events happening in real mode.

As exception entry points are running with MMU disabled,
blacklist them.

The handling of TLF_NAPPING and TLF_SLEEPING is moved before the
CONFIG_TRACE_IRQFLAGS section which contains 'reenable_mmu', because
from there on kprobes become possible as the kernel then runs with the
MMU enabled.

Signed-off-by: Christophe Leroy 
Acked-by: Naveen N. Rao 
---
v2: Moved TLF_NAPPING and TLF_SLEEPING handling
---
 arch/powerpc/kernel/entry_32.S | 37 ++++++++++++++++++++++---------------
 1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 94f78c03cb79..215aa3a6d4f7 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -51,6 +51,7 @@ mcheck_transfer_to_handler:
mfspr   r0,SPRN_DSRR1
stw r0,_DSRR1(r11)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)
 
.globl  debug_transfer_to_handler
 debug_transfer_to_handler:
@@ -59,6 +60,7 @@ debug_transfer_to_handler:
mfspr   r0,SPRN_CSRR1
stw r0,_CSRR1(r11)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)
 
.globl  crit_transfer_to_handler
 crit_transfer_to_handler:
@@ -94,6 +96,7 @@ crit_transfer_to_handler:
rlwinm  r0,r1,0,0,(31 - THREAD_SHIFT)
stw r0,KSP_LIMIT(r8)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
 #endif
 
 #ifdef CONFIG_40x
@@ -115,6 +118,7 @@ crit_transfer_to_handler:
rlwinm  r0,r1,0,0,(31 - THREAD_SHIFT)
stw r0,KSP_LIMIT(r8)
/* fall through */
+_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
 #endif
 
 /*
@@ -127,6 +131,7 @@ crit_transfer_to_handler:
.globl  transfer_to_handler_full
 transfer_to_handler_full:
SAVE_NVGPRS(r11)
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
/* fall through */
 
.globl  transfer_to_handler
@@ -227,6 +232,23 @@ transfer_to_handler_cont:
SYNC
RFI /* jump to handler, enable MMU */
 
+#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
+4: rlwinm  r12,r12,0,~_TLF_NAPPING
+   stw r12,TI_LOCAL_FLAGS(r2)
+   b   power_save_ppc32_restore
+
+7: rlwinm  r12,r12,0,~_TLF_SLEEPING
+   stw r12,TI_LOCAL_FLAGS(r2)
+   lwz r9,_MSR(r11)/* if sleeping, clear MSR.EE */
+   rlwinm  r9,r9,0,~MSR_EE
+   lwz r12,_LINK(r11)  /* and return to address in LR */
+   kuap_restore r11, r2, r3, r4, r5
+   lwz r2, GPR2(r11)
+   b   fast_exception_return
+#endif
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
+_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
 * keep interrupts disabled at this point otherwise we might risk
@@ -272,21 +294,6 @@ reenable_mmu:
bctr/* jump to handler */
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
-#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
-4: rlwinm  r12,r12,0,~_TLF_NAPPING
-   stw r12,TI_LOCAL_FLAGS(r2)
-   b   power_save_ppc32_restore
-
-7: rlwinm  r12,r12,0,~_TLF_SLEEPING
-   stw r12,TI_LOCAL_FLAGS(r2)
-   lwz r9,_MSR(r11)/* if sleeping, clear MSR.EE */
-   rlwinm  r9,r9,0,~MSR_EE
-   lwz r12,_LINK(r11)  /* and return to address in LR */
-   kuap_restore r11, r2, r3, r4, r5
-   lwz r2, GPR2(r11)
-   b   fast_exception_return
-#endif
-
 #ifndef CONFIG_VMAP_STACK
 /*
  * On kernel stack overflow, load up an initial stack pointer
-- 
2.25.0