On 40x and 8xx, kernel text is pinned.
On book3s/32, kernel text is mapped by BATs.

Enable instruction translation at the same time as data translation, it
makes things simpler.

MSR_RI can also be set at the same time because srr0/srr1 are already
saved and r1 is set properly.

On booke, translation is always on, so in the end all PPC32
platforms have translation on early.

This reduces the null_syscall benchmark by 13 cycles on 8xx
(296 ==> 283 cycles).

Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
 arch/powerpc/kernel/head_32.h    | 26 +++++++++-----------------
 arch/powerpc/kernel/head_booke.h |  7 ++-----
 2 files changed, 11 insertions(+), 22 deletions(-)

diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index fdc07beab844..4029c51dce5d 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -125,9 +125,13 @@
        lwz     r1,TASK_STACK-THREAD(r12)
        beq-    99f
        addi    r1, r1, THREAD_SIZE - INT_FRAME_SIZE
-       LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR | MSR_RI)) /* can take DTLB miss */
-       mtmsr   r10
-       isync
+       LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)             /* can take exceptions */
+       mtspr   SPRN_SRR1, r10
+       lis     r10, 1f@h
+       ori     r10, r10, 1f@l
+       mtspr   SPRN_SRR0, r10
+       rfi
+1:
        tovirt(r12, r12)
        stw     r11,GPR1(r1)
        stw     r11,0(r1)
@@ -141,9 +145,6 @@
        stw     r10,_CCR(r11)           /* save registers */
 #ifdef CONFIG_40x
        rlwinm  r9,r9,0,14,12           /* clear MSR_WE (necessary?) */
-#else
-       LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */
-       mtmsr   r10                     /* (except for mach check in rtas) */
 #endif
        lis     r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
        stw     r2,GPR2(r11)
@@ -180,8 +181,6 @@
 #endif
 
 3:
-       lis     r11, transfer_to_syscall@h
-       ori     r11, r11, transfer_to_syscall@l
 #ifdef CONFIG_TRACE_IRQFLAGS
        /*
         * If MSR is changing we need to keep interrupts disabled at this point
@@ -193,15 +192,8 @@
 #else
        LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
 #endif
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
-       mtspr   SPRN_NRI, r0
-#endif
-       mtspr   SPRN_SRR1,r10
-       mtspr   SPRN_SRR0,r11
-       rfi                             /* jump to handler, enable MMU */
-#ifdef CONFIG_40x
-       b .     /* Prevent prefetch past rfi */
-#endif
+       mtmsr   r10
+       b       transfer_to_syscall             /* jump to handler */
 99:    b       ret_from_kernel_syscall
 .endm
 
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 706cd9368992..b3c502c503a0 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -157,8 +157,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
        stw     r12,4(r11)
 
 3:
-       lis     r11, transfer_to_syscall@h
-       ori     r11, r11, transfer_to_syscall@l
 #ifdef CONFIG_TRACE_IRQFLAGS
        /*
         * If MSR is changing we need to keep interrupts disabled at this point
@@ -172,9 +170,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
        lis     r10, (MSR_KERNEL | MSR_EE)@h
        ori     r10, r10, (MSR_KERNEL | MSR_EE)@l
 #endif
-       mtspr   SPRN_SRR1,r10
-       mtspr   SPRN_SRR0,r11
-       rfi                             /* jump to handler, enable MMU */
+       mtmsr   r10
+       b       transfer_to_syscall     /* jump to handler */
 99:    b       ret_from_kernel_syscall
 .endm
 
-- 
2.25.0

Reply via email to