Turning the MMU on is a popular sport in the arm64 kernel, and
we do it more than once, or even twice. As we are about to add
even more, let's turn it into a macro.

No expected functional change.

Signed-off-by: Marc Zyngier <m...@kernel.org>
Acked-by: Catalin Marinas <catalin.mari...@arm.com>
Acked-by: David Brazdil <dbraz...@google.com>
---
 arch/arm64/include/asm/assembler.h | 17 +++++++++++++++++
 arch/arm64/kernel/head.S           | 19 ++++---------------
 arch/arm64/mm/proc.S               | 12 +-----------
 3 files changed, 22 insertions(+), 26 deletions(-)

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index bf125c591116..8cded93f99c3 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -675,6 +675,23 @@ USER(\label, ic    ivau, \tmp2)                    // invalidate I line PoU
        .endif
        .endm
 
+/*
+ * Set SCTLR_EL1 to the passed value, and invalidate the local icache
+ * in the process. This is called when setting the MMU on.
+ */
+.macro set_sctlr_el1, reg
+       msr     sctlr_el1, \reg
+       isb
+       /*
+        * Invalidate the local I-cache so that any instructions fetched
+        * speculatively from the PoC are discarded, since they may have
+        * been dynamically patched at the PoU.
+        */
+       ic      iallu
+       dsb     nsh
+       isb
+.endm
+
 /*
  * Check whether to yield to another runnable task from kernel mode NEON code
  * (which runs with preemption disabled).
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a0dc987724ed..28e9735302df 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -703,16 +703,9 @@ SYM_FUNC_START(__enable_mmu)
        offset_ttbr1 x1, x3
        msr     ttbr1_el1, x1                   // load TTBR1
        isb
-       msr     sctlr_el1, x0
-       isb
-       /*
-        * Invalidate the local I-cache so that any instructions fetched
-        * speculatively from the PoC are discarded, since they may have
-        * been dynamically patched at the PoU.
-        */
-       ic      iallu
-       dsb     nsh
-       isb
+
+       set_sctlr_el1   x0
+
        ret
 SYM_FUNC_END(__enable_mmu)
 
@@ -883,11 +876,7 @@ SYM_FUNC_START_LOCAL(__primary_switch)
        tlbi    vmalle1                         // Remove any stale TLB entries
        dsb     nsh
 
-       msr     sctlr_el1, x19                  // re-enable the MMU
-       isb
-       ic      iallu                           // flush instructions fetched
-       dsb     nsh                             // via old mapping
-       isb
+       set_sctlr_el1   x19                     // re-enable the MMU
 
        bl      __relocate_kernel
 #endif
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index ece785477bdc..c967bfd30d2b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -291,17 +291,7 @@ skip_pgd:
        /* We're done: fire up the MMU again */
        mrs     x17, sctlr_el1
        orr     x17, x17, #SCTLR_ELx_M
-       msr     sctlr_el1, x17
-       isb
-
-       /*
-        * Invalidate the local I-cache so that any instructions fetched
-        * speculatively from the PoC are discarded, since they may have
-        * been dynamically patched at the PoU.
-        */
-       ic      iallu
-       dsb     nsh
-       isb
+       set_sctlr_el1   x17
 
        /* Set the flag to zero to indicate that we're all done */
        str     wzr, [flag_ptr]
-- 
2.29.2

Reply via email to