Map linear memory space with 512k and 8M pages whenever
possible.

Three mappings are performed:
- One for kernel text
- One for RO data
- One for the rest

The mappings are kept separate so that their protection can be
updated independently later when STRICT_KERNEL_RWX is used.
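
To make the 512k/8M carving concrete, here is a minimal userspace
sketch of the loop structure used by mmu_mapin_ram_chunk() in the
diff below. SZ_512K, SZ_8M and the ALIGN helpers are redefined
locally and it only prints the pages it would create; it is an
illustration, not kernel code:

#include <stdio.h>

#define SZ_512K 0x80000UL
#define SZ_8M   0x800000UL
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

/* Same three-phase walk as mmu_mapin_ram_chunk(): 512k pages up to
 * the first 8M boundary, 8M pages across the aligned middle, then
 * 512k pages up to top. */
static void map_chunk(unsigned long p, unsigned long top)
{
	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K)
		printf("512k page at %#lx\n", p);
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M)
		printf("  8M page at %#lx\n", p);
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K)
		printf("512k page at %#lx\n", p);
}

int main(void)
{
	map_chunk(SZ_512K, 2 * SZ_8M);	/* hypothetical 512k..16M chunk */
	return 0;
}

A 512k..16M chunk, for instance, gets fifteen 512k pages up to the
8M boundary and a single 8M page above it.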

The ITLB miss handler now needs to also handle huge TLBs, unless
kernel text is pinned (CONFIG_PIN_TLB_TEXT).
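
The rlwimi in that path merges the pte's huge-page bit into the
page-size field of the TWC value. For readers not fluent in rlwimi,
here is a rough C model of `rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K`;
the bit values used are illustrative stand-ins (see pte-8xx.h for the
real definitions), not a claim about the exact layout:

#include <stdint.h>
#include <stdio.h>

#define _PMD_PAGE_512K 0x0004	/* illustrative; see asm/nohash/32/pte-8xx.h */

static uint32_t rotl32(uint32_t x, unsigned int n)
{
	return (x << n) | (x >> (32 - n));
}

/* rlwimi rA, rS, SH, mask: rA = (rotl(rS, SH) & mask) | (rA & ~mask) */
static uint32_t rlwimi(uint32_t ra, uint32_t rs, unsigned int sh, uint32_t mask)
{
	return (rotl32(rs, sh) & mask) | (ra & ~mask);
}

int main(void)
{
	uint32_t pte = 0x0800;	/* hypothetical pte with the huge-page bit set */
	uint32_t twc = 0;

	twc = rlwimi(twc, pte, 32 - 9, _PMD_PAGE_512K);
	printf("page-size bits merged into TWC: %#x\n", twc & _PMD_PAGE_512K);
	return 0;
}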

Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
 arch/powerpc/kernel/head_8xx.S |  4 +--
 arch/powerpc/mm/nohash/8xx.c   | 50 +++++++++++++++++++++++++++++++++-
 2 files changed, 51 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 9a117b9f0998..abb71fad7d6a 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -224,7 +224,7 @@ InstructionTLBMiss:
 3:
        mtcr    r11
 #endif
-#ifdef CONFIG_HUGETLBFS
+#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
        lwz     r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10)        /* Get level 1 entry */
        mtspr   SPRN_MD_TWC, r11
 #else
@@ -234,7 +234,7 @@ InstructionTLBMiss:
 #endif
        mfspr   r10, SPRN_MD_TWC
        lwz     r10, 0(r10)     /* Get the pte */
-#ifdef CONFIG_HUGETLBFS
+#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
        rlwimi  r11, r10, 32 - 9, _PMD_PAGE_512K
        mtspr   SPRN_MI_TWC, r11
 #endif
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index f8fff1fa72e3..ec3ef75895d8 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -127,20 +127,68 @@ void __init mmu_mapin_immr(void)
                                    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
 }
 
+static void __init mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
+                                      pgprot_t prot, bool new)
+{
+       unsigned long v = PAGE_OFFSET + offset;
+       unsigned long p = offset;
+
+       WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
+
+       for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
+               __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+       for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
+               __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
+       for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
+               __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+
+       if (!new)
+               flush_tlb_kernel_range(PAGE_OFFSET + offset, PAGE_OFFSET + top);
+}
+
 unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 {
+       unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+       unsigned long sinittext = __pa(_sinittext);
+       unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
+       unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+       WARN_ON(top < einittext8);
+
        mmu_mapin_immr();
 
-       return 0;
+       if (__map_without_ltlbs)
+               return 0;
+
+       mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
+       mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
+       mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
+
+       if (top > SZ_32M)
+               memblock_set_current_limit(top);
+
+       block_mapped_ram = top;
+
+       return top;
 }
 
 void mmu_mark_initmem_nx(void)
 {
+       unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+       unsigned long sinittext = __pa(_sinittext);
+       unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
+       unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+       mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
+       mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
 void mmu_mark_rodata_ro(void)
 {
+       unsigned long sinittext = __pa(_sinittext);
+
+       mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
 }
 #endif
 
-- 
2.25.0
