KASAN requires early activation of the hash table, before memblock()
functions are available.

This patch implements an early hash_table statically defined in
__initdata.

During early boot, a single page table is used. For hash32, when doing
the final init, one page table is allocated for each PGD entry because
of the _PAGE_HASHPTE flag, which cannot be shared between several
virtual pages.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
 arch/powerpc/kernel/head_32.S         | 40 ++++++++++++++++++++++++++---------
 arch/powerpc/mm/kasan/kasan_init_32.c | 32 ++++++++++++++++++++++++----
 arch/powerpc/mm/mmu_decl.h            |  1 +
 3 files changed, 59 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index e644aab2cf5b..65c9e8819da1 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -160,6 +160,10 @@ __after_mmu_off:
        bl      flush_tlbs
 
        bl      initial_bats
+       bl      load_segment_registers
+#ifdef CONFIG_KASAN
+       bl      early_hash_table
+#endif
 #if defined(CONFIG_BOOTX_TEXT)
        bl      setup_disp_bat
 #endif
@@ -205,7 +209,7 @@ __after_mmu_off:
  */
 turn_on_mmu:
        mfmsr   r0
-       ori     r0,r0,MSR_DR|MSR_IR
+       ori     r0,r0,MSR_DR|MSR_IR|MSR_RI
        mtspr   SPRN_SRR1,r0
        lis     r0,start_here@h
        ori     r0,r0,start_here@l
@@ -881,11 +885,24 @@ _ENTRY(__restore_cpu_setup)
        blr
 #endif /* !defined(CONFIG_PPC_BOOK3S_32) */
 
-
 /*
  * Load stuff into the MMU.  Intended to be called with
  * IR=0 and DR=0.
  */
+#ifdef CONFIG_KASAN
+early_hash_table:
+       sync                    /* Force all PTE updates to finish */
+       isync
+       tlbia                   /* Clear all TLB entries */
+       sync                    /* wait for tlbia/tlbie to finish */
+       TLBSYNC                 /* ... on all CPUs */
+       /* Load the SDR1 register (hash table base & size) */
+       lis     r6, early_hash - PAGE_OFFSET@h
+       ori     r6, r6, 3       /* 256kB table */
+       mtspr   SPRN_SDR1, r6
+       blr
+#endif
+
 load_up_mmu:
        sync                    /* Force all PTE updates to finish */
        isync
@@ -897,14 +914,6 @@ load_up_mmu:
        tophys(r6,r6)
        lwz     r6,_SDR1@l(r6)
        mtspr   SPRN_SDR1,r6
-       li      r0,16           /* load up segment register values */
-       mtctr   r0              /* for context 0 */
-       lis     r3,0x2000       /* Ku = 1, VSID = 0 */
-       li      r4,0
-3:     mtsrin  r3,r4
-       addi    r3,r3,0x111     /* increment VSID */
-       addis   r4,r4,0x1000    /* address of next segment */
-       bdnz    3b
 
 /* Load the BAT registers with the values set up by MMU_init.
    MMU_init takes care of whether we're on a 601 or not. */
@@ -926,6 +935,17 @@ BEGIN_MMU_FTR_SECTION
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
        blr
 
+load_segment_registers:
+       li      r0, 16          /* load up segment register values */
+       mtctr   r0              /* for context 0 */
+       lis     r3, 0x2000      /* Ku = 1, VSID = 0 */
+       li      r4, 0
+3:     mtsrin  r3, r4
+       addi    r3, r3, 0x111   /* increment VSID */
+       addis   r4, r4, 0x1000  /* address of next segment */
+       bdnz    3b
+       blr
+
 /*
  * This is where the main kernel code starts.
  */
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 42f8534ce3ea..8c25c1e8c2c8 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -60,10 +60,13 @@ static int __ref kasan_init_region(void *start, size_t size)
        unsigned long k_cur;
        pmd_t *pmd;
        void *block = NULL;
-       int ret = kasan_init_shadow_page_tables(k_start, k_end);
 
-       if (ret)
-               return ret;
+       if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+               int ret = kasan_init_shadow_page_tables(k_start, k_end);
+
+               if (ret)
+                       return ret;
+       }
 
        if (!slab_is_available())
                block = memblock_alloc(k_end - k_start, PAGE_SIZE);
@@ -94,6 +97,13 @@ void __init kasan_init(void)
        int ret;
        struct memblock_region *reg;
 
+       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+               ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+               if (ret)
+                       panic("kasan: kasan_init_shadow_page_tables() failed");
+       }
+
        for_each_memblock(memory, reg) {
                phys_addr_t base = reg->base;
                phys_addr_t top = min(base + reg->size, total_lowmem);
@@ -132,6 +142,20 @@ void *module_alloc(unsigned long size)
 }
 #endif
 
+#ifdef CONFIG_PPC_BOOK3S_32
+u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
+
+static void __init kasan_early_hash_table(void)
+{
+       modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
+       modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);
+
+       Hash = (struct hash_pte *)early_hash;
+}
+#else
+static void __init kasan_early_hash_table(void) {}
+#endif
+
 void __init kasan_early_init(void)
 {
        unsigned long addr = KASAN_SHADOW_START;
@@ -149,5 +173,5 @@ void __init kasan_early_init(void)
        } while (pmd++, addr = next, addr != end);
 
        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
-               WARN(1, "KASAN not supported on hash 6xx");
+               kasan_early_hash_table();
 }
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d726ff776054..31fce3914ddc 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -106,6 +106,7 @@ extern unsigned int rtas_data, rtas_size;
 struct hash_pte;
 extern struct hash_pte *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;
+extern u8 early_hash[];
 
 #endif /* CONFIG_PPC32 */
 
-- 
2.13.3

Reply via email to