From: David Feng <feng...@phytium.com.cn>

1. Fix a bug in mmu_setup(): the loop variables i and j should be of
   type u64 instead of int.
2. Give mmu_setup() the weak attribute so that other implementations
   can redefine it.
3. Make set_pgtable_section() more generic: it now takes an extra_flags
   argument so callers can pass additional block attributes such as
   PMD_SECT_PXN | PMD_SECT_UXN or PMD_SECT_NS.
4. Give device memory the PXN and UXN attributes. The CPU may
   speculatively prefetch instructions from device memory, but the I/O
   subsystem of some implementations may not support this operation.
   These two attributes prevent that behavior.
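To illustrate point 2, a board or SoC port could override the now-weak
mmu_setup() roughly as sketched below. This is only an illustration, not
part of the patch: the MYBOARD_* base/size macros and the file location
are made-up placeholders, and the TCR value is rebuilt from the
<asm/armv8/mmu.h> macros because VA_BITS and TCR_FLAGS become local to
cache_v8.c with this patch.

/*
 * Hypothetical board file, e.g. board/<vendor>/<board>/board_mmu.c.
 * MYBOARD_DEVICE_* and MYBOARD_DRAM_* are placeholder values, not
 * real hardware addresses.
 */
#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#define MYBOARD_DEVICE_BASE	0x00000000ULL	/* placeholder */
#define MYBOARD_DEVICE_SIZE	0x40000000ULL	/* placeholder, 1 GiB */
#define MYBOARD_DRAM_BASE	0x80000000ULL	/* placeholder */
#define MYBOARD_DRAM_SIZE	0x80000000ULL	/* placeholder, 2 GiB */

void mmu_setup(void)
{
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	u64 i, tcr;
	int el;

	/* Device region: non-cacheable, never executed at any EL */
	for (i = MYBOARD_DEVICE_BASE >> SECTION_SHIFT;
	     i < (MYBOARD_DEVICE_BASE + MYBOARD_DEVICE_SIZE) >> SECTION_SHIFT;
	     i++) {
		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
				    MT_DEVICE_NGNRNE,
				    PMD_SECT_PXN | PMD_SECT_UXN);
	}

	/* DRAM: normal memory, marked non-secure */
	for (i = MYBOARD_DRAM_BASE >> SECTION_SHIFT;
	     i < (MYBOARD_DRAM_BASE + MYBOARD_DRAM_SIZE) >> SECTION_SHIFT;
	     i++) {
		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
				    MT_NORMAL, PMD_SECT_NS);
	}

	/*
	 * TCR_FLAGS and VA_BITS are local to cache_v8.c after this
	 * patch, so an override builds its own TCR value from the
	 * mmu.h macros (64K granule, inner-shareable, WBWA walks,
	 * 42-bit VA/PA).
	 */
	tcr = TCR_TG0_64K | TCR_SHARED_INNER | TCR_ORGN_WBWA |
	      TCR_IRGN_WBWA | TCR_T0SZ(42);

	el = current_el();
	if (el == 1)
		tcr |= TCR_EL1_IPS_BITS_42;
	else if (el == 2)
		tcr |= TCR_EL2_IPS_BITS_42;
	else
		tcr |= TCR_EL3_IPS_BITS_42;

	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, tcr, MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);	/* enable the MMU */
}

Boards without special mapping requirements simply keep the generic weak
implementation unchanged.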
Signed-off-by: David Feng <feng...@phytium.com.cn>
---
 arch/arm/cpu/armv8/cache_v8.c      | 73 +++++++++++++++++++++++-------------
 arch/arm/cpu/armv8/fsl-lsch3/cpu.c | 22 +++++------
 arch/arm/include/asm/armv8/mmu.h   | 71 +++++++++++++++++++----------------
 3 files changed, 95 insertions(+), 71 deletions(-)

diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index e4415e8..023dd23 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -22,55 +22,74 @@ __weak void outer_cache_flush_range(unsigned long start, unsigned long end) {}
 __weak void outer_cache_inval_range(unsigned long start, unsigned long end) {}
 
 #ifndef CONFIG_SYS_DCACHE_OFF
-void set_pgtable_section(u64 *page_table, u64 index, u64 section,
-			 u64 memory_type)
-{
-	u64 value;
-
-	value = section | PMD_TYPE_SECT | PMD_SECT_AF;
-	value |= PMD_ATTRINDX(memory_type);
-	page_table[index] = value;
+/* 42 bits virtual address */
+#define VA_BITS			(42)
+
+/* 42 bits physical address */
+#define TCR_EL1_IPS_BITS	TCR_EL1_IPS_BITS_42
+#define TCR_EL2_IPS_BITS	TCR_EL2_IPS_BITS_42
+#define TCR_EL3_IPS_BITS	TCR_EL3_IPS_BITS_42
+
+/* PTWs cacheable, inner/outer WBWA and inner-shareable */
+#define TCR_FLAGS		(TCR_TG0_64K |		\
+				TCR_SHARED_INNER |	\
+				TCR_ORGN_WBWA |		\
+				TCR_IRGN_WBWA |		\
+				TCR_T0SZ(VA_BITS))
+
+void set_pgtable_section(u64 *page_table, u64 index, u64 output_address,
+			 u64 memory_type, u64 extra_flags)
+{
+	page_table[index] = output_address | PMD_TYPE_SECT | PMD_SECT_AF |
+			    PMD_SECT_SHARED_INNER | PMD_ATTRINDX(memory_type) |
+			    extra_flags;
 }
 
 /* to activate the MMU we need to set up virtual memory */
-static void mmu_setup(void)
+void __weak mmu_setup(void)
 {
-	int i, j, el;
+	int el;
+	u64 i, j, tcr;
 	bd_t *bd = gd->bd;
 	u64 *page_table = (u64 *)gd->arch.tlb_addr;
 
 	/* Setup an identity-mapping for all spaces */
 	for (i = 0; i < (PGTABLE_SIZE >> 3); i++) {
 		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
-				    MT_DEVICE_NGNRNE);
+				    MT_DEVICE_NGNRNE,
+				    PMD_SECT_PXN | PMD_SECT_UXN);
+	}
 
-	/* Setup an identity-mapping for all RAM space */
+	/*
+	 * Setup an identity-mapping for all RAM space.
+	 * Whether an access is secure or non-secure only matters
+	 * when U-Boot runs at EL3 and is mostly implementation
+	 * specific.
+	 */
 	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
-		ulong start = bd->bi_dram[i].start;
-		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;
+		u64 start = bd->bi_dram[i].start;
+		u64 end = bd->bi_dram[i].start + bd->bi_dram[i].size;
 		for (j = start >> SECTION_SHIFT;
 		     j < end >> SECTION_SHIFT; j++) {
 			set_pgtable_section(page_table, j, j << SECTION_SHIFT,
-					    MT_NORMAL);
+					    MT_NORMAL, PMD_SECT_NS);
 		}
 	}
 
 	/* load TTBR0 */
+	tcr = TCR_FLAGS;
 	el = current_el();
-	if (el == 1) {
-		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
-				  TCR_FLAGS | TCR_EL1_IPS_BITS,
-				  MEMORY_ATTRIBUTES);
-	} else if (el == 2) {
-		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
-				  TCR_FLAGS | TCR_EL2_IPS_BITS,
-				  MEMORY_ATTRIBUTES);
-	} else {
-		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
-				  TCR_FLAGS | TCR_EL3_IPS_BITS,
-				  MEMORY_ATTRIBUTES);
-	}
+	if (el == 1)
+		tcr |= TCR_EL1_IPS_BITS;
+	else if (el == 2)
+		tcr |= TCR_EL2_IPS_BITS;
+	else
+		tcr |= TCR_EL3_IPS_BITS;
+
+	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, tcr, MEMORY_ATTRIBUTES);
+
 	/* enable the mmu */
 	set_sctlr(get_sctlr() | CR_M);
 }
diff --git a/arch/arm/cpu/armv8/fsl-lsch3/cpu.c b/arch/arm/cpu/armv8/fsl-lsch3/cpu.c
index 1f36f7e..bcd2cf2 100644
--- a/arch/arm/cpu/armv8/fsl-lsch3/cpu.c
+++ b/arch/arm/cpu/armv8/fsl-lsch3/cpu.c
@@ -82,11 +82,11 @@ static inline void early_mmu_setup(void)
 	section_l2 = 0;
 	for (i = 0; i < 512; i++) {
 		set_pgtable_section(level1_table_0, i, section_l1t0,
-				    MT_DEVICE_NGNRNE);
+				    MT_DEVICE_NGNRNE, 0);
 		set_pgtable_section(level1_table_1, i, section_l1t1,
-				    MT_NORMAL);
+				    MT_NORMAL, 0);
 		set_pgtable_section(level2_table, i, section_l2,
-				    MT_DEVICE_NGNRNE);
+				    MT_DEVICE_NGNRNE, 0);
 		section_l1t0 += BLOCK_SIZE_L1;
 		section_l1t1 += BLOCK_SIZE_L1;
 		section_l2 += BLOCK_SIZE_L2;
@@ -108,13 +108,13 @@ static inline void early_mmu_setup(void)
 	set_pgtable_section(level2_table,
 			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
 			    CONFIG_SYS_FSL_OCRAM_BASE,
-			    MT_NORMAL);
+			    MT_NORMAL, 0);
 	for (i = CONFIG_SYS_IFC_BASE >> SECTION_SHIFT_L2;
 	     i < (CONFIG_SYS_IFC_BASE + CONFIG_SYS_IFC_SIZE)
 	     >> SECTION_SHIFT_L2; i++) {
 		section_l2 = i << SECTION_SHIFT_L2;
 		set_pgtable_section(level2_table, i,
-				    section_l2, MT_NORMAL);
+				    section_l2, MT_NORMAL, 0);
 	}
 
 	el = current_el();
@@ -154,11 +154,11 @@ static inline void final_mmu_setup(void)
 	section_l2 = 0;
 	for (i = 0; i < 512; i++) {
 		set_pgtable_section(level1_table_0, i, section_l1t0,
-				    MT_DEVICE_NGNRNE);
+				    MT_DEVICE_NGNRNE, 0);
 		set_pgtable_section(level1_table_1, i, section_l1t1,
-				    MT_NORMAL);
+				    MT_NORMAL, 0);
 		set_pgtable_section(level2_table_0, i, section_l2,
-				    MT_DEVICE_NGNRNE);
+				    MT_DEVICE_NGNRNE, 0);
 		section_l1t0 += BLOCK_SIZE_L1;
 		section_l1t1 += BLOCK_SIZE_L1;
 		section_l2 += BLOCK_SIZE_L2;
@@ -177,7 +177,7 @@ static inline void final_mmu_setup(void)
 	set_pgtable_section(level2_table_0,
 			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
 			    CONFIG_SYS_FSL_OCRAM_BASE,
-			    MT_NORMAL);
+			    MT_NORMAL, 0);
 
 	/*
 	 * Fill in other part of tables if cache is needed
@@ -190,7 +190,7 @@
 	section_l2 = section_base;
 	for (i = 0; i < 512; i++) {
 		set_pgtable_section(level2_table_1, i, section_l2,
-				    MT_DEVICE_NGNRNE);
+				    MT_DEVICE_NGNRNE, 0);
 		section_l2 += BLOCK_SIZE_L2;
 	}
 	tbl_base = FINAL_QBMAN_CACHED_MEM & (BLOCK_SIZE_L1 - 1);
@@ -200,7 +200,7 @@
 	     i < tbl_limit >> SECTION_SHIFT_L2; i++) {
 		section_l2 = section_base + (i << SECTION_SHIFT_L2);
 		set_pgtable_section(level2_table_1, i,
-				    section_l2, MT_NORMAL);
+				    section_l2, MT_NORMAL, 0);
 	}
 
 	/* flush new MMU table */
diff --git a/arch/arm/include/asm/armv8/mmu.h b/arch/arm/include/asm/armv8/mmu.h
index 4b7b67b..2ae5ab6 100644
--- a/arch/arm/include/asm/armv8/mmu.h
+++ b/arch/arm/include/asm/armv8/mmu.h
@@ -16,27 +16,6 @@
 
 #define UL(x)		_AC(x, UL)
 
-/***************************************************************/
-/*
- * The following definitions are related each other, shoud be
- * calculated specifically.
- */
-#define VA_BITS			(42)	/* 42 bits virtual address */
-
-/* PAGE_SHIFT determines the page size */
-#undef  PAGE_SIZE
-#define PAGE_SHIFT		16
-#define PAGE_SIZE		(1 << PAGE_SHIFT)
-#define PAGE_MASK		(~(PAGE_SIZE-1))
-
-/*
- * section address mask and size definitions.
- */
-#define SECTION_SHIFT		29
-#define SECTION_SIZE		(UL(1) << SECTION_SHIFT)
-#define SECTION_MASK		(~(SECTION_SIZE-1))
-/***************************************************************/
-
 /*
  * Memory types
  */
@@ -65,7 +44,10 @@
 /*
  * Section
  */
-#define PMD_SECT_S		(3 << 8)
+#define PMD_SECT_NS		(1 << 5)
+#define PMD_SECT_SHARED_NON	(0 << 8)
+#define PMD_SECT_SHARED_OUTER	(2 << 8)
+#define PMD_SECT_SHARED_INNER	(3 << 8)
 #define PMD_SECT_AF		(1 << 10)
 #define PMD_SECT_NG		(1 << 11)
 #define PMD_SECT_PXN		(UL(1) << 53)
@@ -97,20 +79,42 @@
 #define TCR_TG0_4K		(0 << 14)
 #define TCR_TG0_64K		(1 << 14)
 #define TCR_TG0_16K		(2 << 14)
-#define TCR_EL1_IPS_BITS	(UL(3) << 32)	/* 42 bits physical address */
-#define TCR_EL2_IPS_BITS	(3 << 16)	/* 42 bits physical address */
-#define TCR_EL3_IPS_BITS	(3 << 16)	/* 42 bits physical address */
+#define TCR_EL1_IPS_BITS_32	(UL(0) << 32)	/* 32 bits physical address */
+#define TCR_EL1_IPS_BITS_36	(UL(1) << 32)	/* 36 bits physical address */
+#define TCR_EL1_IPS_BITS_40	(UL(2) << 32)	/* 40 bits physical address */
+#define TCR_EL1_IPS_BITS_42	(UL(3) << 32)	/* 42 bits physical address */
+#define TCR_EL1_IPS_BITS_44	(UL(4) << 32)	/* 44 bits physical address */
+#define TCR_EL1_IPS_BITS_48	(UL(5) << 32)	/* 48 bits physical address */
+#define TCR_EL2_IPS_BITS_32	(0 << 16)	/* 32 bits physical address */
+#define TCR_EL2_IPS_BITS_36	(1 << 16)	/* 36 bits physical address */
+#define TCR_EL2_IPS_BITS_40	(2 << 16)	/* 40 bits physical address */
+#define TCR_EL2_IPS_BITS_42	(3 << 16)	/* 42 bits physical address */
+#define TCR_EL2_IPS_BITS_44	(4 << 16)	/* 44 bits physical address */
+#define TCR_EL2_IPS_BITS_48	(5 << 16)	/* 48 bits physical address */
+#define TCR_EL3_IPS_BITS_32	TCR_EL2_IPS_BITS_32
+#define TCR_EL3_IPS_BITS_36	TCR_EL2_IPS_BITS_36
+#define TCR_EL3_IPS_BITS_40	TCR_EL2_IPS_BITS_40
+#define TCR_EL3_IPS_BITS_42	TCR_EL2_IPS_BITS_42
+#define TCR_EL3_IPS_BITS_44	TCR_EL2_IPS_BITS_44
+#define TCR_EL3_IPS_BITS_48	TCR_EL2_IPS_BITS_48
 
-/* PTWs cacheable, inner/outer WBWA and non-shareable */
-#define TCR_FLAGS		(TCR_TG0_64K | \
-				TCR_SHARED_NON | \
-				TCR_ORGN_WBWA | \
-				TCR_IRGN_WBWA | \
-				TCR_T0SZ(VA_BITS))
+/***************************************************************/
+/* PAGE_SHIFT determines the page size */
+#undef  PAGE_SIZE
+#define PAGE_SHIFT		16
+#define PAGE_SIZE		(1 << PAGE_SHIFT)
+#define PAGE_MASK		(~(PAGE_SIZE-1))
+
+/* Section address mask and size definitions */
+#define SECTION_SHIFT		29
+#define SECTION_SIZE		(UL(1) << SECTION_SHIFT)
+#define SECTION_MASK		(~(SECTION_SIZE-1))
+/***************************************************************/
 
 #ifndef __ASSEMBLY__
-void set_pgtable_section(u64 *page_table, u64 index,
-			 u64 section, u64 memory_type);
+void set_pgtable_section(u64 *page_table, u64 index, u64 output_address,
+			 u64 memory_type, u64 extra_flags);
+
 static inline void set_ttbr_tcr_mair(int el, u64 table, u64 tcr, u64 attr)
 {
 	asm volatile("dsb sy");
@@ -132,4 +136,5 @@ static inline void set_ttbr_tcr_mair(int el, u64 table, u64 tcr, u64 attr)
 	asm volatile("isb");
 }
 #endif
+
 #endif /* _ASM_ARMV8_MMU_H_ */
-- 
1.7.9.5

_______________________________________________
U-Boot mailing list
U-Boot@lists.denx.de
http://lists.denx.de/mailman/listinfo/u-boot