Add a PBL-specific memory remapping function that always uses page-wise
mapping (ARCH_MAP_FLAG_PAGEWISE) to allow fine-grained permissions on
adjacent ELF segments with different protection requirements.
This wraps the arch-specific __arch_remap_range() for ARMv7 (4KB pages)
and ARMv8 (page tables with break-before-make (BBM)). It is needed for
ELF segment permission setup.

Signed-off-by: Sascha Hauer <[email protected]>
---
 arch/arm/cpu/mmu_32.c | 7 +++++++
 arch/arm/cpu/mmu_64.c | 8 ++++++++
 include/mmu.h         | 3 +++
 3 files changed, 18 insertions(+)

diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 97c7107290ce95ddb21a322a5d0e74f3d324c528..86a55d165ba3cec5154c345a1a3a9cb959f0996f 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -435,6 +435,13 @@ static void early_remap_range(u32 addr, size_t size, maptype_t map_type)
 	__arch_remap_range((void *)addr, addr, size, map_type);
 }
 
+void pbl_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size,
+		     maptype_t map_type)
+{
+	__arch_remap_range(virt_addr, phys_addr, size,
+			   map_type | ARCH_MAP_FLAG_PAGEWISE);
+}
+
 static bool pte_is_cacheable(uint32_t pte, int level)
 {
 	return (level == 2 && (pte & PTE_CACHEABLE)) ||
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index afb3d2d7efd0bc7ecde1177d1544f54d751b5dc1..63faaa46703697e527eae766392e7ea7ae186b3d 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -282,6 +282,14 @@ static void early_remap_range(uint64_t addr, size_t size, maptype_t map_type)
 	__arch_remap_range(addr, addr, size, map_type, false);
 }
 
+void pbl_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size,
+		     maptype_t map_type)
+{
+	__arch_remap_range((uint64_t)virt_addr, phys_addr,
+			   (uint64_t)size, map_type | ARCH_MAP_FLAG_PAGEWISE,
+			   true);
+}
+
 int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptype_t map_type)
 {
 	map_type = arm_mmu_maybe_skip_permissions(map_type);
diff --git a/include/mmu.h b/include/mmu.h
index 53603b7956c229b4c715c57b19d0398931eb2d6b..37df7b482e1d83e94e61997db6cf9834d8cf7f3c 100644
--- a/include/mmu.h
+++ b/include/mmu.h
@@ -64,6 +64,9 @@ static inline bool arch_can_remap(void)
 }
 #endif
 
+void pbl_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size,
+		     maptype_t map_type);
+
 static inline int remap_range(void *start, size_t size, maptype_t map_type)
 {
 	return arch_remap_range(start, virt_to_phys(start), size, map_type);
-- 
2.47.3
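
Usage sketch (not part of this patch): a PBL-side ELF loader could apply
per-segment permissions roughly as below. pbl_set_segment_permissions() is a
hypothetical helper; only pbl_remap_range() and its signature come from this
patch, and plain MAP_CACHED stands in for whatever map types a real loader
would derive from the ELF p_flags.

#include <elf.h>
#include <mmu.h>
#include <linux/sizes.h>

/* Hypothetical helper: remap every PT_LOAD segment with page granularity */
static void pbl_set_segment_permissions(const Elf64_Phdr *phdr, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (phdr[i].p_type != PT_LOAD)
			continue;

		/*
		 * ARCH_MAP_FLAG_PAGEWISE (implied by pbl_remap_range())
		 * keeps adjacent segments in separate page table entries,
		 * so they can carry different permissions. A real loader
		 * would pick the map type from phdr[i].p_flags (PF_R/PF_W/
		 * PF_X); MAP_CACHED is used here as a placeholder.
		 */
		pbl_remap_range((void *)(unsigned long)phdr[i].p_vaddr,
				phdr[i].p_paddr,
				ALIGN(phdr[i].p_memsz, SZ_4K),
				MAP_CACHED);
	}
}

Passing ARCH_MAP_FLAG_PAGEWISE unconditionally is what distinguishes
pbl_remap_range() from arch_remap_range(): segments that would otherwise
share a section or block mapping get their own page mappings, so their
protection requirements can differ.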
