From: Ard Biesheuvel <[email protected]> The fixmap page tables are statically allocated, and are currently mapped read-write both in the kernel mapping as well as its linear alias. Due to lack of randomization of the linear map, these tables will appear at a priori known offsets in the virtual address space when booting without physical randomization, which means that a single kernel write primitive is sufficient for an attacker to map memory of their own choosing with any permissions at a known virtual address in the kernel's address space.
To harden against this, move the fixmap PUD and PMD tables to .pgdir_rodata, so that both their kernel mappings as well as their linear aliases are mapped read-only during ordinary execution. The PTE table needs to remain read-write accessible via the kernel mapping, but its linear alias can be remapped read-only as well. Signed-off-by: Ard Biesheuvel <[email protected]> --- arch/arm64/include/asm/pgtable.h | 6 ++++-- arch/arm64/kernel/vmlinux.lds.S | 1 + arch/arm64/mm/fixmap.c | 5 +++-- arch/arm64/mm/mmu.c | 5 +++++ 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 94235dd428be..21afe923cd71 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -822,8 +822,10 @@ extern void set_rodata_pte(pte_t *ptep, pte_t pte); static inline bool in_pgdir_rodata(void *addr) { - return addr >= (void *)__pgdir_rodata_start && - addr < (void *)__pgdir_rodata_end; + phys_addr_t pa = __pa_nodebug(addr); + + return pa >= __pa_symbol_nodebug(__pgdir_rodata_start) && + pa < __pa_symbol_nodebug(__pgdir_rodata_end); } static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index e5e1d0fd7f27..9b346dd24d1c 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -247,6 +247,7 @@ SECTIONS __pgdir_rodata_start = .; swapper_pg_dir = .; . += PAGE_SIZE; + *(.fixmap_rodata) __pgdir_rodata_end = .; } diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c index b649ea1a46e4..ad6d46e5c23e 100644 --- a/arch/arm64/mm/fixmap.c +++ b/arch/arm64/mm/fixmap.c @@ -32,9 +32,10 @@ static_assert(NR_BM_PMD_TABLES == 1); #define BM_PTE_TABLE_IDX(addr) __BM_TABLE_IDX(addr, PMD_SHIFT) #define __fixmap_bss __section(".fixmap_bss") __aligned(PAGE_SIZE) +#define __fixmap_rodata __section(".fixmap_rodata") __aligned(PAGE_SIZE) static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __fixmap_bss; -static pmd_t bm_pmd[PTRS_PER_PMD] __fixmap_bss __maybe_unused; -static pud_t bm_pud[PTRS_PER_PUD] __fixmap_bss __maybe_unused; +static pmd_t bm_pmd[PTRS_PER_PMD] __fixmap_rodata __maybe_unused; +static pud_t bm_pud[PTRS_PER_PUD] __fixmap_rodata __maybe_unused; static inline pte_t *fixmap_pte(unsigned long addr) { diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 84d81bae07a7..e76fe5b0c5fe 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1076,6 +1076,11 @@ void __init mark_linear_text_alias_ro(void) (unsigned long)__init_begin - (unsigned long)_text, pgprot_tagged(PAGE_KERNEL_RO)); + /* Map the fixmap PTE table at __fixmap_pgdir_start R/O in linear map too */ + update_mapping_prot(__pa_symbol(__fixmap_pgdir_start), + (unsigned long)lm_alias(__fixmap_pgdir_start), + PAGE_SIZE, pgprot_tagged(PAGE_KERNEL_RO)); + remap_linear_data_alias(true); if (IS_ENABLED(CONFIG_HIBERNATION)) { -- 2.54.0.rc2.544.gc7ae2d5bb8-goog

