From: Ard Biesheuvel <[email protected]> Before moving the fixmap PUD/PMD tables into .rodata, update the existing descriptor manipulation code so it will fall back to the fixmap for any descriptor located in the .pgdir_rodata section.
This is slightly more costly, as it evaluates whether or not a descriptor is in the kernel's rodata region at levels PMD and higher for any configuration, rather than only when the level in question is the root level. Signed-off-by: Ard Biesheuvel <[email protected]> --- arch/arm64/include/asm/pgtable.h | 27 ++++++++++---------- arch/arm64/kernel/vmlinux.lds.S | 8 ++++-- arch/arm64/mm/mmu.c | 24 ++++++++--------- 3 files changed, 31 insertions(+), 28 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index a1c5894332d9..94235dd428be 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -816,23 +816,22 @@ extern pgd_t swapper_pg_dir[]; extern pgd_t idmap_pg_dir[]; extern pgd_t tramp_pg_dir[]; extern pgd_t reserved_pg_dir[]; +extern pgd_t __pgdir_rodata_start[], __pgdir_rodata_end[]; -extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd); +extern void set_rodata_pte(pte_t *ptep, pte_t pte); -static inline bool in_swapper_pgdir(void *addr) +static inline bool in_pgdir_rodata(void *addr) { - return ((unsigned long)addr & PAGE_MASK) == - ((unsigned long)swapper_pg_dir & PAGE_MASK); + return addr >= (void *)__pgdir_rodata_start && + addr < (void *)__pgdir_rodata_end; } static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { -#ifdef __PAGETABLE_PMD_FOLDED - if (in_swapper_pgdir(pmdp)) { - set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd))); + if (in_pgdir_rodata(pmdp)) { + set_rodata_pte((pte_t *)pmdp, __pte(pmd_val(pmd))); return; } -#endif /* __PAGETABLE_PMD_FOLDED */ WRITE_ONCE(*pmdp, pmd); @@ -893,8 +892,8 @@ static inline bool pgtable_l4_enabled(void); static inline void set_pud(pud_t *pudp, pud_t pud) { - if (!pgtable_l4_enabled() && in_swapper_pgdir(pudp)) { - set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud))); + if (in_pgdir_rodata(pudp)) { + set_rodata_pte((pte_t *)pudp, __pte(pud_val(pud))); return; } @@ -974,8 +973,8 @@ static inline bool mm_pud_folded(const struct mm_struct *mm) static 
inline void set_p4d(p4d_t *p4dp, p4d_t p4d) { - if (in_swapper_pgdir(p4dp)) { - set_swapper_pgd((pgd_t *)p4dp, __pgd(p4d_val(p4d))); + if (in_pgdir_rodata(p4dp)) { + set_rodata_pte((pte_t *)p4dp, __pte(p4d_val(p4d))); return; } @@ -1102,8 +1101,8 @@ static inline bool mm_p4d_folded(const struct mm_struct *mm) static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) { - if (in_swapper_pgdir(pgdp)) { - set_swapper_pgd(pgdp, __pgd(pgd_val(pgd))); + if (in_pgdir_rodata(pgdp)) { + set_rodata_pte((pte_t *)pgdp, __pte(pgd_val(pgd))); return; } diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 2dca18574619..e5e1d0fd7f27 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -243,8 +243,12 @@ SECTIONS reserved_pg_dir = .; . += PAGE_SIZE; - swapper_pg_dir = .; - . += PAGE_SIZE; + .pgdir_rodata : { + __pgdir_rodata_start = .; + swapper_pg_dir = .; + . += PAGE_SIZE; + __pgdir_rodata_end = .; + } . = ALIGN(SEGMENT_ALIGN); __init_begin = .; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index a464f3d2d2df..84d81bae07a7 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -65,34 +65,34 @@ static bool rodata_is_rw __ro_after_init = true; */ long __section(".mmuoff.data.write") __early_cpu_boot_status; -static DEFINE_SPINLOCK(swapper_pgdir_lock); +static DEFINE_SPINLOCK(rodata_pgdir_lock); static DEFINE_MUTEX(fixmap_lock); -void noinstr set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) +void noinstr set_rodata_pte(pte_t *ptep, pte_t pte) { - pgd_t *fixmap_pgdp; + pte_t *fixmap_ptep; /* - * Don't bother with the fixmap if swapper_pg_dir is still mapped - * writable in the kernel mapping. + * Don't bother with the fixmap if rodata is still mapped + * writable in the kernel and linear mappings. 
*/ if (rodata_is_rw) { - WRITE_ONCE(*pgdp, pgd); + WRITE_ONCE(*ptep, pte); dsb(ishst); isb(); return; } - spin_lock(&swapper_pgdir_lock); - fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp)); - WRITE_ONCE(*fixmap_pgdp, pgd); + spin_lock(&rodata_pgdir_lock); + fixmap_ptep = pte_set_fixmap(__pa_nodebug(ptep)); + WRITE_ONCE(*fixmap_ptep, pte); /* * We need dsb(ishst) here to ensure the page-table-walker sees * our new entry before set_p?d() returns. The fixmap's * flush_tlb_kernel_range() via clear_fixmap() does this for us. */ - pgd_clear_fixmap(); - spin_unlock(&swapper_pgdir_lock); + pte_clear_fixmap(); + spin_unlock(&rodata_pgdir_lock); } pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, @@ -1071,6 +1071,7 @@ void __init mark_linear_text_alias_ro(void) /* * Remove the write permissions from the linear alias of .text/.rodata */ + WRITE_ONCE(rodata_is_rw, false); update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text), (unsigned long)__init_begin - (unsigned long)_text, pgprot_tagged(PAGE_KERNEL_RO)); @@ -1221,7 +1222,6 @@ void mark_rodata_ro(void) * to cover NOTES and EXCEPTION_TABLE. */ section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata; - WRITE_ONCE(rodata_is_rw, false); update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata, section_size, PAGE_KERNEL_RO); /* mark the range between _text and _stext as read only. */ -- 2.54.0.rc2.544.gc7ae2d5bb8-goog

