The Hash MMU already supports XOM (i.e. mmap with PROT_EXEC only) through the execute-only pkey. A PROT_EXEC-only mapping will actually map to RX, and then the pkey will be applied on top of it.
Radix doesn't have pkeys, but it does have execute permissions built-in
to the MMU, so all we have to do to support XOM is expose it.

That's not possible with protection_map being const, so make it RO after
init instead.

Signed-off-by: Russell Currey <rus...@russell.cc>
---
v2: Make protection_map __ro_after_init and set it in an initcall
(v1 didn't work, I tested before rebasing on Anshuman's patches)

basic test: https://raw.githubusercontent.com/ruscur/junkcode/main/mmap_test.c

 arch/powerpc/include/asm/book3s/64/radix.h |  3 +++
 arch/powerpc/include/asm/pgtable.h         |  1 -
 arch/powerpc/mm/fault.c                    | 10 ++++++++++
 arch/powerpc/mm/pgtable.c                  | 16 +++++++++++++++-
 4 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 686001eda936..bf316b773d73 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -19,6 +19,9 @@
 #include <asm/cpu_has_feature.h>
 #endif
 
+/* Execute-only page protections, Hash can use RX + execute-only pkey */
+#define PAGE_EXECONLY	__pgprot(_PAGE_BASE | _PAGE_EXEC)
+
 /* An empty PTE can still have a R or C writeback */
 #define RADIX_PTE_NONE_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
 
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 33f4bf8d22b0..3cbb6de20f9d 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -60,7 +60,6 @@ extern void paging_init(void);
 void poking_init(void);
 
 extern unsigned long ioremap_bot;
-extern const pgprot_t protection_map[16];
 
 /*
  * kern_addr_valid is intended to indicate whether an address is a valid
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 014005428687..887c0cc45ca6 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -270,6 +270,16 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma
 		return false;
 	}
 
+	if (unlikely(!(vma->vm_flags & VM_READ))) {
+		/*
+		 * If we're on Radix, then this could be a read attempt on
+		 * execute-only memory.  On other MMUs, an "exec-only" page
+		 * will be given RX flags, so this might be redundant.
+		 */
+		if (radix_enabled())
+			return true;
+	}
+
 	if (unlikely(!vma_is_accessible(vma)))
 		return true;
 	/*
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 0b2bbde5fb65..6e1a6a999c3c 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -475,7 +475,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 EXPORT_SYMBOL_GPL(__find_linux_pte);
 
 /* Note due to the way vm flags are laid out, the bits are XWR */
-const pgprot_t protection_map[16] = {
+static pgprot_t protection_map[16] __ro_after_init = {
 	[VM_NONE]					= PAGE_NONE,
 	[VM_READ]					= PAGE_READONLY,
 	[VM_WRITE]					= PAGE_COPY,
@@ -494,6 +494,20 @@ const pgprot_t protection_map[16] = {
 	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
 };
 
+#ifdef CONFIG_PPC_RADIX_MMU
+static int __init radix_update_protection_map(void)
+{
+	if (early_radix_enabled()) {
+		/* Radix directly supports execute-only page protections */
+		protection_map[VM_EXEC] = PAGE_EXECONLY;
+		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
+	}
+
+	return 0;
+}
+arch_initcall(radix_update_protection_map);
+#endif /* CONFIG_PPC_RADIX_MMU */
+
 #ifdef CONFIG_PPC_BOOK3S_64
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
-- 
2.37.1