We can kexec into a kernel that doesn't use memory keys for kernel mapping (such as an older kernel which doesn't support kuap/kuep with hash translation). We need to make sure we reset the AMR/IAMR value on kexec; otherwise, the new kernel will use key 0 for kernel mapping and the old AMR value will prevent access to key 0.
This patch also removes reset of IAMR and AMOR in kexec_sequence. Reset of AMOR is not needed and the IAMR reset is partial (it doesn't do the reset on secondary CPUs) and is redundant with this patch. Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com> --- arch/powerpc/include/asm/book3s/64/kup.h | 20 ++++++++++++++++++++ arch/powerpc/include/asm/kup.h | 14 ++++++++++++++ arch/powerpc/kernel/misc_64.S | 14 -------------- arch/powerpc/kexec/core_64.c | 3 +++ arch/powerpc/mm/book3s64/pgtable.c | 3 +++ 5 files changed, 40 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h index 5b00592479d1..1cd0d849bd1b 100644 --- a/arch/powerpc/include/asm/book3s/64/kup.h +++ b/arch/powerpc/include/asm/book3s/64/kup.h @@ -341,6 +341,26 @@ static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address, return !!(error_code & DSISR_KEYFAULT); } +#define reset_kuap reset_kuap +static inline void reset_kuap(void) +{ + if (mmu_has_feature(MMU_FTR_KUAP)) { + mtspr(SPRN_AMR, 0); + /* Do we need isync()? We are going via a kexec reset */ + isync(); + } +} + +#define reset_kuep reset_kuep +static inline void reset_kuep(void) +{ + if (mmu_has_feature(MMU_FTR_KUEP)) { + mtspr(SPRN_IAMR, 0); + /* Do we need isync()? 
We are going via a kexec reset */ + isync(); + } +} + #else /* CONFIG_PPC_MEM_KEYS */ static inline void kuap_restore_user_amr(struct pt_regs *regs) { diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index 249eed77a06b..b22becc1705c 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -101,6 +101,20 @@ static inline void prevent_current_access_user(void) prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT); } +#ifndef reset_kuap +#define reset_kuap reset_kuap +static inline void reset_kuap(void) +{ +} +#endif + +#ifndef reset_kuep +#define reset_kuep reset_kuep +static inline void reset_kuep(void) +{ +} +#endif + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_KUAP_H_ */ diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 1864605eca29..7bb46ad98207 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -413,20 +413,6 @@ _GLOBAL(kexec_sequence) li r0,0 std r0,16(r1) -BEGIN_FTR_SECTION - /* - * This is the best time to turn AMR/IAMR off. - * key 0 is used in radix for supervisor<->user - * protection, but on hash key 0 is reserved - * ideally we want to enter with a clean state. - * NOTE, we rely on r0 being 0 from above. - */ - mtspr SPRN_IAMR,r0 -BEGIN_FTR_SECTION_NESTED(42) - mtspr SPRN_AMOR,r0 -END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42) -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - /* save regs for local vars on new stack. * yes, we won't go back, but ... 
*/ diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c index b4184092172a..a124715f33ea 100644 --- a/arch/powerpc/kexec/core_64.c +++ b/arch/powerpc/kexec/core_64.c @@ -152,6 +152,9 @@ static void kexec_smp_down(void *arg) if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(0, 1); + reset_kuap(); + reset_kuep(); + kexec_smp_wait(); /* NOTREACHED */ } diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index e0bb69c616e4..cf3d65067d48 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -168,6 +168,9 @@ void mmu_cleanup_all(void) radix__mmu_cleanup_all(); else if (mmu_hash_ops.hpte_clear_all) mmu_hash_ops.hpte_clear_all(); + + reset_kuap(); + reset_kuep(); } #ifdef CONFIG_MEMORY_HOTPLUG -- 2.26.2