Re: [PATCH v4 20/21] arm64: Defer enabling pointer authentication on boot core

2021-01-23 Thread Catalin Marinas
On Mon, Jan 18, 2021 at 09:45:32AM +, Marc Zyngier wrote:
> From: Srinivas Ramana 
> 
> Defer enabling pointer authentication on the boot core until
> after it is required to be enabled by the cpufeature framework.
> This allows the feature to be controlled dynamically with a
> boot parameter.
> 
> Signed-off-by: Ajay Patil 
> Signed-off-by: Prasad Sodagudi 
> Signed-off-by: Srinivas Ramana 
> Signed-off-by: Marc Zyngier 
> Link: https://lore.kernel.org/r/1610152163-16554-2-git-send-email-sram...@codeaurora.org

Reviewed-by: Catalin Marinas 


[PATCH v4 20/21] arm64: Defer enabling pointer authentication on boot core

2021-01-18 Thread Marc Zyngier
From: Srinivas Ramana 

Defer enabling pointer authentication on the boot core until
after it is required to be enabled by the cpufeature framework.
This allows the feature to be controlled dynamically with a
boot parameter.

Signed-off-by: Ajay Patil 
Signed-off-by: Prasad Sodagudi 
Signed-off-by: Srinivas Ramana 
Signed-off-by: Marc Zyngier 
Link: https://lore.kernel.org/r/1610152163-16554-2-git-send-email-sram...@codeaurora.org
---
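A quick, non-authoritative illustration of the flow this change makes
possible: the boot CPU no longer turns on the SCTLR_EL1 enable bits from
head.S, and ptrauth_enable() only acts once the cpufeature code has decided
that address authentication should be used, which is what leaves room for
the boot parameter mentioned above. The standalone userspace sketch below
models that decision; fake_sctlr_el1, cpu_has_address_auth and
cmdline_nopauth are hypothetical stand-ins rather than kernel API, and the
SCTLR bit values merely mirror the kernel's definitions for illustration.

/*
 * Standalone userspace sketch (not kernel code): pointer authentication
 * is only switched on once the "cpufeature" decision has been made, so a
 * boot parameter can veto it before the enable ever happens.
 */
#include <stdbool.h>
#include <stdio.h>

/* Illustrative mirrors of the SCTLR_ELx enable bits used by the patch. */
#define SCTLR_ELx_ENIA	(1UL << 31)	/* PAC for instruction key A */
#define SCTLR_ELx_ENIB	(1UL << 30)	/* PAC for instruction key B */
#define SCTLR_ELx_ENDA	(1UL << 27)	/* PAC for data key A */
#define SCTLR_ELx_ENDB	(1UL << 13)	/* PAC for data key B */

static unsigned long fake_sctlr_el1;		/* stand-in for the real register */
static bool cpu_has_address_auth = true;	/* what the ID registers report */
static bool cmdline_nopauth;			/* hypothetical boot parameter */

/* Stand-in for system_supports_address_auth(): the cpufeature decision. */
static bool system_supports_address_auth(void)
{
	return cpu_has_address_auth && !cmdline_nopauth;
}

/* Mirrors the shape of the new ptrauth_enable(): a no-op unless wanted. */
static void ptrauth_enable(void)
{
	if (!system_supports_address_auth())
		return;
	fake_sctlr_el1 |= SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
			  SCTLR_ELx_ENDA | SCTLR_ELx_ENDB;
}

int main(void)
{
	cmdline_nopauth = true;		/* user asked to keep PAC off */
	ptrauth_enable();
	printf("SCTLR_EL1 with nopauth: %#lx\n", fake_sctlr_el1);

	cmdline_nopauth = false;	/* default: enable when the CPU has it */
	ptrauth_enable();
	printf("SCTLR_EL1 by default:   %#lx\n", fake_sctlr_el1);
	return 0;
}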
 arch/arm64/include/asm/pointer_auth.h   | 10 ++++++++++
 arch/arm64/include/asm/stackprotector.h |  1 +
 arch/arm64/kernel/head.S                |  4 ----
 3 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index c6b4f0603024..b112a11e9302 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -76,6 +76,15 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
return ptrauth_clear_pac(ptr);
 }
 
+static __always_inline void ptrauth_enable(void)
+{
+   if (!system_supports_address_auth())
+   return;
+   sysreg_clear_set(sctlr_el1, 0, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+   SCTLR_ELx_ENDA | SCTLR_ELx_ENDB));
+   isb();
+}
+
 #define ptrauth_thread_init_user(tsk)  \
ptrauth_keys_init_user(&(tsk)->thread.keys_user)
 #define ptrauth_thread_init_kernel(tsk)                                \
@@ -84,6 +93,7 @@ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
 
 #else /* CONFIG_ARM64_PTR_AUTH */
+#define ptrauth_enable()
 #define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
 #define ptrauth_strip_insn_pac(lr) (lr)
 #define ptrauth_thread_init_user(tsk)
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index 7263e0bac680..33f1bb453150 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -41,6 +41,7 @@ static __always_inline void boot_init_stack_canary(void)
 #endif
ptrauth_thread_init_kernel(current);
ptrauth_thread_switch_kernel(current);
+   ptrauth_enable();
 }
 
 #endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b3c4dd04f74b..2a152d96d767 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -404,10 +404,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
adr_l   x5, init_task
msr sp_el0, x5  // Save thread_info
 
-#ifdef CONFIG_ARM64_PTR_AUTH
-   __ptrauth_keys_init_cpu x5, x6, x7, x8
-#endif
-
adr_l   x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8// vector table address
isb
-- 
2.29.2