Hi Neeraj,

On 09/07/2019 12:22, Neeraj Upadhyay wrote:
> On CPUs which do not support the PSTATE.SSBS feature, EL0
> might not retain SPSR.SSBS. This is problematic if the
> task then migrates to a CPU supporting this feature and
> relies on its saved state being correct. On kernel entry,
> explicitly set SPSR.SSBS so that speculation is enabled
> for EL0 when the task migrates to an SSBS-capable CPU.
> Restoring the state at kernel entry ensures that the EL0
> SSBS state is always consistent while we are in EL1.
> 
> As alternatives are applied by the boot CPU at the end of
> SMP init, the presence or absence of the SSBS feature on
> the boot CPU is used to decide whether the capability is
> uniformly provided.

I've seen the same issue, but went for a slightly different
approach; see below.

> 
> Signed-off-by: Neeraj Upadhyay <neer...@codeaurora.org>
> ---
>  arch/arm64/kernel/cpu_errata.c | 16 ++++++++++++++++
>  arch/arm64/kernel/entry.S      | 26 +++++++++++++++++++++++++-
>  2 files changed, 41 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
> index ca11ff7..c84a56d 100644
> --- a/arch/arm64/kernel/cpu_errata.c
> +++ b/arch/arm64/kernel/cpu_errata.c
> @@ -336,6 +336,22 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
>               *updptr = cpu_to_le32(aarch64_insn_gen_nop());
>  }
>  
> +void __init arm64_restore_ssbs_state(struct alt_instr *alt,
> +                                  __le32 *origptr, __le32 *updptr,
> +                                  int nr_inst)
> +{
> +     BUG_ON(nr_inst != 1);
> +     /*
> +      * Only restore EL0 SSBS state on EL1 entry if cpu does not
> +      * support the capability and capability is present for at
> +      * least one cpu and if the SSBD state allows it to
> +      * be changed.
> +      */
> +     if (!this_cpu_has_cap(ARM64_SSBS) && cpus_have_cap(ARM64_SSBS) &&
> +         arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
> +             *updptr = cpu_to_le32(aarch64_insn_gen_nop());
> +}
> +
>  void arm64_set_ssbd_mitigation(bool state)
>  {
>       if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 9cdc459..7e79305 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -143,6 +143,25 @@ alternative_cb_end
>  #endif
>       .endm
>  
> +     // This macro updates spsr. It also corrupts the condition
> +     // codes state.
> +     .macro  restore_ssbs_state, saved_spsr, tmp
> +#ifdef CONFIG_ARM64_SSBD
> +alternative_cb       arm64_restore_ssbs_state
> +     b       .L__asm_ssbs_skip\@
> +alternative_cb_end
> +     ldr     \tmp, [tsk, #TSK_TI_FLAGS]
> +     tbnz    \tmp, #TIF_SSBD, .L__asm_ssbs_skip\@
> +     tst     \saved_spsr, #PSR_MODE32_BIT    // native task?
> +     b.ne    .L__asm_ssbs_compat\@
> +     orr     \saved_spsr, \saved_spsr, #PSR_SSBS_BIT
> +     b       .L__asm_ssbs_skip\@
> +.L__asm_ssbs_compat\@:
> +     orr     \saved_spsr, \saved_spsr, #PSR_AA32_SSBS_BIT
> +.L__asm_ssbs_skip\@:
> +#endif
> +     .endm

Although this is in keeping with the rest of entry.S (perfectly
unreadable ;-), I think we can do something a bit simpler that
doesn't rely on patching. Also, this doesn't seem to take SSBD
options such as ARM64_SSBD_FORCE_ENABLE into account.

> +
>       .macro  kernel_entry, el, regsize = 64
>       .if     \regsize == 32
>       mov     w0, w0                          // zero upper 32 bits of x0
> @@ -182,8 +201,13 @@ alternative_cb_end
>       str     x20, [tsk, #TSK_TI_ADDR_LIMIT]
>       /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
>       .endif /* \el == 0 */
> -     mrs     x22, elr_el1
>       mrs     x23, spsr_el1
> +
> +     .if     \el == 0
> +     restore_ssbs_state x23, x22
> +     .endif
> +
> +     mrs     x22, elr_el1
>       stp     lr, x21, [sp, #S_LR]
>  
>       /*
> 

How about the patch below?

Thanks,

        M.

From 7d4314d1ef3122d8bf56a7ef239c8c68e0c81277 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <marc.zyng...@arm.com>
Date: Tue, 4 Jun 2019 17:35:18 +0100
Subject: [PATCH] arm64: Force SSBS on context switch

On a CPU that doesn't support SSBS, PSTATE[12] is RES0.  In a system
where only some of the CPUs implement SSBS, we end up losing track of
the SSBS bit across task migration.

To address this issue, let's force the SSBS bit on context switch.

Signed-off-by: Marc Zyngier <marc.zyng...@arm.com>
---
 arch/arm64/include/asm/processor.h | 14 ++++++++++++--
 arch/arm64/kernel/process.c        | 14 ++++++++++++++
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fd5b1a4efc70..844e2964b0f5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
                regs->pmr_save = GIC_PRIO_IRQON;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+       regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+       regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
                                unsigned long sp)
 {
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
        regs->pstate = PSR_MODE_EL0t;
 
        if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-               regs->pstate |= PSR_SSBS_BIT;
+               set_ssbs_bit(regs);
 
        regs->sp = sp;
 }
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif
 
        if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-               regs->pstate |= PSR_AA32_SSBS_BIT;
+               set_compat_ssbs_bit(regs);
 
        regs->compat_sp = sp;
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 9856395ccdb7..d451b3b248cf 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -442,6 +442,19 @@ void uao_thread_switch(struct task_struct *next)
        }
 }
 
+static void ssbs_thread_switch(struct task_struct *next)
+{
+       if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE &&
+           !test_tsk_thread_flag(next, TIF_SSBD)) {
+               struct pt_regs *regs = task_pt_regs(next);
+
+               if (compat_user_mode(regs))
+                       set_compat_ssbs_bit(regs);
+               else if (user_mode(regs))
+                       set_ssbs_bit(regs);
+       }
+}
+
 /*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
@@ -471,6 +484,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
        entry_task_switch(next);
        uao_thread_switch(next);
        ptrauth_thread_switch(next);
+       ssbs_thread_switch(next);
 
        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
-- 
2.20.1
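
As an aside, a quick way to eyeball the fix on an SSBS-capable part:
PSTATE.SSBS is readable from EL0, so something like the hack below
(untested; it will UNDEF on a CPU without SSBS, and s3_3_c4_c2_6 is
the raw SSBS encoding for toolchains that don't know the register by
name) can be pinned across SSBS and non-SSBS CPUs to check that the
bit no longer gets lost:

	#include <stdio.h>

	int main(void)
	{
		unsigned long ssbs;

		/* MRS from the SSBS register; bit 12 mirrors PSTATE.SSBS. */
		asm volatile("mrs %0, s3_3_c4_c2_6" : "=r" (ssbs));
		printf("SSBS is %s\n",
		       (ssbs & (1UL << 12)) ? "set" : "clear");
		return 0;
	}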


-- 
Jazz is not dead. It just smells funny...
