Hi Deepak,

On Fri, Sep 13, 2024 at 1:20 AM Deepak Gupta <de...@rivosinc.com> wrote:
>
> Save shadow stack pointer in sigcontext structure while delivering signal.
> Restore shadow stack pointer from sigcontext on sigreturn.
>
> As part of the save operation, the kernel uses `ssamoswap` to save a
> snapshot of the current shadow stack on the shadow stack itself (this
> can be called a save token). During restore on sigreturn, the kernel
> retrieves the token from the top of the shadow stack and validates it.
> This ensures that user mode can't pivot to an arbitrary shadow stack
> address without having a token, and thus provides a strong security
> assurance for the window between signal delivery and sigreturn.
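
To make the token check concrete, here is a rough sketch of what sigreturn
ends up verifying. The helper name check_rstor_token and the 8-byte
SHSTK_ENTRY_SIZE value are assumptions for the sketch only; the invariant
itself matches what restore_user_shstk() below checks.

#include <linux/errno.h>

/* Assumed for the sketch: one 64-bit shadow stack slot on rv64. */
#define SHSTK_ENTRY_SIZE 8UL

/* Hypothetical helper, not part of the patch. */
static int check_rstor_token(unsigned long token, unsigned long slot)
{
	/* A valid save left: *slot == slot + SHSTK_ENTRY_SIZE. */
	return (token - slot) == SHSTK_ENTRY_SIZE ? 0 : -EINVAL;
}
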
>
> Signed-off-by: Deepak Gupta <de...@rivosinc.com>
> Suggested-by: Andy Chiu <andy.c...@sifive.com>
> ---
>  arch/riscv/include/asm/usercfi.h | 19 ++++++++++
>  arch/riscv/kernel/signal.c       | 62 +++++++++++++++++++++++++++++++-
>  arch/riscv/kernel/usercfi.c      | 57 +++++++++++++++++++++++++++++
>  3 files changed, 137 insertions(+), 1 deletion(-)
>
> diff --git a/arch/riscv/include/asm/usercfi.h b/arch/riscv/include/asm/usercfi.h
> index 20a9102cce51..d5050a5df26c 100644
> --- a/arch/riscv/include/asm/usercfi.h
> +++ b/arch/riscv/include/asm/usercfi.h
> @@ -8,6 +8,7 @@
>  #ifndef __ASSEMBLY__
>  #include <linux/types.h>
>  #include <linux/prctl.h>
> +#include <linux/errno.h>
>
>  struct task_struct;
>  struct kernel_clone_args;
> @@ -35,6 +36,9 @@ bool is_shstk_locked(struct task_struct *task);
>  bool is_shstk_allocated(struct task_struct *task);
>  void set_shstk_lock(struct task_struct *task);
>  void set_shstk_status(struct task_struct *task, bool enable);
> +unsigned long get_active_shstk(struct task_struct *task);
> +int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr);
> +int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr);
>  bool is_indir_lp_enabled(struct task_struct *task);
>  bool is_indir_lp_locked(struct task_struct *task);
>  void set_indir_lp_status(struct task_struct *task, bool enable);
> @@ -96,6 +100,21 @@ static inline void set_shstk_status(struct task_struct *task, bool enable)
>
>  }
>
> +static inline int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr)
> +{
> +       return -EINVAL;
> +}
> +
> +static inline int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr)
> +{
> +       return -EINVAL;
> +}
> +
> +static inline unsigned long get_active_shstk(struct task_struct *task)
> +{
> +       return 0;
> +}
> +
>  static inline bool is_indir_lp_enabled(struct task_struct *task)
>  {
>         return false;
> diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
> index dcd282419456..7d5c1825650f 100644
> --- a/arch/riscv/kernel/signal.c
> +++ b/arch/riscv/kernel/signal.c
> @@ -22,6 +22,7 @@
>  #include <asm/vector.h>
>  #include <asm/csr.h>
>  #include <asm/cacheflush.h>
> +#include <asm/usercfi.h>
>
>  unsigned long signal_minsigstksz __ro_after_init;
>
> @@ -153,6 +154,16 @@ static long restore_sigcontext(struct pt_regs *regs,
>         void __user *sc_ext_ptr = &sc->sc_extdesc.hdr;
>         __u32 rsvd;
>         long err;
> +       unsigned long ss_ptr = 0;
> +       struct __sc_riscv_cfi_state __user *sc_cfi = NULL;
> +
> +       sc_cfi = (struct __sc_riscv_cfi_state *)
> +                ((unsigned long) sc_ext_ptr + sizeof(struct __riscv_ctx_hdr));
> +
> +       if (has_vector() && riscv_v_vstate_query(regs))
> +               sc_cfi = (struct __sc_riscv_cfi_state *)
> +                        ((unsigned long) sc_cfi + riscv_v_sc_size);
> +
>         /* sc_regs is structured the same as the start of pt_regs */
>         err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
>         if (unlikely(err))
> @@ -172,6 +183,24 @@ static long restore_sigcontext(struct pt_regs *regs,
>         if (unlikely(rsvd))
>                 return -EINVAL;
>
> +       /*
> +        * Restore the shadow stack from a token stored on the shadow stack itself,
> +        * as a safe way to restore.
> +        * A token on the shadow stack gives the following properties:
> +        *      - Safe save and restore for shadow stack switching. Any save of the
> +        *        shadow stack must have saved a token on the shadow stack. Similarly
> +        *        any restore of the shadow stack must check the token before restoring.
> +        *        Since writing an address of the shadow stack to the shadow stack itself
> +        *        is not easily allowed, a restore without a save is quite difficult for
> +        *        an attacker to perform.
> +        *      - A natural break. A token in the shadow stack provides a natural break
> +        *        in the shadow stack, so a single linear range can be bucketed into
> +        *        different shadow stack segments. sspopchk will detect the condition
> +        *        and fault to the kernel as a software check exception.
> +        */
> +       if (is_shstk_enabled(current)) {
> +               err |= __copy_from_user(&ss_ptr, &sc_cfi->ss_ptr, sizeof(unsigned long));
> +               err |= restore_user_shstk(current, ss_ptr);
> +       }
> +
>         while (!err) {
>                 __u32 magic, size;
>                 struct __riscv_ctx_hdr __user *head = sc_ext_ptr;
> @@ -215,6 +244,10 @@ static size_t get_rt_frame_size(bool cal_all)
>                 if (cal_all || riscv_v_vstate_query(task_pt_regs(current)))
>                         total_context_size += riscv_v_sc_size;
>         }
> +
> +       if (is_shstk_enabled(current))
> +               total_context_size += sizeof(struct __sc_riscv_cfi_state);
> +
>         /*
>          * Preserved a __riscv_ctx_hdr for END signal context header if an
>          * extension uses __riscv_extra_ext_header
> @@ -276,18 +309,40 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
>  {
>         struct sigcontext __user *sc = &frame->uc.uc_mcontext;
>         struct __riscv_ctx_hdr __user *sc_ext_ptr = &sc->sc_extdesc.hdr;
> +       unsigned long ss_ptr = 0;
> +       struct __sc_riscv_cfi_state __user *sc_cfi = NULL;
>         long err;
>
> +       sc_cfi = (struct __sc_riscv_cfi_state *) (sc_ext_ptr + 1);
> +

Is it intended that the cfi sigcontext does not follow the sigcontext
rule set up by Vector? It seems there is no extension header (struct
__riscv_ctx_hdr) defined for the cfi sigcontext here. If the cfi state
is appended directly to the signal stack, userspace may not be able to
recognize its meaning without a new ABI being defined.
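
To illustrate, roughly the Vector-style layout I have in mind is sketched
below. RISCV_CFI_MAGIC and struct __sc_riscv_cfi_ext are hypothetical names
for the sketch, and __sc_riscv_cfi_state is assumed to hold only ss_ptr as
this patch uses it.

#include <linux/types.h>

/* Existing header format used by the Vector sigcontext extension. */
struct __riscv_ctx_hdr {
	__u32 magic;
	__u32 size;
};

/* Assumed shape, based on the ss_ptr accesses in this patch. */
struct __sc_riscv_cfi_state {
	unsigned long ss_ptr;	/* shadow stack pointer / restore token */
};

/*
 * Hypothetical wrapped entry: userspace (and future kernels) can find it
 * by magic and skip it by size, the same way the loop in
 * restore_sigcontext() walks entries until END_MAGIC/END_HDR_SIZE.
 */
struct __sc_riscv_cfi_ext {
	struct __riscv_ctx_hdr hdr;	/* e.g. { RISCV_CFI_MAGIC, sizeof(struct __sc_riscv_cfi_ext) } */
	struct __sc_riscv_cfi_state state;
};
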

BTW, I have sent a patch[1] that refactors setup_sigcontext so it'd be
easier for future extensions to expand on the signal stack.

>         /* sc_regs is structured the same as the start of pt_regs */
>         err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
>         /* Save the floating-point state. */
>         if (has_fpu())
>                 err |= save_fp_state(regs, &sc->sc_fpregs);
>         /* Save the vector state. */
> -       if (has_vector() && riscv_v_vstate_query(regs))
> +       if (has_vector() && riscv_v_vstate_query(regs)) {
>                 err |= save_v_state(regs, (void __user **)&sc_ext_ptr);
> +               sc_cfi = (struct __sc_riscv_cfi_state *) ((unsigned long) sc_cfi + riscv_v_sc_size);
> +       }
>         /* Write zero to fp-reserved space and check it on restore_sigcontext */
>         err |= __put_user(0, &sc->sc_extdesc.reserved);
> +       /*
> +        * Save a pointer to the shadow stack itself on the shadow stack as a form of
> +        * token.
> +        * A token on the shadow stack gives the following properties:
> +        *      - Safe save and restore for shadow stack switching. Any save of the
> +        *        shadow stack must have saved a token on the shadow stack. Similarly
> +        *        any restore of the shadow stack must check the token before restoring.
> +        *        Since writing an address of the shadow stack to the shadow stack itself
> +        *        is not easily allowed, a restore without a save is quite difficult for
> +        *        an attacker to perform.
> +        *      - A natural break. A token in the shadow stack provides a natural break
> +        *        in the shadow stack, so a single linear range can be bucketed into
> +        *        different shadow stack segments. Any sspopchk will detect the condition
> +        *        and fault to the kernel as a software check exception.
> +        */
> +       if (is_shstk_enabled(current)) {
> +               err |= save_user_shstk(current, &ss_ptr);
> +               err |= __put_user(ss_ptr, &sc_cfi->ss_ptr);
> +       }
>         /* And put END __riscv_ctx_hdr at the end. */
>         err |= __put_user(END_MAGIC, &sc_ext_ptr->magic);
>         err |= __put_user(END_HDR_SIZE, &sc_ext_ptr->size);
> @@ -345,6 +400,11 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
>  #ifdef CONFIG_MMU
>         regs->ra = (unsigned long)VDSO_SYMBOL(
>                 current->mm->context.vdso, rt_sigreturn);
> +
> +       /* if bcfi is enabled x1 (ra) and x5 (t0) must match. not sure if we need this? */
> +       if (is_shstk_enabled(current))
> +               regs->t0 = regs->ra;
> +
>  #else
>         /*
>          * For the nommu case we don't have a VDSO.  Instead we push two
> diff --git a/arch/riscv/kernel/usercfi.c b/arch/riscv/kernel/usercfi.c
> index 8da509afdbe9..40c32258b6ec 100644
> --- a/arch/riscv/kernel/usercfi.c
> +++ b/arch/riscv/kernel/usercfi.c
> @@ -52,6 +52,11 @@ void set_active_shstk(struct task_struct *task, unsigned long shstk_addr)
>         task->thread_info.user_cfi_state.user_shdw_stk = shstk_addr;
>  }
>
> +unsigned long get_active_shstk(struct task_struct *task)
> +{
> +       return task->thread_info.user_cfi_state.user_shdw_stk;
> +}
> +
>  void set_shstk_status(struct task_struct *task, bool enable)
>  {
>         task->thread_info.user_cfi_state.ubcfi_en = enable ? 1 : 0;
> @@ -164,6 +169,58 @@ static int create_rstor_token(unsigned long ssp, unsigned long *token_addr)
>         return 0;
>  }
>
> +/*
> + * Save user shadow stack pointer on shadow stack itself and return pointer to saved location
> + * returns -EFAULT if operation was unsuccessful
> + */
> +int save_user_shstk(struct task_struct *tsk, unsigned long *saved_shstk_ptr)
> +{
> +       unsigned long ss_ptr = 0;
> +       unsigned long token_loc = 0;
> +       int ret = 0;
> +
> +       if (saved_shstk_ptr == NULL)
> +               return -EINVAL;
> +
> +       ss_ptr = get_active_shstk(tsk);
> +       ret = create_rstor_token(ss_ptr, &token_loc);
> +
> +       if (!ret) {
> +               *saved_shstk_ptr = token_loc;
> +               set_active_shstk(tsk, token_loc);
> +       }
> +
> +       return ret;
> +}
> +
> +/*
> + * Restores user shadow stack pointer from token on shadow stack for task `tsk`
> + * returns -EFAULT if operation was unsuccessful
> + */
> +int restore_user_shstk(struct task_struct *tsk, unsigned long shstk_ptr)
> +{
> +       unsigned long token = 0;
> +
> +       token = amo_user_shstk((unsigned long __user *)shstk_ptr, 0);
> +
> +       if (token == -1)
> +               return -EFAULT;
> +
> +       /* invalid token, return EINVAL */
> +       if ((token - shstk_ptr) != SHSTK_ENTRY_SIZE) {
> +               pr_info_ratelimited(
> +                               "%s[%d]: bad restore token in %s: pc=%p sp=%p, token=%p, shstk_ptr=%p\n",
> +                               tsk->comm, task_pid_nr(tsk), __func__,
> +                               (void *)(task_pt_regs(tsk)->epc), (void *)(task_pt_regs(tsk)->sp),
> +                               (void *)token, (void *)shstk_ptr);
> +               return -EINVAL;
> +       }
> +
> +       /* all checks passed, set active shstk and return success */
> +       set_active_shstk(tsk, token);
> +       return 0;
> +}
> +
>  static unsigned long allocate_shadow_stack(unsigned long addr, unsigned long size,
>                                 unsigned long token_offset,
>                                 bool set_tok)
> --
> 2.45.0
>

- [1]: https://lore.kernel.org/all/20240628-dev-signal-refactor-v1-1-0c391b260...@sifive.com/

Thanks,
Andy
