The kernel allocates (and frees on thread exit) a new shadow stack for a
pthread child.

    It is possible for the kernel to complete the clone syscall, set the
    child's shadow stack pointer to NULL, and let the child thread allocate
    a shadow stack for itself.  There are two issues with this approach: it
    is not compatible with existing code that does inline syscalls, and it
    cannot handle signals delivered before the child has successfully
    allocated a shadow stack.

Use the stack_size passed from the clone3() syscall for the thread shadow
stack size, but cap it to min(RLIMIT_STACK, 4 GB).  A compat-mode thread's
shadow stack size is further reduced to 1/4 of that, which allows more
threads to run in a 32-bit address space.
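
For reference, the per-thread sizing implemented in cet.c below reduces to
roughly the following sketch (illustrative only, not part of the patch; it
uses the same kernel helpers as the diff):

    /* Illustrative sketch of the shadow stack sizing policy. */
    size = min(rlimit(RLIMIT_STACK), 1UL << 32);   /* cap at 4 GB          */
    size = min(size, stack_size);                  /* clone3() stack_size  */
    if (in_compat_syscall())
            size /= 4;                             /* compat: 1/4 the size */
    size = round_up(size, PAGE_SIZE);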

Signed-off-by: Yu-cheng Yu <yu-cheng...@intel.com>
---
 arch/x86/include/asm/cet.h         |  5 +++
 arch/x86/include/asm/mmu_context.h |  3 ++
 arch/x86/kernel/cet.c              | 49 ++++++++++++++++++++++++++++++
 arch/x86/kernel/process.c          | 15 +++++++--
 4 files changed, 69 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/cet.h b/arch/x86/include/asm/cet.h
index 73435856ce54..5d66340c7a13 100644
--- a/arch/x86/include/asm/cet.h
+++ b/arch/x86/include/asm/cet.h
@@ -18,12 +18,17 @@ struct cet_status {
 
 #ifdef CONFIG_X86_CET
 int cet_setup_shstk(void);
+int cet_setup_thread_shstk(struct task_struct *p, unsigned long clone_flags,
+                          unsigned long stack_size);
 void cet_disable_shstk(void);
 void cet_free_shstk(struct task_struct *p);
 int cet_verify_rstor_token(bool ia32, unsigned long ssp, unsigned long *new_ssp);
 void cet_restore_signal(struct sc_ext *sc);
 int cet_setup_signal(bool ia32, unsigned long rstor, struct sc_ext *sc);
 #else
+static inline int cet_setup_thread_shstk(struct task_struct *p,
+                                        unsigned long clone_flags,
+                                        unsigned long stack_size) { return 0; }
 static inline void cet_disable_shstk(void) {}
 static inline void cet_free_shstk(struct task_struct *p) {}
 static inline void cet_restore_signal(struct sc_ext *sc) { return; }
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 27516046117a..e90bd2ee8498 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -11,6 +11,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
+#include <asm/cet.h>
 #include <asm/debugreg.h>
 
 extern atomic64_t last_mm_ctx_id;
@@ -146,6 +147,8 @@ do {                                                \
 #else
 #define deactivate_mm(tsk, mm)                 \
 do {                                           \
+       if (!tsk->vfork_done)                   \
+               cet_free_shstk(tsk);            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
 } while (0)
diff --git a/arch/x86/kernel/cet.c b/arch/x86/kernel/cet.c
index 08e43d9b5176..12738cdfb5f2 100644
--- a/arch/x86/kernel/cet.c
+++ b/arch/x86/kernel/cet.c
@@ -172,6 +172,55 @@ int cet_setup_shstk(void)
        return 0;
 }
 
+int cet_setup_thread_shstk(struct task_struct *tsk, unsigned long clone_flags,
+                          unsigned long stack_size)
+{
+       unsigned long addr, size;
+       struct cet_user_state *state;
+       struct cet_status *cet = &tsk->thread.cet;
+
+       if (!cet->shstk_size)
+               return 0;
+
+       if ((clone_flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM)
+               return 0;
+
+       state = get_xsave_addr(&tsk->thread.fpu.state.xsave,
+                              XFEATURE_CET_USER);
+
+       if (!state)
+               return -EINVAL;
+
+       if (stack_size == 0)
+               return -EINVAL;
+
+       /* Cap shadow stack size to 4 GB */
+       size = min(rlimit(RLIMIT_STACK), 1UL << 32);
+       size = min(size, stack_size);
+
+       /*
+        * Compat-mode pthreads share a limited address space.
+        * If each function call takes an average of four slots of
+        * stack space, allocate 1/4 of the stack size for the shadow stack.
+        */
+       if (in_compat_syscall())
+               size /= 4;
+       size = round_up(size, PAGE_SIZE);
+       addr = alloc_shstk(size, 0);
+
+       if (IS_ERR_VALUE(addr)) {
+               cet->shstk_base = 0;
+               cet->shstk_size = 0;
+               return PTR_ERR((void *)addr);
+       }
+
+       fpu__prepare_write(&tsk->thread.fpu);
+       state->user_ssp = (u64)(addr + size);
+       cet->shstk_base = addr;
+       cet->shstk_size = size;
+       return 0;
+}
+
 void cet_disable_shstk(void)
 {
        struct cet_status *cet = &current->thread.cet;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9c214d7085a4..b7c8fe2d93ec 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -43,6 +43,7 @@
 #include <asm/io_bitmap.h>
 #include <asm/proto.h>
 #include <asm/frame.h>
+#include <asm/cet.h>
 
 #include "process.h"
 
@@ -109,6 +110,7 @@ void exit_thread(struct task_struct *tsk)
 
        free_vm86(t);
 
+       cet_free_shstk(tsk);
        fpu__drop(fpu);
 }
 
@@ -122,8 +124,9 @@ static int set_new_tls(struct task_struct *p, unsigned long tls)
                return do_set_thread_area_64(p, ARCH_SET_FS, tls);
 }
 
-int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
-               struct task_struct *p, unsigned long tls)
+int copy_thread(unsigned long clone_flags, unsigned long sp,
+               unsigned long stack_size, struct task_struct *p,
+               unsigned long tls)
 {
        struct inactive_task_frame *frame;
        struct fork_frame *fork_frame;
@@ -163,7 +166,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
        /* Kernel thread ? */
        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                memset(childregs, 0, sizeof(struct pt_regs));
-               kthread_frame_init(frame, sp, arg);
+               kthread_frame_init(frame, sp, stack_size);
                return 0;
        }
 
@@ -181,6 +184,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
        if (clone_flags & CLONE_SETTLS)
                ret = set_new_tls(p, tls);
 
+#ifdef CONFIG_X86_64
+       /* Allocate a new shadow stack for pthread */
+       if (!ret)
+               ret = cet_setup_thread_shstk(p, clone_flags, stack_size);
+#endif
+
        if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
                io_bitmap_share(p);
 
-- 
2.21.0
