The current quirky interaction between ptrace, seccomp, and emulated
vsyscalls does not allow a tracer to bypass the call.  This change
provides that ability by checking whether orig_ax was changed across
__secure_computing(): if a tracer rewrote the syscall number at the
seccomp trace stop, the emulated vsyscall is skipped.
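
For illustration (not part of this patch), a minimal tracer along the
lines below is the sort of consumer this enables.  It is only a sketch
under stated assumptions: the helper name and child setup are invented
for the example, and the child is expected to have already called
PTRACE_TRACEME, stopped itself, and installed a seccomp filter that
returns SECCOMP_RET_TRACE for the calls of interest.  At each seccomp
stop the tracer rewrites orig_rax to -1, and with this change that skip
is honored for vsyscall-emulated calls as well.

  #include <signal.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/user.h>
  #include <sys/wait.h>

  /* Fallback definitions for older userspace headers. */
  #ifndef PTRACE_EVENT_SECCOMP
  #define PTRACE_EVENT_SECCOMP 7
  #endif
  #ifndef PTRACE_O_TRACESECCOMP
  #define PTRACE_O_TRACESECCOMP (1 << PTRACE_EVENT_SECCOMP)
  #endif

  /* Skip every syscall the child's filter reports via SECCOMP_RET_TRACE. */
  static void trace_and_skip(pid_t child)
  {
  	int status;

  	waitpid(child, &status, 0);	/* child stopped after PTRACE_TRACEME */
  	ptrace(PTRACE_SETOPTIONS, child, 0, PTRACE_O_TRACESECCOMP);
  	ptrace(PTRACE_CONT, child, 0, 0);

  	while (waitpid(child, &status, 0) > 0 && !WIFEXITED(status)) {
  		if (status >> 8 == (SIGTRAP | (PTRACE_EVENT_SECCOMP << 8))) {
  			struct user_regs_struct regs;

  			ptrace(PTRACE_GETREGS, child, 0, &regs);
  			/* Rewriting the syscall number to -1 asks the kernel to skip it. */
  			regs.orig_rax = -1;
  			ptrace(PTRACE_SETREGS, child, 0, &regs);
  		}
  		ptrace(PTRACE_CONT, child, 0, 0);
  	}
  }

The ordinary syscall path already respects a rewritten orig_rax; the new
orig_ax comparison in vsyscall_seccomp() is what extends the same skip
to calls that arrive through the emulated vsyscall page.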

Signed-off-by: Will Drewry <[email protected]>
---
 arch/x86/kernel/vsyscall_64.c |   10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 5db36ca..5f9640c 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -142,11 +142,15 @@ static int addr_to_vsyscall_nr(unsigned long addr)
 #ifdef CONFIG_SECCOMP
 static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
 {
+       int ret;
        if (!seccomp_mode(&tsk->seccomp))
                return 0;
        task_pt_regs(tsk)->orig_ax = syscall_nr;
        task_pt_regs(tsk)->ax = syscall_nr;
-       return __secure_computing(syscall_nr);
+       ret = __secure_computing(syscall_nr);
+       if (task_pt_regs(tsk)->orig_ax != syscall_nr)
+               return 1; /* ptrace syscall skip */
+       return ret;
 }
 #else
 #define vsyscall_seccomp(_tsk, _nr) 0
@@ -278,9 +282,9 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
 
        if (skip) {
-               if ((long)regs->ax <= 0L) /* seccomp errno emulation */
+               if ((long)regs->ax <= 0L || skip == 1) /* seccomp errno/trace */
                        goto do_ret;
-               goto done; /* seccomp trace/trap */
+               goto done; /* seccomp trap */
        }
 
        if (ret == -EFAULT) {
-- 
1.7.9.5
