From: Richard Henderson <r...@twiddle.net>

Signed-off-by: Richard Henderson <r...@twiddle.net>
Signed-off-by: Riku Voipio <riku.voi...@linaro.org>
---
 linux-user/host/s390x/hostdep.h          | 23 ++++
 linux-user/host/s390x/safe-syscall.inc.S | 90 ++++++++++++++++++++++++++++++++
 2 files changed, 113 insertions(+)
 create mode 100644 linux-user/host/s390x/safe-syscall.inc.S
diff --git a/linux-user/host/s390x/hostdep.h b/linux-user/host/s390x/hostdep.h
index 7609bf5..e95871c 100644
--- a/linux-user/host/s390x/hostdep.h
+++ b/linux-user/host/s390x/hostdep.h
@@ -12,4 +12,27 @@
 #ifndef QEMU_HOSTDEP_H
 #define QEMU_HOSTDEP_H
 
+/* We have a safe-syscall.inc.S */
+#define HAVE_SAFE_SYSCALL
+
+#ifndef __ASSEMBLER__
+
+/* These are defined by the safe-syscall.inc.S file */
+extern char safe_syscall_start[];
+extern char safe_syscall_end[];
+
+/* Adjust the signal context to rewind out of safe-syscall if we're in it */
+static inline void rewind_if_in_safe_syscall(void *puc)
+{
+    struct ucontext *uc = puc;
+    unsigned long *pcreg = &uc->uc_mcontext.psw.addr;
+
+    if (*pcreg > (uintptr_t)safe_syscall_start
+        && *pcreg < (uintptr_t)safe_syscall_end) {
+        *pcreg = (uintptr_t)safe_syscall_start;
+    }
+}
+
+#endif /* __ASSEMBLER__ */
+
 #endif
diff --git a/linux-user/host/s390x/safe-syscall.inc.S b/linux-user/host/s390x/safe-syscall.inc.S
new file mode 100644
index 0000000..f1b446a
--- /dev/null
+++ b/linux-user/host/s390x/safe-syscall.inc.S
@@ -0,0 +1,90 @@
+/*
+ * safe-syscall.inc.S : host-specific assembly fragment
+ * to handle signals occurring at the same time as system calls.
+ * This is intended to be included by linux-user/safe-syscall.S
+ *
+ * Written by Richard Henderson <r...@twiddle.net>
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+        .global safe_syscall_base
+        .global safe_syscall_start
+        .global safe_syscall_end
+        .type   safe_syscall_base, @function
+
+        /* This is the entry point for making a system call. The calling
+         * convention here is that of a C varargs function with the
+         * first argument an 'int *' to the signal_pending flag, the
+         * second one the system call number (as a 'long'), and all further
+         * arguments being syscall arguments (also 'long').
+         * We return a long which is the syscall's return value, which
+         * may be negative-errno on failure. Conversion to the
+         * -1-and-errno-set convention is done by the calling wrapper.
+         */
+safe_syscall_base:
+        .cfi_startproc
+        stmg    %r6,%r15,48(%r15)       /* save all call-saved registers */
+        .cfi_offset %r15,-40
+        .cfi_offset %r14,-48
+        .cfi_offset %r13,-56
+        .cfi_offset %r12,-64
+        .cfi_offset %r11,-72
+        .cfi_offset %r10,-80
+        .cfi_offset %r9,-88
+        .cfi_offset %r8,-96
+        .cfi_offset %r7,-104
+        .cfi_offset %r6,-112
+        lgr     %r1,%r15
+        lg      %r0,8(%r15)             /* load eos */
+        aghi    %r15,-160
+        .cfi_adjust_cfa_offset 160
+        stg     %r1,0(%r15)             /* store back chain */
+        stg     %r0,8(%r15)             /* store eos */
+
+        /* The syscall calling convention isn't the same as the
+         * C one:
+         * we enter with r2 == *signal_pending
+         *               r3 == syscall number
+         *               r4, r5, r6, (stack) == syscall arguments
+         *               and return the result in r2
+         * and the syscall instruction needs
+         *               r1 == syscall number
+         *               r2 ... r7 == syscall arguments
+         *               and returns the result in r2
+         * Shuffle everything around appropriately.
+         */
+        lgr     %r8,%r2                 /* signal_pending pointer */
+        lgr     %r1,%r3                 /* syscall number */
+        lgr     %r2,%r4                 /* syscall args */
+        lgr     %r3,%r5
+        lgr     %r4,%r6
+        lmg     %r5,%r7,320(%r15)
+
+        /* This next sequence of code works in conjunction with
+         * rewind_if_in_safe_syscall(). If a signal is taken
+         * and the interrupted PC is anywhere between 'safe_syscall_start'
+         * and 'safe_syscall_end' then we rewind it to 'safe_syscall_start'.
+         * The code sequence must therefore be able to cope with this, and
+         * the syscall instruction must be the final one in the sequence.
+         */
+safe_syscall_start:
+        /* if signal_pending is non-zero, don't do the call */
+        lt      %r0,0(%r8)
+        jne     2f
+        svc     0
+safe_syscall_end:
+
+1:      lg      %r15,0(%r15)            /* load back chain */
+        .cfi_remember_state
+        .cfi_adjust_cfa_offset -160
+        lmg     %r6,%r15,48(%r15)       /* load saved registers */
+        br      %r14
+        .cfi_restore_state
+2:      lghi    %r2, -TARGET_ERESTARTSYS
+        j       1b
+        .cfi_endproc
+
+        .size   safe_syscall_base, .-safe_syscall_base
-- 
2.1.4
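
For context (not part of the patch): the assembly comment describes safe_syscall_base() as a C varargs function taking the signal_pending pointer and the syscall number, and returning negative-errno on failure. A rough sketch of the kind of calling wrapper it expects is below; the names signal_pending and safe_read and the choice of __NR_read are illustrative assumptions for the example, not QEMU's actual declarations.

/* Illustrative sketch only -- not part of this patch.  It shows how a
 * caller might use safe_syscall_base() per the convention documented in
 * the assembly comments: pass &signal_pending and the syscall number,
 * get back -errno on failure, and convert that to the usual
 * -1-and-errno-set convention in the wrapper. */
#include <errno.h>
#include <stddef.h>
#include <sys/syscall.h>                /* for __NR_read */

extern long safe_syscall_base(int *pending, long number, ...);
extern __thread int signal_pending;     /* assumed per-thread flag */

static long safe_read(int fd, void *buf, size_t count)
{
    long ret = safe_syscall_base(&signal_pending, __NR_read,
                                 (long)fd, (long)buf, (long)count);
    if (ret < 0) {
        /* The real wrapper also has to deal with -TARGET_ERESTARTSYS
         * (the "don't do the call" path above); omitted here. */
        errno = -ret;
        ret = -1;
    }
    return ret;
}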
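
Likewise, a hedged sketch of how the hostdep.h helper is meant to be used: the common linux-user host signal handler (its name is assumed here) calls rewind_if_in_safe_syscall() before queuing the guest signal, so a signal that lands inside the safe_syscall_start..safe_syscall_end window restarts at the pending-flag check instead of racing with the svc instruction.

/* Illustrative sketch only -- not part of this patch. */
#include <signal.h>
#include "hostdep.h"    /* provides rewind_if_in_safe_syscall() */

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    /* If the interrupted PSW address is inside the safe-syscall window,
     * wind it back to safe_syscall_start so the signal_pending check is
     * re-executed when the handler returns. */
    rewind_if_in_safe_syscall(puc);

    /* ... record the guest signal and set *signal_pending here ... */
}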