Set the SM bit in the SVE record on signal delivery and create the ZA record. On return, restore SM and ZA state according to the records present.
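
For illustration, a guest signal handler could locate the new record by walking the reserved area of the frame. A minimal sketch, not part of this patch: it assumes Linux >= 5.19 uapi headers, which define ZA_MAGIC and struct za_context to match the target_* definitions added below, and it does not follow EXTRA_MAGIC records.

    #include <stdio.h>
    #include <signal.h>
    #include <ucontext.h>
    #include <asm/sigcontext.h>

    static void handler(int sig, siginfo_t *info, void *ucp)
    {
        ucontext_t *uc = ucp;
        struct _aarch64_ctx *hdr =
            (struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

        /* Records are consecutive; a zeroed header terminates the list. */
        while (hdr->magic != 0) {
            if (hdr->magic == ZA_MAGIC) {
                struct za_context *za = (struct za_context *)hdr;
                /* A header-only record means PSTATE.ZA was clear. */
                printf("ZA: vl=%d, data %s\n", za->vl,
                       hdr->size > sizeof(*za) ? "present" : "absent");
            }
            hdr = (struct _aarch64_ctx *)((char *)hdr + hdr->size);
        }
    }

Note the handler itself runs with SM and ZA cleared (see the end of target_setup_frame below), so this record is the handler's only view of the interrupted ZA state.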
Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 linux-user/aarch64/signal.c | 158 ++++++++++++++++++++++++++++++++++--
 1 file changed, 149 insertions(+), 9 deletions(-)

diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c
index 73b15038ad..79b2fc1cfe 100644
--- a/linux-user/aarch64/signal.c
+++ b/linux-user/aarch64/signal.c
@@ -104,6 +104,22 @@ struct target_sve_context {
 
 #define TARGET_SVE_SIG_FLAG_SM  1
 
+#define TARGET_ZA_MAGIC        0x54366345
+
+struct target_za_context {
+    struct target_aarch64_ctx head;
+    uint16_t vl;
+    uint16_t reserved[3];
+    /* The actual ZA data immediately follows. */
+};
+
+#define TARGET_ZA_SIG_REGS_OFFSET \
+    QEMU_ALIGN_UP(sizeof(struct target_za_context), TARGET_SVE_VQ_BYTES)
+#define TARGET_ZA_SIG_ZAV_OFFSET(VQ, N) \
+    (TARGET_ZA_SIG_REGS_OFFSET + (VQ) * TARGET_SVE_VQ_BYTES * (N))
+#define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \
+    TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES)
+
 struct target_rt_sigframe {
     struct target_siginfo info;
     struct target_ucontext uc;
@@ -207,6 +223,32 @@ static void target_setup_sve_record(struct target_sve_context *sve,
     }
 }
 
+static void target_setup_za_record(struct target_za_context *za,
+                                   CPUARMState *env, int vq, int size)
+{
+    int i, j, vl = vq * TARGET_SVE_VQ_BYTES;
+
+    memset(za, 0, sizeof(*za));
+    __put_user(TARGET_ZA_MAGIC, &za->head.magic);
+    __put_user(size, &za->head.size);
+    __put_user(vl, &za->vl);
+
+    if (size == TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+        return;
+    }
+
+    /*
+     * Note that ZA vectors are stored as a byte stream,
+     * with each byte element at a subsequent address.
+     */
+    for (i = 0; i < vl; ++i) {
+        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+        for (j = 0; j < vq * 2; ++j) {
+            __put_user_e(env->zarray[i].d[j], z + j, le);
+        }
+    }
+}
+
 static void target_restore_general_frame(CPUARMState *env,
                                          struct target_rt_sigframe *sf)
 {
@@ -252,16 +294,28 @@ static void target_restore_fpsimd_record(CPUARMState *env,
 
 static bool target_restore_sve_record(CPUARMState *env,
                                       struct target_sve_context *sve,
-                                      int size)
+                                      int size, int *svcr)
 {
-    int i, j, vl, vq;
+    int i, j, vl, vq, flags;
+    bool sm;
 
+    /* ??? Kernel tests SVE && (!sm || SME); suggest (sm ? SME : SVE). */
     if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
         return false;
     }
 
     __get_user(vl, &sve->vl);
-    vq = sve_vq_cached(env);
+    __get_user(flags, &sve->flags);
+
+    sm = flags & TARGET_SVE_SIG_FLAG_SM;
+    if (sm) {
+        if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+            return false;
+        }
+        vq = sme_vq_cached(env);
+    } else {
+        vq = sve_vq_cached(env);
+    }
 
     /* Reject mismatched VL. */
     if (vl != vq * TARGET_SVE_VQ_BYTES) {
@@ -278,6 +332,8 @@ static bool target_restore_sve_record(CPUARMState *env,
         return false;
     }
 
+    *svcr = FIELD_DP64(*svcr, SVCR, SM, sm);
+
     /*
      * Note that SVE regs are stored as a byte stream, with each byte element
      * at a subsequent address. This corresponds to a little-endian load
@@ -304,15 +360,57 @@ static bool target_restore_sve_record(CPUARMState *env,
     return true;
 }
 
+static bool target_restore_za_record(CPUARMState *env,
+                                     struct target_za_context *za,
+                                     int size, int *svcr)
+{
+    int i, j, vl, vq;
+
+    if (!cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        return false;
+    }
+
+    __get_user(vl, &za->vl);
+    vq = sme_vq_cached(env);
+
+    /* Reject mismatched VL. */
+    if (vl != vq * TARGET_SVE_VQ_BYTES) {
+        return false;
+    }
+
+    /* Accept empty record -- used to clear PSTATE.ZA. */
+    if (size <= TARGET_ZA_SIG_CONTEXT_SIZE(0)) {
+        return true;
+    }
+
+    /* Reject non-empty but incomplete record. */
+    if (size < TARGET_ZA_SIG_CONTEXT_SIZE(vq)) {
+        return false;
+    }
+
+    *svcr = FIELD_DP64(*svcr, SVCR, ZA, 1);
+
+    for (i = 0; i < vl; ++i) {
+        uint64_t *z = (void *)za + TARGET_ZA_SIG_ZAV_OFFSET(vq, i);
+        for (j = 0; j < vq * 2; ++j) {
+            __get_user_e(env->zarray[i].d[j], z + j, le);
+        }
+    }
+    return true;
+}
+
 static int target_restore_sigframe(CPUARMState *env,
                                    struct target_rt_sigframe *sf)
 {
     struct target_aarch64_ctx *ctx, *extra = NULL;
     struct target_fpsimd_context *fpsimd = NULL;
     struct target_sve_context *sve = NULL;
+    struct target_za_context *za = NULL;
     uint64_t extra_datap = 0;
     bool used_extra = false;
     int sve_size = 0;
+    int za_size = 0;
+    int svcr = 0;
 
     target_restore_general_frame(env, sf);
 
@@ -350,6 +448,14 @@ static int target_restore_sigframe(CPUARMState *env,
             sve_size = size;
             break;
 
+        case TARGET_ZA_MAGIC:
+            if (za || size < sizeof(struct target_za_context)) {
+                goto err;
+            }
+            za = (struct target_za_context *)ctx;
+            za_size = size;
+            break;
+
         case TARGET_EXTRA_MAGIC:
             if (extra || size != sizeof(struct target_extra_context)) {
                 goto err;
@@ -381,9 +487,16 @@ static int target_restore_sigframe(CPUARMState *env,
     }
 
     /* SVE data, if present, overwrites FPSIMD data. */
-    if (sve && !target_restore_sve_record(env, sve, sve_size)) {
+    if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) {
         goto err;
     }
+    if (za && !target_restore_za_record(env, za, za_size, &svcr)) {
+        goto err;
+    }
+    if (env->svcr != svcr) {
+        env->svcr = svcr;
+        arm_rebuild_hflags(env);
+    }
     unlock_user(extra, extra_datap, 0);
     return 0;
 
@@ -451,7 +564,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         .total_size = offsetof(struct target_rt_sigframe,
                                uc.tuc_mcontext.__reserved),
     };
-    int fpsimd_ofs, fr_ofs, sve_ofs = 0, vq = 0, sve_size = 0;
+    int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0;
+    int sve_vq = 0, sve_size = 0, za_vq = 0, za_size = 0;
     struct target_rt_sigframe *frame;
     struct target_rt_frame_record *fr;
     abi_ulong frame_addr, return_addr;
@@ -461,11 +575,22 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
                                           &layout);
 
     /* SVE state needs saving only if it exists. */
-    if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
-        vq = sve_vq_cached(env);
-        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
+    if (cpu_isar_feature(aa64_sve, env_archcpu(env)) ||
+        cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        sve_vq = sve_vq_cached(env);
+        sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(sve_vq), 16);
         sve_ofs = alloc_sigframe_space(sve_size, &layout);
     }
+    if (cpu_isar_feature(aa64_sme, env_archcpu(env))) {
+        /* ZA state needs saving only if it is enabled. */
+        za_vq = sme_vq_cached(env);
+        if (FIELD_EX64(env->svcr, SVCR, ZA)) {
+            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(za_vq);
+        } else {
+            za_size = TARGET_ZA_SIG_CONTEXT_SIZE(0);
+        }
+        za_ofs = alloc_sigframe_space(za_size, &layout);
+    }
 
     if (layout.extra_ofs) {
         /* Reserve space for the extra end marker. The standard end marker
@@ -512,7 +637,10 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         target_setup_end_record((void *)frame + layout.extra_end_ofs);
     }
     if (sve_ofs) {
-        target_setup_sve_record((void *)frame + sve_ofs, env, vq, sve_size);
+        target_setup_sve_record((void *)frame + sve_ofs, env, sve_vq, sve_size);
+    }
+    if (za_ofs) {
+        target_setup_za_record((void *)frame + za_ofs, env, za_vq, za_size);
     }
 
     /* Set up the stack frame for unwinding. */
@@ -536,6 +664,18 @@ static void target_setup_frame(int usig, struct target_sigaction *ka,
         env->btype = 2;
     }
 
+    /*
+     * Invoke the signal handler with both SM and ZA disabled.
+     * When clearing SM, ResetSVEState, per SMSTOP.
+     */
+    if (FIELD_EX64(env->svcr, SVCR, SM)) {
+        arm_reset_sve_state(env);
+    }
+    if (env->svcr) {
+        env->svcr = 0;
+        arm_rebuild_hflags(env);
+    }
+
     if (info) {
         tswap_siginfo(&frame->info, info);
         env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
-- 
2.34.1
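
For reference, the record sizing used above works out as follows. A standalone sketch mirroring the TARGET_ZA_SIG_* arithmetic; VQ_BYTES and za_sig_context_size are illustrative names, not from the patch.

    #include <assert.h>

    #define VQ_BYTES 16   /* one quadword, i.e. TARGET_SVE_VQ_BYTES */

    /* Header rounds up to 16 bytes; a full ZA payload is VL x VL bytes. */
    static unsigned za_sig_context_size(unsigned vq)
    {
        unsigned vl = vq * VQ_BYTES;
        return 16 + vl * vl;
    }

    int main(void)
    {
        assert(za_sig_context_size(0) == 16);    /* empty record: PSTATE.ZA == 0 */
        assert(za_sig_context_size(4) == 4112);  /* 512-bit SVL: 16 + 64 * 64 */
        return 0;
    }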