Implement the MVE VADD, VSUB and VMUL insns. Signed-off-by: Peter Maydell <peter.maydell@linaro.org> --- target/arm/helper-mve.h | 12 ++++++++++++ target/arm/mve.decode | 5 +++++ target/arm/mve_helper.c | 14 ++++++++++++++ target/arm/translate-mve.c | 16 ++++++++++++++++ 4 files changed, 47 insertions(+)
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h index ad09170c9cf..b7e9af2461e 100644 --- a/target/arm/helper-mve.h +++ b/target/arm/helper-mve.h @@ -71,3 +71,15 @@ DEF_HELPER_FLAGS_4(mve_vbic, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vorr, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_vorn, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) DEF_HELPER_FLAGS_4(mve_veor, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vaddb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vaddh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vaddw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vsubb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vsubh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vsubw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) + +DEF_HELPER_FLAGS_4(mve_vmulb, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vmulh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) +DEF_HELPER_FLAGS_4(mve_vmulw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr) diff --git a/target/arm/mve.decode b/target/arm/mve.decode index 332e0b8d1d6..f7d1d303f17 100644 --- a/target/arm/mve.decode +++ b/target/arm/mve.decode @@ -33,6 +33,7 @@ @1op .... .... .... size:2 .. .... .... .... .... &1op qd=%qd qm=%qm @1op_nosz .... .... .... .... .... .... .... .... &1op qd=%qd qm=%qm size=0 +@2op .... .... .. size:2 .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn @2op_nosz .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn size=0 # Vector loads and stores @@ -77,6 +78,10 @@ VORR 1110 1111 0 . 10 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz VORN 1110 1111 0 . 11 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz VEOR 1111 1111 0 . 00 ... 0 ... 0 0001 . 1 . 1 ... 0 @2op_nosz +VADD 1110 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op +VSUB 1111 1111 0 . .. ... 0 ... 0 1000 . 1 . 0 ... 0 @2op +VMUL 1110 1111 0 . .. ... 0 ... 0 1001 . 1 . 1 ... 0 @2op + # Vector miscellaneous VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 6b3d4dbf2da..39ab684c0c3 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -323,6 +323,12 @@ DO_1OP(vfnegs, 4, uint32_t, H4, DO_FNEG) mve_advance_vpt(env); \ } +/* provide unsigned 2-op helpers for all sizes */ +#define DO_2OP_U(OP, FN) \ + DO_2OP(OP##b, 1, uint8_t, H1, FN) \ + DO_2OP(OP##h, 2, uint16_t, H2, FN) \ + DO_2OP(OP##w, 4, uint32_t, H4, FN) + #define DO_AND(N, M) ((N) & (M)) #define DO_BIC(N, M) ((N) & ~(M)) #define DO_ORR(N, M) ((N) | (M)) @@ -334,3 +340,11 @@ DO_2OP(vbic, 1, uint8_t, H1, DO_BIC) DO_2OP(vorr, 1, uint8_t, H1, DO_ORR) DO_2OP(vorn, 1, uint8_t, H1, DO_ORN) DO_2OP(veor, 1, uint8_t, H1, DO_EOR) + +#define DO_ADD(N, M) ((N) + (M)) +#define DO_SUB(N, M) ((N) - (M)) +#define DO_MUL(N, M) ((N) * (M)) + +DO_2OP_U(vadd, DO_ADD) +DO_2OP_U(vsub, DO_SUB) +DO_2OP_U(vmul, DO_MUL) diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c index 0e0fa252364..1b2c8cd5ff7 100644 --- a/target/arm/translate-mve.c +++ b/target/arm/translate-mve.c @@ -363,3 +363,19 @@ DO_LOGIC(VBIC, gen_helper_mve_vbic) DO_LOGIC(VORR, gen_helper_mve_vorr) DO_LOGIC(VORN, gen_helper_mve_vorn) DO_LOGIC(VEOR, gen_helper_mve_veor) + +#define DO_2OP(INSN, FN) \ + static bool trans_##INSN(DisasContext *s, arg_2op *a) \ + { \ + MVEGenTwoOpFn *fns[] = { \ + gen_helper_mve_##FN##b, \ + gen_helper_mve_##FN##h, \ + gen_helper_mve_##FN##w, \ + NULL, \ + }; \ + return do_2op(s, a, fns[a->size]); \ + } + +DO_2OP(VADD, vadd) +DO_2OP(VSUB, vsub) +DO_2OP(VMUL, vmul) -- 2.20.1