These instructions have the same function as their RVV1.0 counterparts; only the general differences between XTheadVector and RVV1.0 apply.
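As a rough, standalone illustration of what vsmul computes for SEW=8 (not the QEMU helper itself), assuming the round-to-nearest-up rounding mode (vxrm == 0); the name vsmul8_sketch() is hypothetical and used only for this example:

    /*
     * Signed fractional multiply: take the full-width product, round off the
     * low SEW-1 fraction bits, and saturate to the destination width.
     */
    #include <stdint.h>
    #include <stdbool.h>

    static int8_t vsmul8_sketch(int8_t a, int8_t b, bool *sat)
    {
        int16_t prod = (int16_t)a * (int16_t)b;   /* full-width product          */
        int16_t rnd  = (prod >> 6) & 1;           /* bit 6: round-to-nearest-up  */
        int16_t res  = (prod >> 7) + rnd;         /* drop the 7 fraction bits    */

        if (res > INT8_MAX) {
            /* only (-1.0) * (-1.0) overflows; the real helpers raise vxsat here */
            *sat = true;
            return INT8_MAX;
        }
        return (int8_t)res;
    }

For example, vsmul8_sketch(-128, -128, &sat) saturates to INT8_MAX and sets sat, while all other operand pairs stay in range.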
Signed-off-by: Huang Tao <eric.hu...@linux.alibaba.com>
---
 target/riscv/helper.h                         |  9 +++++++++
 .../riscv/insn_trans/trans_xtheadvector.c.inc |  6 ++++--
 target/riscv/vector_helper.c                  |  8 ++++----
 target/riscv/vector_internals.h               |  6 ++++++
 target/riscv/xtheadvector_helper.c            | 19 +++++++++++++++++++
 5 files changed, 42 insertions(+), 6 deletions(-)

diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index aab2979328..85962f7253 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1935,3 +1935,12 @@ DEF_HELPER_6(th_vasub_vx_b, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(th_vasub_vx_h, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(th_vasub_vx_w, void, ptr, ptr, tl, ptr, env, i32)
 DEF_HELPER_6(th_vasub_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(th_vsmul_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vsmul_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vsmul_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vsmul_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vsmul_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vsmul_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vsmul_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vsmul_vx_d, void, ptr, ptr, tl, ptr, env, i32)
diff --git a/target/riscv/insn_trans/trans_xtheadvector.c.inc b/target/riscv/insn_trans/trans_xtheadvector.c.inc
index 59da1e4b3f..df653bd1c9 100644
--- a/target/riscv/insn_trans/trans_xtheadvector.c.inc
+++ b/target/riscv/insn_trans/trans_xtheadvector.c.inc
@@ -1717,14 +1717,16 @@ GEN_OPIVX_TRANS_TH(th_vaadd_vx, opivx_check_th)
 GEN_OPIVX_TRANS_TH(th_vasub_vx, opivx_check_th)
 GEN_OPIVI_TRANS_TH(th_vaadd_vi, IMM_SX, th_vaadd_vx, opivx_check_th)
 
+/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
+GEN_OPIVV_TRANS_TH(th_vsmul_vv, opivv_check_th)
+GEN_OPIVX_TRANS_TH(th_vsmul_vx, opivx_check_th)
+
 #define TH_TRANS_STUB(NAME)                                \
 static bool trans_##NAME(DisasContext *s, arg_##NAME *a)   \
 {                                                          \
     return require_xtheadvector(s);                        \
 }
 
-TH_TRANS_STUB(th_vsmul_vv)
-TH_TRANS_STUB(th_vsmul_vx)
 TH_TRANS_STUB(th_vwsmaccu_vv)
 TH_TRANS_STUB(th_vwsmaccu_vx)
 TH_TRANS_STUB(th_vwsmacc_vv)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index ea1e449174..331a9a9c7a 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -2474,7 +2474,7 @@ GEN_VEXT_VX_RM(vasubu_vx_w, 4)
 GEN_VEXT_VX_RM(vasubu_vx_d, 8)
 
 /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
-static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
 {
     uint8_t round;
     int16_t res;
@@ -2494,7 +2494,7 @@ static inline int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
     }
 }
 
-static int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
+int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
 {
     uint8_t round;
     int32_t res;
@@ -2514,7 +2514,7 @@ static int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
     }
 }
 
-static int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
 {
     uint8_t round;
     int64_t res;
@@ -2534,7 +2534,7 @@ static int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
     }
 }
 
-static int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
 {
     uint8_t round;
     uint64_t hi_64, lo_64;
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index 19f174f4c8..c76ff5abac 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -308,4 +308,10 @@ int32_t aadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b);
 int64_t aadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b);
 int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b);
 int64_t asub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b);
+
+int8_t vsmul8(CPURISCVState *env, int vxrm, int8_t a, int8_t b);
+int16_t vsmul16(CPURISCVState *env, int vxrm, int16_t a, int16_t b);
+int32_t vsmul32(CPURISCVState *env, int vxrm, int32_t a, int32_t b);
+int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b);
+
 #endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
diff --git a/target/riscv/xtheadvector_helper.c b/target/riscv/xtheadvector_helper.c
index 06ac5940b7..e4acb4d176 100644
--- a/target/riscv/xtheadvector_helper.c
+++ b/target/riscv/xtheadvector_helper.c
@@ -2294,3 +2294,22 @@ GEN_TH_VX_RM(th_vasub_vx_b, 1, 1, clearb_th)
 GEN_TH_VX_RM(th_vasub_vx_h, 2, 2, clearh_th)
 GEN_TH_VX_RM(th_vasub_vx_w, 4, 4, clearl_th)
 GEN_TH_VX_RM(th_vasub_vx_d, 8, 8, clearq_th)
+
+/* Vector Single-Width Fractional Multiply with Rounding and Saturation */
+THCALL(TH_OPIVV2_RM, th_vsmul_vv_b, OP_SSS_B, H1, H1, H1, vsmul8)
+THCALL(TH_OPIVV2_RM, th_vsmul_vv_h, OP_SSS_H, H2, H2, H2, vsmul16)
+THCALL(TH_OPIVV2_RM, th_vsmul_vv_w, OP_SSS_W, H4, H4, H4, vsmul32)
+THCALL(TH_OPIVV2_RM, th_vsmul_vv_d, OP_SSS_D, H8, H8, H8, vsmul64)
+GEN_TH_VV_RM(th_vsmul_vv_b, 1, 1, clearb_th)
+GEN_TH_VV_RM(th_vsmul_vv_h, 2, 2, clearh_th)
+GEN_TH_VV_RM(th_vsmul_vv_w, 4, 4, clearl_th)
+GEN_TH_VV_RM(th_vsmul_vv_d, 8, 8, clearq_th)
+
+THCALL(TH_OPIVX2_RM, th_vsmul_vx_b, OP_SSS_B, H1, H1, vsmul8)
+THCALL(TH_OPIVX2_RM, th_vsmul_vx_h, OP_SSS_H, H2, H2, vsmul16)
+THCALL(TH_OPIVX2_RM, th_vsmul_vx_w, OP_SSS_W, H4, H4, vsmul32)
+THCALL(TH_OPIVX2_RM, th_vsmul_vx_d, OP_SSS_D, H8, H8, vsmul64)
+GEN_TH_VX_RM(th_vsmul_vx_b, 1, 1, clearb_th)
+GEN_TH_VX_RM(th_vsmul_vx_h, 2, 2, clearh_th)
+GEN_TH_VX_RM(th_vsmul_vx_w, 4, 4, clearl_th)
+GEN_TH_VX_RM(th_vsmul_vx_d, 8, 8, clearq_th)
-- 
2.44.0