From: TANG Tiancheng <tangtiancheng....@alibaba-inc.com>

1. Address the immediate value constraints of comparison instructions in
   RISC-V Vector Extension 1.0.
2. Extend comparison results from mask registers to SEW-width elements,
   following the recommendations in the RISC-V ISA Specification, Volume I
   (version 20240411).

This aligns with TCG's cmp_vec behavior by expanding compare results to
full element width: all 1s for true, all 0s for false. cmp_vec is expanded
in terms of cmpsel_vec.

Signed-off-by: TANG Tiancheng <tangtiancheng....@alibaba-inc.com>
Reviewed-by: Liu Zhiwei <zhiwei_...@linux.alibaba.com>
---
 tcg/riscv/tcg-target-con-set.h |   2 +
 tcg/riscv/tcg-target-con-str.h |   2 +
 tcg/riscv/tcg-target.c.inc     | 251 +++++++++++++++++++++++++++------
 tcg/riscv/tcg-target.h         |   2 +-
 4 files changed, 209 insertions(+), 48 deletions(-)

diff --git a/tcg/riscv/tcg-target-con-set.h b/tcg/riscv/tcg-target-con-set.h
index 4c4bc99355..cc06102ccf 100644
--- a/tcg/riscv/tcg-target-con-set.h
+++ b/tcg/riscv/tcg-target-con-set.h
@@ -25,3 +25,5 @@ C_O0_I2(v, r)
 C_O1_I1(v, r)
 C_O1_I1(v, v)
 C_O1_I2(v, v, v)
+C_O1_I2(v, v, vL)
+C_O1_I4(v, v, vL, vK, vK)
diff --git a/tcg/riscv/tcg-target-con-str.h b/tcg/riscv/tcg-target-con-str.h
index b2b3211bcb..089efe96ca 100644
--- a/tcg/riscv/tcg-target-con-str.h
+++ b/tcg/riscv/tcg-target-con-str.h
@@ -17,6 +17,8 @@ REGS('v', ALL_VECTOR_REGS)
  */
 CONST('I', TCG_CT_CONST_S12)
 CONST('J', TCG_CT_CONST_J12)
+CONST('K', TCG_CT_CONST_S5)
+CONST('L', TCG_CT_CONST_CMP_VI)
 CONST('N', TCG_CT_CONST_N12)
 CONST('M', TCG_CT_CONST_M12)
 CONST('Z', TCG_CT_CONST_ZERO)
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index 9b325295b7..2ddfb3738a 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -106,11 +106,13 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
     return TCG_REG_A0 + slot;
 }
 
-#define TCG_CT_CONST_ZERO  0x100
-#define TCG_CT_CONST_S12   0x200
-#define TCG_CT_CONST_N12   0x400
-#define TCG_CT_CONST_M12   0x800
-#define TCG_CT_CONST_J12   0x1000
+#define TCG_CT_CONST_ZERO   0x100
+#define TCG_CT_CONST_S12    0x200
+#define TCG_CT_CONST_N12    0x400
+#define TCG_CT_CONST_M12    0x800
+#define TCG_CT_CONST_J12    0x1000
+#define TCG_CT_CONST_S5     0x2000
+#define TCG_CT_CONST_CMP_VI 0x4000
 
 #define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
 #define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
@@ -119,48 +121,6 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 
 #define sextreg  sextract64
 
-/* test if a constant matches the constraint */
-static bool tcg_target_const_match(int64_t val, int ct,
-                                   TCGType type, TCGCond cond, int vece)
-{
-    if (ct & TCG_CT_CONST) {
-        return 1;
-    }
-    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
-        return 1;
-    }
-    /*
-     * Sign extended from 12 bits: [-0x800, 0x7ff].
-     * Used for most arithmetic, as this is the isa field.
-     */
-    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
-        return 1;
-    }
-    /*
-     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
-     * Used for subtraction, where a constant must be handled by ADDI.
-     */
-    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
-        return 1;
-    }
-    /*
-     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
-     * Used by addsub2 and movcond, which may need the negative value,
-     * and requires the modified constant to be representable.
-     */
-    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
-        return 1;
-    }
-    /*
-     * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff].
-     * Used to map ANDN back to ANDI, etc.
-     */
-    if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
-        return 1;
-    }
-    return 0;
-}
-
 /*
  * RISC-V Base ISA opcodes (IM)
  */
@@ -310,6 +270,9 @@ typedef enum {
     OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
     OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
 
+    OPC_VMERGE_VIM = 0x5c000057 | V_OPIVI,
+    OPC_VMERGE_VVM = 0x5c000057 | V_OPIVV,
+
     OPC_VADD_VV = 0x57 | V_OPIVV,
     OPC_VSUB_VV = 0x8000057 | V_OPIVV,
     OPC_VAND_VV = 0x24000057 | V_OPIVV,
@@ -317,6 +280,29 @@ typedef enum {
     OPC_VXOR_VV = 0x2c000057 | V_OPIVV,
     OPC_VXOR_VI = 0x2c000057 | V_OPIVI,
 
+    OPC_VMSEQ_VV = 0x60000057 | V_OPIVV,
+    OPC_VMSEQ_VI = 0x60000057 | V_OPIVI,
+    OPC_VMSEQ_VX = 0x60000057 | V_OPIVX,
+    OPC_VMSNE_VV = 0x64000057 | V_OPIVV,
+    OPC_VMSNE_VI = 0x64000057 | V_OPIVI,
+    OPC_VMSNE_VX = 0x64000057 | V_OPIVX,
+
+    OPC_VMSLTU_VV = 0x68000057 | V_OPIVV,
+    OPC_VMSLTU_VX = 0x68000057 | V_OPIVX,
+    OPC_VMSLT_VV = 0x6c000057 | V_OPIVV,
+    OPC_VMSLT_VX = 0x6c000057 | V_OPIVX,
+    OPC_VMSLEU_VV = 0x70000057 | V_OPIVV,
+    OPC_VMSLEU_VX = 0x70000057 | V_OPIVX,
+    OPC_VMSLE_VV = 0x74000057 | V_OPIVV,
+    OPC_VMSLE_VX = 0x74000057 | V_OPIVX,
+
+    OPC_VMSLEU_VI = 0x70000057 | V_OPIVI,
+    OPC_VMSLE_VI = 0x74000057 | V_OPIVI,
+    OPC_VMSGTU_VI = 0x78000057 | V_OPIVI,
+    OPC_VMSGTU_VX = 0x78000057 | V_OPIVX,
+    OPC_VMSGT_VI = 0x7c000057 | V_OPIVI,
+    OPC_VMSGT_VX = 0x7c000057 | V_OPIVX,
+
     OPC_VMV_V_V = 0x5e000057 | V_OPIVV,
     OPC_VMV_V_I = 0x5e000057 | V_OPIVI,
     OPC_VMV_V_X = 0x5e000057 | V_OPIVX,
@@ -324,6 +310,97 @@ typedef enum {
     OPC_VMVNR_V = 0x9e000057 | V_OPIVI,
 } RISCVInsn;
 
+static const struct {
+    RISCVInsn op;
+    bool swap;
+} tcg_cmpcond_to_rvv_vv[] = {
+    [TCG_COND_EQ]  = { OPC_VMSEQ_VV,  false },
+    [TCG_COND_NE]  = { OPC_VMSNE_VV,  false },
+    [TCG_COND_LT]  = { OPC_VMSLT_VV,  false },
+    [TCG_COND_GE]  = { OPC_VMSLE_VV,  true  },
+    [TCG_COND_GT]  = { OPC_VMSLT_VV,  true  },
+    [TCG_COND_LE]  = { OPC_VMSLE_VV,  false },
+    [TCG_COND_LTU] = { OPC_VMSLTU_VV, false },
+    [TCG_COND_GEU] = { OPC_VMSLEU_VV, true  },
+    [TCG_COND_GTU] = { OPC_VMSLTU_VV, true  },
+    [TCG_COND_LEU] = { OPC_VMSLEU_VV, false }
+};
+
+static const struct {
+    RISCVInsn op;
+    int min;
+    int max;
+    bool adjust;
+} tcg_cmpcond_to_rvv_vi[] = {
+    [TCG_COND_EQ]  = { OPC_VMSEQ_VI,  -16, 15, false },
+    [TCG_COND_NE]  = { OPC_VMSNE_VI,  -16, 15, false },
+    [TCG_COND_GT]  = { OPC_VMSGT_VI,  -16, 15, false },
+    [TCG_COND_LE]  = { OPC_VMSLE_VI,  -16, 15, false },
+    [TCG_COND_LT]  = { OPC_VMSLE_VI,  -15, 16, true  },
+    [TCG_COND_GE]  = { OPC_VMSGT_VI,  -15, 16, true  },
+    [TCG_COND_LEU] = { OPC_VMSLEU_VI,   0, 15, false },
+    [TCG_COND_GTU] = { OPC_VMSGTU_VI,   0, 15, false },
+    [TCG_COND_LTU] = { OPC_VMSLEU_VI,   1, 16, true  },
+    [TCG_COND_GEU] = { OPC_VMSGTU_VI,   1, 16, true  },
+};
+
+/* test if a constant matches the constraint */
+static bool tcg_target_const_match(int64_t val, int ct,
+                                   TCGType type, TCGCond cond, int vece)
+{
+    if (ct & TCG_CT_CONST) {
+        return 1;
+    }
+    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
+        return 1;
+    }
+    /*
+     * Sign extended from 12 bits: [-0x800, 0x7ff].
+     * Used for most arithmetic, as this is the isa field.
+     */
+    if ((ct & TCG_CT_CONST_S12) && val >= -0x800 && val <= 0x7ff) {
+        return 1;
+    }
+    /*
+     * Sign extended from 12 bits, negated: [-0x7ff, 0x800].
+     * Used for subtraction, where a constant must be handled by ADDI.
+     */
+    if ((ct & TCG_CT_CONST_N12) && val >= -0x7ff && val <= 0x800) {
+        return 1;
+    }
+    /*
+     * Sign extended from 12 bits, +/- matching: [-0x7ff, 0x7ff].
+     * Used by addsub2 and movcond, which may need the negative value,
+     * and requires the modified constant to be representable.
+     */
+    if ((ct & TCG_CT_CONST_M12) && val >= -0x7ff && val <= 0x7ff) {
+        return 1;
+    }
+    /*
+     * Inverse of sign extended from 12 bits: ~[-0x800, 0x7ff].
+     * Used to map ANDN back to ANDI, etc.
+     */
+    if ((ct & TCG_CT_CONST_J12) && ~val >= -0x800 && ~val <= 0x7ff) {
+        return 1;
+    }
+    /*
+     * Sign extended from 5 bits: [-0x10, 0x0f].
+     * Used for vector-immediate.
+     */
+    if ((ct & TCG_CT_CONST_S5) && val >= -0x10 && val <= 0x0f) {
+        return 1;
+    }
+    /*
+     * Used for vector compare OPIVI instructions.
+     */
+    if ((ct & TCG_CT_CONST_CMP_VI) &&
+        val >= tcg_cmpcond_to_rvv_vi[cond].min &&
+        val <= tcg_cmpcond_to_rvv_vi[cond].max) {
+        return true;
+    }
+    return 0;
+}
+
 /*
  * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
  */
@@ -593,6 +670,18 @@ static void tcg_out_opc_vi(TCGContext *s, RISCVInsn opc, TCGReg vd,
     tcg_out32(s, encode_vi(opc, vd, imm, vs2, vm));
 }
 
+static void tcg_out_opc_vim_mask(TCGContext *s, RISCVInsn opc, TCGReg vd,
+                                 TCGReg vs2, int32_t imm)
+{
+    tcg_out32(s, encode_vi(opc, vd, imm, vs2, false));
+}
+
+static void tcg_out_opc_vvm_mask(TCGContext *s, RISCVInsn opc, TCGReg vd,
+                                 TCGReg vs2, TCGReg vs1)
+{
+    tcg_out32(s, encode_v(opc, vd, vs1, vs2, false));
+}
+
 /*
  * Only unit-stride addressing implemented; may extend in future.
  */
@@ -1430,6 +1519,49 @@ static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
     }
 }
 
+static void tcg_out_cmpsel(TCGContext *s, TCGType type, unsigned vece,
+                           TCGCond cond, TCGReg ret,
+                           TCGReg cmp1, TCGReg cmp2, bool c_cmp2,
+                           TCGReg val1, bool c_val1,
+                           TCGReg val2, bool c_val2)
+{
+    set_vtype_len_sew(s, type, vece);
+
+    /* Use only vmerge_vim if possible, by inverting the test. */
+    if (c_val2 && !c_val1) {
+        TCGArg temp = val1;
+        cond = tcg_invert_cond(cond);
+        val1 = val2;
+        val2 = temp;
+        c_val1 = true;
+        c_val2 = false;
+    }
+
+    /* Perform the comparison into V0 mask. */
+    if (c_cmp2) {
+        tcg_out_opc_vi(s, tcg_cmpcond_to_rvv_vi[cond].op,
+                       TCG_REG_V0, cmp1,
+                       cmp2 - tcg_cmpcond_to_rvv_vi[cond].adjust, true);
+    } else if (tcg_cmpcond_to_rvv_vv[cond].swap) {
+        tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
+                       TCG_REG_V0, cmp2, cmp1, true);
+    } else {
+        tcg_out_opc_vv(s, tcg_cmpcond_to_rvv_vv[cond].op,
+                       TCG_REG_V0, cmp1, cmp2, true);
+    }
+    if (c_val1) {
+        if (c_val2) {
+            tcg_out_opc_vi(s, OPC_VMV_V_I, ret, TCG_REG_V0, val2, true);
+            val2 = ret;
+        }
+        /* vd[i] == v0.mask[i] ? imm : vs2[i] */
+        tcg_out_opc_vim_mask(s, OPC_VMERGE_VIM, ret, val2, val1);
+    } else {
+        /* vd[i] == v0.mask[i] ? vs1[i] : vs2[i] */
+        tcg_out_opc_vvm_mask(s, OPC_VMERGE_VVM, ret, val2, val1);
+    }
+}
+
 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
 {
     TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
@@ -2253,6 +2385,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         set_vtype_len(s, type);
         tcg_out_opc_vi(s, OPC_VXOR_VI, a0, a1, -1, true);
         break;
+    case INDEX_op_cmpsel_vec:
+        tcg_out_cmpsel(s, type, vece, args[5], a0, a1, a2, const_args[2],
+                       args[3], const_args[3], args[4], const_args[4]);
+        break;
     case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
     case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
     default:
@@ -2263,7 +2399,21 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                        TCGArg a0, ...)
 {
+    va_list va;
+    TCGArg a1, a2, a3;
+
+    va_start(va, a0);
+    a1 = va_arg(va, TCGArg);
+    a2 = va_arg(va, TCGArg);
+    va_end(va);
+
     switch (opc) {
+    case INDEX_op_cmp_vec:
+        a3 = va_arg(va, TCGArg);
+        vec_gen_6(INDEX_op_cmpsel_vec, type, vece, a0, a1, a2,
+                  tcgv_i64_arg(tcg_constant_i64(-1)),
+                  tcgv_i64_arg(tcg_constant_i64(0)), a3);
+        break;
     default:
         g_assert_not_reached();
     }
@@ -2278,7 +2428,10 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_or_vec:
     case INDEX_op_xor_vec:
     case INDEX_op_not_vec:
+    case INDEX_op_cmpsel_vec:
         return 1;
+    case INDEX_op_cmp_vec:
+        return -1;
     default:
         return 0;
     }
@@ -2437,6 +2590,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_or_vec:
     case INDEX_op_xor_vec:
         return C_O1_I2(v, v, v);
+    case INDEX_op_cmp_vec:
+        return C_O1_I2(v, v, vL);
+    case INDEX_op_cmpsel_vec:
+        return C_O1_I4(v, v, vL, vK, vK);
     default:
         g_assert_not_reached();
     }
diff --git a/tcg/riscv/tcg-target.h b/tcg/riscv/tcg-target.h
index acb8dfdf16..94034504b2 100644
--- a/tcg/riscv/tcg-target.h
+++ b/tcg/riscv/tcg-target.h
@@ -164,7 +164,7 @@ typedef enum {
 #define TCG_TARGET_HAS_sat_vec          0
 #define TCG_TARGET_HAS_minmax_vec       0
 #define TCG_TARGET_HAS_bitsel_vec       0
-#define TCG_TARGET_HAS_cmpsel_vec       0
+#define TCG_TARGET_HAS_cmpsel_vec       1
 #define TCG_TARGET_HAS_tst_vec          0
-- 
2.43.0
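
For reference, below is a minimal host-side C sketch (not part of the patch)
of the per-element semantics the expansion relies on. The ref_* helpers and
main() are invented purely for illustration; only tcg_expand_vec_op(),
vec_gen_6(), tcg_cmpcond_to_rvv_vi[] and tcg_out_cmpsel() refer to code in
the diff above.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* TCG's cmp_vec contract: all bits of the element are set when the
   condition holds, all clear otherwise. */
static int64_t ref_cmp_elem(bool cond_holds)
{
    return cond_holds ? -1 : 0;
}

/* cmpsel_vec per element: select val1 or val2 based on the compare result. */
static int64_t ref_cmpsel_elem(bool cond_holds, int64_t val1, int64_t val2)
{
    return cond_holds ? val1 : val2;
}

/* Hence cmp_vec is cmpsel_vec with the constants -1 and 0, which is what
   the vec_gen_6() call emitted by tcg_expand_vec_op() expresses. */
static int64_t ref_expanded_cmp_elem(bool cond_holds)
{
    return ref_cmpsel_elem(cond_holds, -1, 0);
}

/* RVV 1.0 has no vmslt.vi/vmsge.vi, so a compare against an immediate is
   rewritten as "x <= imm - 1" (or "x > imm - 1" for GE/GEU). That is why
   the LT/GE/LTU/GEU rows of tcg_cmpcond_to_rvv_vi[] shift the accepted
   range by one and set .adjust, which tcg_out_cmpsel() subtracts from the
   immediate before encoding it. */
static bool ref_lt_imm_via_le(int64_t x, int64_t imm)
{
    /* Same predicate as x < imm; fine for the 5-bit immediates involved. */
    return x <= imm - 1;
}

int main(void)
{
    for (int b = 0; b <= 1; b++) {
        assert(ref_cmp_elem(b) == ref_expanded_cmp_elem(b));
    }
    assert(ref_lt_imm_via_le(4, 5) && !ref_lt_imm_via_le(5, 5));
    return 0;
}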