Re: [PATCH v3 05/16] tcg/loongarch64: Lower add/sub_vec to vadd/vsub
On 9/1/23 22:02, Jiajie Chen wrote:

Lower the following ops:
- add_vec
- sub_vec

Signed-off-by: Jiajie Chen
---
 tcg/loongarch64/tcg-target-con-set.h | 1 +
 tcg/loongarch64/tcg-target-con-str.h | 1 +
 tcg/loongarch64/tcg-target.c.inc     | 60
 3 files changed, 62 insertions(+)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index 8c8ea5d919..2d5dce75c3 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -32,4 +32,5 @@ C_O1_I2(r, rZ, ri)
 C_O1_I2(r, rZ, rJ)
 C_O1_I2(r, rZ, rZ)
 C_O1_I2(w, w, wM)
+C_O1_I2(w, w, wA)
 C_O1_I4(r, rZ, rJ, rZ, rZ)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
index a8a1c44014..2ba9c135ac 100644
--- a/tcg/loongarch64/tcg-target-con-str.h
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -27,3 +27,4 @@ CONST('Z', TCG_CT_CONST_ZERO)
 CONST('C', TCG_CT_CONST_C12)
 CONST('W', TCG_CT_CONST_WSZ)
 CONST('M', TCG_CT_CONST_VCMP)
+CONST('A', TCG_CT_CONST_VADD)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index 129dd92910..0edcf5be35 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -177,6 +177,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define TCG_CT_CONST_C12   0x1000
 #define TCG_CT_CONST_WSZ   0x2000
 #define TCG_CT_CONST_VCMP  0x4000
+#define TCG_CT_CONST_VADD  0x8000

 #define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
 #define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
@@ -214,6 +215,9 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
     if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
         return true;
     }
+    if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
+        return true;
+    }
     return false;
 }

@@ -1646,6 +1650,18 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
     };
     LoongArchInsn insn;
+    static const LoongArchInsn add_vec_insn[4] = {
+        OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
+    };
+    static const LoongArchInsn add_vec_imm_insn[4] = {
+        OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
+    };
+    static const LoongArchInsn sub_vec_insn[4] = {
+        OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
+    };
+    static const LoongArchInsn sub_vec_imm_insn[4] = {
+        OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
+    };

     a0 = args[0];
     a1 = args[1];
@@ -1712,6 +1728,44 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         }
         tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
         break;
+    case INDEX_op_add_vec:
+        if (const_args[2]) {
+            int64_t value = sextract64(a2, 0, 8 << vece);
+            /* Try vaddi/vsubi */
+            if (0 <= value && value <= 0x1f) {
+                tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \
+                                                 a1, value));
+                break;
+            } else if (-0x1f <= value && value < 0) {
+                tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \
+                                                 a1, -value));
+                break;
+            }
+
+            /* constraint TCG_CT_CONST_VADD ensures unreachable */
+            g_assert_not_reached();
+        }
+        tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_sub_vec:
+        if (const_args[2]) {
+            int64_t value = sextract64(a2, 0, 8 << vece);
+            /* Try vaddi/vsubi */
+            if (0 <= value && value <= 0x1f) {
+                tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \
+                                                 a1, value));
+                break;
+            } else if (-0x1f <= value && value < 0) {
+                tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \
+                                                 a1, -value));
+                break;
+            }
+
+            /* constraint TCG_CT_CONST_VADD ensures unreachable */
+            g_assert_not_reached();
+        }
+        tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));

It would be nice to share code here.  Perhaps

    case INDEX_op_sub_vec:
        if (!const_args[2]) {
            tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
            break;
        }
        a2 = -a2;
        goto do_addi_vec;
    case INDEX_op_add_vec:
        if (!const_args[2]) {
            tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
            break;
        }
    do_addi_vec:
        ...

or a helper function.
Otherwise,

Reviewed-by: Richard Henderson

r~
[PATCH v3 05/16] tcg/loongarch64: Lower add/sub_vec to vadd/vsub
Lower the following ops:
- add_vec
- sub_vec

Signed-off-by: Jiajie Chen
---
 tcg/loongarch64/tcg-target-con-set.h | 1 +
 tcg/loongarch64/tcg-target-con-str.h | 1 +
 tcg/loongarch64/tcg-target.c.inc     | 60
 3 files changed, 62 insertions(+)

diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h
index 8c8ea5d919..2d5dce75c3 100644
--- a/tcg/loongarch64/tcg-target-con-set.h
+++ b/tcg/loongarch64/tcg-target-con-set.h
@@ -32,4 +32,5 @@ C_O1_I2(r, rZ, ri)
 C_O1_I2(r, rZ, rJ)
 C_O1_I2(r, rZ, rZ)
 C_O1_I2(w, w, wM)
+C_O1_I2(w, w, wA)
 C_O1_I4(r, rZ, rJ, rZ, rZ)
diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h
index a8a1c44014..2ba9c135ac 100644
--- a/tcg/loongarch64/tcg-target-con-str.h
+++ b/tcg/loongarch64/tcg-target-con-str.h
@@ -27,3 +27,4 @@ CONST('Z', TCG_CT_CONST_ZERO)
 CONST('C', TCG_CT_CONST_C12)
 CONST('W', TCG_CT_CONST_WSZ)
 CONST('M', TCG_CT_CONST_VCMP)
+CONST('A', TCG_CT_CONST_VADD)
diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index 129dd92910..0edcf5be35 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -177,6 +177,7 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define TCG_CT_CONST_C12   0x1000
 #define TCG_CT_CONST_WSZ   0x2000
 #define TCG_CT_CONST_VCMP  0x4000
+#define TCG_CT_CONST_VADD  0x8000

 #define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
 #define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)
@@ -214,6 +215,9 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
     if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
         return true;
     }
+    if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
+        return true;
+    }
     return false;
 }

@@ -1646,6 +1650,18 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
     };
     LoongArchInsn insn;
+    static const LoongArchInsn add_vec_insn[4] = {
+        OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
+    };
+    static const LoongArchInsn add_vec_imm_insn[4] = {
+        OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
+    };
+    static const LoongArchInsn sub_vec_insn[4] = {
+        OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
+    };
+    static const LoongArchInsn sub_vec_imm_insn[4] = {
+        OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
+    };

     a0 = args[0];
     a1 = args[1];
@@ -1712,6 +1728,44 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         }
         tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
         break;
+    case INDEX_op_add_vec:
+        if (const_args[2]) {
+            int64_t value = sextract64(a2, 0, 8 << vece);
+            /* Try vaddi/vsubi */
+            if (0 <= value && value <= 0x1f) {
+                tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \
+                                                 a1, value));
+                break;
+            } else if (-0x1f <= value && value < 0) {
+                tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \
+                                                 a1, -value));
+                break;
+            }
+
+            /* constraint TCG_CT_CONST_VADD ensures unreachable */
+            g_assert_not_reached();
+        }
+        tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
+        break;
+    case INDEX_op_sub_vec:
+        if (const_args[2]) {
+            int64_t value = sextract64(a2, 0, 8 << vece);
+            /* Try vaddi/vsubi */
+            if (0 <= value && value <= 0x1f) {
+                tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0, \
+                                                 a1, value));
+                break;
+            } else if (-0x1f <= value && value < 0) {
+                tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0, \
+                                                 a1, -value));
+                break;
+            }
+
+            /* constraint TCG_CT_CONST_VADD ensures unreachable */
+            g_assert_not_reached();
+        }
+        tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
+        break;
     case INDEX_op_dupm_vec:
         tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
         break;
@@ -1728,6 +1782,8 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
     case INDEX_op_dup_vec:
     case INDEX_op_dupm_vec:
     case INDEX_op_cmp_vec:
+    case INDEX_op_add_vec:
+    case INDEX_op_sub_vec:
         return 1;
     default:
         return 0;
     }
@@ -1892,6 +1948,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_cmp_vec:
         return C_O1_I2(w, w, wM);
+    case INDEX_op_add_vec:
+    case INDEX_op_sub_vec:
+