Since I found these two instructions implemented with tcg, I refactored
them so they are consistent with other similar implementations that
I introduced in this patch.
Also, a new dual macro GEN_VXFORM_TRANS_DUAL is added. This macro is
used if one instruction is realized with direct translation, and the
second one with a helper.
Signed-off-by: Stefan Brankovic
---
 target/ppc/translate/vmx-impl.inc.c | 66 +++++++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 37 insertions(+), 29 deletions(-)
diff --git a/target/ppc/translate/vmx-impl.inc.c
b/target/ppc/translate/vmx-impl.inc.c
index e8a0fb6..6af9c73 100644
--- a/target/ppc/translate/vmx-impl.inc.c
+++ b/target/ppc/translate/vmx-impl.inc.c
@@ -350,6 +350,28 @@ static void glue(gen_, name0##_##name1)(DisasContext *ctx)
\
} \
}
+/*
+ * We use this macro if one instruction is realized with direct
+ * translation, and second one with a helper.
+ *
+ * The Rc bit of the opcode selects which of the two encodings is
+ * decoded: Rc == 0 dispatches to the directly-translated trans_<name0>,
+ * Rc == 1 dispatches to the helper-based gen_<name1>. The insns_flags /
+ * insns_flags2 of the current CPU must advertise the selected
+ * instruction, otherwise an invalid-instruction exception is raised.
+ */
+#define GEN_VXFORM_TRANS_DUAL(name0, flg0, flg2_0, name1, flg1, flg2_1) \
+static void glue(gen_, name0##_##name1)(DisasContext *ctx)             \
+{                                                                      \
+    if ((Rc(ctx->opcode) == 0) &&                                      \
+        ((ctx->insns_flags & flg0) || (ctx->insns_flags2 & flg2_0))) { \
+        /* Direct translation needs the Altivec-enabled check here;    \
+         * the helper path performs its own check in gen_<name1>. */   \
+        if (unlikely(!ctx->altivec_enabled)) {                         \
+            gen_exception(ctx, POWERPC_EXCP_VPU);                      \
+            return;                                                    \
+        }                                                              \
+        trans_##name0(ctx);                                            \
+    } else if ((Rc(ctx->opcode) == 1) &&                               \
+        ((ctx->insns_flags & flg1) || (ctx->insns_flags2 & flg2_1))) { \
+        gen_##name1(ctx);                                              \
+    } else {                                                           \
+        gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL);            \
+    }                                                                  \
+}
+
/* Adds support to provide invalid mask */
#define GEN_VXFORM_DUAL_EXT(name0, flg0, flg2_0, inval0,\
name1, flg1, flg2_1, inval1)\
@@ -431,20 +453,13 @@ GEN_VXFORM(vmrglb, 6, 4);
GEN_VXFORM(vmrglh, 6, 5);
GEN_VXFORM(vmrglw, 6, 6);
-static void gen_vmrgew(DisasContext *ctx)
+static void trans_vmrgew(DisasContext *ctx)
{
-TCGv_i64 tmp;
-TCGv_i64 avr;
-int VT, VA, VB;
-if (unlikely(!ctx->altivec_enabled)) {
-gen_exception(ctx, POWERPC_EXCP_VPU);
-return;
-}
-VT = rD(ctx->opcode);
-VA = rA(ctx->opcode);
-VB = rB(ctx->opcode);
-tmp = tcg_temp_new_i64();
-avr = tcg_temp_new_i64();
+int VT = rD(ctx->opcode);
+int VA = rA(ctx->opcode);
+int VB = rB(ctx->opcode);
+TCGv_i64 tmp = tcg_temp_new_i64();
+TCGv_i64 avr = tcg_temp_new_i64();
get_avr64(avr, VB, true);
tcg_gen_shri_i64(tmp, avr, 32);
@@ -462,21 +477,14 @@ static void gen_vmrgew(DisasContext *ctx)
tcg_temp_free_i64(avr);
}
-static void gen_vmrgow(DisasContext *ctx)
+static void trans_vmrgow(DisasContext *ctx)
{
-TCGv_i64 t0, t1;
-TCGv_i64 avr;
-int VT, VA, VB;
-if (unlikely(!ctx->altivec_enabled)) {
-gen_exception(ctx, POWERPC_EXCP_VPU);
-return;
-}
-VT = rD(ctx->opcode);
-VA = rA(ctx->opcode);
-VB = rB(ctx->opcode);
-t0 = tcg_temp_new_i64();
-t1 = tcg_temp_new_i64();
-avr = tcg_temp_new_i64();
+int VT = rD(ctx->opcode);
+int VA = rA(ctx->opcode);
+int VB = rB(ctx->opcode);
+TCGv_i64 t0 = tcg_temp_new_i64();
+TCGv_i64 t1 = tcg_temp_new_i64();
+TCGv_i64 avr = tcg_temp_new_i64();
get_avr64(t0, VB, true);
get_avr64(t1, VA, true);
@@ -1165,14 +1173,14 @@ GEN_VXFORM_ENV(vminfp, 5, 17);
GEN_VXFORM_HETRO(vextublx, 6, 24)
GEN_VXFORM_HETRO(vextuhlx, 6, 25)
GEN_VXFORM_HETRO(vextuwlx, 6, 26)
-GEN_VXFORM_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
+GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207,
vextuwlx, PPC_NONE, PPC2_ISA300)
GEN_VXFORM_HETRO(vextubrx, 6, 28)
GEN_VXFORM_HETRO(vextuhrx, 6, 29)
GEN_VXFORM_HETRO(vextuwrx, 6, 30)
GEN_VXFORM_TRANS(lvsl, 6, 31)
GEN_VXFORM_TRANS(lvsr, 6, 32)
-GEN_VXFORM_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207, \
+GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207,
vextuwrx, PPC_NONE, PPC2_ISA300)
#define GEN_VXRFORM1(opname, name, str, opc2, opc3) \
--
2.7.4