Move the body out of this large macro. Use tcg_constant_i64.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 target/ppc/translate/vmx-impl.c.inc | 95 +++++++++++++++--------------
 1 file changed, 49 insertions(+), 46 deletions(-)

diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 05ba9c9492..ee656d6a44 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -171,53 +171,56 @@ static void gen_mtvscr(DisasContext *ctx)
     gen_helper_mtvscr(cpu_env, val);
 }
 
+static void gen_vx_vmul10(DisasContext *ctx, bool add_cin, bool ret_carry)
+{
+    TCGv_i64 t0;
+    TCGv_i64 t1;
+    TCGv_i64 t2;
+    TCGv_i64 avr;
+    TCGv_i64 ten, z;
+
+    if (unlikely(!ctx->altivec_enabled)) {
+        gen_exception(ctx, POWERPC_EXCP_VPU);
+        return;
+    }
+
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
+    t2 = tcg_temp_new_i64();
+    avr = tcg_temp_new_i64();
+    ten = tcg_constant_i64(10);
+    z = tcg_constant_i64(0);
+
+    if (add_cin) {
+        get_avr64(avr, rA(ctx->opcode), false);
+        tcg_gen_mulu2_i64(t0, t1, avr, ten);
+        get_avr64(avr, rB(ctx->opcode), false);
+        tcg_gen_andi_i64(t2, avr, 0xF);
+        tcg_gen_add2_i64(avr, t2, t0, t1, t2, z);
+        set_avr64(rD(ctx->opcode), avr, false);
+    } else {
+        get_avr64(avr, rA(ctx->opcode), false);
+        tcg_gen_mulu2_i64(avr, t2, avr, ten);
+        set_avr64(rD(ctx->opcode), avr, false);
+    }
+
+    if (ret_carry) {
+        get_avr64(avr, rA(ctx->opcode), true);
+        tcg_gen_mulu2_i64(t0, t1, avr, ten);
+        tcg_gen_add2_i64(t0, avr, t0, t1, t2, z);
+        set_avr64(rD(ctx->opcode), avr, false);
+        set_avr64(rD(ctx->opcode), z, true);
+    } else {
+        get_avr64(avr, rA(ctx->opcode), true);
+        tcg_gen_mul_i64(t0, avr, ten);
+        tcg_gen_add_i64(avr, t0, t2);
+        set_avr64(rD(ctx->opcode), avr, true);
+    }
+}
+
 #define GEN_VX_VMUL10(name, add_cin, ret_carry) \
-static void glue(gen_, name)(DisasContext *ctx) \
-{ \
-    TCGv_i64 t0; \
-    TCGv_i64 t1; \
-    TCGv_i64 t2; \
-    TCGv_i64 avr; \
-    TCGv_i64 ten, z; \
- \
-    if (unlikely(!ctx->altivec_enabled)) { \
-        gen_exception(ctx, POWERPC_EXCP_VPU); \
-        return; \
-    } \
- \
-    t0 = tcg_temp_new_i64(); \
-    t1 = tcg_temp_new_i64(); \
-    t2 = tcg_temp_new_i64(); \
-    avr = tcg_temp_new_i64(); \
-    ten = tcg_const_i64(10); \
-    z = tcg_const_i64(0); \
- \
-    if (add_cin) { \
-        get_avr64(avr, rA(ctx->opcode), false); \
-        tcg_gen_mulu2_i64(t0, t1, avr, ten); \
-        get_avr64(avr, rB(ctx->opcode), false); \
-        tcg_gen_andi_i64(t2, avr, 0xF); \
-        tcg_gen_add2_i64(avr, t2, t0, t1, t2, z); \
-        set_avr64(rD(ctx->opcode), avr, false); \
-    } else { \
-        get_avr64(avr, rA(ctx->opcode), false); \
-        tcg_gen_mulu2_i64(avr, t2, avr, ten); \
-        set_avr64(rD(ctx->opcode), avr, false); \
-    } \
- \
-    if (ret_carry) { \
-        get_avr64(avr, rA(ctx->opcode), true); \
-        tcg_gen_mulu2_i64(t0, t1, avr, ten); \
-        tcg_gen_add2_i64(t0, avr, t0, t1, t2, z); \
-        set_avr64(rD(ctx->opcode), avr, false); \
-        set_avr64(rD(ctx->opcode), z, true); \
-    } else { \
-        get_avr64(avr, rA(ctx->opcode), true); \
-        tcg_gen_mul_i64(t0, avr, ten); \
-        tcg_gen_add_i64(avr, t0, t2); \
-        set_avr64(rD(ctx->opcode), avr, true); \
-    } \
-} \
+    static void glue(gen_, name)(DisasContext *ctx) \
+    { gen_vx_vmul10(ctx, add_cin, ret_carry); }
 
 GEN_VX_VMUL10(vmul10uq, 0, 0);
 GEN_VX_VMUL10(vmul10euq, 1, 0);
-- 
2.34.1