These helpers allow us to move AVR register values to/from the specified
TCGv_i64 argument.

To prevent VMX helpers accessing the cpu_avr{l,h} arrays directly, add extra
TCG temporaries as required.
(Rationale continued above; duplicated wrapped text removed during reflow.)
Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
---
 target/ppc/translate.c              |  10 ++++
 target/ppc/translate/vmx-impl.inc.c | 130 ++++++++++++++++++++++----------
 2 files changed, 111 insertions(+), 29 deletions(-)
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 1d4bf624a3..fa3e8dc114 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -6704,6 +6704,16 @@ static inline void set_fpr(int regno, TCGv_i64 src)
tcg_gen_mov_i64(cpu_fpr[regno], src);
}
+static inline void get_avr64(TCGv_i64 dst, int regno, bool high)
+{
+tcg_gen_mov_i64(dst, (high ? cpu_avrh : cpu_avrl)[regno]);
+}
+
+static inline void set_avr64(int regno, TCGv_i64 src, bool high)
+{
+tcg_gen_mov_i64((high ? cpu_avrh : cpu_avrl)[regno], src);
+}
+
#include "translate/fp-impl.inc.c"
#include "translate/vmx-impl.inc.c"
diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
index 3cb6fc2926..30046c6e31 100644
--- a/target/ppc/translate/vmx-impl.inc.c
+++ b/target/ppc/translate/vmx-impl.inc.c
@@ -18,52 +18,66 @@ static inline TCGv_ptr gen_avr_ptr(int reg)
static void glue(gen_, name)(DisasContext *ctx)
\
{ \
TCGv EA; \
+TCGv_i64 avr; \
if (unlikely(!ctx->altivec_enabled)) {\
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
gen_set_access_type(ctx, ACCESS_INT); \
+avr = tcg_temp_new_i64(); \
EA = tcg_temp_new(); \
gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf);\
/* We only need to swap high and low halves. gen_qemu_ld64_i64 does \
necessary 64-bit byteswap already. */ \
if (ctx->le_mode) { \
-gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA);\
+gen_qemu_ld64_i64(ctx, avr, EA); \
+set_avr64(rD(ctx->opcode), avr, false); \
tcg_gen_addi_tl(EA, EA, 8); \
-gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA);\
+gen_qemu_ld64_i64(ctx, avr, EA); \
+set_avr64(rD(ctx->opcode), avr, true);\
} else { \
-gen_qemu_ld64_i64(ctx, cpu_avrh[rD(ctx->opcode)], EA);\
+gen_qemu_ld64_i64(ctx, avr, EA); \
+set_avr64(rD(ctx->opcode), avr, true);\
tcg_gen_addi_tl(EA, EA, 8); \
-gen_qemu_ld64_i64(ctx, cpu_avrl[rD(ctx->opcode)], EA);\
+gen_qemu_ld64_i64(ctx, avr, EA); \
+set_avr64(rD(ctx->opcode), avr, false); \
} \
tcg_temp_free(EA);\
+tcg_temp_free_i64(avr); \
}
#define GEN_VR_STX(name, opc2, opc3) \
static void gen_st##name(DisasContext *ctx) \
{ \
TCGv EA; \
+TCGv_i64 avr; \
if (unlikely(!ctx->altivec_enabled)) {\
gen_exception(ctx, POWERPC_EXCP_VPU); \
return; \
} \
gen_set_access_type(ctx, ACCESS_INT); \
+avr = tcg_temp_new_i64(); \
EA =