Re: [Qemu-devel] [RFC PATCH 3/6] target/ppc: introduce get_cpu_vsr{l,h}() and set_cpu_vsr{l,h}() helpers for VSR register access

2018-12-11 Thread Mark Cave-Ayland
On 10/12/2018 19:16, Richard Henderson wrote:

> On 12/7/18 2:56 AM, Mark Cave-Ayland wrote:
>> +static inline void get_vsr(TCGv_i64 dst, int n)
>> +{
>> +    tcg_gen_ld_i64(dst, cpu_env, offsetof(CPUPPCState, vsr[n]));
>> +}
>> +
>> +static inline void set_vsr(int n, TCGv_i64 src)
>> +{
>> +    tcg_gen_st_i64(src, cpu_env, offsetof(CPUPPCState, vsr[n]));
>> +}
> 
> Why isn't this helper still using cpu_vsr[n]?

Gah, this is just a mistake from squashing patches from my working branch to
tidy them up for submission. I'll fix this in the next iteration.


ATB,

Mark.



Re: [Qemu-devel] [RFC PATCH 3/6] target/ppc: introduce get_cpu_vsr{l,h}() and set_cpu_vsr{l,h}() helpers for VSR register access

2018-12-10 Thread Richard Henderson
On 12/7/18 2:56 AM, Mark Cave-Ayland wrote:
> +static inline void get_vsr(TCGv_i64 dst, int n)
> +{
> +    tcg_gen_ld_i64(dst, cpu_env, offsetof(CPUPPCState, vsr[n]));
> +}
> +
> +static inline void set_vsr(int n, TCGv_i64 src)
> +{
> +    tcg_gen_st_i64(src, cpu_env, offsetof(CPUPPCState, vsr[n]));
> +}

Why isn't this helper still using cpu_vsr[n]?


r~



[Qemu-devel] [RFC PATCH 3/6] target/ppc: introduce get_cpu_vsr{l,h}() and set_cpu_vsr{l,h}() helpers for VSR register access

2018-12-07 Thread Mark Cave-Ayland
These helpers allow us to move VSR register values to/from the specified
TCGv_i64 argument.

To prevent VSX helpers accessing the cpu_vsr array directly, add extra TCG
temporaries as required.
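
As a rough illustration of the conversion (a sketch rather than a hunk from
this patch; the xB-to-xT move below is made up for the example), each op
changes from operating on the VSR TCGv_i64 globals directly to copying the
value through an explicit temporary:

    /* Before: the generator wrote straight to the TCGv_i64 global. */
    tcg_gen_mov_i64(cpu_vsrh(xT(ctx->opcode)), cpu_vsrh(xB(ctx->opcode)));

    /* After: read into a scratch temporary via the new accessor... */
    TCGv_i64 t0 = tcg_temp_new_i64();
    get_cpu_vsrh(t0, xB(ctx->opcode));
    /* ...and write it back through the matching store accessor. */
    set_cpu_vsrh(xT(ctx->opcode), t0);
    tcg_temp_free_i64(t0);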

Signed-off-by: Mark Cave-Ayland 
---
 target/ppc/translate/vsx-impl.inc.c | 782 ++--
 1 file changed, 561 insertions(+), 221 deletions(-)

diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
index 85ed135d44..20e1fd9324 100644
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -1,20 +1,48 @@
 /***   VSX extension   ***/
 
-static inline TCGv_i64 cpu_vsrh(int n)
+static inline void get_vsr(TCGv_i64 dst, int n)
+{
+    tcg_gen_ld_i64(dst, cpu_env, offsetof(CPUPPCState, vsr[n]));
+}
+
+static inline void set_vsr(int n, TCGv_i64 src)
+{
+    tcg_gen_st_i64(src, cpu_env, offsetof(CPUPPCState, vsr[n]));
+}
+
+static inline void get_cpu_vsrh(TCGv_i64 dst, int n)
+{
+    if (n < 32) {
+        get_fpr(dst, n);
+    } else {
+        get_avr64(dst, n - 32, true);
+    }
+}
+
+static inline void get_cpu_vsrl(TCGv_i64 dst, int n)
 {
     if (n < 32) {
-        return cpu_fpr[n];
+        get_vsr(dst, n);
     } else {
-        return cpu_avrh[n-32];
+        get_avr64(dst, n - 32, false);
     }
 }
 
-static inline TCGv_i64 cpu_vsrl(int n)
+static inline void set_cpu_vsrh(int n, TCGv_i64 src)
 {
     if (n < 32) {
-        return cpu_vsr[n];
+        set_fpr(n, src);
     } else {
-        return cpu_avrl[n-32];
+        set_avr64(n - 32, src, true);
+    }
+}
+
+static inline void set_cpu_vsrl(int n, TCGv_i64 src)
+{
+    if (n < 32) {
+        set_vsr(n, src);
+    } else {
+        set_avr64(n - 32, src, false);
     }
 }
 
@@ -22,16 +50,20 @@ static inline TCGv_i64 cpu_vsrl(int n)
 static void gen_##name(DisasContext *ctx)                     \
 {                                                             \
     TCGv EA;                                                  \
+    TCGv_i64 t0;                                              \
     if (unlikely(!ctx->vsx_enabled)) {                        \
         gen_exception(ctx, POWERPC_EXCP_VSXU);                \
         return;                                               \
     }                                                         \
+    t0 = tcg_temp_new_i64();                                  \
     gen_set_access_type(ctx, ACCESS_INT);                     \
     EA = tcg_temp_new();                                      \
     gen_addr_reg_index(ctx, EA);                              \
-    gen_qemu_##operation(ctx, cpu_vsrh(xT(ctx->opcode)), EA); \
+    gen_qemu_##operation(ctx, t0, EA);                        \
+    set_cpu_vsrh(xT(ctx->opcode), t0);                        \
     /* NOTE: cpu_vsrl is undefined */                         \
     tcg_temp_free(EA);                                        \
+    tcg_temp_free_i64(t0);                                    \
 }
 
 VSX_LOAD_SCALAR(lxsdx, ld64_i64)
@@ -44,39 +76,54 @@ VSX_LOAD_SCALAR(lxsspx, ld32fs)
 static void gen_lxvd2x(DisasContext *ctx)
 {
     TCGv EA;
+    TCGv_i64 t0;
     if (unlikely(!ctx->vsx_enabled)) {
         gen_exception(ctx, POWERPC_EXCP_VSXU);
         return;
     }
+    t0 = tcg_temp_new_i64();
     gen_set_access_type(ctx, ACCESS_INT);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
-    gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
+    gen_qemu_ld64_i64(ctx, t0, EA);
+    set_cpu_vsrh(xT(ctx->opcode), t0);
     tcg_gen_addi_tl(EA, EA, 8);
-    gen_qemu_ld64_i64(ctx, cpu_vsrl(xT(ctx->opcode)), EA);
+    gen_qemu_ld64_i64(ctx, t0, EA);
+    set_cpu_vsrl(xT(ctx->opcode), t0);
     tcg_temp_free(EA);
+    tcg_temp_free_i64(t0);
 }
 
 static void gen_lxvdsx(DisasContext *ctx)
 {
     TCGv EA;
+    TCGv_i64 t0;
+    TCGv_i64 t1;
     if (unlikely(!ctx->vsx_enabled)) {
         gen_exception(ctx, POWERPC_EXCP_VSXU);
         return;
     }
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_i64();
     gen_set_access_type(ctx, ACCESS_INT);
     EA = tcg_temp_new();
     gen_addr_reg_index(ctx, EA);
-    gen_qemu_ld64_i64(ctx, cpu_vsrh(xT(ctx->opcode)), EA);
-    tcg_gen_mov_i64(cpu_vsrl(xT(ctx->opcode)), cpu_vsrh(xT(ctx->opcode)));
+    gen_qemu_ld64_i64(ctx, t0, EA);
+    set_cpu_vsrh(xT(ctx->opcode), t0);
+    tcg_gen_mov_i64(t1, t0);
+    set_cpu_vsrl(xT(ctx->opcode), t1);
     tcg_temp_free(EA);
+    tcg_temp_free_i64(t0);
+    tcg_temp_free_i64(t1);
 }
 
 static void gen_lxvw4x(DisasContext *ctx)
 {
     TCGv EA;
-    TCGv_i64 xth = cpu_vsrh(xT(ctx->opcode));
-    TCGv_i64 xtl = cpu_vsrl(xT(ctx->opcode));
+    TCGv_i64 xth = tcg_temp_new_i64();
+    TCGv_i64 xtl = tcg_temp_new_i64();
+    get_cpu_vsrh(xth, xT(ctx->opcode));
+    get_cpu_vsrl(xtl, xT(ctx->opcode));
     if (unlikely(!ctx->vsx_enabled)) {
         gen_exception(ctx, POWERPC_EXCP_VSXU);
         return;
@@ -104,6 +151,8 @@ static void