The resulting aarch64 translation is a bit cleaner:
sign-extending from 32 bits is simpler than having
to use setcond to narrow from 64 bits.

Signed-off-by: Richard Henderson <r...@twiddle.net>
---
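For reference while reading the diff: each flag now lives in its own
64-bit field.  N and V are cached in the sign bit (bit 63), Z is
"set iff the field is zero", and C is stored as 0 or 1.  A minimal
illustrative helper (not part of the patch; the name is assumed)
showing how the architectural bits are recovered, mirroring
pstate_read():

    /* Sketch only: recover the architectural NZCV bits from the
       widened flag cache.  Field names follow CPUARMState as
       modified below.  */
    static uint32_t sketch_read_nzcv(CPUARMState *env)
    {
        unsigned n = ((int64_t)env->NF < 0);  /* N cached in bit 63 */
        unsigned z = (env->ZF == 0);          /* Z set iff ZF == 0 */
        unsigned c = env->CF;                 /* C stored as 0 or 1 */
        unsigned v = ((int64_t)env->VF < 0);  /* V cached in bit 63 */

        return (n << 31) | (z << 30) | (c << 29) | (v << 28);
    }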
 target-arm/cpu.h           |  21 ++--
 target-arm/helper.c        |  18 ++-
 target-arm/translate-a64.c | 297 ++++++++++++++++++---------------------------
 target-arm/translate.c     |  26 +++-
 4 files changed, 163 insertions(+), 199 deletions(-)
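One subtlety in the translate.c hunk below: the 32-bit front end keeps
its TCGv_i32 flag globals, so arm_translate_init() aliases each one
onto the half of the 64-bit field that actually carries the flag
(CF/ZF in the low 32 bits, NF/VF in the high 32 bits).  A sketch of
the layout this assumes, with hypothetical macro names standing in
for the offsets the patch computes inline:

    /* Sketch only: host-endian offset of each 32-bit half within a
       64-bit CPUARMState field.  */
    #ifdef HOST_WORDS_BIGENDIAN
    # define LO_HALF 4   /* the low 32 bits are the second word */
    # define HI_HALF 0
    #else
    # define LO_HALF 0   /* the low 32 bits come first */
    # define HI_HALF 4
    #endif
    /* e.g. cpu_CF aliases offsetof(CPUARMState, CF) + LO_HALF,
       and cpu_NF aliases offsetof(CPUARMState, NF) + HI_HALF.  */

Relatedly, in the 32-bit paths of gen_add_CC and gen_adc_CC the carry
now falls out of bit 32 of the 64-bit sum (the shri by 32), and for
subtraction C is computed as "no borrow" via setcond GEU, matching
the ARM carry convention.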

diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 11845a6..74835f4 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -167,10 +167,10 @@ typedef struct CPUARMState {
     uint32_t fiq_regs[5];
 
     /* cpsr flag cache for faster execution */
-    uint32_t CF; /* 0 or 1 */
-    uint32_t VF; /* V is the bit 31. All other bits are undefined */
-    uint32_t NF; /* N is bit 31. All other bits are undefined.  */
-    uint32_t ZF; /* Z set if zero.  */
+    uint64_t CF; /* 0 or 1 */
+    uint64_t VF; /* V is bit 63. All other bits are undefined.  */
+    uint64_t NF; /* N is bit 63. All other bits are undefined.  */
+    uint64_t ZF; /* Z set if zero.  */
     uint32_t QF; /* 0 or 1 */
     uint32_t GE; /* cpsr[19:16] */
     uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
@@ -666,20 +666,21 @@ static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
  */
 static inline uint32_t pstate_read(CPUARMState *env)
 {
-    int ZF;
+    unsigned ZF = (env->ZF == 0);
+    unsigned NF = ((int64_t)env->NF < 0);
+    unsigned VF = ((int64_t)env->VF < 0);
+    unsigned CF = env->CF;
 
-    ZF = (env->ZF == 0);
-    return (env->NF & 0x80000000) | (ZF << 30)
-        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
+    return (NF << 31) | (ZF << 30) | (CF << 29) | (VF << 28)
         | env->pstate | env->daif;
 }
 
 static inline void pstate_write(CPUARMState *env, uint32_t val)
 {
+    env->NF = (uint64_t)val << 32;
     env->ZF = (~val) & PSTATE_Z;
-    env->NF = val;
     env->CF = (val >> 29) & 1;
-    env->VF = (val << 3) & 0x80000000;
+    env->VF = (uint64_t)val << (32 + (31 - 28));
     env->daif = val & PSTATE_DAIF;
     env->pstate = val & ~CACHED_PSTATE_BITS;
 }
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 3bc20af..1b28108 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -3876,10 +3876,13 @@ static int bad_mode_switch(CPUARMState *env, int mode)
 
 uint32_t cpsr_read(CPUARMState *env)
 {
-    int ZF;
-    ZF = (env->ZF == 0);
-    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
-        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
+    unsigned ZF = (env->ZF == 0);
+    unsigned NF = ((int64_t)env->NF < 0);
+    unsigned VF = ((int64_t)env->VF < 0);
+    unsigned CF = env->CF;
+
+    return env->uncached_cpsr | (NF << 31) | (ZF << 30)
+        | (CF << 29) | (VF << 28) | (env->QF << 27)
         | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
         | ((env->condexec_bits & 0xfc) << 8)
         | (env->GE << 16) | (env->daif & CPSR_AIF);
@@ -3890,10 +3893,10 @@ void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
     uint32_t changed_daif;
 
     if (mask & CPSR_NZCV) {
+        env->NF = (uint64_t)val << 32;
         env->ZF = (~val) & CPSR_Z;
-        env->NF = val;
         env->CF = (val >> 29) & 1;
-        env->VF = (val << 3) & 0x80000000;
+        env->VF = (uint64_t)val << (32 + (31 - 28));
     }
     if (mask & CPSR_Q)
         env->QF = ((val & CPSR_Q) != 0);
@@ -4545,6 +4548,9 @@ void aarch64_sync_64_to_32(CPUARMState *env)
         env->regs[i] = env->xregs[i];
     }
 
+    /* Need to compress Z into the low bits.  */
+    env->ZF = (env->ZF != 0);
+
     /* Unless we are in FIQ mode, r8-r12 come from the user registers x8-x12.
      * Otherwise, we copy x8-x12 into the banked user regs.
      */
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index dbca12a..763bf35 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -39,7 +39,7 @@
 
 static TCGv_i64 cpu_X[32];
 static TCGv_i64 cpu_pc;
-static TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
+static TCGv_i64 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
 
 /* Load/store exclusive handling */
 static TCGv_i64 cpu_exclusive_addr;
@@ -104,10 +104,10 @@ void a64_translate_init(void)
                                           regnames[i]);
     }
 
-    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
-    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
-    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
-    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
+    cpu_NF = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
+    cpu_ZF = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
+    cpu_CF = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
+    cpu_VF = tcg_global_mem_new_i64(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
 
     cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
         offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
@@ -515,78 +515,55 @@ static TCGv_ptr get_fpstatus_ptr(void)
     return statusptr;
 }
 
-/* Set ZF and NF based on a 64 bit result. This is alas fiddlier
- * than the 32 bit equivalent.
- */
-static inline void gen_set_NZ64(TCGv_i64 result)
-{
-    TCGv_i64 flag = tcg_temp_new_i64();
-
-    tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
-    tcg_gen_trunc_i64_i32(cpu_ZF, flag);
-    tcg_gen_shri_i64(flag, result, 32);
-    tcg_gen_trunc_i64_i32(cpu_NF, flag);
-    tcg_temp_free_i64(flag);
-}
-
 /* Set NZCV as for a logical operation: NZ as per result, CV cleared. */
 static inline void gen_logic_CC(int sf, TCGv_i64 result)
 {
     if (sf) {
-        gen_set_NZ64(result);
+        tcg_gen_mov_i64(cpu_NF, result);
+        tcg_gen_mov_i64(cpu_ZF, result);
     } else {
-        tcg_gen_trunc_i64_i32(cpu_ZF, result);
-        tcg_gen_trunc_i64_i32(cpu_NF, result);
+        tcg_gen_ext32s_i64(cpu_NF, result);
+        tcg_gen_mov_i64(cpu_ZF, cpu_NF);
     }
-    tcg_gen_movi_i32(cpu_CF, 0);
-    tcg_gen_movi_i32(cpu_VF, 0);
+    tcg_gen_movi_i64(cpu_CF, 0);
+    tcg_gen_movi_i64(cpu_VF, 0);
 }
 
 /* dest = T0 + T1; compute C, N, V and Z flags */
 static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
 {
     if (sf) {
-        TCGv_i64 result, flag, tmp;
-        result = tcg_temp_new_i64();
-        flag = tcg_temp_new_i64();
-        tmp = tcg_temp_new_i64();
+        TCGv_i64 tmp = tcg_temp_new_i64();
 
         tcg_gen_movi_i64(tmp, 0);
-        tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
-
-        tcg_gen_trunc_i64_i32(cpu_CF, flag);
-
-        gen_set_NZ64(result);
-
-        tcg_gen_xor_i64(flag, result, t0);
+        tcg_gen_add2_i64(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
+        tcg_gen_mov_i64(cpu_ZF, cpu_NF);
+        tcg_gen_xor_i64(cpu_VF, cpu_NF, t0);
         tcg_gen_xor_i64(tmp, t0, t1);
-        tcg_gen_andc_i64(flag, flag, tmp);
+        tcg_gen_andc_i64(cpu_VF, cpu_VF, tmp);
         tcg_temp_free_i64(tmp);
-        tcg_gen_shri_i64(flag, flag, 32);
-        tcg_gen_trunc_i64_i32(cpu_VF, flag);
-
-        tcg_gen_mov_i64(dest, result);
-        tcg_temp_free_i64(result);
-        tcg_temp_free_i64(flag);
+        tcg_gen_mov_i64(dest, cpu_NF);
     } else {
         /* 32 bit arithmetic */
-        TCGv_i32 t0_32 = tcg_temp_new_i32();
-        TCGv_i32 t1_32 = tcg_temp_new_i32();
-        TCGv_i32 tmp = tcg_temp_new_i32();
-
-        tcg_gen_movi_i32(tmp, 0);
-        tcg_gen_trunc_i64_i32(t0_32, t0);
-        tcg_gen_trunc_i64_i32(t1_32, t1);
-        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
-        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
-        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
-        tcg_gen_xor_i32(tmp, t0_32, t1_32);
-        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
-        tcg_gen_extu_i32_i64(dest, cpu_NF);
-
-        tcg_temp_free_i32(tmp);
-        tcg_temp_free_i32(t0_32);
-        tcg_temp_free_i32(t1_32);
+        TCGv_i64 t0_32 = tcg_temp_new_i64();
+        TCGv_i64 t1_32 = tcg_temp_new_i64();
+        TCGv_i64 tmp;
+
+        tcg_gen_ext32u_i64(t0_32, t0);
+        tcg_gen_ext32u_i64(t1_32, t1);
+        tcg_gen_add_i64(cpu_NF, t0_32, t1_32);
+        tcg_gen_shri_i64(cpu_CF, cpu_NF, 32);
+        tcg_gen_xor_i64(cpu_VF, cpu_NF, t0_32);
+        tmp = tcg_temp_new_i64();
+        tcg_gen_xor_i64(tmp, t0_32, t1_32);
+        tcg_gen_andc_i64(cpu_VF, cpu_VF, tmp);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(t0_32);
+        tcg_temp_free_i64(t1_32);
+        tcg_gen_ext32u_i64(cpu_ZF, cpu_NF);
+        tcg_gen_ext32s_i64(cpu_NF, cpu_NF);
+        tcg_gen_ext32s_i64(cpu_VF, cpu_VF);
+        tcg_gen_mov_i64(dest, cpu_ZF);
     }
 }
 
@@ -595,58 +572,46 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
 {
     if (sf) {
         /* 64 bit arithmetic */
-        TCGv_i64 result, flag, tmp;
-
-        result = tcg_temp_new_i64();
-        flag = tcg_temp_new_i64();
-        tcg_gen_sub_i64(result, t0, t1);
-
-        gen_set_NZ64(result);
-
-        tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
-        tcg_gen_trunc_i64_i32(cpu_CF, flag);
+        TCGv_i64 tmp;
 
-        tcg_gen_xor_i64(flag, result, t0);
+        tcg_gen_sub_i64(cpu_NF, t0, t1);
+        tcg_gen_mov_i64(cpu_ZF, cpu_NF);
+        tcg_gen_setcond_i64(TCG_COND_GEU, cpu_CF, t0, t1);
+        tcg_gen_xor_i64(cpu_VF, cpu_NF, t0);
         tmp = tcg_temp_new_i64();
         tcg_gen_xor_i64(tmp, t0, t1);
-        tcg_gen_and_i64(flag, flag, tmp);
+        tcg_gen_and_i64(cpu_VF, cpu_VF, tmp);
         tcg_temp_free_i64(tmp);
-        tcg_gen_shri_i64(flag, flag, 32);
-        tcg_gen_trunc_i64_i32(cpu_VF, flag);
-        tcg_gen_mov_i64(dest, result);
-        tcg_temp_free_i64(flag);
-        tcg_temp_free_i64(result);
+        tcg_gen_mov_i64(dest, cpu_NF);
     } else {
         /* 32 bit arithmetic */
-        TCGv_i32 t0_32 = tcg_temp_new_i32();
-        TCGv_i32 t1_32 = tcg_temp_new_i32();
-        TCGv_i32 tmp;
-
-        tcg_gen_trunc_i64_i32(t0_32, t0);
-        tcg_gen_trunc_i64_i32(t1_32, t1);
-        tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
-        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
-        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
-        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
-        tmp = tcg_temp_new_i32();
-        tcg_gen_xor_i32(tmp, t0_32, t1_32);
-        tcg_temp_free_i32(t0_32);
-        tcg_temp_free_i32(t1_32);
-        tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
-        tcg_temp_free_i32(tmp);
-        tcg_gen_extu_i32_i64(dest, cpu_NF);
+        TCGv_i64 t0_32 = tcg_temp_new_i64();
+        TCGv_i64 t1_32 = tcg_temp_new_i64();
+        TCGv_i64 tmp;
+
+        tcg_gen_ext32u_i64(t0_32, t0);
+        tcg_gen_ext32u_i64(t1_32, t1);
+        tcg_gen_sub_i64(cpu_NF, t0_32, t1_32);
+        tcg_gen_setcond_i64(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
+        tcg_gen_xor_i64(cpu_VF, cpu_NF, t0_32);
+        tmp = tcg_temp_new_i64();
+        tcg_gen_xor_i64(tmp, t0_32, t1_32);
+        tcg_gen_and_i64(cpu_VF, cpu_VF, tmp);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(t0_32);
+        tcg_temp_free_i64(t1_32);
+        tcg_gen_ext32u_i64(cpu_ZF, cpu_NF);
+        tcg_gen_ext32s_i64(cpu_NF, cpu_NF);
+        tcg_gen_ext32s_i64(cpu_VF, cpu_VF);
+        tcg_gen_mov_i64(dest, cpu_ZF);
     }
 }
 
 /* dest = T0 + T1 + CF; do not compute flags. */
 static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
 {
-    TCGv_i64 flag = tcg_temp_new_i64();
-    tcg_gen_extu_i32_i64(flag, cpu_CF);
     tcg_gen_add_i64(dest, t0, t1);
-    tcg_gen_add_i64(dest, dest, flag);
-    tcg_temp_free_i64(flag);
-
+    tcg_gen_add_i64(dest, dest, cpu_CF);
     if (!sf) {
         tcg_gen_ext32u_i64(dest, dest);
     }
@@ -656,50 +621,37 @@ static void gen_adc(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
 static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
 {
     if (sf) {
-        TCGv_i64 result, cf_64, vf_64, tmp;
-        result = tcg_temp_new_i64();
-        cf_64 = tcg_temp_new_i64();
-        vf_64 = tcg_temp_new_i64();
-        tmp = tcg_const_i64(0);
-
-        tcg_gen_extu_i32_i64(cf_64, cpu_CF);
-        tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
-        tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
-        tcg_gen_trunc_i64_i32(cpu_CF, cf_64);
-        gen_set_NZ64(result);
-
-        tcg_gen_xor_i64(vf_64, result, t0);
-        tcg_gen_xor_i64(tmp, t0, t1);
-        tcg_gen_andc_i64(vf_64, vf_64, tmp);
-        tcg_gen_shri_i64(vf_64, vf_64, 32);
-        tcg_gen_trunc_i64_i32(cpu_VF, vf_64);
-
-        tcg_gen_mov_i64(dest, result);
+        TCGv_i64 tmp = tcg_const_i64(0);
 
+        tcg_gen_add2_i64(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
+        tcg_gen_add2_i64(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
+        tcg_gen_mov_i64(cpu_ZF, cpu_NF);
+        tcg_gen_xor_i64(cpu_VF, cpu_NF, t0);
+        tcg_gen_xor_i64(tmp, t0, t1);
+        tcg_gen_andc_i64(cpu_VF, cpu_VF, tmp);
         tcg_temp_free_i64(tmp);
-        tcg_temp_free_i64(vf_64);
-        tcg_temp_free_i64(cf_64);
-        tcg_temp_free_i64(result);
+        tcg_gen_mov_i64(dest, cpu_NF);
     } else {
-        TCGv_i32 t0_32, t1_32, tmp;
-        t0_32 = tcg_temp_new_i32();
-        t1_32 = tcg_temp_new_i32();
-        tmp = tcg_const_i32(0);
-
-        tcg_gen_trunc_i64_i32(t0_32, t0);
-        tcg_gen_trunc_i64_i32(t1_32, t1);
-        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
-        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);
-
-        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
-        tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
-        tcg_gen_xor_i32(tmp, t0_32, t1_32);
-        tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
-        tcg_gen_extu_i32_i64(dest, cpu_NF);
-
-        tcg_temp_free_i32(tmp);
-        tcg_temp_free_i32(t1_32);
-        tcg_temp_free_i32(t0_32);
+        TCGv_i64 t0_32 = tcg_temp_new_i64();
+        TCGv_i64 t1_32 = tcg_temp_new_i64();
+        TCGv_i64 tmp;
+
+        tcg_gen_ext32u_i64(t0_32, t0);
+        tcg_gen_ext32u_i64(t1_32, t1);
+        tcg_gen_add_i64(cpu_NF, t0_32, cpu_CF);
+        tcg_gen_add_i64(cpu_NF, cpu_NF, t1_32);
+        tcg_gen_shri_i64(cpu_CF, cpu_NF, 32);
+        tcg_gen_xor_i64(cpu_VF, cpu_NF, t0_32);
+        tmp = tcg_temp_new_i64();
+        tcg_gen_xor_i64(tmp, t0_32, t1_32);
+        tcg_gen_andc_i64(cpu_VF, cpu_VF, tmp);
+        tcg_temp_free_i64(tmp);
+        tcg_temp_free_i64(t0_32);
+        tcg_temp_free_i64(t1_32);
+        tcg_gen_ext32u_i64(cpu_ZF, cpu_NF);
+        tcg_gen_ext32s_i64(cpu_NF, cpu_NF);
+        tcg_gen_ext32s_i64(cpu_VF, cpu_VF);
+        tcg_gen_mov_i64(dest, cpu_ZF);
     }
 }
 
@@ -1038,7 +990,7 @@ static inline void gen_check_sp_alignment(DisasContext *s)
 
 typedef struct DisasCompare {
     TCGCond cond;
-    TCGv_i32 value;
+    TCGv_i64 value;
     bool value_global;
 } DisasCompare;
 
@@ -1048,7 +1000,7 @@ typedef struct DisasCompare {
  */
 static void arm_test_cc(DisasCompare *cmp, int cc)
 {
-    TCGv_i32 value;
+    TCGv_i64 value;
     TCGCond cond;
     bool global = true;
 
@@ -1080,28 +1032,28 @@ static void arm_test_cc(DisasCompare *cmp, int cc)
     case 8: /* hi: C && !Z */
     case 9: /* ls: !C || Z */
         cond = TCG_COND_NE;
-        value = tcg_temp_new_i32();
+        value = tcg_temp_new_i64();
         global = false;
-        tcg_gen_neg_i32(value, cpu_CF);
-        tcg_gen_and_i32(value, value, cpu_ZF);
+        tcg_gen_neg_i64(value, cpu_CF);
+        tcg_gen_and_i64(value, value, cpu_ZF);
         break;
 
     case 10: /* ge: N == V -> N ^ V == 0 */
     case 11: /* lt: N != V -> N ^ V != 0 */
         cond = TCG_COND_GE;
-        value = tcg_temp_new_i32();
+        value = tcg_temp_new_i64();
         global = false;
-        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
+        tcg_gen_xor_i64(value, cpu_VF, cpu_NF);
         break;
 
     case 12: /* gt: !Z && N == V */
     case 13: /* le: Z || N != V */
         cond = TCG_COND_NE;
-        value = tcg_temp_new_i32();
+        value = tcg_temp_new_i64();
         global = false;
-        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
-        tcg_gen_sari_i32(value, value, 31);
-        tcg_gen_andc_i32(value, cpu_ZF, value);
+        tcg_gen_xor_i64(value, cpu_VF, cpu_NF);
+        tcg_gen_sari_i64(value, value, 63);
+        tcg_gen_andc_i64(value, cpu_ZF, value);
         break;
 
     default:
@@ -1121,13 +1073,13 @@ static void arm_test_cc(DisasCompare *cmp, int cc)
 static void arm_free_cc(DisasCompare *cmp)
 {
     if (!cmp->value_global) {
-        tcg_temp_free_i32(cmp->value);
+        tcg_temp_free_i64(cmp->value);
     }
 }
 
 static void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
 {
-    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
+    tcg_gen_brcondi_i64(cmp->cond, cmp->value, 0, label);
 }
 
 static void arm_gen_test_cc(int cc, TCGLabel *label)
@@ -1369,46 +1321,35 @@ static void handle_msr_i(DisasContext *s, uint32_t insn,
 
 static void gen_get_nzcv(TCGv_i64 tcg_rt)
 {
-    TCGv_i32 tmp = tcg_temp_new_i32();
-    TCGv_i32 nzcv = tcg_temp_new_i32();
+    TCGv_i64 tmp = tcg_temp_new_i64();
 
     /* build bit 31, N */
-    tcg_gen_andi_i32(nzcv, cpu_NF, (1U << 31));
+    tcg_gen_shri_i64(tmp, cpu_NF, 63);
+    tcg_gen_shli_i64(tcg_rt, tmp, 31);
     /* build bit 30, Z */
-    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_ZF, 0);
-    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 30, 1);
+    tcg_gen_setcondi_i64(TCG_COND_EQ, tmp, cpu_ZF, 0);
+    tcg_gen_deposit_i64(tcg_rt, tcg_rt, tmp, 30, 1);
     /* build bit 29, C */
-    tcg_gen_deposit_i32(nzcv, nzcv, cpu_CF, 29, 1);
+    tcg_gen_deposit_i64(tcg_rt, tcg_rt, cpu_CF, 29, 1);
     /* build bit 28, V */
-    tcg_gen_shri_i32(tmp, cpu_VF, 31);
-    tcg_gen_deposit_i32(nzcv, nzcv, tmp, 28, 1);
-    /* generate result */
-    tcg_gen_extu_i32_i64(tcg_rt, nzcv);
+    tcg_gen_shri_i64(tmp, cpu_VF, 63);
+    tcg_gen_deposit_i64(tcg_rt, tcg_rt, tmp, 28, 1);
 
-    tcg_temp_free_i32(nzcv);
-    tcg_temp_free_i32(tmp);
+    tcg_temp_free_i64(tmp);
 }
 
 static void gen_set_nzcv(TCGv_i64 tcg_rt)
-
 {
-    TCGv_i32 nzcv = tcg_temp_new_i32();
-
-    /* take NZCV from R[t] */
-    tcg_gen_trunc_i64_i32(nzcv, tcg_rt);
-
     /* bit 31, N */
-    tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
+    tcg_gen_shli_i64(cpu_NF, tcg_rt, 32);
     /* bit 30, Z */
-    tcg_gen_andi_i32(cpu_ZF, nzcv, (1 << 30));
-    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_ZF, cpu_ZF, 0);
+    tcg_gen_not_i64(cpu_ZF, tcg_rt);
+    tcg_gen_andi_i64(cpu_ZF, cpu_ZF, 1 << 30);
     /* bit 29, C */
-    tcg_gen_andi_i32(cpu_CF, nzcv, (1 << 29));
-    tcg_gen_shri_i32(cpu_CF, cpu_CF, 29);
+    tcg_gen_shri_i64(cpu_CF, tcg_rt, 29);
+    tcg_gen_andi_i64(cpu_CF, cpu_CF, 1);
     /* bit 28, V */
-    tcg_gen_andi_i32(cpu_VF, nzcv, (1 << 28));
-    tcg_gen_shli_i32(cpu_VF, cpu_VF, 3);
-    tcg_temp_free_i32(nzcv);
+    tcg_gen_shli_i64(cpu_VF, tcg_rt, 32 + (31 - 28));
 }
 
 /* C5.6.129 MRS - move from system register
diff --git a/target-arm/translate.c b/target-arm/translate.c
index dd4d80f..0d0a4d1 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -85,7 +85,7 @@ static const char *regnames[] =
 /* initialize TCG globals.  */
 void arm_translate_init(void)
 {
-    int i;
+    int i, be, le;
 
     cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
 
@@ -94,10 +94,26 @@ void arm_translate_init(void)
                                           offsetof(CPUARMState, regs[i]),
                                           regnames[i]);
     }
-    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
-    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
-    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
-    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
+
+#ifdef HOST_WORDS_BIGENDIAN
+    be = 0;
+    le = 4;
+#else
+    le = 0;
+    be = 4;
+#endif
+
+    /* Place CF and ZF at the low end of the 64-bit variable, and NF
+       and VF at the high end.  The other halves are ignorable in
+       32-bit mode and are synced during the mode transition.  */
+    cpu_CF = tcg_global_mem_new_i32(TCG_AREG0,
+                                    offsetof(CPUARMState, CF) + le, "CF");
+    cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0,
+                                    offsetof(CPUARMState, ZF) + le, "ZF");
+    cpu_NF = tcg_global_mem_new_i32(TCG_AREG0,
+                                    offsetof(CPUARMState, NF) + be, "NF");
+    cpu_VF = tcg_global_mem_new_i32(TCG_AREG0,
+                                    offsetof(CPUARMState, VF) + be, "VF");
 
     cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
         offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
-- 
2.1.0