This adds support for the VSEL floating-point conditional select
instruction introduced in ARMv8. It is based on an earlier patch[1]
from Mans Rullgard and attempts to address the review feedback given
on that patch.

[1] http://lists.nongnu.org/archive/html/qemu-devel/2013-06/msg03117.html
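
For context, VSEL writes the first source register to the destination
when the condition holds for the current flags and the second source
register otherwise; only the eq, vs, ge and gt conditions are encoded
(the two-bit "cc" field extracted below). Roughly, in purely
illustrative C (the helper name is made up, this is not QEMU code):

    /* Illustrative only: VSELGE.F64 Dd, Dn, Dm computes Dd = GE ? Dn : Dm. */
    double vsel_ge(double dn, double dm, int flag_n, int flag_v)
    {
        return (flag_n == flag_v) ? dn : dm;   /* ge: N == V */
    }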

Signed-off-by: Will Newton <will.new...@linaro.org>
---
 target-arm/translate.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 105 insertions(+)

Changes in v2:
 - Integrate vsel decoding into disas_vfp_insn
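
A note for review, not part of the patch itself: the movcond conditions
below rely on how the existing flag globals are encoded (Z is set when
cpu_ZF == 0; N and V are the sign bits of cpu_NF and cpu_VF). The four
"cc" cases then reduce to the following plain-C sketch (hypothetical
helper, illustration only, not QEMU code):

    #include <stdint.h>

    /* Returns non-zero when VSEL should pick the first operand (rn).
       Mirrors the cc switch in disas_vfp_insn below.  */
    int vsel_pick_first(uint32_t cc, int32_t zf, int32_t nf, int32_t vf)
    {
        switch (cc) {
        case 0:  return zf == 0;                    /* eq: Z          */
        case 1:  return vf < 0;                     /* vs: V          */
        case 2:  return (nf ^ vf) >= 0;             /* ge: N == V     */
        default: return zf != 0 && (nf ^ vf) >= 0;  /* gt: !Z && N==V */
        }
    }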

diff --git a/target-arm/translate.c b/target-arm/translate.c
index 998bde2..5e49334 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -2880,6 +2880,98 @@ static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
                 rm = VFP_SREG_M(insn);
             }

+            if ((insn & 0x0f800e50) == 0x0e000a00) {
+                /* vsel */
+                uint32_t cc = (insn >> 20) & 3;
+
+                /* ARMv8 VFP.  */
+                if (!arm_feature(env, ARM_FEATURE_V8)) {
+                    return 1;
+                }
+
+                if (dp) {
+                    TCGv_i64 ftmp1, ftmp2, ftmp3;
+                    TCGv_i64 tmp, zero, zf, nf, vf;
+
+                    zero = tcg_const_i64(0);
+
+                    ftmp1 = tcg_temp_new_i64();
+                    ftmp2 = tcg_temp_new_i64();
+                    ftmp3 = tcg_temp_new_i64();
+
+                    zf = tcg_temp_new_i64();
+                    nf = tcg_temp_new_i64();
+                    vf = tcg_temp_new_i64();
+
+                    /* The flag globals are 32 bit, but movcond_i64 needs
+                       64 bit operands, so widen them first.  */
+                    tcg_gen_extu_i32_i64(zf, cpu_ZF);
+                    tcg_gen_ext_i32_i64(nf, cpu_NF);
+                    tcg_gen_ext_i32_i64(vf, cpu_VF);
+
+                    tcg_gen_ld_f64(ftmp1, cpu_env, vfp_reg_offset(dp, rn));
+                    tcg_gen_ld_f64(ftmp2, cpu_env, vfp_reg_offset(dp, rm));
+                    switch (cc) {
+                    case 0: /* eq: Z */
+                        tcg_gen_movcond_i64(TCG_COND_EQ, ftmp3, zf, zero,
+                                            ftmp1, ftmp2);
+                        break;
+                    case 1: /* vs: V */
+                        tcg_gen_movcond_i64(TCG_COND_LT, ftmp3, vf, zero,
+                                            ftmp1, ftmp2);
+                        break;
+                    case 2: /* ge: N == V -> N ^ V == 0 */
+                        tmp = tcg_temp_new_i64();
+                        tcg_gen_xor_i64(tmp, vf, nf);
+                        tcg_gen_movcond_i64(TCG_COND_GE, ftmp3, tmp, zero,
+                                            ftmp1, ftmp2);
+                        tcg_temp_free_i64(tmp);
+                        break;
+                    case 3: /* gt: !Z && N == V */
+                        tcg_gen_movcond_i64(TCG_COND_NE, ftmp3, zf, zero,
+                                            ftmp1, ftmp2);
+                        tmp = tcg_temp_new_i64();
+                        tcg_gen_xor_i64(tmp, vf, nf);
+                        tcg_gen_movcond_i64(TCG_COND_GE, ftmp3, tmp, zero,
+                                            ftmp3, ftmp2);
+                        tcg_temp_free_i64(tmp);
+                        break;
+                    }
+                    tcg_gen_st_f64(ftmp3, cpu_env, vfp_reg_offset(dp, rd));
+                    tcg_temp_free_i64(ftmp1);
+                    tcg_temp_free_i64(ftmp2);
+                    tcg_temp_free_i64(ftmp3);
+                    tcg_temp_free_i64(zf);
+                    tcg_temp_free_i64(nf);
+                    tcg_temp_free_i64(vf);
+                    tcg_temp_free_i64(zero);
+                } else {
+                    TCGv_i32 ftmp1, ftmp2, ftmp3;
+                    TCGv_i32 tmp, zero;
+
+                    zero = tcg_const_i32(0);
+
+                    ftmp1 = tcg_temp_new_i32();
+                    ftmp2 = tcg_temp_new_i32();
+                    ftmp3 = tcg_temp_new_i32();
+                    tcg_gen_ld_f32(ftmp1, cpu_env, vfp_reg_offset(dp, rn));
+                    tcg_gen_ld_f32(ftmp2, cpu_env, vfp_reg_offset(dp, rm));
+                    switch (cc) {
+                    case 0: /* eq: Z */
+                        tcg_gen_movcond_i32(TCG_COND_EQ, ftmp3, cpu_ZF, zero,
+                                            ftmp1, ftmp2);
+                        break;
+                    case 1: /* vs: V */
+                        tcg_gen_movcond_i32(TCG_COND_LT, ftmp3, cpu_VF, zero,
+                                            ftmp1, ftmp2);
+                        break;
+                    case 2: /* ge: N == V -> N ^ V == 0 */
+                        tmp = tcg_temp_new_i32();
+                        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
+                        tcg_gen_movcond_i32(TCG_COND_GE, ftmp3, tmp, zero,
+                                            ftmp1, ftmp2);
+                        tcg_temp_free_i32(tmp);
+                        break;
+                    case 3: /* gt: !Z && N == V */
+                        tcg_gen_movcond_i32(TCG_COND_NE, ftmp3, cpu_ZF, zero,
+                                            ftmp1, ftmp2);
+                        tmp = tcg_temp_new_i32();
+                        tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
+                        tcg_gen_movcond_i32(TCG_COND_GE, ftmp3, tmp, zero,
+                                            ftmp3, ftmp2);
+                        tcg_temp_free_i32(tmp);
+                        break;
+                    }
+                    tcg_gen_st_f32(ftmp3, cpu_env, vfp_reg_offset(dp, rd));
+                    tcg_temp_free_i32(ftmp1);
+                    tcg_temp_free_i32(ftmp2);
+                    tcg_temp_free_i32(ftmp3);
+                    tcg_temp_free_i32(zero);
+                }
+
+                return 0;
+            }
+
             veclen = s->vec_len;
             if (op == 15 && rn > 3)
                 veclen = 0;
@@ -6756,6 +6848,13 @@ static void disas_arm_insn(CPUARMState * env, DisasContext *s)
                 goto illegal_op;
             return;
         }
+        if ((insn & 0x0f800e50) == 0x0e000a00) {
+            /* ARMv8 VFP.  */
+            ARCH(8);
+
+            if (disas_vfp_insn(env, s, insn)) {
+                goto illegal_op;
+            }
+            return;
+        }
         if (((insn & 0x0f30f000) == 0x0510f000) ||
             ((insn & 0x0f30f010) == 0x0710f000)) {
             if ((insn & (1 << 22)) == 0) {
@@ -8768,6 +8867,12 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
             insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
             if (disas_neon_data_insn(env, s, insn))
                 goto illegal_op;
+        } else if ((insn & 0x0f800e50) == 0x0e000a00) {
+            /* ARMv8 VFP.  */
+            ARCH(8);
+
+            if (disas_vfp_insn(env, s, insn)) {
+                goto illegal_op;
+            }
         } else {
             if (insn & (1 << 28))
                 goto illegal_op;
-- 
1.8.1.4

