From: Sandipan Das <sandipandas1...@gmail.com>

Add the following VSX scalar floating-point compare instructions from
POWER ISA 3.0:

xscmpeqdp: VSX Scalar Compare Equal Double-Precision
xscmpgedp: VSX Scalar Compare Greater Than or Equal Double-Precision
xscmpgtdp: VSX Scalar Compare Greater Than Double-Precision
xscmpnedp: VSX Scalar Compare Not Equal Double-Precision
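
Each instruction compares the double-precision values in doubleword 0 of
VSR[XA] and VSR[XB]. When the relation holds, an all-ones mask is written
to one doubleword of VSR[XT] (selected via MSR[LE]) and the other is
cleared; otherwise both doublewords are cleared. An SNaN operand raises
VXSNAN, and for the ordered compares (xscmpgedp, xscmpgtdp) any NaN
operand also raises VXVC. As a minimal, host-side sketch of the mask
semantics only (names are illustrative, FPSCR/exception handling is
omitted, this is not the QEMU helper):

  #include <stdint.h>
  #include <stdio.h>

  /* True comparison -> all-ones doubleword, false (including any NaN
   * operand) -> all-zeros doubleword. */
  static uint64_t dp_eq_mask(double a, double b)
  {
      return (a == b) ? UINT64_MAX : 0;
  }

  int main(void)
  {
      printf("%016llx\n", (unsigned long long)dp_eq_mask(1.0, 1.0));
      printf("%016llx\n", (unsigned long long)dp_eq_mask(1.0, 2.0));
      return 0;
  }

The ge and gt helpers evaluate a >= b and a > b as float64_le(b, a) and
float64_lt(b, a) respectively, i.e. with the operands swapped.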

Signed-off-by: Sandipan Das <sandipandas1...@gmail.com>
[ Fix LE case using msr_le ]
Signed-off-by: Nikunj A Dadhania <nik...@linux.vnet.ibm.com>
---
 target-ppc/fpu_helper.c             | 48 +++++++++++++++++++++++++++++++++++++
 target-ppc/helper.h                 |  4 ++++
 target-ppc/translate/vsx-impl.inc.c |  4 ++++
 target-ppc/translate/vsx-ops.inc.c  |  4 ++++
 4 files changed, 60 insertions(+)

diff --git a/target-ppc/fpu_helper.c b/target-ppc/fpu_helper.c
index b0760f0..87c17d9 100644
--- a/target-ppc/fpu_helper.c
+++ b/target-ppc/fpu_helper.c
@@ -2362,6 +2362,54 @@ VSX_MADD(xvnmaddmsp, 4, float32, VsrW(i), NMADD_FLGS, 0, 0, 0)
 VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0)
 VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0)
 
+/* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision
+ *   op    - instruction mnemonic
+ *   cmp   - comparison operation
+ *   exp   - expected result of comparison
+ *   svxvc - set VXVC bit
+ */
+#define VSX_SCALAR_CMP_DP(op, cmp, exp, svxvc)                                \
+void helper_##op(CPUPPCState *env, uint32_t opcode)                           \
+{                                                                             \
+    ppc_vsr_t xt, xa, xb;                                                     \
+                                                                              \
+    getVSR(xA(opcode), &xa, env);                                             \
+    getVSR(xB(opcode), &xb, env);                                             \
+    getVSR(xT(opcode), &xt, env);                                             \
+                                                                              \
+    if (unlikely(float64_is_any_nan(xa.VsrD(0)) ||                            \
+                 float64_is_any_nan(xb.VsrD(0)))) {                           \
+        if (float64_is_signaling_nan(xa.VsrD(0), &env->fp_status) ||          \
+            float64_is_signaling_nan(xb.VsrD(0), &env->fp_status)) {          \
+            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 0);            \
+        }                                                                     \
+        if (svxvc) {                                                          \
+            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXVC, 0);              \
+        }                                                                     \
+    } else {                                                                  \
+        if (float64_##cmp(xb.VsrD(0), xa.VsrD(0), &env->fp_status) == exp) {  \
+            if (msr_le) {                                                     \
+                xt.VsrD(0) = 0;                                               \
+                xt.VsrD(1) = -1;                                              \
+            } else {                                                          \
+                xt.VsrD(0) = -1;                                              \
+                xt.VsrD(1) = 0;                                               \
+            }                                                                 \
+        } else {                                                              \
+            xt.VsrD(0) = 0;                                                   \
+            xt.VsrD(1) = 0;                                                   \
+        }                                                                     \
+    }                                                                         \
+                                                                              \
+    putVSR(xT(opcode), &xt, env);                                             \
+    helper_float_check_status(env);                                           \
+}
+
+VSX_SCALAR_CMP_DP(xscmpeqdp, eq, 1, 0)
+VSX_SCALAR_CMP_DP(xscmpgedp, le, 1, 1)
+VSX_SCALAR_CMP_DP(xscmpgtdp, lt, 1, 1)
+VSX_SCALAR_CMP_DP(xscmpnedp, eq, 0, 0)
+
 #define VSX_SCALAR_CMP(op, ordered)                                      \
 void helper_##op(CPUPPCState *env, uint32_t opcode)                      \
 {                                                                        \
diff --git a/target-ppc/helper.h b/target-ppc/helper.h
index 5fcc546..0337292 100644
--- a/target-ppc/helper.h
+++ b/target-ppc/helper.h
@@ -389,6 +389,10 @@ DEF_HELPER_2(xsnmaddadp, void, env, i32)
 DEF_HELPER_2(xsnmaddmdp, void, env, i32)
 DEF_HELPER_2(xsnmsubadp, void, env, i32)
 DEF_HELPER_2(xsnmsubmdp, void, env, i32)
+DEF_HELPER_2(xscmpeqdp, void, env, i32)
+DEF_HELPER_2(xscmpgtdp, void, env, i32)
+DEF_HELPER_2(xscmpgedp, void, env, i32)
+DEF_HELPER_2(xscmpnedp, void, env, i32)
 DEF_HELPER_2(xscmpodp, void, env, i32)
 DEF_HELPER_2(xscmpudp, void, env, i32)
 DEF_HELPER_2(xsmaxdp, void, env, i32)
diff --git a/target-ppc/translate/vsx-impl.inc.c b/target-ppc/translate/vsx-impl.inc.c
index d510842..3ee20b4 100644
--- a/target-ppc/translate/vsx-impl.inc.c
+++ b/target-ppc/translate/vsx-impl.inc.c
@@ -620,6 +620,10 @@ GEN_VSX_HELPER_2(xsnmaddadp, 0x04, 0x14, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xsnmaddmdp, 0x04, 0x15, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xsnmsubadp, 0x04, 0x16, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xsnmsubmdp, 0x04, 0x17, 0, PPC2_VSX)
+GEN_VSX_HELPER_2(xscmpeqdp, 0x0C, 0x00, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpgtdp, 0x0C, 0x01, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpgedp, 0x0C, 0x02, 0, PPC2_ISA300)
+GEN_VSX_HELPER_2(xscmpnedp, 0x0C, 0x03, 0, PPC2_ISA300)
 GEN_VSX_HELPER_2(xscmpodp, 0x0C, 0x05, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xscmpudp, 0x0C, 0x04, 0, PPC2_VSX)
 GEN_VSX_HELPER_2(xsmaxdp, 0x00, 0x14, 0, PPC2_VSX)
diff --git a/target-ppc/translate/vsx-ops.inc.c b/target-ppc/translate/vsx-ops.inc.c
index af0d27e..202c557 100644
--- a/target-ppc/translate/vsx-ops.inc.c
+++ b/target-ppc/translate/vsx-ops.inc.c
@@ -114,6 +114,10 @@ GEN_XX3FORM(xsnmaddadp, 0x04, 0x14, PPC2_VSX),
 GEN_XX3FORM(xsnmaddmdp, 0x04, 0x15, PPC2_VSX),
 GEN_XX3FORM(xsnmsubadp, 0x04, 0x16, PPC2_VSX),
 GEN_XX3FORM(xsnmsubmdp, 0x04, 0x17, PPC2_VSX),
+GEN_XX3FORM(xscmpeqdp, 0x0C, 0x00, PPC2_ISA300),
+GEN_XX3FORM(xscmpgtdp, 0x0C, 0x01, PPC2_ISA300),
+GEN_XX3FORM(xscmpgedp, 0x0C, 0x02, PPC2_ISA300),
+GEN_XX3FORM(xscmpnedp, 0x0C, 0x03, PPC2_ISA300),
 GEN_XX2IFORM(xscmpodp,  0x0C, 0x05, PPC2_VSX),
 GEN_XX2IFORM(xscmpudp,  0x0C, 0x04, PPC2_VSX),
 GEN_XX3FORM(xsmaxdp, 0x00, 0x14, PPC2_VSX),
-- 
2.7.4

