Revision: 20487
Author:   alexandre.ra...@arm.com
Date:     Thu Apr  3 14:30:44 2014 UTC
Log:      ARM64: Fixes and more support for FRINT<X> instructions.

Fix simulation and tests for the [-0.5, -0.0[ range for FRINTA and FRINTN, and
add support for FRINTM.

R=u...@chromium.org

Review URL: https://codereview.chromium.org/223843002
http://code.google.com/p/v8/source/detail?r=20487
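
For context, the three rounding modes touched by this change differ only in how
they treat halfway cases and small negative values. The snippet below is a
standalone host-side illustration (plain C++ against <cmath>, not V8 code),
assuming the host libm preserves the sign of zero as IEEE 754 requires:

  // FRINTA / FRINTM / FRINTN semantics on a few sample values, modelled by
  // std::round (ties away from zero), std::floor (toward minus infinity) and
  // std::nearbyint (ties to even in the default rounding mode) respectively.
  #include <cmath>
  #include <cstdio>

  int main() {
    const double inputs[] = {1.5, 2.5, -0.2, -0.5, -1.5};
    for (double x : inputs) {
      std::printf("%5.1f -> FRINTA %5.1f  FRINTM %5.1f  FRINTN %5.1f\n",
                  x, std::round(x), std::floor(x), std::nearbyint(x));
    }
    // For -0.2 the expected results are -0.0, -1.0 and -0.0; the sign of the
    // zero on the FRINTA/FRINTN paths is what the simulator got wrong before.
    return 0;
  }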

Modified:
 /branches/bleeding_edge/src/arm64/assembler-arm64.cc
 /branches/bleeding_edge/src/arm64/assembler-arm64.h
 /branches/bleeding_edge/src/arm64/macro-assembler-arm64-inl.h
 /branches/bleeding_edge/src/arm64/macro-assembler-arm64.h
 /branches/bleeding_edge/src/arm64/simulator-arm64.cc
 /branches/bleeding_edge/test/cctest/test-assembler-arm64.cc

=======================================
--- /branches/bleeding_edge/src/arm64/assembler-arm64.cc Wed Apr 2 16:31:58 2014 UTC
+++ /branches/bleeding_edge/src/arm64/assembler-arm64.cc Thu Apr 3 14:30:44 2014 UTC
@@ -1658,6 +1658,13 @@
   ASSERT(fd.SizeInBits() == fn.SizeInBits());
   FPDataProcessing1Source(fd, fn, FRINTA);
 }
+
+
+void Assembler::frintm(const FPRegister& fd,
+                       const FPRegister& fn) {
+  ASSERT(fd.SizeInBits() == fn.SizeInBits());
+  FPDataProcessing1Source(fd, fn, FRINTM);
+}


 void Assembler::frintn(const FPRegister& fd,
=======================================
--- /branches/bleeding_edge/src/arm64/assembler-arm64.h Mon Mar 31 14:06:42 2014 UTC
+++ /branches/bleeding_edge/src/arm64/assembler-arm64.h Thu Apr 3 14:30:44 2014 UTC
@@ -1584,6 +1584,9 @@
   // FP round to integer (nearest with ties to away).
   void frinta(const FPRegister& fd, const FPRegister& fn);

+  // FP round to integer (toward minus infinity).
+  void frintm(const FPRegister& fd, const FPRegister& fn);
+
   // FP round to integer (nearest with ties to even).
   void frintn(const FPRegister& fd, const FPRegister& fn);

=======================================
--- /branches/bleeding_edge/src/arm64/macro-assembler-arm64-inl.h Tue Apr 1 10:39:09 2014 UTC
+++ /branches/bleeding_edge/src/arm64/macro-assembler-arm64-inl.h Thu Apr 3 14:30:44 2014 UTC
@@ -831,6 +831,12 @@
   ASSERT(allow_macro_instructions_);
   frinta(fd, fn);
 }
+
+
+void MacroAssembler::Frintm(const FPRegister& fd, const FPRegister& fn) {
+  ASSERT(allow_macro_instructions_);
+  frintm(fd, fn);
+}


 void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
=======================================
--- /branches/bleeding_edge/src/arm64/macro-assembler-arm64.h Mon Mar 31 14:06:42 2014 UTC
+++ /branches/bleeding_edge/src/arm64/macro-assembler-arm64.h Thu Apr 3 14:30:44 2014 UTC
@@ -387,6 +387,7 @@
                      const FPRegister& fm,
                      const FPRegister& fa);
   inline void Frinta(const FPRegister& fd, const FPRegister& fn);
+  inline void Frintm(const FPRegister& fd, const FPRegister& fn);
   inline void Frintn(const FPRegister& fd, const FPRegister& fn);
   inline void Frintz(const FPRegister& fd, const FPRegister& fn);
   inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
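
As a usage sketch, the new macro instruction is emitted like its siblings. The
fragment below is a hypothetical illustration only; the generator function, the
usual "__" shorthand for masm-> and the register choice are assumptions, not
part of this patch:

  #define __ masm->

  // Hypothetical helper: round the double in d0 toward minus infinity
  // (Math.floor semantics), leaving the result in d0.
  static void GenerateFloorExample(MacroAssembler* masm) {
    __ Frintm(d0, d0);
  }

  #undef __
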
=======================================
--- /branches/bleeding_edge/src/arm64/simulator-arm64.cc Tue Apr 1 12:51:15 2014 UTC
+++ /branches/bleeding_edge/src/arm64/simulator-arm64.cc Thu Apr 3 14:30:44 2014 UTC
@@ -2390,6 +2390,10 @@
     case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break;
     case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break;
     case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break;
+    case FRINTM_s:
+        set_sreg(fd, FPRoundInt(sreg(fn), FPNegativeInfinity)); break;
+    case FRINTM_d:
+        set_dreg(fd, FPRoundInt(dreg(fn), FPNegativeInfinity)); break;
     case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break;
     case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break;
     case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break;
@@ -2656,17 +2660,27 @@
   double error = value - int_result;
   switch (round_mode) {
     case FPTieAway: {
-      // If the error is greater than 0.5, or is equal to 0.5 and the integer
-      // result is positive, round up.
-      if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+      // Take care of correctly handling the range ]-0.5, -0.0], which must
+      // yield -0.0.
+      if ((-0.5 < value) && (value < 0.0)) {
+        int_result = -0.0;
+
+      } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+        // If the error is greater than 0.5, or is equal to 0.5 and the integer
+        // result is positive, round up.
         int_result++;
       }
       break;
     }
     case FPTieEven: {
+      // Take care of correctly handling the range [-0.5, -0.0], which must
+      // yield -0.0.
+      if ((-0.5 <= value) && (value < 0.0)) {
+        int_result = -0.0;
+
      // If the error is greater than 0.5, or is equal to 0.5 and the integer
       // result is odd, round up.
-      if ((error > 0.5) ||
+      } else if ((error > 0.5) ||
           ((error == 0.5) && (fmod(int_result, 2) != 0))) {
         int_result++;
       }
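
The essential point of the FPRoundInt change is that small negative inputs must
keep their sign when they round to zero. Below is a minimal standalone sketch
of the corrected tie-away path (a simplified re-derivation for illustration,
not the simulator source; the tie-to-even branch is analogous, comparing an
error of exactly 0.5 against an odd integer part instead):

  #include <cassert>
  #include <cmath>

  static double RoundTieAway(double value) {
    double int_result = std::floor(value);
    double error = value - int_result;
    // Values in (-0.5, 0.0) must produce -0.0, not +0.0, so handle them
    // before the generic round-up step.
    if ((-0.5 < value) && (value < 0.0)) {
      int_result = -0.0;
    } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
      int_result++;
    }
    return int_result;
  }

  int main() {
    assert(RoundTieAway(-0.2) == 0.0);         // Numerically zero...
    assert(std::signbit(RoundTieAway(-0.2)));  // ...with the sign bit set.
    assert(RoundTieAway(-0.5) == -1.0);        // Halfway cases go away from 0.
    assert(RoundTieAway(2.5) == 3.0);
    return 0;
  }
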
=======================================
--- /branches/bleeding_edge/test/cctest/test-assembler-arm64.cc Mon Mar 24 17:43:56 2014 UTC
+++ /branches/bleeding_edge/test/cctest/test-assembler-arm64.cc Thu Apr 3 14:30:44 2014 UTC
@@ -6032,6 +6032,7 @@
   __ Fmov(s24, kFP32NegativeInfinity);
   __ Fmov(s25, 0.0);
   __ Fmov(s26, -0.0);
+  __ Fmov(s27, -0.2);

   __ Frinta(s0, s16);
   __ Frinta(s1, s17);
@@ -6044,6 +6045,7 @@
   __ Frinta(s8, s24);
   __ Frinta(s9, s25);
   __ Frinta(s10, s26);
+  __ Frinta(s11, s27);

   __ Fmov(d16, 1.0);
   __ Fmov(d17, 1.1);
@@ -6056,18 +6058,20 @@
   __ Fmov(d24, kFP32NegativeInfinity);
   __ Fmov(d25, 0.0);
   __ Fmov(d26, -0.0);
+  __ Fmov(d27, -0.2);

-  __ Frinta(d11, d16);
-  __ Frinta(d12, d17);
-  __ Frinta(d13, d18);
-  __ Frinta(d14, d19);
-  __ Frinta(d15, d20);
-  __ Frinta(d16, d21);
-  __ Frinta(d17, d22);
-  __ Frinta(d18, d23);
-  __ Frinta(d19, d24);
-  __ Frinta(d20, d25);
-  __ Frinta(d21, d26);
+  __ Frinta(d12, d16);
+  __ Frinta(d13, d17);
+  __ Frinta(d14, d18);
+  __ Frinta(d15, d19);
+  __ Frinta(d16, d20);
+  __ Frinta(d17, d21);
+  __ Frinta(d18, d22);
+  __ Frinta(d19, d23);
+  __ Frinta(d20, d24);
+  __ Frinta(d21, d25);
+  __ Frinta(d22, d26);
+  __ Frinta(d23, d27);
   END();

   RUN();
@@ -6083,17 +6087,108 @@
   ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
   ASSERT_EQUAL_FP32(0.0, s9);
   ASSERT_EQUAL_FP32(-0.0, s10);
-  ASSERT_EQUAL_FP64(1.0, d11);
+  ASSERT_EQUAL_FP32(-0.0, s11);
   ASSERT_EQUAL_FP64(1.0, d12);
-  ASSERT_EQUAL_FP64(2.0, d13);
+  ASSERT_EQUAL_FP64(1.0, d13);
   ASSERT_EQUAL_FP64(2.0, d14);
-  ASSERT_EQUAL_FP64(3.0, d15);
-  ASSERT_EQUAL_FP64(-2.0, d16);
-  ASSERT_EQUAL_FP64(-3.0, d17);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
-  ASSERT_EQUAL_FP64(0.0, d20);
-  ASSERT_EQUAL_FP64(-0.0, d21);
+  ASSERT_EQUAL_FP64(2.0, d15);
+  ASSERT_EQUAL_FP64(3.0, d16);
+  ASSERT_EQUAL_FP64(-2.0, d17);
+  ASSERT_EQUAL_FP64(-3.0, d18);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
+  ASSERT_EQUAL_FP64(0.0, d21);
+  ASSERT_EQUAL_FP64(-0.0, d22);
+  ASSERT_EQUAL_FP64(-0.0, d23);
+
+  TEARDOWN();
+}
+
+
+TEST(frintm) {
+  INIT_V8();
+  SETUP();
+
+  START();
+  __ Fmov(s16, 1.0);
+  __ Fmov(s17, 1.1);
+  __ Fmov(s18, 1.5);
+  __ Fmov(s19, 1.9);
+  __ Fmov(s20, 2.5);
+  __ Fmov(s21, -1.5);
+  __ Fmov(s22, -2.5);
+  __ Fmov(s23, kFP32PositiveInfinity);
+  __ Fmov(s24, kFP32NegativeInfinity);
+  __ Fmov(s25, 0.0);
+  __ Fmov(s26, -0.0);
+  __ Fmov(s27, -0.2);
+
+  __ Frintm(s0, s16);
+  __ Frintm(s1, s17);
+  __ Frintm(s2, s18);
+  __ Frintm(s3, s19);
+  __ Frintm(s4, s20);
+  __ Frintm(s5, s21);
+  __ Frintm(s6, s22);
+  __ Frintm(s7, s23);
+  __ Frintm(s8, s24);
+  __ Frintm(s9, s25);
+  __ Frintm(s10, s26);
+  __ Frintm(s11, s27);
+
+  __ Fmov(d16, 1.0);
+  __ Fmov(d17, 1.1);
+  __ Fmov(d18, 1.5);
+  __ Fmov(d19, 1.9);
+  __ Fmov(d20, 2.5);
+  __ Fmov(d21, -1.5);
+  __ Fmov(d22, -2.5);
+  __ Fmov(d23, kFP32PositiveInfinity);
+  __ Fmov(d24, kFP32NegativeInfinity);
+  __ Fmov(d25, 0.0);
+  __ Fmov(d26, -0.0);
+  __ Fmov(d27, -0.2);
+
+  __ Frintm(d12, d16);
+  __ Frintm(d13, d17);
+  __ Frintm(d14, d18);
+  __ Frintm(d15, d19);
+  __ Frintm(d16, d20);
+  __ Frintm(d17, d21);
+  __ Frintm(d18, d22);
+  __ Frintm(d19, d23);
+  __ Frintm(d20, d24);
+  __ Frintm(d21, d25);
+  __ Frintm(d22, d26);
+  __ Frintm(d23, d27);
+  END();
+
+  RUN();
+
+  ASSERT_EQUAL_FP32(1.0, s0);
+  ASSERT_EQUAL_FP32(1.0, s1);
+  ASSERT_EQUAL_FP32(1.0, s2);
+  ASSERT_EQUAL_FP32(1.0, s3);
+  ASSERT_EQUAL_FP32(2.0, s4);
+  ASSERT_EQUAL_FP32(-2.0, s5);
+  ASSERT_EQUAL_FP32(-3.0, s6);
+  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
+  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
+  ASSERT_EQUAL_FP32(0.0, s9);
+  ASSERT_EQUAL_FP32(-0.0, s10);
+  ASSERT_EQUAL_FP32(-1.0, s11);
+  ASSERT_EQUAL_FP64(1.0, d12);
+  ASSERT_EQUAL_FP64(1.0, d13);
+  ASSERT_EQUAL_FP64(1.0, d14);
+  ASSERT_EQUAL_FP64(1.0, d15);
+  ASSERT_EQUAL_FP64(2.0, d16);
+  ASSERT_EQUAL_FP64(-2.0, d17);
+  ASSERT_EQUAL_FP64(-3.0, d18);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
+  ASSERT_EQUAL_FP64(0.0, d21);
+  ASSERT_EQUAL_FP64(-0.0, d22);
+  ASSERT_EQUAL_FP64(-1.0, d23);

   TEARDOWN();
 }
@@ -6115,6 +6210,7 @@
   __ Fmov(s24, kFP32NegativeInfinity);
   __ Fmov(s25, 0.0);
   __ Fmov(s26, -0.0);
+  __ Fmov(s27, -0.2);

   __ Frintn(s0, s16);
   __ Frintn(s1, s17);
@@ -6127,6 +6223,7 @@
   __ Frintn(s8, s24);
   __ Frintn(s9, s25);
   __ Frintn(s10, s26);
+  __ Frintn(s11, s27);

   __ Fmov(d16, 1.0);
   __ Fmov(d17, 1.1);
@@ -6139,18 +6236,20 @@
   __ Fmov(d24, kFP32NegativeInfinity);
   __ Fmov(d25, 0.0);
   __ Fmov(d26, -0.0);
+  __ Fmov(d27, -0.2);

-  __ Frintn(d11, d16);
-  __ Frintn(d12, d17);
-  __ Frintn(d13, d18);
-  __ Frintn(d14, d19);
-  __ Frintn(d15, d20);
-  __ Frintn(d16, d21);
-  __ Frintn(d17, d22);
-  __ Frintn(d18, d23);
-  __ Frintn(d19, d24);
-  __ Frintn(d20, d25);
-  __ Frintn(d21, d26);
+  __ Frintn(d12, d16);
+  __ Frintn(d13, d17);
+  __ Frintn(d14, d18);
+  __ Frintn(d15, d19);
+  __ Frintn(d16, d20);
+  __ Frintn(d17, d21);
+  __ Frintn(d18, d22);
+  __ Frintn(d19, d23);
+  __ Frintn(d20, d24);
+  __ Frintn(d21, d25);
+  __ Frintn(d22, d26);
+  __ Frintn(d23, d27);
   END();

   RUN();
@@ -6166,17 +6265,19 @@
   ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
   ASSERT_EQUAL_FP32(0.0, s9);
   ASSERT_EQUAL_FP32(-0.0, s10);
-  ASSERT_EQUAL_FP64(1.0, d11);
+  ASSERT_EQUAL_FP32(-0.0, s11);
   ASSERT_EQUAL_FP64(1.0, d12);
-  ASSERT_EQUAL_FP64(2.0, d13);
+  ASSERT_EQUAL_FP64(1.0, d13);
   ASSERT_EQUAL_FP64(2.0, d14);
   ASSERT_EQUAL_FP64(2.0, d15);
-  ASSERT_EQUAL_FP64(-2.0, d16);
+  ASSERT_EQUAL_FP64(2.0, d16);
   ASSERT_EQUAL_FP64(-2.0, d17);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
-  ASSERT_EQUAL_FP64(0.0, d20);
-  ASSERT_EQUAL_FP64(-0.0, d21);
+  ASSERT_EQUAL_FP64(-2.0, d18);
+  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
+  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
+  ASSERT_EQUAL_FP64(0.0, d21);
+  ASSERT_EQUAL_FP64(-0.0, d22);
+  ASSERT_EQUAL_FP64(-0.0, d23);

   TEARDOWN();
 }
