Revision: 20778
Author:   plin...@gmail.com
Date:     Tue Apr 15 16:39:21 2014 UTC
Log:      MIPS: Add big-endian support for MIPS.

Important notices:

- A snapshot cannot be created for a big-endian target when cross-compiling
  on a little-endian host using the simulator.

- For i18n support to work on a big-endian target, the icudt46b.dat and
  icudt46b_dat.S files must be generated and upstreamed to the ICU repository.

- The mjsunit 'nans' test is endian dependent; it is skipped for the mips target.

- The zlib and Mandreel tests from the Octane 2.0 benchmark are endian
  dependent due to their use of typed arrays.
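
For context (not part of this patch), a minimal C++ sketch of the underlying
issue: the position of the sign/exponent word within a stored double flips
with the byte order, so anything that addresses the two 32-bit halves of a
double by fixed offset (typed-array views, the HeapNumber mantissa/exponent
accesses) is endian dependent. This is the distinction the new
kMantissaOffset/kExponentOffset constants encode.

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    double d = 1.0;  // bit pattern 0x3FF0000000000000
    uint32_t words[2];
    std::memcpy(words, &d, sizeof(d));
    // Little-endian: words[0] == 0x00000000 (mantissa word) and
    // words[1] == 0x3FF00000 (sign/exponent word); big-endian swaps the two.
    std::printf("word0=0x%08x word1=0x%08x\n", words[0], words[1]);
    return 0;
  }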

TEST=
BUG=
R=jkumme...@chromium.org, plin...@gmail.com

Review URL: https://codereview.chromium.org/228943009

Patch from Dusan Milosavljevic <dusan.milosavlje...@rt-rk.com>.
http://code.google.com/p/v8/source/detail?r=20778

Modified:
 /branches/bleeding_edge/Makefile
 /branches/bleeding_edge/build/toolchain.gypi
 /branches/bleeding_edge/src/conversions-inl.h
 /branches/bleeding_edge/src/globals.h
 /branches/bleeding_edge/src/heap-snapshot-generator.cc
 /branches/bleeding_edge/src/mips/assembler-mips.cc
 /branches/bleeding_edge/src/mips/assembler-mips.h
 /branches/bleeding_edge/src/mips/builtins-mips.cc
 /branches/bleeding_edge/src/mips/code-stubs-mips.cc
 /branches/bleeding_edge/src/mips/codegen-mips.cc
 /branches/bleeding_edge/src/mips/constants-mips.h
 /branches/bleeding_edge/src/mips/ic-mips.cc
 /branches/bleeding_edge/src/mips/lithium-codegen-mips.cc
 /branches/bleeding_edge/src/mips/macro-assembler-mips.cc
 /branches/bleeding_edge/src/objects.h
 /branches/bleeding_edge/src/runtime.cc
 /branches/bleeding_edge/test/cctest/cctest.status
 /branches/bleeding_edge/test/cctest/test-assembler-mips.cc
 /branches/bleeding_edge/test/cctest/test-platform.cc
 /branches/bleeding_edge/test/mjsunit/mjsunit.status
 /branches/bleeding_edge/test/mozilla/mozilla.status
 /branches/bleeding_edge/test/test262/test262.status
 /branches/bleeding_edge/tools/gyp/v8.gyp
 /branches/bleeding_edge/tools/run-tests.py
 /branches/bleeding_edge/tools/testrunner/local/statusfile.py

=======================================
--- /branches/bleeding_edge/Makefile    Tue Apr  8 08:26:40 2014 UTC
+++ /branches/bleeding_edge/Makefile    Tue Apr 15 16:39:21 2014 UTC
@@ -232,7 +232,7 @@

 # Architectures and modes to be compiled. Consider these to be internal
 # variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm arm64 mipsel
+ARCHES = ia32 x64 arm arm64 mips mipsel
 DEFAULT_ARCHES = ia32 x64 arm
 MODES = release debug optdebug
 DEFAULT_MODES = release debug
@@ -281,10 +281,6 @@
        $(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
                builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"

-mips mips.release mips.debug:
-       @echo "V8 does not support big-endian MIPS builds at the moment," \
-             "please use little-endian builds (mipsel)."
-
 # Compile targets. MODES and ARCHES are convenience targets.
 .SECONDEXPANSION:
 $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
=======================================
--- /branches/bleeding_edge/build/toolchain.gypi Wed Apr 9 09:54:49 2014 UTC
+++ /branches/bleeding_edge/build/toolchain.gypi Tue Apr 15 16:39:21 2014 UTC
@@ -278,6 +278,57 @@
           'V8_TARGET_ARCH_IA32',
         ],
       }],  # v8_target_arch=="ia32"
+      ['v8_target_arch=="mips"', {
+        'defines': [
+          'V8_TARGET_ARCH_MIPS',
+        ],
+        'variables': {
+ 'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
+        },
+        'conditions': [
+          ['mipscompiler=="yes"', {
+            'target_conditions': [
+              ['_toolset=="target"', {
+                'cflags': ['-EB'],
+                'ldflags': ['-EB'],
+                'conditions': [
+                  [ 'v8_use_mips_abi_hardfloat=="true"', {
+                    'cflags': ['-mhard-float'],
+                    'ldflags': ['-mhard-float'],
+                  }, {
+                    'cflags': ['-msoft-float'],
+                    'ldflags': ['-msoft-float'],
+                  }],
+                  ['mips_arch_variant=="mips32r2"', {
+                    'cflags': ['-mips32r2', '-Wa,-mips32r2'],
+                  }],
+                  ['mips_arch_variant=="mips32r1"', {
+                    'cflags': ['-mips32', '-Wa,-mips32'],
+                  }],
+                ],
+              }],
+            ],
+          }],
+          [ 'v8_can_use_fpu_instructions=="true"', {
+            'defines': [
+              'CAN_USE_FPU_INSTRUCTIONS',
+            ],
+          }],
+          [ 'v8_use_mips_abi_hardfloat=="true"', {
+            'defines': [
+              '__mips_hard_float=1',
+              'CAN_USE_FPU_INSTRUCTIONS',
+            ],
+          }, {
+            'defines': [
+              '__mips_soft_float=1'
+            ],
+          }],
+          ['mips_arch_variant=="mips32r2"', {
+            'defines': ['_MIPS_ARCH_MIPS32R2',],
+          }],
+        ],
+      }],  # v8_target_arch=="mips"
       ['v8_target_arch=="mipsel"', {
         'defines': [
           'V8_TARGET_ARCH_MIPS',
@@ -380,7 +431,7 @@
       ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
          or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
         (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
-         v8_target_arch=="mipsel")', {
+         v8_target_arch=="mips" or v8_target_arch=="mipsel")', {
         # Check whether the host compiler and target compiler support the
         # '-m32' option and set it if so.
         'target_conditions': [
=======================================
--- /branches/bleeding_edge/src/conversions-inl.h Mon Feb 10 12:43:10 2014 UTC
+++ /branches/bleeding_edge/src/conversions-inl.h Tue Apr 15 16:39:21 2014 UTC
@@ -75,7 +75,11 @@
   if (x < k2Pow52) {
     x += k2Pow52;
     uint32_t result;
+#ifndef V8_TARGET_BIG_ENDIAN
     Address mantissa_ptr = reinterpret_cast<Address>(&x);
+#else
+    Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
+#endif
     // Copy least significant 32 bits of mantissa.
     OS::MemCopy(&result, mantissa_ptr, sizeof(result));
     return negative ? ~result + 1 : result;
=======================================
--- /branches/bleeding_edge/src/globals.h       Fri Mar 21 09:28:26 2014 UTC
+++ /branches/bleeding_edge/src/globals.h       Tue Apr 15 16:39:21 2014 UTC
@@ -78,7 +78,7 @@
 #elif defined(__ARMEL__)
 #define V8_HOST_ARCH_ARM 1
 #define V8_HOST_ARCH_32_BIT 1
-#elif defined(__MIPSEL__)
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
 #define V8_HOST_ARCH_MIPS 1
 #define V8_HOST_ARCH_32_BIT 1
 #else
@@ -108,7 +108,7 @@
 #define V8_TARGET_ARCH_ARM64 1
 #elif defined(__ARMEL__)
 #define V8_TARGET_ARCH_ARM 1
-#elif defined(__MIPSEL__)
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
 #define V8_TARGET_ARCH_MIPS 1
 #else
 #error Target architecture was not detected as supported by v8
@@ -147,7 +147,7 @@
 #endif
 #endif

-// Determine architecture endiannes (we only support little-endian).
+// Determine architecture endianness.
 #if V8_TARGET_ARCH_IA32
 #define V8_TARGET_LITTLE_ENDIAN 1
 #elif V8_TARGET_ARCH_X64
@@ -157,9 +157,13 @@
 #elif V8_TARGET_ARCH_ARM64
 #define V8_TARGET_LITTLE_ENDIAN 1
 #elif V8_TARGET_ARCH_MIPS
+#if defined(__MIPSEB__)
+#define V8_TARGET_BIG_ENDIAN 1
+#else
 #define V8_TARGET_LITTLE_ENDIAN 1
+#endif
 #else
-#error Unknown target architecture endiannes
+#error Unknown target architecture endianness
 #endif

 // Determine whether the architecture uses an out-of-line constant pool.
=======================================
--- /branches/bleeding_edge/src/heap-snapshot-generator.cc Tue Apr 15 14:48:21 2014 UTC
+++ /branches/bleeding_edge/src/heap-snapshot-generator.cc Tue Apr 15 16:39:21 2014 UTC
@@ -1081,7 +1081,9 @@
     Address field = obj->address() + offset;
     ASSERT(!Memory::Object_at(field)->IsFailure());
     ASSERT(Memory::Object_at(field)->IsHeapObject());
-    *field |= kFailureTag;
+    Object* untagged = *reinterpret_cast<Object**>(field);
+    intptr_t tagged  = reinterpret_cast<intptr_t>(untagged) | kFailureTag;
+    *reinterpret_cast<Object**>(field) = reinterpret_cast<Object*>(tagged);
   }

  private:
=======================================
--- /branches/bleeding_edge/src/mips/assembler-mips.cc Wed Mar 19 15:34:17 2014 UTC
+++ /branches/bleeding_edge/src/mips/assembler-mips.cc Tue Apr 15 16:39:21 2014 UTC
@@ -1655,10 +1655,12 @@
 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
   // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
   // load to two 32-bit loads.
-  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+      Register::kMantissaOffset);
   FPURegister nextfpreg;
   nextfpreg.setcode(fd.code() + 1);
-  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
+  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+      Register::kExponentOffset);
 }


@@ -1670,10 +1672,12 @@
 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
   // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
   // store to two 32-bit stores.
-  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+      Register::kMantissaOffset);
   FPURegister nextfpreg;
   nextfpreg.setcode(fd.code() + 1);
-  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
+  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+      Register::kExponentOffset);
 }


=======================================
--- /branches/bleeding_edge/src/mips/assembler-mips.h Wed Mar 19 15:34:17 2014 UTC
+++ /branches/bleeding_edge/src/mips/assembler-mips.h Tue Apr 15 16:39:21 2014 UTC
@@ -77,6 +77,16 @@
   static const int kSizeInBytes = 4;
   static const int kCpRegister = 23;  // cp (s7) is the 23rd register.

+#if defined(V8_TARGET_LITTLE_ENDIAN)
+  static const int kMantissaOffset = 0;
+  static const int kExponentOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+  static const int kMantissaOffset = 4;
+  static const int kExponentOffset = 0;
+#else
+#error Unknown endianness
+#endif
+
   inline static int NumAllocatableRegisters();

   static int ToAllocationIndex(Register reg) {
=======================================
--- /branches/bleeding_edge/src/mips/builtins-mips.cc Tue Apr 15 08:26:26 2014 UTC
+++ /branches/bleeding_edge/src/mips/builtins-mips.cc Tue Apr 15 16:39:21 2014 UTC
@@ -470,9 +470,7 @@

       if (count_constructions) {
         __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-        __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
-        __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
-                kBitsPerByte);
+ __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
         __ sll(at, a0, kPointerSizeLog2);
         __ addu(a0, t5, at);
         __ sll(at, a3, kPointerSizeLog2);
@@ -525,12 +523,9 @@
       __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
// The field instance sizes contains both pre-allocated property fields
       // and in-object properties.
-      __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
-      __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
-             kBitsPerByte);
+ __ lbu(t6, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
       __ Addu(a3, a3, Operand(t6));
-      __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
-              kBitsPerByte);
+      __ lbu(t6, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
       __ subu(a3, a3, t6);

       // Done if no extra properties are to be allocated.
=======================================
--- /branches/bleeding_edge/src/mips/code-stubs-mips.cc Fri Mar 28 10:07:23 2014 UTC
+++ /branches/bleeding_edge/src/mips/code-stubs-mips.cc Tue Apr 15 16:39:21 2014 UTC
@@ -559,13 +559,14 @@


 void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
-  Register exponent = result1_;
-  Register mantissa = result2_;
-#else
-  Register exponent = result2_;
-  Register mantissa = result1_;
-#endif
+  Register exponent, mantissa;
+  if (kArchEndian == kLittle) {
+    exponent = result1_;
+    mantissa = result2_;
+  } else {
+    exponent = result2_;
+    mantissa = result1_;
+  }
   Label not_special;
   // Convert from Smi to integer.
   __ sra(source_, source_, kSmiTagSize);
@@ -671,8 +672,10 @@
   Register input_high = scratch2;
   Register input_low = scratch3;

-  __ lw(input_low, MemOperand(input_reg, double_offset));
-  __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
+  __ lw(input_low,
+      MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+  __ lw(input_high,
+      MemOperand(input_reg, double_offset + Register::kExponentOffset));

   Label normal_exponent, restore_sign;
   // Extract the biased exponent in result.
@@ -3532,9 +3535,15 @@
   {
     Label loop;
     __ bind(&loop);
-    __ lwr(scratch1, MemOperand(src));
-    __ Addu(src, src, Operand(kReadAlignment));
-    __ lwl(scratch1, MemOperand(src, -1));
+    if (kArchEndian == kBig) {
+      __ lwl(scratch1, MemOperand(src));
+      __ Addu(src, src, Operand(kReadAlignment));
+      __ lwr(scratch1, MemOperand(src, -1));
+    } else {
+      __ lwr(scratch1, MemOperand(src));
+      __ Addu(src, src, Operand(kReadAlignment));
+      __ lwl(scratch1, MemOperand(src, -1));
+    }
     __ sw(scratch1, MemOperand(dest));
     __ Addu(dest, dest, Operand(kReadAlignment));
     __ Subu(scratch2, limit, dest);
=======================================
--- /branches/bleeding_edge/src/mips/codegen-mips.cc Thu Feb 6 01:10:06 2014 UTC
+++ /branches/bleeding_edge/src/mips/codegen-mips.cc Tue Apr 15 16:39:21 2014 UTC
@@ -67,21 +67,13 @@
     Register temp2 = t1;
     Register temp3 = t2;

-    if (!IsMipsSoftFloatABI) {
-      // Input value is in f12 anyway, nothing to do.
-    } else {
-      __ Move(input, a0, a1);
-    }
+    __ MovFromFloatParameter(input);
     __ Push(temp3, temp2, temp1);
     MathExpGenerator::EmitMathExp(
         &masm, input, result, double_scratch1, double_scratch2,
         temp1, temp2, temp3);
     __ Pop(temp3, temp2, temp1);
-    if (!IsMipsSoftFloatABI) {
-      // Result is already in f0, nothing to do.
-    } else {
-      __ Move(v0, v1, result);
-    }
+    __ MovToFloatResult(result);
     __ Ret();
   }

@@ -167,11 +159,17 @@
     __ beq(a3, zero_reg, &aligned);  // Already aligned.
     __ subu(a2, a2, a3);  // In delay slot. a2 is the remining bytes count.

-    __ lwr(t8, MemOperand(a1));
-    __ addu(a1, a1, a3);
-    __ swr(t8, MemOperand(a0));
-    __ addu(a0, a0, a3);
-
+    if (kArchEndian == kLittle) {
+      __ lwr(t8, MemOperand(a1));
+      __ addu(a1, a1, a3);
+      __ swr(t8, MemOperand(a0));
+      __ addu(a0, a0, a3);
+    } else {
+      __ lwl(t8, MemOperand(a1));
+      __ addu(a1, a1, a3);
+      __ swl(t8, MemOperand(a0));
+      __ addu(a0, a0, a3);
+    }
     // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
     // count how many bytes we have to copy after all the 64 byte chunks are
     // copied and a3 to the dst pointer after all the 64 byte chunks have been
@@ -323,12 +321,21 @@
     __ beq(a3, zero_reg, &ua_chk16w);
     __ subu(a2, a2, a3);  // In delay slot.

-    __ lwr(v1, MemOperand(a1));
-    __ lwl(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
-    __ addu(a1, a1, a3);
-    __ swr(v1, MemOperand(a0));
-    __ addu(a0, a0, a3);
+    if (kArchEndian == kLittle) {
+      __ lwr(v1, MemOperand(a1));
+      __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+      __ addu(a1, a1, a3);
+      __ swr(v1, MemOperand(a0));
+      __ addu(a0, a0, a3);
+    } else {
+      __ lwl(v1, MemOperand(a1));
+      __ lwr(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+      __ addu(a1, a1, a3);
+      __ swl(v1, MemOperand(a0));
+      __ addu(a0, a0, a3);
+    }

     // Now the dst (but not the source) is aligned. Set a2 to count how many
     // bytes we have to copy after all the 64 byte chunks are copied and a3 to
@@ -357,40 +364,77 @@

     __ bind(&ua_loop16w);
     __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
-    __ lwr(t0, MemOperand(a1));
-    __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
-    __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+    if (kArchEndian == kLittle) {
+      __ lwr(t0, MemOperand(a1));
+      __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+      __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+
+      if (pref_hint_store == kPrefHintPrepareForStore) {
+        __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+      }
+ __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+      __ bind(&ua_skip_pref);
+      __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+      __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+      __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+      __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+      __ lwl(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+    } else {
+      __ lwl(t0, MemOperand(a1));
+      __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
+      __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));

-    if (pref_hint_store == kPrefHintPrepareForStore) {
-      __ sltu(v1, t9, a0);
-      __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
-    }
- __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+      if (pref_hint_store == kPrefHintPrepareForStore) {
+        __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+      }
+ __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.

-    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
-    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

-    __ bind(&ua_skip_pref);
-    __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
-    __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
-    __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
-    __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
-    __ lwl(t0,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t1,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t2,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t3,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t4,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t5,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t6,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t7,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+      __ bind(&ua_skip_pref);
+      __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
+      __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
+      __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
+      __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
+      __ lwr(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+    }
     __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
     __ sw(t0, MemOperand(a0));
     __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
@@ -400,30 +444,57 @@
     __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
     __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
     __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
-    __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
-    __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
-    __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
-    __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
-    __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
-    __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
-    __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
-    __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
-    __ lwl(t0,
- MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t1,
- MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t2,
- MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t3,
- MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t4,
- MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t5,
- MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t6,
- MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t7,
- MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+    if (kArchEndian == kLittle) {
+      __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
+      __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
+      __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
+      __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
+      __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
+      __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
+      __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
+      __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
+      __ lwl(t0,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t1,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t2,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t3,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t4,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t5,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t6,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t7,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+    } else {
+      __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
+      __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
+      __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
+      __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
+      __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
+      __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
+      __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
+      __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
+      __ lwr(t0,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t1,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t2,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t3,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t4,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t5,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t6,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t7,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+    }
     __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
     __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
     __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
@@ -447,30 +518,57 @@

     __ beq(a2, t8, &ua_chk1w);
     __ nop();  // In delay slot.
-    __ lwr(t0, MemOperand(a1));
-    __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
-    __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
-    __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
-    __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
-    __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
-    __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
-    __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
-    __ lwl(t0,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t1,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t2,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t3,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t4,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t5,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t6,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
-    __ lwl(t7,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+    if (kArchEndian == kLittle) {
+      __ lwr(t0, MemOperand(a1));
+      __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+      __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+      __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
+      __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+      __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+      __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+      __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+      __ lwl(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwl(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+    } else {
+      __ lwl(t0, MemOperand(a1));
+      __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
+      __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
+      __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
+      __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
+      __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
+      __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
+      __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
+      __ lwr(t0,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t1,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t2,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t3,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t4,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t5,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t6,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+      __ lwr(t7,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+    }
     __ addiu(a1, a1, 8 * loadstore_chunk);
     __ sw(t0, MemOperand(a0));
     __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
@@ -491,9 +589,15 @@
     __ addu(a3, a0, a3);

     __ bind(&ua_wordCopy_loop);
-    __ lwr(v1, MemOperand(a1));
-    __ lwl(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    if (kArchEndian == kLittle) {
+      __ lwr(v1, MemOperand(a1));
+      __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    } else {
+      __ lwl(v1, MemOperand(a1));
+      __ lwr(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    }
     __ addiu(a0, a0, loadstore_chunk);
     __ addiu(a1, a1, loadstore_chunk);
     __ bne(a0, a3, &ua_wordCopy_loop);
@@ -722,8 +826,8 @@
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
     __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
   }
-  __ sw(t0, MemOperand(t3));  // mantissa
-  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
+  __ sw(t0, MemOperand(t3, Register::kMantissaOffset));  // mantissa
+  __ sw(t1, MemOperand(t3, Register::kExponentOffset));  // exponent
   __ Addu(t3, t3, kDoubleSize);

   __ bind(&entry);
@@ -773,7 +877,9 @@
   __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));

   // Prepare for conversion loop.
- __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ Addu(t0, t0, Operand(
+        FixedDoubleArray::kHeaderSize - kHeapObjectTag
+        + Register::kExponentOffset));
   __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
   __ Addu(t2, t2, Operand(kHeapObjectTag));
   __ sll(t1, t1, 1);
@@ -782,7 +888,8 @@
   __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
   // Using offsetted addresses.
   // a3: begin of destination FixedArray element fields, not tagged
-  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
+  // t0: begin of source FixedDoubleArray element fields, not tagged,
+  //     points to the exponent
   // t1: end of destination FixedArray, not tagged
   // t2: destination FixedArray
   // t3: the-hole pointer
@@ -805,7 +912,9 @@
   // Non-hole double, copy value into a heap number.
   __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
   // a2: new heap number
-  __ lw(a0, MemOperand(t0, -12));
+  // Load mantissa of current element; t0 points to exponent of next element.
+  __ lw(a0, MemOperand(t0, (Register::kMantissaOffset
+      - Register::kExponentOffset - kDoubleSize)));
   __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
   __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
   __ mov(a0, a3);
@@ -1010,8 +1119,8 @@
   __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
   __ sll(at, temp2, 3);
   __ Addu(temp3, temp3, Operand(at));
-  __ lw(temp2, MemOperand(temp3, 0));
-  __ lw(temp3, MemOperand(temp3, kPointerSize));
+  __ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
+  __ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
   // The first word is loaded is the lower number register.
   if (temp2.code() < temp3.code()) {
     __ sll(at, temp1, 20);
=======================================
--- /branches/bleeding_edge/src/mips/constants-mips.h Fri Dec 6 16:23:49 2013 UTC
+++ /branches/bleeding_edge/src/mips/constants-mips.h Tue Apr 15 16:39:21 2014 UTC
@@ -55,6 +55,18 @@
   static const ArchVariants kArchVariant = kMips32r1;
 #endif

+enum Endianness {
+  kLittle,
+  kBig
+};
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+  static const Endianness kArchEndian = kLittle;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+  static const Endianness kArchEndian = kBig;
+#else
+#error Unknown endianness
+#endif

 #if(defined(__mips_hard_float) && __mips_hard_float != 0)
 // Use floating-point coprocessor instructions. This flag is raised when
@@ -69,6 +81,15 @@
 const bool IsMipsSoftFloatABI = true;
 #endif

+#if defined(V8_TARGET_LITTLE_ENDIAN)
+const uint32_t kHoleNanUpper32Offset = 4;
+const uint32_t kHoleNanLower32Offset = 0;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+const uint32_t kHoleNanUpper32Offset = 0;
+const uint32_t kHoleNanLower32Offset = 4;
+#else
+#error Unknown endianness
+#endif

 // Defines constants and accessor classes to assemble, disassemble and
 // simulate MIPS32 instructions.
=======================================
--- /branches/bleeding_edge/src/mips/ic-mips.cc Tue Mar 11 22:16:56 2014 UTC
+++ /branches/bleeding_edge/src/mips/ic-mips.cc Tue Apr 15 16:39:21 2014 UTC
@@ -918,7 +918,7 @@
   // We have to see if the double version of the hole is present. If so
   // go to the runtime.
   __ Addu(address, elements,
-          Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
+          Operand(FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset
                   - kHeapObjectTag));
   __ sll(at, key, kPointerSizeLog2);
   __ addu(address, address, at);
=======================================
--- /branches/bleeding_edge/src/mips/lithium-codegen-mips.cc Tue Apr 15 15:11:36 2014 UTC
+++ /branches/bleeding_edge/src/mips/lithium-codegen-mips.cc Tue Apr 15 16:39:21 2014 UTC
@@ -3219,7 +3219,7 @@
   __ ldc1(result, MemOperand(scratch));

   if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+    __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
   }
 }
=======================================
--- /branches/bleeding_edge/src/mips/macro-assembler-mips.cc Wed Mar 26 15:51:48 2014 UTC
+++ /branches/bleeding_edge/src/mips/macro-assembler-mips.cc Tue Apr 15 16:39:21 2014 UTC
@@ -3313,13 +3313,24 @@

   // TODO(kalmard) check if this can be optimized to use sw in most cases.
   // Can't use unaligned access - copy byte by byte.
-  sb(scratch, MemOperand(dst, 0));
-  srl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 1));
-  srl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 2));
-  srl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 3));
+  if (kArchEndian == kLittle) {
+    sb(scratch, MemOperand(dst, 0));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 1));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 2));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 3));
+  } else {
+    sb(scratch, MemOperand(dst, 3));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 2));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 1));
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 0));
+  }
+
   Addu(dst, dst, 4);

   Subu(length, length, Operand(kPointerSize));
@@ -3424,11 +3435,12 @@
   bind(&have_double_value);
   sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
   Addu(scratch1, scratch1, elements_reg);
-  sw(mantissa_reg, FieldMemOperand(
-     scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
-  uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
-      sizeof(kHoleNanLower32);
-  sw(exponent_reg, FieldMemOperand(scratch1, offset));
+  sw(mantissa_reg,
+ FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+          + kHoleNanLower32Offset));
+  sw(exponent_reg,
+ FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+          + kHoleNanUpper32Offset));
   jmp(&done);

   bind(&maybe_nan);
@@ -3526,7 +3538,11 @@

 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
   if (IsMipsSoftFloatABI) {
-    Move(dst, v0, v1);
+    if (kArchEndian == kLittle) {
+      Move(dst, v0, v1);
+    } else {
+      Move(dst, v1, v0);
+    }
   } else {
     Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
   }
@@ -3535,7 +3551,11 @@

 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) {
   if (IsMipsSoftFloatABI) {
-    Move(dst, a0, a1);
+    if (kArchEndian == kLittle) {
+      Move(dst, a0, a1);
+    } else {
+      Move(dst, a1, a0);
+    }
   } else {
     Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
   }
@@ -3546,7 +3566,11 @@
   if (!IsMipsSoftFloatABI) {
     Move(f12, src);
   } else {
-    Move(a0, a1, src);
+    if (kArchEndian == kLittle) {
+      Move(a0, a1, src);
+    } else {
+      Move(a1, a0, src);
+    }
   }
 }

@@ -3555,7 +3579,11 @@
   if (!IsMipsSoftFloatABI) {
     Move(f0, src);
   } else {
-    Move(v0, v1, src);
+    if (kArchEndian == kLittle) {
+      Move(v0, v1, src);
+    } else {
+      Move(v1, v0, src);
+    }
   }
 }

@@ -3572,8 +3600,13 @@
       Move(f14, src2);
     }
   } else {
-    Move(a0, a1, src1);
-    Move(a2, a3, src2);
+    if (kArchEndian == kLittle) {
+      Move(a0, a1, src1);
+      Move(a2, a3, src2);
+    } else {
+      Move(a1, a0, src1);
+      Move(a3, a2, src2);
+    }
   }
 }

=======================================
--- /branches/bleeding_edge/src/objects.h       Tue Apr 15 15:17:04 2014 UTC
+++ /branches/bleeding_edge/src/objects.h       Tue Apr 15 16:39:21 2014 UTC
@@ -1961,11 +1961,18 @@
   // Layout description.
   static const int kValueOffset = HeapObject::kHeaderSize;
   // IEEE doubles are two 32 bit words. The first is just mantissa, the second
-  // is a mixture of sign, exponent and mantissa. Our current platforms are all
-  // little endian apart from non-EABI arm which is little endian with big
-  // endian floating point word ordering!
+  // is a mixture of sign, exponent and mantissa. The offsets of two 32 bit
+  // words within double numbers are endian dependent and they are set
+  // accordingly.
+#if defined(V8_TARGET_LITTLE_ENDIAN)
   static const int kMantissaOffset = kValueOffset;
   static const int kExponentOffset = kValueOffset + 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+  static const int kMantissaOffset = kValueOffset + 4;
+  static const int kExponentOffset = kValueOffset;
+#else
+#error Unknown byte ordering
+#endif

   static const int kSize = kValueOffset + kDoubleSize;
   static const uint32_t kSignMask = 0x80000000u;
@@ -7418,9 +7425,9 @@
   // The construction counter for inobject slack tracking is stored in the
   // most significant byte of compiler_hints which is otherwise unused.
   // Its offset depends on the endian-ness of the architecture.
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if defined(V8_TARGET_LITTLE_ENDIAN)
   static const int kConstructionCountOffset = kCompilerHintsOffset + 3;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif defined(V8_TARGET_BIG_ENDIAN)
   static const int kConstructionCountOffset = kCompilerHintsOffset + 0;
 #else
 #error Unknown byte ordering
@@ -7494,12 +7501,12 @@
   static const int kNativeBitWithinByte =
       (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;

-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if defined(V8_TARGET_LITTLE_ENDIAN)
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
       (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
   static const int kNativeByteOffset = kCompilerHintsOffset +
       (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
-#elif __BYTE_ORDER == __BIG_ENDIAN
+#elif defined(V8_TARGET_BIG_ENDIAN)
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
       (kCompilerHintsSize - 1) -
       ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
=======================================
--- /branches/bleeding_edge/src/runtime.cc      Tue Apr 15 15:17:04 2014 UTC
+++ /branches/bleeding_edge/src/runtime.cc      Tue Apr 15 16:39:21 2014 UTC
@@ -9158,8 +9158,15 @@
 #else
 typedef uint64_t ObjectPair;
 static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
   return reinterpret_cast<uint32_t>(x) |
       (reinterpret_cast<ObjectPair>(y) << 32);
+#elif defined(V8_TARGET_BIG_ENDIAN)
+    return reinterpret_cast<uint32_t>(y) |
+        (reinterpret_cast<ObjectPair>(x) << 32);
+#else
+#error Unknown endianness
+#endif
 }
 #endif

=======================================
--- /branches/bleeding_edge/test/cctest/cctest.status Thu Apr 10 08:04:50 2014 UTC
+++ /branches/bleeding_edge/test/cctest/cctest.status Tue Apr 15 16:39:21 2014 UTC
@@ -186,7 +186,7 @@
 }],  # 'arch == arm'

##############################################################################
-['arch == mipsel', {
+['arch == mipsel or arch == mips', {

   # BUG(2657): Test sometimes times out on MIPS simulator.
'test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate': [PASS, TIMEOUT],
@@ -196,7 +196,7 @@
   'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
   'test-serialize/DeserializeAndRunScript2': [SKIP],
   'test-serialize/DeserializeFromSecondSerialization': [SKIP],
-}],  # 'arch == mipsel'
+}],  # 'arch == mipsel or arch == mips'

##############################################################################
 ['arch == android_arm or arch == android_ia32', {
=======================================
--- /branches/bleeding_edge/test/cctest/test-assembler-mips.cc Tue Feb 4 14:05:46 2014 UTC
+++ /branches/bleeding_edge/test/cctest/test-assembler-mips.cc Tue Apr 15 16:39:21 2014 UTC
@@ -533,11 +533,21 @@
   USE(dummy);

   CHECK_EQ(0x11223344, t.r1);
+#if __BYTE_ORDER == __LITTLE_ENDIAN
   CHECK_EQ(0x3344, t.r2);
   CHECK_EQ(0xffffbbcc, t.r3);
   CHECK_EQ(0x0000bbcc, t.r4);
   CHECK_EQ(0xffffffcc, t.r5);
   CHECK_EQ(0x3333bbcc, t.r6);
+#elif __BYTE_ORDER == __BIG_ENDIAN
+  CHECK_EQ(0x1122, t.r2);
+  CHECK_EQ(0xffff99aa, t.r3);
+  CHECK_EQ(0x000099aa, t.r4);
+  CHECK_EQ(0xffffff99, t.r5);
+  CHECK_EQ(0x99aa3333, t.r6);
+#else
+#error Unknown endianness
+#endif
 }


@@ -942,6 +952,7 @@
   Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
   USE(dummy);

+#if __BYTE_ORDER == __LITTLE_ENDIAN
   CHECK_EQ(0x44bbccdd, t.lwl_0);
   CHECK_EQ(0x3344ccdd, t.lwl_1);
   CHECK_EQ(0x223344dd, t.lwl_2);
@@ -961,6 +972,29 @@
   CHECK_EQ(0xbbccdd44, t.swr_1);
   CHECK_EQ(0xccdd3344, t.swr_2);
   CHECK_EQ(0xdd223344, t.swr_3);
+#elif __BYTE_ORDER == __BIG_ENDIAN
+  CHECK_EQ(0x11223344, t.lwl_0);
+  CHECK_EQ(0x223344dd, t.lwl_1);
+  CHECK_EQ(0x3344ccdd, t.lwl_2);
+  CHECK_EQ(0x44bbccdd, t.lwl_3);
+
+  CHECK_EQ(0xaabbcc11, t.lwr_0);
+  CHECK_EQ(0xaabb1122, t.lwr_1);
+  CHECK_EQ(0xaa112233, t.lwr_2);
+  CHECK_EQ(0x11223344, t.lwr_3);
+
+  CHECK_EQ(0xaabbccdd, t.swl_0);
+  CHECK_EQ(0x11aabbcc, t.swl_1);
+  CHECK_EQ(0x1122aabb, t.swl_2);
+  CHECK_EQ(0x112233aa, t.swl_3);
+
+  CHECK_EQ(0xdd223344, t.swr_0);
+  CHECK_EQ(0xccdd3344, t.swr_1);
+  CHECK_EQ(0xbbccdd44, t.swr_2);
+  CHECK_EQ(0xaabbccdd, t.swr_3);
+#else
+#error Unknown endianness
+#endif
 }


=======================================
--- /branches/bleeding_edge/test/cctest/test-platform.cc Wed Feb 12 09:19:30 2014 UTC
+++ /branches/bleeding_edge/test/cctest/test-platform.cc Tue Apr 15 16:39:21 2014 UTC
@@ -59,7 +59,7 @@
   do { \
     ASM("mov x16, sp; str x16, %0" : "=g" (sp_addr)); \
   } while (0)
-#elif defined(__MIPSEL__)
+#elif defined(__MIPSEB__) || defined(__MIPSEL__)
 #define GET_STACK_POINTER() \
   static int sp_addr = 0; \
   do { \
=======================================
--- /branches/bleeding_edge/test/mjsunit/mjsunit.status Tue Apr 15 08:26:26 2014 UTC
+++ /branches/bleeding_edge/test/mjsunit/mjsunit.status Tue Apr 15 16:39:21 2014 UTC
@@ -70,7 +70,7 @@
##############################################################################
   # These use a built-in that's only present in debug mode. They take
   # too long to run in debug mode on ARM and MIPS.
-  'fuzz-natives-part*': [PASS, ['mode == release or arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel', SKIP]],
+  'fuzz-natives-part*': [PASS, ['mode == release or arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips', SKIP]],

'big-object-literal': [PASS, ['arch == arm or arch == android_arm or arch == android_arm64', SKIP]],

@@ -78,7 +78,7 @@
   'array-constructor': [PASS, TIMEOUT],

   # Very slow on ARM and MIPS, contains no architecture dependent code.
-  'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel', TIMEOUT]],
+  'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips', TIMEOUT]],

##############################################################################
  # This test expects to reach a certain recursion depth, which may not work
@@ -122,6 +122,11 @@
   # BUG(v8:2989). PASS/FAIL on linux32 because crankshaft is turned off for
   # nosse2. Also for arm novfp3.
'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == ia32 or arch == arm and simulator == True', PASS]],
+
+  # Skip endian dependent test for mips due to different typed views of the same
+  # array buffer.
+  'nans': [PASS, ['arch == mips', SKIP]],
+
 }],  # ALWAYS

##############################################################################
@@ -292,7 +297,7 @@
 }],  # 'arch == arm or arch == android_arm'

##############################################################################
-['arch == mipsel', {
+['arch == mipsel or arch == mips', {

   # Slow tests which times out in debug mode.
   'try': [PASS, ['mode == debug', SKIP]],
@@ -328,7 +333,7 @@

   # Currently always deopt on minus zero
   'math-floor-of-div-minus-zero': [SKIP],
-}],  # 'arch == mipsel'
+}],  # 'arch == mipsel or arch == mips'

##############################################################################
 # Native Client uses the ARM simulator so will behave similarly to arm
=======================================
--- /branches/bleeding_edge/test/mozilla/mozilla.status Thu Mar 27 14:25:22 2014 UTC
+++ /branches/bleeding_edge/test/mozilla/mozilla.status Tue Apr 15 16:39:21 2014 UTC
@@ -141,8 +141,8 @@
   'ecma/Date/15.9.5.28-1': [PASS, FAIL],

   # 1050186: Arm/MIPS vm is broken; probably unrelated to dates
-  'ecma/Array/15.4.4.5-3': [PASS, ['arch == arm or arch == mipsel', FAIL]],
-  'ecma/Date/15.9.5.22-2': [PASS, ['arch == arm or arch == mipsel', FAIL]],
+  'ecma/Array/15.4.4.5-3': [PASS, ['arch == arm or arch == mipsel or arch == mips', FAIL]],
+  'ecma/Date/15.9.5.22-2': [PASS, ['arch == arm or arch == mipsel or arch == mips', FAIL]],

   # Flaky test that fails due to what appears to be a bug in the test.
   # Occurs depending on current time
@@ -874,6 +874,25 @@
   'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
 }],  # 'arch == mipsel'

+['arch == mips', {
+
+  # BUG(3251229): Times out when running new crankshaft test script.
+  'ecma_3/RegExp/regress-311414': [SKIP],
+  'ecma/Date/15.9.5.8': [SKIP],
+  'ecma/Date/15.9.5.10-2': [SKIP],
+  'ecma/Date/15.9.5.11-2': [SKIP],
+  'ecma/Date/15.9.5.12-2': [SKIP],
+  'js1_5/Array/regress-99120-02': [SKIP],
+  'js1_5/extensions/regress-371636': [SKIP],
+  'js1_5/Regress/regress-203278-1': [SKIP],
+  'js1_5/Regress/regress-404755': [SKIP],
+  'js1_5/Regress/regress-451322': [SKIP],
+
+
+  # BUG(1040): Allow this test to timeout.
+  'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
+}],  # 'arch == mips'
+
 ['arch == arm64 and simulator_run == True', {

   'js1_5/GC/regress-203278-2': [SKIP],
=======================================
--- /branches/bleeding_edge/test/test262/test262.status Fri Mar 21 09:28:26 2014 UTC
+++ /branches/bleeding_edge/test/test262/test262.status Tue Apr 15 16:39:21 2014 UTC
@@ -99,7 +99,7 @@
   'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
 }],  # ALWAYS

-['arch == arm or arch == mipsel or arch == arm64', {
+['arch == arm or arch == mipsel or arch == mips or arch == arm64', {

   # TODO(mstarzinger): Causes stack overflow on simulators due to eager
   # compilation of parenthesized function literals. Needs investigation.
@@ -112,5 +112,5 @@
   'S15.1.3.2_A2.5_T1': [SKIP],
   'S15.1.3.3_A2.3_T1': [SKIP],
   'S15.1.3.4_A2.3_T1': [SKIP],
-}],  # 'arch == arm or arch == mipsel or arch == arm64'
+}],  # 'arch == arm or arch == mipsel or arch == mips or arch == arm64'
 ]
=======================================
--- /branches/bleeding_edge/tools/gyp/v8.gyp    Mon Apr 14 13:38:16 2014 UTC
+++ /branches/bleeding_edge/tools/gyp/v8.gyp    Tue Apr 15 16:39:21 2014 UTC
@@ -727,7 +727,7 @@
             '../../src/ia32/stub-cache-ia32.cc',
           ],
         }],
-        ['v8_target_arch=="mipsel"', {
+        ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
           'sources': [  ### gcmole(arch:mipsel) ###
             '../../src/mips/assembler-mips.cc',
             '../../src/mips/assembler-mips.h',
=======================================
--- /branches/bleeding_edge/tools/run-tests.py  Thu Apr 10 07:25:49 2014 UTC
+++ /branches/bleeding_edge/tools/run-tests.py  Tue Apr 15 16:39:21 2014 UTC
@@ -80,6 +80,7 @@
                    "android_ia32",
                    "arm",
                    "ia32",
+                   "mips",
                    "mipsel",
                    "nacl_ia32",
                    "nacl_x64",
@@ -90,6 +91,7 @@
               "android_arm64",
               "android_ia32",
               "arm",
+              "mips",
               "mipsel",
               "nacl_ia32",
               "nacl_x64",
=======================================
--- /branches/bleeding_edge/tools/testrunner/local/statusfile.py Thu Mar 27 10:06:53 2014 UTC
+++ /branches/bleeding_edge/tools/testrunner/local/statusfile.py Tue Apr 15 16:39:21 2014 UTC
@@ -53,8 +53,8 @@
 # Support arches, modes to be written as keywords instead of strings.
 VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32",
-            "arm", "arm64", "ia32", "mipsel", "x64", "nacl_ia32", "nacl_x64",
-            "macos", "windows", "linux"]:
+            "arm", "arm64", "ia32", "mips", "mipsel", "x64", "nacl_ia32",
+            "nacl_x64", "macos", "windows", "linux"]:
   VARIABLES[var] = var

