Revision: 13284
Author:   [email protected]
Date:     Fri Dec 28 03:09:16 2012
Log:      Refactor and improve inlined double-aligned allocations

Change is performance neutral but generates smaller code and encapsulates double alignment in the macro-assembler rather than at the allocation site.

Review URL: https://codereview.chromium.org/11684005
http://code.google.com/p/v8/source/detail?r=13284

Modified:
 /branches/bleeding_edge/src/arm/code-stubs-arm.cc
 /branches/bleeding_edge/src/arm/codegen-arm.cc
 /branches/bleeding_edge/src/arm/macro-assembler-arm.cc
 /branches/bleeding_edge/src/arm/macro-assembler-arm.h
 /branches/bleeding_edge/src/ia32/builtins-ia32.cc
 /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc
 /branches/bleeding_edge/src/ia32/codegen-ia32.cc
 /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc
 /branches/bleeding_edge/src/ia32/macro-assembler-ia32.h
 /branches/bleeding_edge/src/macro-assembler.h
 /branches/bleeding_edge/src/x64/code-stubs-x64.cc
 /branches/bleeding_edge/src/x64/macro-assembler-x64.cc
 /branches/bleeding_edge/src/x64/macro-assembler-x64.h

=======================================
--- /branches/bleeding_edge/src/arm/code-stubs-arm.cc Thu Dec 20 08:31:19 2012
+++ /branches/bleeding_edge/src/arm/code-stubs-arm.cc Fri Dec 28 03:09:16 2012
@@ -361,12 +361,11 @@

   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size,
-                        r0,
-                        r1,
-                        r2,
-                        fail,
-                        TAG_OBJECT);
+  AllocationFlags flags = TAG_OBJECT;
+  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+  }
+  __ AllocateInNewSpace(size, r0, r1, r2, fail, flags);

   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
=======================================
--- /branches/bleeding_edge/src/arm/codegen-arm.cc      Tue Dec 18 08:25:45 2012
+++ /branches/bleeding_edge/src/arm/codegen-arm.cc      Fri Dec 28 03:09:16 2012
@@ -193,27 +193,10 @@
   // Allocate new FixedDoubleArray.
   // Use lr as a temporary register.
   __ mov(lr, Operand(r5, LSL, 2));
-  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize + kPointerSize));
-  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
+  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
   // r6: destination FixedDoubleArray, not tagged as heap object.

-  // Align the array conveniently for doubles.
-  // Store a filler value in the unused memory.
-  Label aligned, aligned_done;
-  __ tst(r6, Operand(kDoubleAlignmentMask));
-  __ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map()));
-  __ b(eq, &aligned);
-  // Store at the beginning of the allocated memory and update the base pointer.
-  __ str(ip, MemOperand(r6, kPointerSize, PostIndex));
-  __ b(&aligned_done);
-
-  __ bind(&aligned);
-  // Store the filler at the end of the allocated memory.
-  __ sub(lr, lr, Operand(kPointerSize));
-  __ str(ip, MemOperand(r6, lr));
-
-  __ bind(&aligned_done);
-
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
   __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Tue Dec 18 08:25:45 2012
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Fri Dec 28 03:09:16 2012
@@ -1616,6 +1616,18 @@
     // Load allocation limit into ip. Result already contains allocation top.
     ldr(ip, MemOperand(topaddr, limit - top));
   }
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // always safe because the limit of the heap is always aligned.
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+    Label aligned;
+    b(eq, &aligned);
+    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+    bind(&aligned);
+  }

   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top.
@@ -1701,6 +1713,18 @@
     // Load allocation limit into ip. Result already contains allocation top.
     ldr(ip, MemOperand(topaddr, limit - top));
   }
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // always safe because the limit of the heap is always aligned.
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+    Label aligned;
+    b(eq, &aligned);
+    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+    bind(&aligned);
+  }

   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top. Object size may be in words so a shift is
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.h Tue Dec 18 08:25:45 2012
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.h Fri Dec 28 03:09:16 2012
@@ -54,20 +54,6 @@
 const Register cp = { 8 };  // JavaScript context pointer
 const Register kRootRegister = { 10 };  // Roots array pointer.

-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
-  // No special flags.
-  NO_ALLOCATION_FLAGS = 0,
-  // Return the pointer to the allocated already tagged as a heap object.
-  TAG_OBJECT = 1 << 0,
-  // The content of the result register already contains the allocation top in
-  // new space.
-  RESULT_CONTAINS_TOP = 1 << 1,
-  // Specify that the requested size of the space to allocate is specified in
-  // words instead of bytes.
-  SIZE_IN_WORDS = 1 << 2
-};
-
 // Flags used for AllocateHeapNumber
 enum TaggingMode {
   // Tag the result.
=======================================
--- /branches/bleeding_edge/src/ia32/builtins-ia32.cc Tue Dec 18 08:25:45 2012
+++ /branches/bleeding_edge/src/ia32/builtins-ia32.cc Fri Dec 28 03:09:16 2012
@@ -257,6 +257,7 @@
       __ AllocateInNewSpace(FixedArray::kHeaderSize,
                             times_pointer_size,
                             edx,
+                            REGISTER_VALUE_IS_INT32,
                             edi,
                             ecx,
                             no_reg,
@@ -1102,8 +1103,9 @@
   // requested elements.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
-                        times_half_pointer_size,  // array_size is a smi.
+                        times_pointer_size,
                         array_size,
+                        REGISTER_VALUE_IS_SMI,
                         result,
                         elements_array_end,
                         scratch,
=======================================
--- /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc Fri Dec 21 10:16:27 2012
+++ /branches/bleeding_edge/src/ia32/code-stubs-ia32.cc Fri Dec 28 03:09:16 2012
@@ -340,7 +340,11 @@

   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
+  AllocationFlags flags = TAG_OBJECT;
+  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+  }
+  __ AllocateInNewSpace(size, eax, ebx, edx, fail, flags);

   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -4080,8 +4084,9 @@
   // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
   // Elements:  [Map][Length][..elements..]
   __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
-                        times_half_pointer_size,
-                        ebx,  // In: Number of elements (times 2, being a smi)
+                        times_pointer_size,
+                        ebx,  // In: Number of elements as a smi
+                        REGISTER_VALUE_IS_SMI,
                         eax,  // Out: Start of allocation (tagged).
                         ecx,  // Out: End of allocation.
                         edx,  // Scratch register
=======================================
--- /branches/bleeding_edge/src/ia32/codegen-ia32.cc Wed Dec 5 07:49:22 2012
+++ /branches/bleeding_edge/src/ia32/codegen-ia32.cc Fri Dec 28 03:09:16 2012
@@ -434,24 +434,11 @@
   // Allocate new FixedDoubleArray.
   // edx: receiver
   // edi: length of source FixedArray (smi-tagged)
-  __ lea(esi, Operand(edi,
-                      times_4,
-                      FixedDoubleArray::kHeaderSize + kPointerSize));
-  __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
-
-  Label aligned, aligned_done;
-  __ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
-  __ j(zero, &aligned, Label::kNear);
-  __ mov(FieldOperand(eax, 0),
-         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
-  __ add(eax, Immediate(kPointerSize));
-  __ jmp(&aligned_done);
-
-  __ bind(&aligned);
-  __ mov(Operand(eax, esi, times_1, -kPointerSize-1),
-         Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
-
-  __ bind(&aligned_done);
+  AllocationFlags flags =
+      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+  __ AllocateInNewSpace(FixedDoubleArray::kHeaderSize, times_8,
+                        edi, REGISTER_VALUE_IS_SMI,
+                        eax, ebx, no_reg, &gc_required, flags);

   // eax: destination FixedDoubleArray
   // edi: number of elements
=======================================
--- /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc Tue Dec 18 08:25:45 2012
+++ /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc Fri Dec 28 03:09:16 2012
@@ -1241,6 +1241,7 @@
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1259,6 +1260,19 @@

   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
+
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    Label aligned;
+    test(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    mov(Operand(result, 0),
+        Immediate(isolate()->factory()->one_pointer_filler_map()));
+    add(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
+  }

   Register top_reg = result_end.is_valid() ? result_end : result;

@@ -1278,26 +1292,31 @@
   UpdateAllocationTopHelper(top_reg, scratch);

   // Tag result if requested.
+  bool tag_result = (flags & TAG_OBJECT) != 0;
   if (top_reg.is(result)) {
-    if ((flags & TAG_OBJECT) != 0) {
+    if (tag_result) {
       sub(result, Immediate(object_size - kHeapObjectTag));
     } else {
       sub(result, Immediate(object_size));
     }
-  } else if ((flags & TAG_OBJECT) != 0) {
-    add(result, Immediate(kHeapObjectTag));
+  } else if (tag_result) {
+    ASSERT(kHeapObjectTag == 1);
+    inc(result);
   }
 }


-void MacroAssembler::AllocateInNewSpace(int header_size,
-                                        ScaleFactor element_size,
-                                        Register element_count,
-                                        Register result,
-                                        Register result_end,
-                                        Register scratch,
-                                        Label* gc_required,
-                                        AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(
+    int header_size,
+    ScaleFactor element_size,
+    Register element_count,
+    RegisterValueType element_count_type,
+    Register result,
+    Register result_end,
+    Register scratch,
+    Label* gc_required,
+    AllocationFlags flags) {
+  ASSERT((flags & SIZE_IN_WORDS) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1315,6 +1334,19 @@

   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
+
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    Label aligned;
+    test(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    mov(Operand(result, 0),
+        Immediate(isolate()->factory()->one_pointer_filler_map()));
+    add(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
+  }

   // Calculate new top and bail out if new space is exhausted.
   ExternalReference new_space_allocation_limit =
@@ -1322,15 +1354,25 @@

   // We assume that element_count*element_size + header_size does not
   // overflow.
+  if (element_count_type == REGISTER_VALUE_IS_SMI) {
+    STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
+    STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
+    STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
+    ASSERT(element_size >= times_2);
+    ASSERT(kSmiTagSize == 1);
+    element_size = static_cast<ScaleFactor>(element_size - 1);
+  } else {
+    ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
+  }
   lea(result_end, Operand(element_count, element_size, header_size));
   add(result_end, result);
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);

-  // Tag result if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    lea(result, Operand(result, kHeapObjectTag));
+    ASSERT(kHeapObjectTag == 1);
+    inc(result);
   }

   // Update allocation top.
@@ -1344,6 +1386,8 @@
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (DOUBLE_ALIGNMENT | RESULT_CONTAINS_TOP |
+                   SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1433,6 +1477,7 @@
   AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
                      times_1,
                      scratch1,
+                     REGISTER_VALUE_IS_INT32,
                      result,
                      scratch2,
                      scratch3,
@@ -1468,6 +1513,7 @@
   AllocateInNewSpace(SeqOneByteString::kHeaderSize,
                      times_1,
                      scratch1,
+                     REGISTER_VALUE_IS_INT32,
                      result,
                      scratch2,
                      scratch3,
=======================================
--- /branches/bleeding_edge/src/ia32/macro-assembler-ia32.h Tue Dec 18 08:25:45 2012
+++ /branches/bleeding_edge/src/ia32/macro-assembler-ia32.h Fri Dec 28 03:09:16 2012
@@ -35,18 +35,6 @@
 namespace v8 {
 namespace internal {

-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
-  // No special flags.
-  NO_ALLOCATION_FLAGS = 0,
-  // Return the pointer to the allocated already tagged as a heap object.
-  TAG_OBJECT = 1 << 0,
-  // The content of the result register already contains the allocation top in
-  // new space.
-  RESULT_CONTAINS_TOP = 1 << 1
-};
-
-
 // Convenience for platform-independent signatures.  We do not normally
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
@@ -55,6 +43,12 @@
 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };


+enum RegisterValueType {
+  REGISTER_VALUE_IS_SMI,
+  REGISTER_VALUE_IS_INT32
+};
+
+
 bool AreAliased(Register r1, Register r2, Register r3, Register r4);


@@ -576,6 +570,7 @@
   void AllocateInNewSpace(int header_size,
                           ScaleFactor element_size,
                           Register element_count,
+                          RegisterValueType element_count_type,
                           Register result,
                           Register result_end,
                           Register scratch,
=======================================
--- /branches/bleeding_edge/src/macro-assembler.h       Fri Feb 10 00:47:35 2012
+++ /branches/bleeding_edge/src/macro-assembler.h       Fri Dec 28 03:09:16 2012
@@ -36,6 +36,23 @@
 };


+// Flags used for the AllocateInNewSpace functions.
+enum AllocationFlags {
+  // No special flags.
+  NO_ALLOCATION_FLAGS = 0,
+  // Return the pointer to the allocated already tagged as a heap object.
+  TAG_OBJECT = 1 << 0,
+  // The content of the result register already contains the allocation top in
+  // new space.
+  RESULT_CONTAINS_TOP = 1 << 1,
+  // Specify that the requested size of the space to allocate is specified in
+  // words instead of bytes.
+  SIZE_IN_WORDS = 1 << 2,
+  // Align the allocation to a multiple of kDoubleSize
+  DOUBLE_ALIGNMENT = 1 << 3
+};
+
+
 // Invalid depth in prototype chain.
 const int kInvalidProtoDepth = -1;

=======================================
--- /branches/bleeding_edge/src/x64/code-stubs-x64.cc Tue Dec 18 08:25:45 2012
+++ /branches/bleeding_edge/src/x64/code-stubs-x64.cc Fri Dec 28 03:09:16 2012
@@ -333,7 +333,11 @@

   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
+  AllocationFlags flags = TAG_OBJECT;
+  if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+    flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+  }
+  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, flags);

   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
=======================================
--- /branches/bleeding_edge/src/x64/macro-assembler-x64.cc Tue Dec 18 08:25:45 2012
+++ /branches/bleeding_edge/src/x64/macro-assembler-x64.cc Fri Dec 28 03:09:16 2012
@@ -3758,6 +3758,7 @@
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3776,6 +3777,13 @@

   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
+
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+    testq(result, Immediate(kDoubleAlignmentMask));
+    Check(zero, "Allocation is not double aligned");
+  }

   // Calculate new top and bail out if new space is exhausted.
   ExternalReference new_space_allocation_limit =
@@ -3795,15 +3803,17 @@
   // Update allocation top.
   UpdateAllocationTopHelper(top_reg, scratch);

+  bool tag_result = (flags & TAG_OBJECT) != 0;
   if (top_reg.is(result)) {
-    if ((flags & TAG_OBJECT) != 0) {
+    if (tag_result) {
       subq(result, Immediate(object_size - kHeapObjectTag));
     } else {
       subq(result, Immediate(object_size));
     }
-  } else if ((flags & TAG_OBJECT) != 0) {
+  } else if (tag_result) {
     // Tag the result if requested.
-    addq(result, Immediate(kHeapObjectTag));
+    ASSERT(kHeapObjectTag == 1);
+    incq(result);
   }
 }

@@ -3816,6 +3826,7 @@
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & SIZE_IN_WORDS) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3833,6 +3844,13 @@

   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
+
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+    testq(result, Immediate(kDoubleAlignmentMask));
+    Check(zero, "Allocation is not double aligned");
+  }

   // Calculate new top and bail out if new space is exhausted.
   ExternalReference new_space_allocation_limit =
@@ -3852,7 +3870,8 @@

   // Tag the result if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    addq(result, Immediate(kHeapObjectTag));
+    ASSERT(kHeapObjectTag == 1);
+    incq(result);
   }
 }

@@ -3863,6 +3882,8 @@
                                         Register scratch,
                                         Label* gc_required,
                                         AllocationFlags flags) {
+  ASSERT((flags & (DOUBLE_ALIGNMENT | RESULT_CONTAINS_TOP |
+                   SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
=======================================
--- /branches/bleeding_edge/src/x64/macro-assembler-x64.h Tue Dec 18 08:25:45 2012
+++ /branches/bleeding_edge/src/x64/macro-assembler-x64.h Fri Dec 28 03:09:16 2012
@@ -35,18 +35,6 @@
 namespace v8 {
 namespace internal {

-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
-  // No special flags.
-  NO_ALLOCATION_FLAGS = 0,
-  // Return the pointer to the allocated already tagged as a heap object.
-  TAG_OBJECT = 1 << 0,
-  // The content of the result register already contains the allocation top in
-  // new space.
-  RESULT_CONTAINS_TOP = 1 << 1
-};
-
-
// Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.

--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to