Revision: 16224
Author:   mstarzin...@chromium.org
Date:     Mon Aug 19 17:46:43 2013 UTC
Log:      Fix invalid out-of-bounds store in MacroAssembler::Allocate.

This fixes an out-of-bounds store in inlined allocations when double
alignment and pretenuring into old-data-space have been requested.

R=hpa...@chromium.org, jkumme...@chromium.org, plin...@gmail.com
BUG=chromium:263515

Review URL: https://codereview.chromium.org/23242006
http://code.google.com/p/v8/source/detail?r=16224
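
The gist of the bug, as a minimal standalone C++ sketch (illustrative only,
not V8 code; the constants and the limit_is_double_aligned parameter are
assumptions made for the example, standing in for V8's check of the
PRETENURE_OLD_DATA_SPACE flag): when an allocation needs double alignment
and the current top is misaligned, a one-word filler is stored at top
before bumping it. In new-space the limit is double-aligned, so a
misaligned top is always strictly below the limit and the store is safe.
In old-data-space the limit is only pointer-aligned, so a misaligned top
can sit exactly at the limit and the unchecked store lands out of bounds.

#include <cstdint>
#include <cstdio>

static const uintptr_t kDoubleSize = 8;
static const uintptr_t kDoubleAlignmentMask = kDoubleSize - 1;

// Returns the double-aligned allocation address, or 0 if the caller must
// fall back to the runtime (the "gc_required" path in the assembly below).
uintptr_t Allocate(uintptr_t* top, uintptr_t limit, uintptr_t size,
                   bool limit_is_double_aligned) {
  uintptr_t result = *top;
  if ((result & kDoubleAlignmentMask) != 0) {
    // The fix: in a space whose limit is not double-aligned, a misaligned
    // top may already be at the limit, so check before storing the filler.
    if (!limit_is_double_aligned && result >= limit) return 0;
    // Store the one-word filler map at 'result' here (elided), then skip
    // over it so the object itself starts double-aligned.
    result += kDoubleSize / 2;
  }
  uintptr_t new_top = result + size;
  if (new_top > limit) return 0;  // Space exhausted; bail out.
  *top = new_top;
  return result;
}

int main() {
  // Old-data-space scenario: top == limit, and both are pointer-aligned
  // but not double-aligned. The unfixed code would store the filler at
  // 0x1004, one word past the limit.
  uintptr_t top = 0x1004;
  if (Allocate(&top, /*limit=*/0x1004, /*size=*/16, false) == 0)
    printf("gc required instead of out-of-bounds filler store\n");
  return 0;
}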

Modified:
 /branches/bleeding_edge/src/arm/macro-assembler-arm.cc
 /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc
 /branches/bleeding_edge/src/mips/macro-assembler-mips.cc
 /branches/bleeding_edge/src/x64/macro-assembler-x64.cc

=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Mon Aug 19 13:37:57 2013 UTC
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Mon Aug 19 17:46:43 2013 UTC
@@ -1726,12 +1726,16 @@

   if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top is
-    // always safe because the limit of the heap is always aligned.
+    // safe in new-space because the limit of the heap is aligned there.
     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
     Label aligned;
     b(eq, &aligned);
+    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+      cmp(result, Operand(ip));
+      b(hs, gc_required);
+    }
     mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
     str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
     bind(&aligned);
@@ -1836,12 +1840,16 @@

   if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top is
-    // always safe because the limit of the heap is always aligned.
+    // safe in new-space because the limit of the heap is aligned there.
     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
     Label aligned;
     b(eq, &aligned);
+    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+      cmp(result, Operand(ip));
+      b(hs, gc_required);
+    }
     mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
     str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
     bind(&aligned);
=======================================
--- /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc Fri Aug 9 13:43:46 2013 UTC
+++ /branches/bleeding_edge/src/ia32/macro-assembler-ia32.cc Mon Aug 19 17:46:43 2013 UTC
@@ -1307,26 +1307,29 @@
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);

+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
  // Align the next allocation. Storing the filler map without checking top is
-  // always safe because the limit of the heap is always aligned.
+  // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
+    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+      cmp(result, Operand::StaticVariable(allocation_limit));
+      j(above_equal, gc_required);
+    }
     mov(Operand(result, 0),
         Immediate(isolate()->factory()->one_pointer_filler_map()));
     add(result, Immediate(kDoubleSize / 2));
     bind(&aligned);
   }

+  // Calculate new top and bail out if space is exhausted.
   Register top_reg = result_end.is_valid() ? result_end : result;
-
-  // Calculate new top and bail out if space is exhausted.
-  ExternalReference allocation_limit =
-      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
   if (!top_reg.is(result)) {
     mov(top_reg, result);
   }
@@ -1381,14 +1384,21 @@
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);

+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
  // Align the next allocation. Storing the filler map without checking top is
-  // always safe because the limit of the heap is always aligned.
+  // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
+    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+      cmp(result, Operand::StaticVariable(allocation_limit));
+      j(above_equal, gc_required);
+    }
     mov(Operand(result, 0),
         Immediate(isolate()->factory()->one_pointer_filler_map()));
     add(result, Immediate(kDoubleSize / 2));
@@ -1396,9 +1406,6 @@
   }

   // Calculate new top and bail out if space is exhausted.
-  ExternalReference allocation_limit =
-      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
   // We assume that element_count*element_size + header_size does not
   // overflow.
   if (element_count_type == REGISTER_VALUE_IS_SMI) {
@@ -1452,14 +1459,21 @@
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);

+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
  // Align the next allocation. Storing the filler map without checking top is
-  // always safe because the limit of the heap is always aligned.
+  // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
+    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+      cmp(result, Operand::StaticVariable(allocation_limit));
+      j(above_equal, gc_required);
+    }
     mov(Operand(result, 0),
         Immediate(isolate()->factory()->one_pointer_filler_map()));
     add(result, Immediate(kDoubleSize / 2));
@@ -1467,9 +1481,6 @@
   }

   // Calculate new top and bail out if space is exhausted.
-  ExternalReference allocation_limit =
-      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
   if (!object_size.is(result_end)) {
     mov(result_end, object_size);
   }
=======================================
--- /branches/bleeding_edge/src/mips/macro-assembler-mips.cc Wed Aug 7 21:54:03 2013 UTC
+++ /branches/bleeding_edge/src/mips/macro-assembler-mips.cc Mon Aug 19 17:46:43 2013 UTC
@@ -2944,12 +2944,15 @@

   if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top is
-    // always safe because the limit of the heap is always aligned.
+    // safe in new-space because the limit of the heap is aligned there.
     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     And(scratch2, result, Operand(kDoubleAlignmentMask));
     Label aligned;
     Branch(&aligned, eq, scratch2, Operand(zero_reg));
+    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+      Branch(gc_required, Ugreater_equal, result, Operand(t9));
+    }
     li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
     sw(scratch2, MemOperand(result));
     Addu(result, result, Operand(kDoubleSize / 2));
@@ -3028,12 +3031,15 @@

   if ((flags & DOUBLE_ALIGNMENT) != 0) {
    // Align the next allocation. Storing the filler map without checking top is
-    // always safe because the limit of the heap is always aligned.
+    // safe in new-space because the limit of the heap is aligned there.
     ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     And(scratch2, result, Operand(kDoubleAlignmentMask));
     Label aligned;
     Branch(&aligned, eq, scratch2, Operand(zero_reg));
+    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+      Branch(gc_required, Ugreater_equal, result, Operand(t9));
+    }
     li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
     sw(scratch2, MemOperand(result));
     Addu(result, result, Operand(kDoubleSize / 2));
=======================================
--- /branches/bleeding_edge/src/x64/macro-assembler-x64.cc Tue Aug 6 00:09:01 2013 UTC
+++ /branches/bleeding_edge/src/x64/macro-assembler-x64.cc Mon Aug 19 17:46:43 2013 UTC
@@ -3859,7 +3859,7 @@
   LoadAllocationTopHelper(result, scratch, flags);

  // Align the next allocation. Storing the filler map without checking top is
-  // always safe because the limit of the heap is always aligned.
+  // safe in new-space because the limit of the heap is aligned there.
   if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
     testq(result, Immediate(kDoubleAlignmentMask));
     Check(zero, kAllocationIsNotDoubleAligned);
@@ -3938,7 +3938,7 @@
   LoadAllocationTopHelper(result, scratch, flags);

  // Align the next allocation. Storing the filler map without checking top is
-  // always safe because the limit of the heap is always aligned.
+  // safe in new-space because the limit of the heap is aligned there.
   if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
     testq(result, Immediate(kDoubleAlignmentMask));
     Check(zero, kAllocationIsNotDoubleAligned);
