Reviewers: Hannes Payer,

Description:
Reland: Fix logic for incremental marking steps on tenured allocation

BUG=

Please review this at https://codereview.chromium.org/1077153004/

Base URL: https://chromium.googlesource.com/v8/v8.git@master

Affected files (+62, -26 lines):
  M src/heap/incremental-marking.h
  M src/heap/incremental-marking.cc
  M src/heap/spaces.h
  M src/heap/spaces.cc


Index: src/heap/incremental-marking.cc
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index 2ba969432d0a009690f08226e19af6086e8da719..17c609c3f64668db620eedd017dcdb541a4222bd 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -818,7 +818,7 @@ void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
   if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
     Start();
   } else {
-    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
+    Step(allocated * kOldSpaceAllocationMarkingFactor, GC_VIA_STACK_GUARD);
   }
 }

@@ -894,8 +894,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                   ForceMarkingAction marking,
                                   ForceCompletionAction completion) {
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
-      !FLAG_incremental_marking_steps ||
-      (state_ != SWEEPING && state_ != MARKING)) {
+      !CanDoSteps()) {
     return 0;
   }

Index: src/heap/incremental-marking.h
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 2e648e7e2fb8d417beeedc8f27aa8bc0c52b848d..51633233fe8acd11dd4920cf14deccf4a2fb5407 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -50,7 +50,10 @@ class IncrementalMarking {

   INLINE(bool IsMarking()) { return state() >= MARKING; }

-  inline bool IsMarkingIncomplete() { return state() == MARKING; }
+  inline bool CanDoSteps() {
+    return FLAG_incremental_marking_steps &&
+           (state() == MARKING || state() == SWEEPING);
+  }

   inline bool IsComplete() { return state() == COMPLETE; }

@@ -102,6 +105,8 @@ class IncrementalMarking {
   // But if we are promoting a lot of data we need to mark faster to keep up
   // with the data that is entering the old space through promotion.
   static const intptr_t kFastMarking = 3;
+  static const intptr_t kOldSpaceAllocationMarkingFactor =
+      kFastMarking / kInitialMarkingSpeed;
   // After this many steps we increase the marking/allocating factor.
   static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
   // This is how much we increase the marking/allocating factor by.
Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 23323a4a7daa224fd8f862ec9e4d45b7aa87fa66..15f6c1908b9cbbf29608d6248036ee5744596d24 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2200,6 +2200,7 @@ void FreeList::Reset() {
   medium_list_.Reset();
   large_list_.Reset();
   huge_list_.Reset();
+  unreported_allocation_ = 0;
 }


@@ -2347,6 +2348,15 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
 }


+void PagedSpace::SetTopAndLimit(Address top, Address limit) {
+  DCHECK(top == limit ||
+         Page::FromAddress(top) == Page::FromAddress(limit - 1));
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  allocation_info_.set_top(top);
+  allocation_info_.set_limit(limit);
+}
+
+
 // Allocation on the old space free list.  If it succeeds then a new linear
 // allocation space has been set up with the top and limit of the space.  If
 // the allocation fails then NULL is returned, and the caller can perform a GC
@@ -2364,9 +2374,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // if it is big enough.
   owner_->Free(owner_->top(), old_linear_size);

-  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
-                                                      old_linear_size);
-
   int new_node_size = 0;
   FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) {
@@ -2389,21 +2396,27 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // candidate.
   DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

-  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+  // An old-space step will mark more data per byte allocated, because old space
+  // allocation is more serious. We don't want the pause to be bigger, so we
+  // do marking after a smaller amount of allocation.
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold *
+                         IncrementalMarking::kOldSpaceAllocationMarkingFactor;

   // Memory in the linear allocation area is counted as allocated.  We may free
   // a little of this again immediately - see below.
   owner_->Allocate(new_node_size);

+  unreported_allocation_ += new_node_size;
+
   if (owner_->heap()->inline_allocation_disabled()) {
     // Keep the linear allocation area empty if requested to do so, just
     // return area back to the free list instead.
     owner_->Free(new_node->address() + size_in_bytes, bytes_left);
     DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
   } else if (bytes_left > kThreshold &&
-             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
-             FLAG_incremental_marking_steps) {
+             owner_->heap()->incremental_marking()->CanDoSteps()) {
     int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
@@ -2411,15 +2424,30 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
                  new_node_size - size_in_bytes - linear_size);
     owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                            new_node->address() + size_in_bytes + linear_size);
-  } else if (bytes_left > 0) {
-    // Normally we give the rest of the node to the allocator as its new
-    // linear allocation area.
-    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
-                           new_node->address() + new_node_size);
+    // It is important that we are done updating top and limit before we call
+    // this.
+    owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes +
+                                                        linear_size);
+    unreported_allocation_ = 0;
   } else {
-    // TODO(gc) Try not freeing linear allocation region when bytes_left
-    // are zero.
-    owner_->SetTopAndLimit(NULL, NULL);
+    if (bytes_left > 0) {
+      // Normally we give the rest of the node to the allocator as its new
+      // linear allocation area.
+      owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                             new_node->address() + new_node_size);
+    } else {
+      // TODO(gc) Try not freeing linear allocation region when bytes_left
+      // are zero.
+      owner_->SetTopAndLimit(NULL, NULL);
+    }
+    if (unreported_allocation_ > kThreshold) {
+      // This may start the incremental marker, or do a little work if it's
+      // already started. It is important that we are finished updating top
+      // and limit before we call this.
+      owner_->heap()->incremental_marking()->OldSpaceStep(
+          Min(kThreshold, unreported_allocation_));
+      unreported_allocation_ = 0;
+    }
   }

   return new_node;
@@ -2906,7 +2934,16 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
     reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
   }

-  heap()->incremental_marking()->OldSpaceStep(object_size);
+  // We would like to tell the incremental marker to do a lot of work, since
+  // we just made a large allocation in old space, but that might cause a huge
+  // pause. Underreporting here may cause the marker to speed up because it
+  // will perceive that it is not keeping up with allocation. Although this
+  // causes some big incremental marking steps they are not as big as this one
+  // might have been. In testing, a very large pause was divided up into about
+  // 12 parts.
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold *
+                         IncrementalMarking::kOldSpaceAllocationMarkingFactor;
+  heap()->incremental_marking()->OldSpaceStep(kThreshold);
   return object;
 }

Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index e6ec542c9e1f843b66e391f3e85338f187d5c157..74250c7ce8859100f3e0edeef14d86ed441356f1 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -1583,6 +1583,7 @@ class FreeList {

   PagedSpace* owner_;
   Heap* heap_;
+  int unreported_allocation_;

   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
@@ -1780,13 +1781,7 @@ class PagedSpace : public Space {
   void ResetFreeList() { free_list_.Reset(); }

   // Set space allocation info.
-  void SetTopAndLimit(Address top, Address limit) {
-    DCHECK(top == limit ||
-           Page::FromAddress(top) == Page::FromAddress(limit - 1));
-    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-    allocation_info_.set_top(top);
-    allocation_info_.set_limit(limit);
-  }
+  void SetTopAndLimit(Address top, Address limit);

   // Empty space allocation info, returning unused area to free list.
   void EmptyAllocationInfo() {


--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to