Revision: 22374
Author: tit...@chromium.org
Date: Mon Jul 14 13:02:36 2014 UTC
Log: Revert "Remove sequential sweeping mode and perform lazy sweeping
when no sweeper threads are active."
Reason: broke win64 build
This reverts commit 221bfdd2da2b6f3c1cbe77c5d197f1ea626b0bd2.
TBR=hpa...@chromium.org
BUG=
Review URL: https://codereview.chromium.org/393523002
http://code.google.com/p/v8/source/detail?r=22374
Modified:
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/incremental-marking.cc
/branches/bleeding_edge/src/isolate.cc
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/mark-compact.h
/branches/bleeding_edge/src/spaces.cc
/branches/bleeding_edge/src/spaces.h
/branches/bleeding_edge/test/cctest/test-heap.cc
=======================================
--- /branches/bleeding_edge/src/heap.cc Mon Jul 14 11:31:22 2014 UTC
+++ /branches/bleeding_edge/src/heap.cc Mon Jul 14 13:02:36 2014 UTC
@@ -3312,8 +3312,9 @@
// pages is set after sweeping all pages.
return (!is_in_old_pointer_space && !is_in_old_data_space) ||
page->WasSwept() ||
- (page->parallel_sweeping() <=
- MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+ (mark_compact_collector()->AreSweeperThreadsActivated() &&
+ page->parallel_sweeping() <=
+ MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
}
@@ -4338,8 +4339,8 @@
// If the IdleNotification is called with a large hint we will wait for
// the sweeper threads here.
if (hint >= kMinHintForFullGC &&
- mark_compact_collector()->sweeping_in_progress()) {
- mark_compact_collector()->EnsureSweepingCompleted();
+ mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+ mark_compact_collector()->WaitUntilSweepingCompleted();
}
return false;
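
[For context, the hunk above restores the rename from EnsureSweepingCompleted()
to WaitUntilSweepingCompleted() at the idle-notification call site. Below is a
minimal standalone sketch of the check-then-wait pattern; MockCollector, the
kMinHintForFullGC value, and main() are illustrative stand-ins, not V8's real
classes or values.]

#include <iostream>

// Illustrative stand-in for the collector; in V8 the wait call blocks on the
// sweeper threads rather than just flipping a flag.
struct MockCollector {
  bool sweeping = true;
  bool IsConcurrentSweepingInProgress() const { return sweeping; }
  void WaitUntilSweepingCompleted() { sweeping = false; }
};

bool IdleNotification(MockCollector* collector, int hint) {
  const int kMinHintForFullGC = 100;  // illustrative threshold, not V8's value
  // Only a sufficiently large idle hint justifies blocking the main thread
  // on concurrent sweeping.
  if (hint >= kMinHintForFullGC &&
      collector->IsConcurrentSweepingInProgress()) {
    collector->WaitUntilSweepingCompleted();
  }
  return false;  // "not done", as in the hunk above
}

int main() {
  MockCollector collector;
  IdleNotification(&collector, 1000);
  std::cout << (collector.sweeping ? "still sweeping\n" : "sweeping completed\n");
}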
=======================================
--- /branches/bleeding_edge/src/incremental-marking.cc Mon Jul 14 11:31:22 2014 UTC
+++ /branches/bleeding_edge/src/incremental-marking.cc Mon Jul 14 13:02:36 2014 UTC
@@ -536,7 +536,7 @@
ResetStepCounters();
- if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+ if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
StartMarking(flag);
} else {
if (FLAG_trace_incremental_marking) {
@@ -883,11 +883,11 @@
}
if (state_ == SWEEPING) {
- if (heap_->mark_compact_collector()->sweeping_in_progress() &&
+ if (heap_->mark_compact_collector()->IsConcurrentSweepingInProgress() &&
heap_->mark_compact_collector()->IsSweepingCompleted()) {
- heap_->mark_compact_collector()->EnsureSweepingCompleted();
+ heap_->mark_compact_collector()->WaitUntilSweepingCompleted();
}
- if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+ if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
bytes_scanned_ = 0;
StartMarking(PREVENT_COMPACTION);
}
=======================================
--- /branches/bleeding_edge/src/isolate.cc Mon Jul 14 11:31:22 2014 UTC
+++ /branches/bleeding_edge/src/isolate.cc Mon Jul 14 13:02:36 2014 UTC
@@ -1559,8 +1559,8 @@
sweeper_thread_ = NULL;
if (FLAG_job_based_sweeping &&
- heap_.mark_compact_collector()->sweeping_in_progress()) {
- heap_.mark_compact_collector()->EnsureSweepingCompleted();
+ heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+ heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
}
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Mon Jul 14 11:31:22 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Mon Jul 14 13:02:36 2014 UTC
@@ -45,7 +45,7 @@
marking_parity_(ODD_MARKING_PARITY),
compacting_(false),
was_marked_incrementally_(false),
- sweeping_in_progress_(false),
+ sweeping_pending_(false),
pending_sweeper_jobs_semaphore_(0),
sequential_sweeping_(false),
tracer_(NULL),
@@ -573,7 +573,7 @@
void MarkCompactCollector::StartSweeperThreads() {
ASSERT(free_list_old_pointer_space_.get()->IsEmpty());
ASSERT(free_list_old_data_space_.get()->IsEmpty());
- sweeping_in_progress_ = true;
+ sweeping_pending_ = true;
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
@@ -588,17 +588,8 @@
}
-void MarkCompactCollector::EnsureSweepingCompleted() {
- ASSERT(sweeping_in_progress_ == true);
-
- // If sweeping is not completed, we try to complete it here. If we do not
- // have sweeper threads we have to complete it here, since we do not have
- // a good indicator for a swept space in that case.
- if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
- SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
- SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
- }
-
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+ ASSERT(sweeping_pending_ == true);
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
@@ -608,7 +599,7 @@
pending_sweeper_jobs_semaphore_.Wait();
}
ParallelSweepSpacesComplete();
- sweeping_in_progress_ = false;
+ sweeping_pending_ = false;
RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
@@ -622,7 +613,6 @@
return false;
}
}
-
if (FLAG_job_based_sweeping) {
if (!pending_sweeper_jobs_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
@@ -630,7 +620,6 @@
}
pending_sweeper_jobs_semaphore_.Signal();
}
-
return true;
}
@@ -657,6 +646,12 @@
bool MarkCompactCollector::AreSweeperThreadsActivated() {
return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
}
+
+
+bool MarkCompactCollector::IsConcurrentSweepingInProgress(PagedSpace* space) {
+ return (space == NULL || space->is_swept_concurrently()) &&
+ sweeping_pending_;
+}
void Marking::TransferMark(Address old_start, Address new_start) {
@@ -964,9 +959,9 @@
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
- if (sweeping_in_progress()) {
+ if (IsConcurrentSweepingInProgress()) {
// Instead of waiting we could also abort the sweeper threads here.
- EnsureSweepingCompleted();
+ WaitUntilSweepingCompleted();
}
// Clear marking bits if incremental marking is aborted.
@@ -4011,7 +4006,7 @@
static_cast<int>(size));
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
ASSERT_EQ(0, p->LiveBytes());
- return free_list->GuaranteedAllocatable(max_freed_bytes);
+ return freed_bytes;
}
// Grow the size of the start-of-page free space a little to get up to the
@@ -4068,7 +4063,7 @@
}
p->ResetLiveBytes();
- return free_list->GuaranteedAllocatable(max_freed_bytes);
+ return max_freed_bytes;
}
@@ -4102,6 +4097,7 @@
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
space->set_is_iterable(sweeper == PRECISE);
+ space->set_is_swept_concurrently(sweeper == CONCURRENT_CONSERVATIVE);
space->ClearStats();
// We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4146,6 +4142,15 @@
}
switch (sweeper) {
+ case CONSERVATIVE: {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
+ pages_swept++;
+ break;
+ }
case CONCURRENT_CONSERVATIVE:
case PARALLEL_CONSERVATIVE: {
if (!parallel_sweeping_active) {
@@ -4207,10 +4212,11 @@
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
- SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
- if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
- if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
-
+ SweeperType how_to_sweep = CONSERVATIVE;
+ if (AreSweeperThreadsActivated()) {
+ if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
+ if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+ }
if (sweep_precisely_) how_to_sweep = PRECISE;
MoveEvacuationCandidatesToEndOfPagesList();
@@ -4232,7 +4238,7 @@
}
if (how_to_sweep == PARALLEL_CONSERVATIVE) {
- EnsureSweepingCompleted();
+ WaitUntilSweepingCompleted();
}
}
RemoveDeadInvalidatedCode();
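
[The IsConcurrentSweepingInProgress() added above takes an optional space
argument: with no argument it reports the global sweeping state, and with a
space it additionally requires that this particular space is being swept
concurrently. A hedged sketch of that dual behavior follows, using simplified
stand-in types rather than V8's real PagedSpace and collector classes.]

#include <cassert>
#include <cstddef>

// Simplified stand-ins, not V8's real classes.
struct PagedSpace {
  bool swept_concurrently = false;
  bool is_swept_concurrently() const { return swept_concurrently; }
};

struct Collector {
  bool sweeping_pending = false;
  // Mirrors the predicate restored in the diff above.
  bool IsConcurrentSweepingInProgress(PagedSpace* space = NULL) {
    return (space == NULL || space->is_swept_concurrently()) &&
           sweeping_pending;
  }
};

int main() {
  Collector collector;
  PagedSpace code_space;  // e.g. swept precisely, hence not concurrently
  collector.sweeping_pending = true;
  assert(collector.IsConcurrentSweepingInProgress());              // any space
  assert(!collector.IsConcurrentSweepingInProgress(&code_space));  // this one
  return 0;
}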
=======================================
--- /branches/bleeding_edge/src/mark-compact.h Mon Jul 14 11:31:22 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.h Mon Jul 14 13:02:36 2014 UTC
@@ -570,6 +570,7 @@
void EnableCodeFlushing(bool enable);
enum SweeperType {
+ CONSERVATIVE,
PARALLEL_CONSERVATIVE,
CONCURRENT_CONSERVATIVE,
PRECISE
@@ -664,19 +665,18 @@
// then the whole given space is swept.
int SweepInParallel(PagedSpace* space, int required_freed_bytes);
- void EnsureSweepingCompleted();
+ void WaitUntilSweepingCompleted();
- // If sweeper threads are not active this method will return true. If
- // this is a latency issue we should be smarter here. Otherwise, it will
- // return true if the sweeper threads are done processing the pages.
bool IsSweepingCompleted();
void RefillFreeList(PagedSpace* space);
bool AreSweeperThreadsActivated();
- // Checks if sweeping is in progress right now on any space.
- bool sweeping_in_progress() { return sweeping_in_progress_; }
+ // If a paged space is passed in, this method checks if the given space is
+ // swept concurrently. Otherwise, this method checks if concurrent sweeping
+ // is in progress right now on any space.
+ bool IsConcurrentSweepingInProgress(PagedSpace* space = NULL);
void set_sequential_sweeping(bool sequential_sweeping) {
sequential_sweeping_ = sequential_sweeping;
@@ -739,7 +739,7 @@
bool was_marked_incrementally_;
// True if concurrent or parallel sweeping is currently in progress.
- bool sweeping_in_progress_;
+ bool sweeping_pending_;
base::Semaphore pending_sweeper_jobs_semaphore_;
=======================================
--- /branches/bleeding_edge/src/spaces.cc Mon Jul 14 11:31:22 2014 UTC
+++ /branches/bleeding_edge/src/spaces.cc Mon Jul 14 13:02:36 2014 UTC
@@ -936,6 +936,7 @@
: Space(heap, id, executable),
free_list_(this),
is_iterable_(true),
+ is_swept_concurrently_(false),
unswept_free_bytes_(0),
end_of_unswept_pages_(NULL) {
if (id == CODE_SPACE) {
@@ -2546,8 +2547,8 @@
intptr_t PagedSpace::SizeOfObjects() {
- ASSERT(heap()->mark_compact_collector()->sweeping_in_progress() ||
- (unswept_free_bytes_ == 0));
+ ASSERT(heap()->mark_compact_collector()->
+ IsConcurrentSweepingInProgress(this) || (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
}
@@ -2577,12 +2578,24 @@
}
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+HeapObject* PagedSpace::EnsureSweepingProgress(
int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
+
+ if (collector->IsConcurrentSweepingInProgress(this)) {
+ // If sweeping is still in progress try to sweep pages on the main thread.
+ int free_chunk =
+ collector->SweepInParallel(this, size_in_bytes);
+ if (free_chunk >= size_in_bytes) {
+ HeapObject* object = free_list_.Allocate(size_in_bytes);
+ // We should be able to allocate an object here since we just freed that
+ // much memory.
+ ASSERT(object != NULL);
+ if (object != NULL) return object;
+ }
+
// Wait for the sweeper threads here and complete the sweeping phase.
- collector->EnsureSweepingCompleted();
+ collector->WaitUntilSweepingCompleted();
// After waiting for the sweeper threads, there may be new free-list
// entries.
@@ -2595,28 +2608,14 @@
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
+ // If sweeper threads are active, try to re-fill the free-lists.
MarkCompactCollector* collector = heap()->mark_compact_collector();
- // Sweeping is still in progress.
- if (collector->sweeping_in_progress()) {
- // First try to refill the free-list, concurrent sweeper threads
- // may have freed some objects in the meantime.
+ if (collector->IsConcurrentSweepingInProgress(this)) {
collector->RefillFreeList(this);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
if (object != NULL) return object;
-
- // If sweeping is still in progress try to sweep pages on the main thread.
- int free_chunk =
- collector->SweepInParallel(this, size_in_bytes);
- collector->RefillFreeList(this);
- if (free_chunk >= size_in_bytes) {
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- // We should be able to allocate an object here since we just freed that
- // much memory.
- ASSERT(object != NULL);
- if (object != NULL) return object;
- }
}
// Free list allocation failed and there is no next page. Fail if we have
@@ -2626,7 +2625,7 @@
&& heap()->OldGenerationAllocationLimitReached()) {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists.
- HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ HeapObject* object = EnsureSweepingProgress(size_in_bytes);
if (object != NULL) return object;
}
@@ -2639,7 +2638,7 @@
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists. Allocation may still fail, which would
// indicate that there is not enough memory for the given allocation.
- return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ return EnsureSweepingProgress(size_in_bytes);
}
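
[The reverted spaces.cc changes reorder the allocation slow path:
SlowAllocateRaw() now only refills the free list and retries, while the
main-thread SweepInParallel() attempt and the blocking wait move into
EnsureSweepingProgress(). A toy sketch of that fallback chain follows; the
FreeList and Collector types here merely model the bookkeeping and are not
V8's classes.]

#include <cstdio>

// Toy free list: hands out a dummy pointer while it has enough bytes.
struct FreeList {
  int available;
  static int dummy;
  explicit FreeList(int bytes) : available(bytes) {}
  void* Allocate(int size) {
    if (size > available) return 0;
    available -= size;
    return &dummy;
  }
};
int FreeList::dummy = 0;

struct Collector {
  bool sweeping;
  Collector() : sweeping(true) {}
  void RefillFreeList(FreeList* list) { list->available += 64; }
  int SweepInParallel(FreeList* list, int needed) {
    list->available += needed;  // pretend main-thread sweeping freed enough
    return needed;
  }
  void WaitUntilSweepingCompleted() { sweeping = false; }
};

// Mirrors the order restored above: refill from already-swept pages and
// retry, then sweep on the main thread, and only block on the sweeper
// threads as a last resort.
void* SlowAllocate(Collector* c, FreeList* list, int size) {
  if (c->sweeping) {
    c->RefillFreeList(list);
    if (void* object = list->Allocate(size)) return object;
    if (c->SweepInParallel(list, size) >= size) {
      if (void* object = list->Allocate(size)) return object;
    }
    c->WaitUntilSweepingCompleted();
    c->RefillFreeList(list);
  }
  return list->Allocate(size);
}

int main() {
  Collector collector;
  FreeList list(0);
  std::printf("allocated: %p\n", SlowAllocate(&collector, &list, 128));
}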
=======================================
--- /branches/bleeding_edge/src/spaces.h Mon Jul 14 11:31:22 2014 UTC
+++ /branches/bleeding_edge/src/spaces.h Mon Jul 14 13:02:36 2014 UTC
@@ -1618,21 +1618,6 @@
// i.e., its contents will be destroyed. The start address should be word
// aligned, and the size should be a non-zero multiple of the word size.
int Free(Address start, int size_in_bytes);
-
- // This method returns how much memory can be allocated after freeing
- // maximum_freed memory.
- int GuaranteedAllocatable(int maximum_freed) {
- if (maximum_freed < kSmallListMin) {
- return 0;
- } else if (maximum_freed <= kSmallListMax) {
- return kSmallAllocationMax;
- } else if (maximum_freed <= kMediumListMax) {
- return kMediumAllocationMax;
- } else if (maximum_freed <= kLargeListMax) {
- return kLargeAllocationMax;
- }
- return maximum_freed;
- }
// Allocate a block of size 'size_in_bytes' from the free list. The block
// is uninitialized. A failure is returned if no block is available. The
@@ -1919,6 +1904,9 @@
bool is_iterable() { return is_iterable_; }
void set_is_iterable(bool b) { is_iterable_ = b; }
+
+ bool is_swept_concurrently() { return is_swept_concurrently_; }
+ void set_is_swept_concurrently(bool b) { is_swept_concurrently_ = b; }
// Evacuation candidates are swept by evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
@@ -2004,6 +1992,9 @@
// This space was swept precisely, hence it is iterable.
bool is_iterable_;
+ // This space is currently swept by sweeper threads.
+ bool is_swept_concurrently_;
+
// The number of free bytes which could be reclaimed by advancing the
// concurrent sweeper threads. This is only an estimation because concurrent
// sweeping is done conservatively.
@@ -2026,8 +2017,7 @@
// If sweeping is still in progress try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and re-try free-list
// allocation.
- MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
- int size_in_bytes);
+ MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent.
MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
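
[The revert deletes GuaranteedAllocatable(), which mapped a raw freed-bytes
count to the allocation size the size-segregated free lists can actually
guarantee; SweepConservatively() returns raw freed bytes again. A sketch of
that mapping follows with illustrative thresholds only; the real constants
live in spaces.h and differ.]

#include <cstdio>

// Illustrative constants: a freed block lands in one size-class list, so the
// largest single allocation guaranteed to succeed from it can be much
// smaller than the total bytes freed.
const int kSmallListMin = 32;
const int kSmallListMax = 255;
const int kMediumListMax = 2047;
const int kLargeListMax = 16383;
const int kSmallAllocationMax = kSmallListMin - 8;  // leave room for a header
const int kMediumAllocationMax = kSmallListMax;
const int kLargeAllocationMax = kMediumListMax;

int GuaranteedAllocatable(int maximum_freed) {
  if (maximum_freed < kSmallListMin) return 0;
  if (maximum_freed <= kSmallListMax) return kSmallAllocationMax;
  if (maximum_freed <= kMediumListMax) return kMediumAllocationMax;
  if (maximum_freed <= kLargeListMax) return kLargeAllocationMax;
  return maximum_freed;
}

int main() {
  // Freeing 200 bytes only guarantees a small allocation with these numbers.
  std::printf("%d -> %d\n", 200, GuaranteedAllocatable(200));
}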
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Mon Jul 14 11:31:22 2014 UTC
+++ /branches/bleeding_edge/test/cctest/test-heap.cc Mon Jul 14 13:02:36 2014 UTC
@@ -44,8 +44,8 @@
static void SimulateIncrementalMarking() {
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
- if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
@@ -1595,8 +1595,8 @@
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
}
int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
@@ -1622,8 +1622,8 @@
CHECK_EQ(initial_size,
static_cast<int>(CcTest::heap()->SizeOfObjects()));
// Waiting for sweeper threads should not change heap size.
- if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
}
CHECK_EQ(initial_size,
static_cast<int>(CcTest::heap()->SizeOfObjects()));
}
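
[The test above asserts that waiting for the sweeper threads does not change
SizeOfObjects(). A toy model of the accounting behind that invariant, using
made-up numbers and a simplified stand-in for PagedSpace: completing sweeping
moves bytes out of the unswept_free_bytes_ estimate and, symmetrically, out of
the allocated size, so the reported object size stays the same.]

#include <cassert>

// Toy model, not V8's PagedSpace: reported object size is allocated size
// minus the estimated unswept free bytes and the unused tail of the current
// linear allocation area, matching the ASSERT in spaces.cc above.
struct ToySpace {
  int size;                // allocated bytes
  int unswept_free_bytes;  // estimate, refined as sweeping progresses
  int limit, top;          // current linear allocation area
  int SizeOfObjects() const {
    return size - unswept_free_bytes - (limit - top);
  }
};

int main() {
  ToySpace space = {4096, 512, 1024, 768};
  const int before = space.SizeOfObjects();
  // Completing sweeping: the 512 estimated bytes are actually freed, so the
  // allocated size drops by 512 and the estimate resets to zero.
  space.size -= 512;
  space.unswept_free_bytes = 0;
  assert(space.SizeOfObjects() == before);  // heap size unchanged, as tested
  return 0;
}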