Revision: 8196
Author: [email protected]
Date: Tue Jun 7 02:39:09 2011
Log: Two-page newspace.
Review URL: http://codereview.chromium.org/7124006
http://code.google.com/p/v8/source/detail?r=8196
Modified:
/branches/experimental/gc/src/heap-inl.h
/branches/experimental/gc/src/heap.cc
/branches/experimental/gc/src/mark-compact.cc
/branches/experimental/gc/src/runtime.cc
/branches/experimental/gc/src/spaces-inl.h
/branches/experimental/gc/src/spaces.cc
/branches/experimental/gc/src/spaces.h
=======================================
--- /branches/experimental/gc/src/heap-inl.h Thu May 26 03:47:57 2011
+++ /branches/experimental/gc/src/heap-inl.h Tue Jun 7 02:39:09 2011
@@ -292,6 +292,7 @@
// An object should be promoted if:
// - the object has survived a scavenge operation or
// - to space is already 25% full.
+ // TODO(gc): Do something about age-mark in paged new-space.
return old_address < new_space_.age_mark()
|| (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
}
=======================================
--- /branches/experimental/gc/src/heap.cc Mon Jun 6 04:18:36 2011
+++ /branches/experimental/gc/src/heap.cc Tue Jun 7 02:39:09 2011
@@ -4314,11 +4314,15 @@
#ifdef DEBUG
void Heap::ZapFromSpace() {
- ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
- for (Address a = new_space_.FromSpaceLow();
- a < new_space_.FromSpaceHigh();
- a += kPointerSize) {
- Memory::Address_at(a) = kFromSpaceZapValue;
+ NewSpacePageIterator it(new_space_.FromSpaceLow(),
+ new_space_.FromSpaceHigh());
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
+ for (Address cursor = page->body(), limit = page->body_limit();
+ cursor < limit;
+ cursor += kPointerSize) {
+ Memory::Address_at(cursor) = kFromSpaceZapValue;
+ }
}
}
#endif // DEBUG
=======================================
--- /branches/experimental/gc/src/mark-compact.cc Mon Jun 6 04:18:36 2011
+++ /branches/experimental/gc/src/mark-compact.cc Tue Jun 7 02:39:09 2011
@@ -114,7 +114,17 @@
static void VerifyMarking(NewSpace* space) {
- VerifyMarking(space->bottom(), space->top());
+ Address end = space->top();
+ NewSpacePageIterator it(space->bottom(), end);
+ // The bottom position is at the start of its page. Allows us to use
+ // page->body() as start of range on all pages.
+ ASSERT_EQ(space->bottom(),
+ NewSpacePage::FromAddress(space->bottom())->body());
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
+ Address limit = it.has_next() ? page->body_limit() : end;
+ VerifyMarking(page->body(), limit);
+ }
}
@@ -1697,9 +1707,9 @@
ASSERT(state_ == PREPARE_GC);
state_ = MARK_LIVE_OBJECTS;
#endif
- // The to space contains live objects, the from space is used as a marking
- // stack.
- Address marking_deque_start = heap()->new_space()->FromSpaceLow();
+ // The to space contains live objects, a page in from space is used as a
+ // marking stack.
+ Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
Address marking_deque_end = heap()->new_space()->FromSpaceHigh();
if (FLAG_force_marking_deque_overflows) {
marking_deque_end = marking_deque_start + 64 * kPointerSize;
@@ -1927,7 +1937,7 @@
int size,
bool to_old_space) {
if (to_old_space) {
- HEAP->CopyBlockToOldSpaceAndUpdateWriteBarrier(dst, src, size);
+ heap->CopyBlockToOldSpaceAndUpdateWriteBarrier(dst, src, size);
} else {
heap->CopyBlock(dst, src, size);
}
@@ -2064,6 +2074,7 @@
void MarkCompactCollector::SweepNewSpace(NewSpace* space) {
heap_->CheckNewSpaceExpansionCriteria();
+ // Store allocation range before flipping semispaces.
Address from_bottom = space->bottom();
Address from_top = space->top();
@@ -2072,22 +2083,22 @@
space->Flip();
space->ResetAllocationInfo();
- int size = 0;
int survivors_size = 0;
// First pass: traverse all objects in inactive semispace, remove marks,
// migrate live objects and write forwarding addresses. This stage puts
// new entries in the store buffer and may cause some pages to be marked
// scan-on-scavenge.
- for (Address current = from_bottom; current < from_top; current += size) {
- HeapObject* object = HeapObject::FromAddress(current);
-
+ SemiSpaceIterator from_it(from_bottom, from_top);
+ for (HeapObject* object = from_it.Next();
+ object != NULL;
+ object = from_it.Next()) {
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (mark_bit.Get()) {
mark_bit.Clear();
heap_->mark_compact_collector()->tracer()->decrement_marked_count();
- size = object->Size();
+ int size = object->Size();
survivors_size += size;
// Aggressively promote young survivors to the old space.
@@ -2096,21 +2107,29 @@
}
// Promotion failed. Just migrate object to another semispace.
- // Allocation cannot fail at this point: semispaces are of equal size.
- Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
-
+ MaybeObject* allocation = space->AllocateRaw(size);
+ if (allocation->IsFailure()) {
+ if (!space->AddFreshPage()) {
+ // Shouldn't happen. We are sweeping linearly, and to-space
+ // has the same number of pages as from-space, so there is
+ // always room.
+ UNREACHABLE();
+ }
+ allocation = space->AllocateRaw(size);
+ ASSERT(!allocation->IsFailure());
+ }
+ Object* target = allocation->ToObjectUnchecked();
MigrateObject(heap_,
HeapObject::cast(target)->address(),
- current,
+ object->address(),
size,
false);
} else {
// Process the dead object before we write a NULL into its header.
LiveObjectList::ProcessNonLive(object);
- size = object->Size();
// Mark dead objects in the new space with null in their map field.
- Memory::Address_at(current) = NULL;
+ Memory::Address_at(object->address()) = NULL;
}
}
@@ -2118,12 +2137,12 @@
PointersToNewGenUpdatingVisitor updating_visitor(heap_);
// Update pointers in to space.
- Address current = space->bottom();
- while (current < space->top()) {
- HeapObject* object = HeapObject::FromAddress(current);
- current +=
- StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
- object);
+ SemiSpaceIterator to_it(space->bottom(), space->top());
+ for (HeapObject* object = to_it.Next();
+ object != NULL;
+ object = to_it.Next()) {
+ StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
+ object);
}
// Update roots.
@@ -2720,7 +2739,19 @@
int MarkCompactCollector::IterateLiveObjects(
NewSpace* space, LiveObjectCallback size_f) {
ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
- return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
+ int accumulator = 0;
+ Address end = space->top();
+ NewSpacePageIterator it(space->bottom(), end);
+ // The bottom is at the start of its page.
+ ASSERT_EQ(space->bottom(),
+ NewSpacePage::FromAddress(space->bottom())->body());
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
+ Address start = page->body();
+ Address limit = it.has_next() ? page->body_limit() : end;
+ accumulator += IterateLiveObjectsInRange(start, limit, size_f);
+ }
+ return accumulator;
}
=======================================
--- /branches/experimental/gc/src/runtime.cc Wed May 25 07:05:16 2011
+++ /branches/experimental/gc/src/runtime.cc Tue Jun 7 02:39:09 2011
@@ -12252,6 +12252,9 @@
Isolate* isolate = Isolate::Current();
Failure* failure = Failure::cast(result);
if (failure->IsRetryAfterGC()) {
+ if (isolate->heap()->new_space()->AddFreshPage()) {
+ return;
+ }
// Try to do a garbage collection; ignore it if it fails. The C
// entry stub will throw an out-of-memory exception in that case.
isolate->heap()->CollectGarbage(failure->allocation_space());
=======================================
--- /branches/experimental/gc/src/spaces-inl.h Wed May 18 08:02:58 2011
+++ /branches/experimental/gc/src/spaces-inl.h Tue Jun 7 02:39:09 2011
@@ -57,6 +57,38 @@
next_page_ = next_page_->next_page();
return prev_page_;
}
+
+
+// -----------------------------------------------------------------------------
+// NewSpacePageIterator
+
+
+NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
+ : prev_page_(&space->anchor_),
+ next_page_(prev_page_->next_page()),
+ last_page_(prev_page_->prev_page()) { }
+
+NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
+ : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
+ next_page_(NewSpacePage::FromAddress(start)),
+ last_page_(NewSpacePage::FromLimit(limit)) {
+#ifdef DEBUG
+ SemiSpace::ValidateRange(start, limit);
+#endif
+}
+
+
+bool NewSpacePageIterator::has_next() {
+ return prev_page_ != last_page_;
+}
+
+
+NewSpacePage* NewSpacePageIterator::next() {
+ ASSERT(has_next());
+ prev_page_ = next_page_;
+ next_page_ = next_page_->next_page();
+ return prev_page_;
+}
// -----------------------------------------------------------------------------
@@ -249,9 +281,10 @@
// NewSpace
MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes) {
- Address new_top = allocation_info_.top + size_in_bytes;
+ Address old_top = allocation_info_.top;
+ Address new_top = old_top + size_in_bytes;
if (new_top > allocation_info_.limit) {
- Address high = to_space_.high();
+ Address high = to_space_.page_high();
if (allocation_info_.limit < high) {
allocation_info_.limit = Min(
allocation_info_.limit + inline_alloction_limit_step_,
@@ -260,6 +293,12 @@
heap()->incremental_marking()->Step(bytes_allocated);
top_on_previous_step_ = new_top;
return AllocateRawInternal(size_in_bytes);
+ } else if (AddFreshPage()) {
+ // Switched to new page. Try allocating again.
+ int bytes_allocated = old_top - top_on_previous_step_;
+ heap()->incremental_marking()->Step(bytes_allocated);
+ top_on_previous_step_ = to_space_.page_low();
+ return AllocateRawInternal(size_in_bytes);
} else {
return Failure::RetryAfterGC();
}
=======================================
--- /branches/experimental/gc/src/spaces.cc Mon Jun 6 23:55:11 2011
+++ /branches/experimental/gc/src/spaces.cc Tue Jun 7 02:39:09 2011
@@ -419,6 +419,9 @@
set_owner(semi_space);
set_next_chunk(this);
set_prev_chunk(this);
+ // Flags marks this invalid page as not being in new-space.
+ // All real new-space pages will be in new-space.
+ SetFlags(0, ~0);
}
@@ -964,7 +967,7 @@
}
}
}
- allocation_info_.limit = to_space_.high();
+ allocation_info_.limit = to_space_.page_high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
@@ -986,21 +989,56 @@
}
}
}
- allocation_info_.limit = to_space_.high();
+ allocation_info_.limit = to_space_.page_high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
-void NewSpace::ResetAllocationInfo() {
- to_space_.Reset();
+void NewSpace::UpdateAllocationInfo() {
allocation_info_.top = to_space_.page_low();
- allocation_info_.limit = to_space_.high();
+ allocation_info_.limit = to_space_.page_high();
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
+
+
+void NewSpace::ResetAllocationInfo() {
+ to_space_.Reset();
+ UpdateAllocationInfo();
+ // Clear all mark-bits in the to-space.
+ NewSpacePageIterator it(to_space_.space_low(), to_space_.space_high());
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
+ page->markbits()->Clear();
+ }
+}
+
+
+bool NewSpace::AddFreshPage() {
+ Address top = allocation_info_.top;
+ if (top == NewSpacePage::FromLimit(top)->body()) {
+ // The current page is already empty. Don't try to make another.
+
+ // We should only get here if someone asks to allocate more
+ // than what can be stored in a single page.
+ // TODO(gc): Change the limit on new-space allocation to prevent this
+ // from happening (all such allocations should go directly to LOSpace).
+ return false;
+ }
+ if (!to_space_.AdvancePage()) {
+ // Failed to get a new page in to-space.
+ return false;
+ }
+ // Clear remainder of current page.
+ int remaining_in_page =
+ static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top);
+ heap()->CreateFillerObjectAt(top, remaining_in_page);
+ UpdateAllocationInfo();
+ return true;
+}
#ifdef DEBUG
-// We do not use the SemispaceIterator because verification doesn't assume
+// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
// The allocation pointer should be in the space or at the very end.
@@ -1008,8 +1046,22 @@
// There should be objects packed in from the low address up to the
// allocation pointer.
- Address current = to_space_.low();
- while (current < top()) {
+ NewSpacePage* page = to_space_.first_page();
+ Address current = page->body();
+ CHECK_EQ(current, to_space_.space_low());
+ CHECK(end_page->ContainsLimit(top()));
+
+ while (current != top()) {
+ if (current == page->body_limit()) {
+ // At end of page, switch to next page.
+ page = page->next_page();
+ // Next page should be valid.
+ CHECK(!page->is_anchor());
+ current = page->body();
+ }
+ // The allocation pointer should not be in the middle of an object.
+ CHECK(!page->ContainsLimit(top()) || current < top());
+
HeapObject* object = HeapObject::FromAddress(current);
// The first word should be a map, and we expect all map pointers to
@@ -1032,9 +1084,6 @@
current += size;
}
-
- // The allocation pointer should not be in the middle of an object.
- ASSERT(current == top());
// Check semi-spaces.
ASSERT_EQ(from_space_.id(), kFromSpace);
@@ -1047,26 +1096,52 @@
bool SemiSpace::Commit() {
ASSERT(!is_committed());
+ // TODO(gc): Rewrite completely when switching to n-page new-space.
+ // Create one page.
+ int pagesize = Page::kPageSize;
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start_, capacity_, executable())) {
+ start_, pagesize, executable())) {
return false;
}
- committed_ = true;
- // TODO(gc): When more than one page is present, initialize and
- // chain them all.
NewSpacePage* page = NewSpacePage::Initialize(heap(), start_, this);
page->InsertAfter(&anchor_);
- current_page_ = anchor_.next_page();
+
+ // Maybe create a second.
+ if (capacity_ >= 2 * pagesize) {
+ Address last_page_address =
+ start_ + ((capacity_ - pagesize) & ~Page::kPageAlignmentMask);
+ if (heap()->isolate()->memory_allocator()->CommitBlock(
+ last_page_address, pagesize, executable())) {
+ NewSpacePage* last_page = NewSpacePage::Initialize(heap(),
+ last_page_address,
+ this);
+ last_page->InsertAfter(page);
+ } else {
+ UNREACHABLE(); // TODO(gc): Don't rely on this. Splitting the commit
+ // is only temporary.
+ }
+ }
+
+ committed_ = true;
+ Reset();
return true;
}
bool SemiSpace::Uncommit() {
ASSERT(is_committed());
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(
- start_, capacity_)) {
- return false;
- }
+ // TODO(gc): Rewrite completely when switching to n-page new-space.
+ NewSpacePage* last_page = anchor()->prev_page();
+ while (last_page != anchor()) {
+ NewSpacePage* temp_page = last_page->prev_page();
+ last_page->Unlink();
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+ last_page->address(), Page::kPageSize)) {
+ return false;
+ }
+ last_page = temp_page;
+ }
+
committed_ = false;
return true;
}
@@ -1112,12 +1187,7 @@
// space is used as the marking stack. It requires contiguous memory
// addresses.
ASSERT(maximum_capacity >= Page::kPageSize);
- if (initial_capacity < Page::kPageSize) {
- initial_capacity = Page::kPageSize;
- } else {
- initial_capacity &= ~Page::kPageAlignmentMask;
- }
- initial_capacity_ = initial_capacity;
+ initial_capacity_ = initial_capacity & ~Page::kPageAlignmentMask;
capacity_ = initial_capacity;
maximum_capacity_ = maximum_capacity;
committed_ = false;
@@ -1144,7 +1214,7 @@
int extra = Min(RoundUp(capacity_,
static_cast<int>(OS::AllocateAlignment())),
maximum_extra);
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- high(), extra, executable())) {
+ space_high(), extra, executable())) {
return false;
}
capacity_ += extra;
@@ -1159,7 +1229,7 @@
size_t delta = new_capacity - capacity_;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- high(), delta, executable())) {
+ space_high(), delta, executable())) {
return false;
}
capacity_ = new_capacity;
@@ -1174,7 +1244,7 @@
size_t delta = capacity_ - new_capacity;
ASSERT(IsAligned(delta, OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->UncommitBlock(
- high() - delta, delta)) {
+ space_high() - delta, delta)) {
return false;
}
capacity_ = new_capacity;
@@ -1228,40 +1298,62 @@
page = page->next_page();
}
}
+
+
+void SemiSpace::ValidateRange(Address start, Address end) {
+ // Addresses belong to same semi-space
+ NewSpacePage* page = NewSpacePage::FromAddress(start);
+ NewSpacePage* end_page = NewSpacePage::FromLimit(end);
+ SemiSpace* space = page->semi_space();
+ CHECK_EQ(space, end_page->semi_space());
+ // Start address is before end address, either on same page,
+ // or end address is on a later page in the linked list of
+ // semi-space pages.
+ if (page == end_page) {
+ CHECK(start <= end);
+ } else {
+ while (page != end_page) {
+ page = page->next_page();
+ CHECK_NE(page, space->anchor());
+ }
+ }
+}
#endif
// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
- Initialize(space, space->bottom(), space->top(), NULL);
+ Initialize(space->bottom(), space->top(), NULL);
}
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
HeapObjectCallback size_func) {
- Initialize(space, space->bottom(), space->top(), size_func);
+ Initialize(space->bottom(), space->top(), size_func);
}
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
- Initialize(space, start, space->top(), NULL);
+ Initialize(start, space->top(), NULL);
}
-void SemiSpaceIterator::Initialize(NewSpace* space,
- Address start,
+SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
+ Initialize(from, to, NULL);
+}
+
+
+void SemiSpaceIterator::Initialize(Address start,
Address end,
HeapObjectCallback size_func) {
- ASSERT(space->ToSpaceContains(start));
- ASSERT(space->ToSpaceLow() <= end
- && end <= space->ToSpaceHigh());
- space_ = &space->to_space_;
- current_ = start;
+#ifdef DEBUG
+ SemiSpace::ValidateRange(start, end);
+#endif
NewSpacePage* page = NewSpacePage::FromAddress(start);
- current_page_limit_ = page->body() + page->body_size();
- if (current_page_limit_ > end) current_page_limit_ = end;
+ current_ = start;
limit_ = end;
+ current_page_limit_ = page->body_limit();
size_func_ = size_func;
}
=======================================
--- /branches/experimental/gc/src/spaces.h Mon Jun 6 23:55:11 2011
+++ /branches/experimental/gc/src/spaces.h Tue Jun 7 02:39:09 2011
@@ -370,6 +370,13 @@
bool Contains(Address addr) {
return addr >= body() && addr < address() + size();
}
+
+ // Checks whether addr can be a limit of addresses in this page.
+ // It's a limit if it's in the page, or if it's just after the
+ // last byte of the page.
+ bool ContainsLimit(Address addr) {
+ return addr >= body() && addr <= address() + size();
+ }
enum MemoryChunkFlags {
IS_EXECUTABLE,
@@ -1329,7 +1336,7 @@
bool EnsureCapacity(int capacity);
// The dummy page that anchors the linked list of pages.
- Page *anchor() { return &anchor_; }
+ Page* anchor() { return &anchor_; }
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
@@ -1509,17 +1516,29 @@
return reinterpret_cast<SemiSpace*>(owner());
}
- private:
- NewSpacePage(SemiSpace* owner) {
- InitializeAsAnchor(owner);
- }
+ bool is_anchor() { return !this->InNewSpace(); }
// Finds the NewSpacePage containg the given address.
- static NewSpacePage* FromAddress(Address address_in_page) {
+ static inline NewSpacePage* FromAddress(Address address_in_page) {
Address page_start =
reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
~Page::kPageAlignmentMask);
- return reinterpret_cast<NewSpacePage*>(page_start);
+ NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
+ ASSERT(page->InNewSpace());
+ return page;
+ }
+
+ // Find the page for a limit address. A limit address is either an address
+ // inside a page, or the address right after the last byte of a page.
+ static inline NewSpacePage* FromLimit(Address address_limit) {
+ return NewSpacePage::FromAddress(address_limit - 1);
+ }
+
+ private:
+ // Create a NewSpacePage object that is only used as anchor
+ // for the doubly-linked list of real pages.
+ explicit NewSpacePage(SemiSpace* owner) {
+ InitializeAsAnchor(owner);
}
static NewSpacePage* Initialize(Heap* heap,
@@ -1528,7 +1547,7 @@
// Intialize a fake NewSpacePage used as sentinel at the ends
// of a doubly-linked list of real NewSpacePages.
- // Only uses the prev/next links.
+ // Only uses the prev/next links, and sets flags to not be in new-space.
void InitializeAsAnchor(SemiSpace* owner);
friend class SemiSpace;
@@ -1580,7 +1599,7 @@
bool ShrinkTo(int new_capacity);
// Returns the start address of the first page of the space.
- Address low() {
+ Address space_low() {
ASSERT(anchor_.next_page() != &anchor_);
return anchor_.next_page()->body();
}
@@ -1592,10 +1611,21 @@
}
// Returns one past the end address of the space.
- Address high() {
- // TODO(gc): Change when there is more than one page.
+ Address space_high() {
+ return anchor_.prev_page()->body_limit();
+ }
+
+ // Returns one past the end address of the current page of the space.
+ Address page_high() {
return current_page_->body_limit();
}
+
+ bool AdvancePage() {
+ NewSpacePage* next_page = current_page_->next_page();
+ if (next_page == &anchor_) return false;
+ current_page_ = next_page;
+ return true;
+ }
// Resets the space to using the first page.
void Reset();
@@ -1603,11 +1633,6 @@
// Age mark accessors.
Address age_mark() { return age_mark_; }
void set_age_mark(Address mark) { age_mark_ = mark; }
-
- // The offset of an address from the beginning of the space.
- int SpaceOffsetForAddress(Address addr) {
- return static_cast<int>(addr - low());
- }
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called.
@@ -1625,7 +1650,7 @@
bool Commit();
bool Uncommit();
- NewSpacePage* first_page() { return NewSpacePage::FromAddress(start_); }
+ NewSpacePage* first_page() { return anchor_.next_page(); }
NewSpacePage* current_page() { return current_page_; }
#ifdef ENABLE_HEAP_PROTECTION
@@ -1637,6 +1662,7 @@
#ifdef DEBUG
virtual void Print();
virtual void Verify();
+ static void ValidateRange(Address from, Address to);
#endif
// Returns the current capacity of the semi space.
@@ -1656,6 +1682,8 @@
// Flips the semispace between being from-space and to-space.
// Copies the flags into the masked positions on all pages in the space.
void FlipPages(intptr_t flags, intptr_t flag_mask);
+
+ NewSpacePage* anchor() { return &anchor_; }
// The current and maximum capacity of the space.
int capacity_;
@@ -1678,6 +1706,8 @@
NewSpacePage anchor_;
NewSpacePage* current_page_;
+ friend class SemiSpaceIterator;
+ friend class NewSpacePageIterator;
public:
TRACK_MEMORY("SemiSpace")
};
@@ -1693,15 +1723,26 @@
// Create an iterator over the objects in the given space. If no start
// address is given, the iterator starts from the bottom of the space. If
// no size function is given, the iterator calls Object::Size().
+
+ // Iterate over all of allocated to-space.
explicit SemiSpaceIterator(NewSpace* space);
+ // Iterate over all of allocated to-space, with a custom size function.
SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
+ // Iterate over part of allocated to-space, from start to the end
+ // of allocation.
SemiSpaceIterator(NewSpace* space, Address start);
+ // Iterate from one address to another in the same semi-space.
+ SemiSpaceIterator(Address from, Address to);
HeapObject* Next() {
+ if (current_ == limit_) return NULL;
if (current_ == current_page_limit_) {
- // TODO(gc): Add something here when we have more than one page.
- }
- if (current_ == limit_) return NULL;
+ NewSpacePage* page = NewSpacePage::FromAddress(current_ - 1);
+ page = page->next_page();
+ ASSERT(!page->is_anchor());
+ current_ = page->body();
+ if (current_ == limit_) return NULL;
+ }
HeapObject* object = HeapObject::FromAddress(current_);
int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1714,13 +1755,10 @@
virtual HeapObject* next_object() { return Next(); }
private:
- void Initialize(NewSpace* space,
- Address start,
+ void Initialize(Address start,
Address end,
HeapObjectCallback size_func);
- // The semispace.
- SemiSpace* space_;
// The current iteration point.
Address current_;
// The end of the current page.
@@ -1732,6 +1770,30 @@
};
+// -----------------------------------------------------------------------------
+// A PageIterator iterates the pages in a semi-space.
+class NewSpacePageIterator BASE_EMBEDDED {
+ public:
+ // Make an iterator that runs over all pages in the given semispace,
+ // even those not used in allocation.
+ explicit inline NewSpacePageIterator(SemiSpace* space);
+ // Make iterator that iterates from the page containing start
+ // to the page that contains limit in the same semispace.
+ inline NewSpacePageIterator(Address start, Address limit);
+
+ inline bool has_next();
+ inline NewSpacePage* next();
+
+ private:
+ NewSpacePage* prev_page_; // Previous page returned.
+ // Next page that will be returned. Cached here so that we can use this
+ // iterator for operations that deallocate pages.
+ NewSpacePage* next_page_;
+ // Last page returned.
+ NewSpacePage* last_page_;
+};
+
+
// -----------------------------------------------------------------------------
// The young generation space.
//
@@ -1818,8 +1880,11 @@
return Capacity();
}
- // Return the available bytes without growing in the active semispace.
- intptr_t Available() { return Capacity() - Size(); }
+ // Return the available bytes without growing or switching page in the
+ // active semispace.
+ intptr_t Available() {
+ return allocation_info_.limit - allocation_info_.top;
+ }
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
@@ -1836,7 +1901,7 @@
// Return the address of the allocation pointer in the active semispace.
Address top() { return allocation_info_.top; }
// Return the address of the first object in the active semispace.
- Address bottom() { return to_space_.low(); }
+ Address bottom() { return to_space_.space_low(); }
// Get the age mark of the inactive semispace.
Address age_mark() { return from_space_.age_mark(); }
@@ -1873,7 +1938,7 @@
void LowerInlineAllocationLimit(intptr_t step) {
inline_alloction_limit_step_ = step;
if (step == 0) {
- allocation_info_.limit = to_space_.high();
+ allocation_info_.limit = to_space_.page_high();
} else {
allocation_info_.limit = Min(
allocation_info_.top + inline_alloction_limit_step_,
@@ -1882,22 +1947,15 @@
top_on_previous_step_ = allocation_info_.top;
}
- // Get the extent of the inactive semispace (for use as a marking stack).
- Address FromSpaceLow() { return from_space_.low(); }
- Address FromSpaceHigh() { return from_space_.high(); }
-
- // Get the extent of the active semispace (to sweep newly copied objects
- // during a scavenge collection).
- Address ToSpaceLow() { return to_space_.low(); }
- Address ToSpaceHigh() { return to_space_.high(); }
-
- // Offsets from the beginning of the semispaces.
- int ToSpaceOffsetForAddress(Address a) {
- return to_space_.SpaceOffsetForAddress(a);
- }
- int FromSpaceOffsetForAddress(Address a) {
- return from_space_.SpaceOffsetForAddress(a);
- }
+ // Get the extent of the inactive semispace (for use as a marking stack,
+ // or to zap it).
+ Address FromSpacePageLow() { return from_space_.page_low(); }
+ Address FromSpaceLow() { return from_space_.space_low(); }
+ Address FromSpaceHigh() { return from_space_.space_high(); }
+
+ // Get the extent of the active semispace's pages' memory.
+ Address ToSpaceLow() { return to_space_.space_low(); }
+ Address ToSpaceHigh() { return to_space_.space_high(); }
inline bool ToSpaceContains(Address address) {
MemoryChunk* page = MemoryChunk::FromAddress(address);
@@ -1923,6 +1981,12 @@
Address address = reinterpret_cast<Address>(o);
return FromSpaceContains(address);
}
+
+ // Try to switch the active semispace to a new, empty, page.
+ // Returns false if this isn't possible or reasonable (i.e., there
+ // are no pages, or the current page is already empty), or true
+ // if successful.
+ bool AddFreshPage();
virtual bool ReserveSpace(int bytes);
@@ -1983,6 +2047,9 @@
}
private:
+ // Update allocation info to match the current to-space page.
+ void UpdateAllocationInfo();
+
Address chunk_base_;
uintptr_t chunk_size_;
@@ -2055,9 +2122,9 @@
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- ASSERT((space).low() <= (info).top \
- && (info).top <= (space).high() \
- && (info).limit <= (space).high())
+ ASSERT((space).page_low() <= (info).top \
+ && (info).top <= (space).page_high() \
+ && (info).limit <= (space).page_high())
// -----------------------------------------------------------------------------
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev