Revision: 6230
Author: vego...@chromium.org
Date: Fri Jan 7 06:11:14 2011
Log: Separate markbits from heap object map words into bitmaps.
This change considerably degrades marking (50% slowdown) and sweeping (25%
slowdown), but this is intentional: we are going to reduce these overheads
in the CLs that will follow.
Review URL: http://codereview.chromium.org/6088012
http://code.google.com/p/v8/source/detail?r=6230
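
[Editor's note: for orientation, a minimal standalone sketch of the new scheme, with illustrative names and a 32-bit layout assumed -- not the exact V8 API. Instead of stealing bits from each object's map word, the collector keeps one mark bit per pointer-sized word of heap in a side bitmap, addressed by the object's offset within the marked area:

  #include <stdint.h>

  // Sketch only: side-table marking with 32-bit bitmap cells.
  // kPointerSizeLog2 == 2 assumes a 32-bit build.
  static inline uint32_t MarkbitIndex(uintptr_t addr, uintptr_t area_start) {
    return static_cast<uint32_t>((addr - area_start) >> 2);
  }
  static inline void SetMarkBit(uint32_t* cells, uint32_t index) {
    cells[index >> 5] |= 1u << (index & 31);  // cell = index / 32, bit = index % 32
  }
  static inline bool TestMarkBit(const uint32_t* cells, uint32_t index) {
    return (cells[index >> 5] & (1u << (index & 31))) != 0;
  }
]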
Modified:
/branches/experimental/gc/src/heap-inl.h
/branches/experimental/gc/src/heap.cc
/branches/experimental/gc/src/heap.h
/branches/experimental/gc/src/mark-compact.cc
/branches/experimental/gc/src/mark-compact.h
/branches/experimental/gc/src/objects-inl.h
/branches/experimental/gc/src/objects.cc
/branches/experimental/gc/src/objects.h
/branches/experimental/gc/src/spaces.cc
/branches/experimental/gc/src/spaces.h
=======================================
--- /branches/experimental/gc/src/heap-inl.h Thu Jan 6 06:05:23 2011
+++ /branches/experimental/gc/src/heap-inl.h Fri Jan 7 06:11:14 2011
@@ -250,6 +250,11 @@
InToSpace(object)); // ... or in to-space (where we allocate).
return result;
}
+
+
+bool Heap::InNewSpace(Address addr) {
+ return new_space_.Contains(addr);
+}
bool Heap::InFromSpace(Object* object) {
=======================================
--- /branches/experimental/gc/src/heap.cc Wed Jan 5 05:52:13 2011
+++ /branches/experimental/gc/src/heap.cc Fri Jan 7 06:11:14 2011
@@ -216,12 +216,7 @@
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
- ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
- ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
- MapWord map_word = object->map_word();
- map_word.ClearMark();
- map_word.ClearOverflow();
- return object->SizeFromMap(map_word.ToMap());
+ return object->Size();
}
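
[Editor's note: why this simplification is sound. The removed lines had to strip mark/overflow bits out of the map word before it could be dereferenced, because the old collector encoded those flags in the object itself. With markbits held in external bitmaps the map word stays a valid tagged pointer throughout GC:

  // Before this CL (removed lines above): recover a usable map first.
  //   MapWord map_word = object->map_word();
  //   map_word.ClearMark();
  //   map_word.ClearOverflow();
  //   return object->SizeFromMap(map_word.ToMap());
  // After: marking never mutates the map word, so the ordinary
  //   return object->Size();
  // path is safe even while the collector is running.
]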
@@ -4598,6 +4593,8 @@
ProducerHeapProfile::Setup();
#endif
+ if (!Marking::Setup()) return false;
+
return true;
}
@@ -4671,6 +4668,8 @@
delete lo_space_;
lo_space_ = NULL;
}
+
+ Marking::TearDown();
MemoryAllocator::TearDown();
}
@@ -4912,8 +4911,8 @@
}
bool SkipObject(HeapObject* object) {
- if (object->IsMarked()) {
- object->ClearMark();
+ if (IntrusiveMarking::IsMarked(object)) {
+ IntrusiveMarking::ClearMark(object);
return true;
} else {
return false;
@@ -4935,7 +4934,9 @@
for (HeapObject* obj = iter.next_object();
obj != NULL;
obj = iter.next_object()) {
- if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
+ if (FreeListNode::IsFreeListNode(obj)) {
+ IntrusiveMarking::SetMark(obj);
+ }
}
}
@@ -4950,8 +4951,8 @@
}
bool SkipObject(HeapObject* object) {
- if (object->IsMarked()) {
- object->ClearMark();
+ if (IntrusiveMarking::IsMarked(object)) {
+ IntrusiveMarking::ClearMark(object);
return true;
} else {
return false;
@@ -4967,8 +4968,8 @@
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
- if (obj->IsMarked()) {
- obj->ClearMark();
+ if (IntrusiveMarking::IsMarked(obj)) {
+ IntrusiveMarking::ClearMark(obj);
list_.Add(obj);
}
}
@@ -4990,7 +4991,7 @@
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
- obj->SetMark();
+ IntrusiveMarking::SetMark(obj);
}
UnmarkingVisitor visitor;
Heap::IterateRoots(&visitor, VISIT_ONLY_STRONG);
@@ -5024,7 +5025,7 @@
void HeapIterator::Init() {
// Start the iteration.
space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
- new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
+ new SpaceIterator(IntrusiveMarking::SizeOfMarkedObject);
switch (filtering_) {
case kFilterFreeListNodes:
filter_ = new FreeListNodesFilter;
@@ -5284,7 +5285,6 @@
// These two fields reflect the state of the previous full collection.
// Set them before they are changed by the collector.
previous_has_compacted_ = MarkCompactCollector::HasCompacted();
- previous_marked_count_ = MarkCompactCollector::previous_marked_count();
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
start_time_ = OS::TimeCurrentMillis();
start_size_ = Heap::SizeOfObjects();
=======================================
--- /branches/experimental/gc/src/heap.h Thu Jan 6 06:05:23 2011
+++ /branches/experimental/gc/src/heap.h Fri Jan 7 06:11:14 2011
@@ -868,6 +868,7 @@
// Returns whether the object resides in new space.
static inline bool InNewSpace(Object* object);
+ static inline bool InNewSpace(Address addr);
static inline bool InFromSpace(Object* object);
static inline bool InToSpace(Object* object);
@@ -1951,10 +1952,6 @@
// cleared. Will be zero on a scavenge collection.
int marked_count_;
- // The count from the end of the previous full GC. Will be zero if there
- // was no previous full GC.
- int previous_marked_count_;
-
// Amounts of time spent in different scopes during GC.
double scopes_[Scope::kNumberOfScopes];
@@ -2135,6 +2132,43 @@
};
+// Intrusive object marking uses the least significant bit of a
+// heap object's map word to mark objects.
+// Normally all map words have the least significant bit set
+// because they contain a tagged map pointer.
+// If the bit is not set, the object is marked.
+// All objects should be unmarked before resuming
+// JavaScript execution.
+class IntrusiveMarking {
+ public:
+ static bool IsMarked(HeapObject* object) {
+ return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
+ }
+
+ static void ClearMark(HeapObject* object) {
+ uintptr_t map_word = object->map_word().ToRawValue();
+ object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
+ ASSERT(!IsMarked(object));
+ }
+
+ static void SetMark(HeapObject* object) {
+ uintptr_t map_word = object->map_word().ToRawValue();
+ object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
+ ASSERT(IsMarked(object));
+ }
+
+ static int SizeOfMarkedObject(HeapObject* object) {
+ uintptr_t map_word = object->map_word().ToRawValue();
+ Map* map = MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
+ return object->SizeFromMap(map);
+ }
+
+ private:
+ static const uintptr_t kNotMarkedBit = 0x1;
+ STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
+};
+
+
} } // namespace v8::internal
#endif // V8_HEAP_H_
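
[Editor's note: a standalone sketch of the invariant behind the STATIC_ASSERT above (values illustrative; kHeapObjectTag is 1 in V8). Every valid map word is a tagged pointer with its least significant bit set, so a cleared bit can double as a mark, and a reader such as SizeOfMarkedObject must OR the bit back in before treating the word as a Map pointer:

  #include <stdint.h>

  // Sketch of intrusive map-word marking; mirrors IntrusiveMarking above.
  static const uintptr_t kTagBit = 1;  // kHeapObjectTag == kNotMarkedBit == 1

  static bool IsMarkedWord(uintptr_t map_word) {
    return (map_word & kTagBit) == 0;  // tag bit cleared => marked
  }
  static uintptr_t MarkWord(uintptr_t map_word) {
    return map_word & ~kTagBit;        // SetMark: clear the tag bit
  }
  static uintptr_t UsableMapWord(uintptr_t map_word) {
    return map_word | kTagBit;         // restore the tag before ToMap()
  }
]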
=======================================
--- /branches/experimental/gc/src/mark-compact.cc Wed Dec 15 01:35:55 2010
+++ /branches/experimental/gc/src/mark-compact.cc Fri Jan 7 06:11:14 2011
@@ -46,10 +46,8 @@
bool MarkCompactCollector::compacting_collection_ = false;
bool MarkCompactCollector::compact_on_next_gc_ = false;
-int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;
-
#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
@@ -64,6 +62,30 @@
int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif
+
+
+Marking::NewSpaceMarkbitsBitmap* Marking::new_space_bitmap_ = NULL;
+
+
+bool Marking::Setup() {
+ if (new_space_bitmap_ == NULL) {
+ int markbits_per_newspace =
+ (2*Heap::ReservedSemiSpaceSize()) >> kPointerSizeLog2;
+
+ new_space_bitmap_ =
+ BitmapStorageDescriptor::Allocate(
+ NewSpaceMarkbitsBitmap::CellsForLength(markbits_per_newspace));
+ }
+ return new_space_bitmap_ != NULL;
+}
+
+
+void Marking::TearDown() {
+ if (new_space_bitmap_ != NULL) {
+ BitmapStorageDescriptor::Free(new_space_bitmap_);
+ new_space_bitmap_ = NULL;
+ }
+}
void MarkCompactCollector::CollectGarbage() {
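
[Editor's note: rough sizing behind the allocation in Marking::Setup, assuming for illustration a 32-bit build (kPointerSizeLog2 == 2), 32-bit bitmap cells, and an 8 MB reserved semispace; the real value comes from Heap::ReservedSemiSpaceSize():

  // Illustrative sizing only; constants are assumptions, not V8's.
  static const int kAssumedSemiSpaceSize = 8 * 1024 * 1024;  // 8 MB
  static const int kPtrSizeLog2 = 2;                         // 32-bit build
  static const int kMarkbits =
      (2 * kAssumedSemiSpaceSize) >> kPtrSizeLog2;  // both semispaces: 4,194,304 bits
  static const int kCells = (kMarkbits + 31) >> 5;  // CellsForLength: 131,072 cells
  static const int kBitmapBytes = kCells * 4;       // 524,288 bytes = 512 KB committed
]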
@@ -86,12 +108,31 @@
Finish();
- // Save the count of marked objects remaining after the collection and
+ // Check that we swept all marked objects and
// null out the GC tracer.
- previous_marked_count_ = tracer_->marked_count();
- ASSERT(previous_marked_count_ == 0);
+ ASSERT(tracer_->marked_count() == 0);
tracer_ = NULL;
}
+
+
+#ifdef DEBUG
+static void VerifyMarkbitsAreClean(PagedSpace* space) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+ while (it.has_next()) {
+ Page* p = it.next();
+ ASSERT(p->markbits()->IsClean());
+ }
+}
+
+static void VerifyMarkbitsAreClean() {
+ VerifyMarkbitsAreClean(Heap::old_pointer_space());
+ VerifyMarkbitsAreClean(Heap::old_data_space());
+ VerifyMarkbitsAreClean(Heap::code_space());
+ VerifyMarkbitsAreClean(Heap::cell_space());
+ VerifyMarkbitsAreClean(Heap::map_space());
+}
+#endif
void MarkCompactCollector::Prepare(GCTracer* tracer) {
@@ -123,8 +164,16 @@
space != NULL; space = spaces.next()) {
space->PrepareForMarkCompact(compacting_collection_);
}
+
+ Address new_space_top = Heap::new_space()->top();
+ Address new_space_bottom = Heap::new_space()->bottom();
+
+ Marking::ClearRange(new_space_bottom,
+ static_cast<int>(new_space_top - new_space_bottom));
#ifdef DEBUG
+ VerifyMarkbitsAreClean();
+
live_bytes_ = 0;
live_young_objects_size_ = 0;
live_old_pointer_objects_size_ = 0;
@@ -241,7 +290,7 @@
SharedFunctionInfo* shared = candidate->unchecked_shared();
Code* code = shared->unchecked_code();
- if (!code->IsMarked()) {
+ if (!Marking::IsMarked(code->address())) {
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
} else {
@@ -265,7 +314,7 @@
SetNextCandidate(candidate, NULL);
Code* code = candidate->unchecked_code();
- if (!code->IsMarked()) {
+ if (!Marking::IsMarked(code->address())) {
candidate->set_code(lazy_compile);
}
@@ -337,9 +386,7 @@
// except the maps for the object and its possible substrings might be
// marked.
HeapObject* object = HeapObject::cast(*p);
- MapWord map_word = object->map_word();
- map_word.ClearMark();
- InstanceType type = map_word.ToMap()->instance_type();
+ InstanceType type = object->map()->instance_type();
if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
Object* second =
reinterpret_cast<ConsString*>(object)->unchecked_second();
@@ -494,7 +541,7 @@
static inline void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
ASSERT(Heap::Contains(obj));
- ASSERT(!obj->IsMarked());
+ ASSERT(!Marking::IsMarked(obj->address()));
#endif
Map* map = obj->map();
MarkCompactCollector::SetMark(obj);
@@ -514,7 +561,7 @@
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
- if (obj->IsMarked()) continue;
+ if (Marking::IsMarked(obj)) continue;
VisitUnmarkedObject(obj);
}
return true;
@@ -575,7 +622,7 @@
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- if (function->unchecked_code()->IsMarked()) {
+ if (Marking::IsMarked(function->unchecked_code())) {
shared_info->set_code_age(0);
return false;
}
@@ -591,7 +638,7 @@
inline static bool IsFlushable(SharedFunctionInfo* shared_info) {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- if (shared_info->unchecked_code()->IsMarked()) {
+ if (Marking::IsMarked(shared_info->unchecked_code())) {
shared_info->set_code_age(0);
return false;
}
@@ -643,10 +690,7 @@
static inline Map* SafeMap(Object* obj) {
- MapWord map_word = HeapObject::cast(obj)->map_word();
- map_word.ClearMark();
- map_word.ClearOverflow();
- return map_word.ToMap();
+ return HeapObject::cast(obj)->map();
}
@@ -782,7 +826,7 @@
// Visit shared function info to avoid double checking of its
// flushability.
SharedFunctionInfo* shared_info = object->unchecked_shared();
- if (!shared_info->IsMarked()) {
+ if (!Marking::IsMarked(shared_info)) {
Map* shared_info_map = shared_info->map();
MarkCompactCollector::SetMark(shared_info);
MarkCompactCollector::MarkObject(shared_info_map);
@@ -928,7 +972,7 @@
// Replace flat cons strings in place.
HeapObject* object = ShortCircuitConsString(p);
- if (object->IsMarked()) return;
+ if (Marking::IsMarked(object)) return;
Map* map = object->map();
// Mark the object.
@@ -953,7 +997,8 @@
virtual void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
+ if ((*p)->IsHeapObject() &&
+ !Marking::IsMarked(HeapObject::cast(*p))) {
// Check if the symbol being pruned is an external symbol. We need to
// delete the associated external data as this symbol is going away.
@@ -982,8 +1027,7 @@
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
- MapWord first_word = HeapObject::cast(object)->map_word();
- if (first_word.IsMarked()) {
+ if (Marking::IsMarked(HeapObject::cast(object))) {
return object;
} else {
return NULL;
@@ -992,15 +1036,14 @@
};
-void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
- ASSERT(!object->IsMarked());
+void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
+ ASSERT(Marking::IsMarked(object));
ASSERT(Heap::Contains(object));
if (object->IsMap()) {
Map* map = Map::cast(object);
if (FLAG_cleanup_caches_in_maps_at_gc) {
map->ClearCodeCache();
}
- SetMark(map);
if (FLAG_collect_maps &&
map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
map->instance_type() <= JS_FUNCTION_TYPE) {
@@ -1009,7 +1052,6 @@
marking_stack.Push(map);
}
} else {
- SetMark(object);
marking_stack.Push(object);
}
}
@@ -1033,7 +1075,7 @@
void MarkCompactCollector::MarkDescriptorArray(
DescriptorArray* descriptors) {
- if (descriptors->IsMarked()) return;
+ if (Marking::IsMarked(descriptors)) return;
// Empty descriptor array is marked as a root before any maps are marked.
ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
SetMark(descriptors);
@@ -1041,7 +1083,7 @@
FixedArray* contents = reinterpret_cast<FixedArray*>(
descriptors->get(DescriptorArray::kContentArrayIndex));
ASSERT(contents->IsHeapObject());
- ASSERT(!contents->IsMarked());
+ ASSERT(!Marking::IsMarked(contents));
ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
SetMark(contents);
@@ -1056,7 +1098,7 @@
PropertyDetails details(Smi::cast(contents->get(i + 1)));
if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
- if (object->IsHeapObject() && !object->IsMarked()) {
+ if (object->IsHeapObject() && !Marking::IsMarked(object)) {
SetMark(object);
marking_stack.Push(object);
}
@@ -1088,10 +1130,7 @@
static int OverflowObjectSize(HeapObject* obj) {
// Recover the normal map pointer, it might be marked as live and
// overflowed.
- MapWord map_word = obj->map_word();
- map_word.ClearMark();
- map_word.ClearOverflow();
- return obj->SizeFromMap(map_word.ToMap());
+ return obj->Size();
}
@@ -1100,6 +1139,7 @@
// is reached, whichever comes first.
template<class T>
static void ScanOverflowedObjects(T* it) {
+#if 0
// The caller should ensure that the marking stack is initially not full,
// so that we don't waste effort pointlessly scanning for objects.
ASSERT(!marking_stack.is_full());
@@ -1107,17 +1147,20 @@
for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
if (object->IsOverflowed()) {
object->ClearOverflow();
- ASSERT(object->IsMarked());
+ ASSERT(Marking::IsMarked(object));
ASSERT(Heap::Contains(object));
marking_stack.Push(object);
if (marking_stack.is_full()) return;
}
}
+#endif
+ UNREACHABLE();
}
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
- return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
+ return (*p)->IsHeapObject() &&
+ !Marking::IsMarked(HeapObject::cast(*p));
}
@@ -1159,7 +1202,7 @@
bool group_marked = false;
for (int j = 0; j < objects.length(); j++) {
Object* object = *objects[j];
- if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
+ if (object->IsHeapObject() &&
+ Marking::IsMarked(HeapObject::cast(object))) {
group_marked = true;
break;
}
@@ -1191,14 +1234,12 @@
HeapObject* object = marking_stack.Pop();
ASSERT(object->IsHeapObject());
ASSERT(Heap::Contains(object));
- ASSERT(object->IsMarked());
+ ASSERT(Marking::IsMarked(object));
ASSERT(!object->IsOverflowed());
// Because the object is marked, we have to recover the original map
// pointer and use it to mark the object's body.
- MapWord map_word = object->map_word();
- map_word.ClearMark();
- Map* map = map_word.ToMap();
+ Map* map = object->map();
MarkObject(map);
StaticMarkingVisitor::IterateBody(map, object);
@@ -1379,14 +1420,12 @@
// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
- MapWord metamap = object->map_word();
- metamap.ClearMark();
- return metamap.ToMap()->instance_type() == MAP_TYPE;
+ return object->map()->instance_type() == MAP_TYPE;
}
void MarkCompactCollector::ClearNonLiveTransitions() {
- HeapObjectIterator map_iterator(Heap::map_space(), &SizeOfMarkedObject);
+ HeapObjectIterator map_iterator(Heap::map_space());
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. At the same time,
// set all the prototype fields of maps back to their original value,
@@ -1400,14 +1439,15 @@
for (HeapObject* obj = map_iterator.next();
obj != NULL; obj = map_iterator.next()) {
Map* map = reinterpret_cast<Map*>(obj);
- if (!map->IsMarked() && map->IsByteArray()) continue;
+ if (!Marking::IsMarked(map) && map->IsByteArray()) continue;
ASSERT(SafeIsMap(map));
// Only JSObject and subtypes have map transitions and back pointers.
if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
if (map->instance_type() > JS_FUNCTION_TYPE) continue;
- if (map->IsMarked() && map->attached_to_shared_function_info()) {
+ if (Marking::IsMarked(map) &&
+ map->attached_to_shared_function_info()) {
// This map is used for inobject slack tracking and has been detached
// from SharedFunctionInfo during the mark phase.
// Since it survived the GC, reattach it now.
@@ -1425,16 +1465,16 @@
// Follow back pointers, setting them to prototype,
// clearing map transitions when necessary.
current = map;
- bool on_dead_path = !current->IsMarked();
+ bool on_dead_path = !Marking::IsMarked(current);
Object* next;
while (SafeIsMap(current)) {
next = current->prototype();
// There should never be a dead map above a live map.
- ASSERT(on_dead_path || current->IsMarked());
+ ASSERT(on_dead_path || Marking::IsMarked(current));
// A live map above a dead map indicates a dead transition.
// This test will always be false on the first iteration.
- if (on_dead_path && current->IsMarked()) {
+ if (on_dead_path && Marking::IsMarked(current)) {
on_dead_path = false;
current->ClearNonLiveTransitions(real_prototype);
}
@@ -1583,7 +1623,7 @@
}
-static void SweepNewSpace(NewSpace* space) {
+void MarkCompactCollector::SweepNewSpace(NewSpace* space) {
Heap::CheckNewSpaceExpansionCriteria();
Address from_bottom = space->bottom();
@@ -1602,8 +1642,8 @@
for (Address current = from_bottom; current < from_top; current += size) {
HeapObject* object = HeapObject::FromAddress(current);
- if (object->IsMarked()) {
- object->ClearMark();
+ if (Marking::IsMarked(object)) {
+ Marking::ClearMark(object);
MarkCompactCollector::tracer()->decrement_marked_count();
size = object->Size();
@@ -1677,7 +1717,7 @@
}
-static void SweepSpace(PagedSpace* space) {
+void MarkCompactCollector::SweepSpace(PagedSpace* space) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
// During sweeping of paged space we are trying to find longest sequences
@@ -1713,8 +1753,8 @@
current < p->AllocationTop();
current += object->Size()) {
object = HeapObject::FromAddress(current);
- if (object->IsMarked()) {
- object->ClearMark();
+ if (Marking::IsMarked(object)) {
+ Marking::ClearMark(object);
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
@@ -1904,13 +1944,6 @@
}
#endif
}
-
-
-int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
- MapWord map_word = obj->map_word();
- map_word.ClearMark();
- return obj->SizeFromMap(map_word.ToMap());
-}
void MarkCompactCollector::Initialize() {
=======================================
--- /branches/experimental/gc/src/mark-compact.h Wed Dec 15 01:35:55 2010
+++ /branches/experimental/gc/src/mark-compact.h Fri Jan 7 06:11:14 2011
@@ -41,6 +41,122 @@
class MarkingVisitor;
+class Marking {
+ public:
+ INLINE(static bool IsMarked(HeapObject* obj)) {
+ return IsMarked(obj->address());
+ }
+
+ INLINE(static void SetMark(HeapObject* obj)) {
+ SetMark(obj->address());
+ }
+
+ INLINE(static void ClearMark(HeapObject* obj)) {
+ ClearMark(obj->address());
+ }
+
+ INLINE(static bool TestAndMark(Address addr)) {
+ if (Heap::InNewSpace(addr)) {
+ uint32_t index = Heap::new_space()->Address2MarkbitIndex(addr);
+ return new_space_bitmap_->TestAndSet(index);
+ } else {
+ Page* p = Page::FromAddress(addr);
+ return p->markbits()->TestAndSet(p->Address2Markbit(addr));
+ }
+ }
+
+ INLINE(static bool IsMarked(Address addr)) {
+ if (Heap::InNewSpace(addr)) {
+ uint32_t index = Heap::new_space()->Address2MarkbitIndex(addr);
+ return new_space_bitmap_->Get(index);
+ } else {
+ Page* p = Page::FromAddress(addr);
+ return p->markbits()->Get(p->Address2Markbit(addr));
+ }
+ }
+
+ INLINE(static void SetMark(Address addr)) {
+ if (Heap::InNewSpace(addr)) {
+ uint32_t index = Heap::new_space()->Address2MarkbitIndex(addr);
+ new_space_bitmap_->Set(index);
+ } else {
+ Page* p = Page::FromAddress(addr);
+ p->markbits()->Set(p->FastAddress2Markbit(addr));
+ }
+ }
+
+ INLINE(static void ClearMark(Address addr)) {
+ if (Heap::InNewSpace(addr)) {
+ uint32_t index = Heap::new_space()->Address2MarkbitIndex(addr);
+ new_space_bitmap_->Clear(index);
+ } else {
+ Page* p = Page::FromAddress(addr);
+ p->markbits()->Clear(p->FastAddress2Markbit(addr));
+ }
+ }
+
+ INLINE(static void ClearRange(Address addr, int size)) {
+ if (Heap::InNewSpace(addr)) {
+ uint32_t index = Heap::new_space()->Address2MarkbitIndex(addr);
+ new_space_bitmap_->ClearRange(index, size >> kPointerSizeLog2);
+ } else {
+ Page* p = Page::FromAddress(addr);
+ p->markbits()->ClearRange(p->FastAddress2Markbit(addr),
+ size >> kPointerSizeLog2);
+ }
+ }
+
+ static bool Setup();
+
+ static void TearDown();
+
+ private:
+ class BitmapStorageDescriptor {
+ public:
+ INLINE(static int CellsCount(Address addr)) {
+ return HeaderOf(addr)->cells_count_;
+ }
+
+ static Bitmap<BitmapStorageDescriptor>* Allocate(int cells_count) {
+ VirtualMemory* memory = new VirtualMemory(SizeFor(cells_count));
+
+ if (!memory->Commit(memory->address(), memory->size(), false)) {
+ delete memory;
+ return NULL;
+ }
+
+ Address bitmap_address =
+ reinterpret_cast<Address>(memory->address()) + sizeof(Header);
+ HeaderOf(bitmap_address)->cells_count_ = cells_count;
+ HeaderOf(bitmap_address)->storage_ = memory;
+ return Bitmap<BitmapStorageDescriptor>::FromAddress(bitmap_address);
+ }
+
+ static void Free(Bitmap<BitmapStorageDescriptor>* bitmap) {
+ delete HeaderOf(bitmap->address())->storage_;
+ }
+
+ private:
+ struct Header {
+ VirtualMemory* storage_;
+ int cells_count_;
+ };
+
+ static int SizeFor(int cell_count) {
+ return sizeof(Header) +
+ Bitmap<BitmapStorageDescriptor>::SizeFor(cell_count);
+ }
+
+ static Header* HeaderOf(Address addr) {
+ return reinterpret_cast<Header*>(addr - sizeof(Header));
+ }
+ };
+
+ typedef Bitmap<BitmapStorageDescriptor> NewSpaceMarkbitsBitmap;
+
+ static NewSpaceMarkbitsBitmap* new_space_bitmap_;
+};
+
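[Editor's note: one design point in BitmapStorageDescriptor above, as a hedged reading of the code: the bitmap handed out to callers is a bare address into committed VirtualMemory, and its metadata lives in a small header placed immediately before that address, which is how CellsCount() and Free() recover it from the pointer alone.

  // Layout produced by BitmapStorageDescriptor::Allocate (sketch):
  //
  //   memory->address()                      bitmap_address
  //   | Header { storage_, cells_count_ } | bitmap cells ... |
  //                                        ^ value returned to callers
  //
  // HeaderOf(addr) simply steps back sizeof(Header) bytes from addr.
]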
// -------------------------------------------------------------------------
// Mark-Compact collector
//
@@ -101,10 +217,6 @@
return compacting_collection_;
#endif
}
-
- // The count of the number of objects left marked at the end of the last
- // completed full GC (expected to be zero).
- static int previous_marked_count() { return previous_marked_count_; }
// During a full GC, there is a stack-allocated GCTracer that is used for
// bookkeeping information. Return a pointer to that tracer.
@@ -119,9 +231,6 @@
// Determine type of object and emit deletion log event.
static void ReportDeleteIfNeeded(HeapObject* obj);
- // Returns size of a possibly marked object.
- static int SizeOfMarkedObject(HeapObject* obj);
-
// Distinguishable invalid map encodings (for single word and multiple words)
// that indicate free regions.
static const uint32_t kSingleFreeEncoding = 0;
@@ -152,10 +261,6 @@
// Global flag indicating whether spaces will be compacted on the next GC.
static bool compact_on_next_gc_;
- // The number of objects left marked at the end of the last completed full
- // GC (expected to be zero).
- static int previous_marked_count_;
-
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
static GCTracer* tracer_;
@@ -184,19 +289,25 @@
// Marking operations for objects reachable from roots.
static void MarkLiveObjects();
- static void MarkUnmarkedObject(HeapObject* obj);
-
static inline void MarkObject(HeapObject* obj) {
- if (!obj->IsMarked()) MarkUnmarkedObject(obj);
+ if (!Marking::TestAndMark(obj->address())) {
+ tracer_->increment_marked_count();
+#ifdef DEBUG
+ UpdateLiveObjectCount(obj);
+#endif
+ ProcessNewlyMarkedObject(obj);
+ }
}
static inline void SetMark(HeapObject* obj) {
+ Marking::SetMark(obj);
tracer_->increment_marked_count();
#ifdef DEBUG
UpdateLiveObjectCount(obj);
#endif
- obj->SetMark();
- }
+ }
+
+ static void ProcessNewlyMarkedObject(HeapObject* obj);
// Creates back pointers for all map transitions, stores them in
// the prototype field. The original prototype pointers are restored
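
[Editor's note: MarkObject above folds the old "check, then mark" pair into a single bitmap probe. Marking::TestAndMark returns the bit's previous value, so the tracer's marked count is incremented exactly once per object. A self-contained sketch of that test-and-set contract (not the V8 types):

  #include <stdint.h>

  // Returns the previous value of the bit and sets it; mirrors
  // Bitmap::TestAndSet in spaces.h.
  static bool TestAndSet(uint32_t* cells, uint32_t index) {
    const uint32_t mask = 1u << (index & 31);
    if (cells[index >> 5] & mask) return true;  // already marked: caller skips
    cells[index >> 5] |= mask;                  // first visit: mark and process
    return false;
  }
]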
@@ -286,6 +397,9 @@
// regions to each space's free list.
static void SweepSpaces();
+ static void SweepNewSpace(NewSpace* space);
+ static void SweepSpace(PagedSpace* space);
+
#ifdef DEBUG
// -----------------------------------------------------------------------
// Debugging variables, functions and classes
=======================================
--- /branches/experimental/gc/src/objects-inl.h Tue Dec 21 04:32:46 2010
+++ /branches/experimental/gc/src/objects-inl.h Fri Jan 7 06:11:14 2011
@@ -989,36 +989,6 @@
ASSERT(IsForwardingAddress());
return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
}
-
-
-bool MapWord::IsMarked() {
- return (value_ & kMarkingMask) == 0;
-}
-
-
-void MapWord::SetMark() {
- value_ &= ~kMarkingMask;
-}
-
-
-void MapWord::ClearMark() {
- value_ |= kMarkingMask;
-}
-
-
-bool MapWord::IsOverflowed() {
- return (value_ & kOverflowMask) != 0;
-}
-
-
-void MapWord::SetOverflow() {
- value_ |= kOverflowMask;
-}
-
-
-void MapWord::ClearOverflow() {
- value_ &= ~kOverflowMask;
-}
#ifdef DEBUG
@@ -1079,47 +1049,6 @@
void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
v->VisitPointer(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
}
-
-
-bool HeapObject::IsMarked() {
- return map_word().IsMarked();
-}
-
-
-void HeapObject::SetMark() {
- ASSERT(!IsMarked());
- MapWord first_word = map_word();
- first_word.SetMark();
- set_map_word(first_word);
-}
-
-
-void HeapObject::ClearMark() {
- ASSERT(IsMarked());
- MapWord first_word = map_word();
- first_word.ClearMark();
- set_map_word(first_word);
-}
-
-
-bool HeapObject::IsOverflowed() {
- return map_word().IsOverflowed();
-}
-
-
-void HeapObject::SetOverflow() {
- MapWord first_word = map_word();
- first_word.SetOverflow();
- set_map_word(first_word);
-}
-
-
-void HeapObject::ClearOverflow() {
- ASSERT(IsOverflowed());
- MapWord first_word = map_word();
- first_word.ClearOverflow();
- set_map_word(first_word);
-}
double HeapNumber::value() {
=======================================
--- /branches/experimental/gc/src/objects.cc Wed Jan 5 05:52:13 2011
+++ /branches/experimental/gc/src/objects.cc Fri Jan 7 06:11:14 2011
@@ -39,6 +39,7 @@
#include "objects-inl.h"
#include "objects-visiting.h"
#include "macro-assembler.h"
+#include "mark-compact.h"
#include "safepoint-table.h"
#include "scanner-base.h"
#include "scopeinfo.h"
@@ -5372,7 +5373,7 @@
details.type() == CONSTANT_TRANSITION) {
Map* target = reinterpret_cast<Map*>(contents->get(i));
ASSERT(target->IsHeapObject());
- if (!target->IsMarked()) {
+ if (!Marking::IsMarked(target)) {
ASSERT(target->IsMap());
contents->set_unchecked(i + 1, NullDescriptorDetails);
contents->set_null_unchecked(i);
=======================================
--- /branches/experimental/gc/src/objects.h Wed Jan 5 05:52:13 2011
+++ /branches/experimental/gc/src/objects.h Fri Jan 7 06:11:14 2011
@@ -930,42 +930,13 @@
// View this map word as a forwarding address.
inline HeapObject* ToForwardingAddress();
- // Marking phase of full collection: the map word of live objects is
- // marked, and may be marked as overflowed (eg, the object is live, its
- // children have not been visited, and it does not fit in the marking
- // stack).
-
- // True if this map word's mark bit is set.
- inline bool IsMarked();
-
- // Return this map word but with its mark bit set.
- inline void SetMark();
-
- // Return this map word but with its mark bit cleared.
- inline void ClearMark();
-
- // True if this map word's overflow bit is set.
- inline bool IsOverflowed();
-
- // Return this map word but with its overflow bit set.
- inline void SetOverflow();
-
- // Return this map word but with its overflow bit cleared.
- inline void ClearOverflow();
-
- // Bits used by the marking phase of the garbage collector.
- //
- // The first word of a heap object is normally a map pointer. The last two
- // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
- // mark an object as live and/or overflowed:
- // last bit = 0, marked as alive
- // second bit = 1, overflowed
- // An object is only marked as overflowed when it is marked as live while
- // the marking stack is overflowed.
- static const int kMarkingBit = 0; // marking bit
- static const int kMarkingMask = (1 << kMarkingBit); // marking mask
- static const int kOverflowBit = 1; // overflow bit
- static const int kOverflowMask = (1 << kOverflowBit); // overflow mask
+ static inline MapWord FromRawValue(uintptr_t value) {
+ return MapWord(value);
+ }
+
+ inline uintptr_t ToRawValue() {
+ return value_;
+ }
private:
// HeapObject calls the private constructor and directly reads the value.
@@ -1014,30 +985,19 @@
// GC internal.
inline int SizeFromMap(Map* map);
- // Support for the marking heap objects during the marking phase of GC.
- // True if the object is marked live.
- inline bool IsMarked();
-
- // Mutate this object's map pointer to indicate that the object is live.
- inline void SetMark();
-
- // Mutate this object's map pointer to remove the indication that the
- // object is live (ie, partially restore the map pointer).
- inline void ClearMark();
-
// True if this object is marked as overflowed. Overflowed objects have
// been reached and marked during marking of the heap, but their children
// have not necessarily been marked and they have not been pushed on the
// marking stack.
- inline bool IsOverflowed();
+ inline bool IsOverflowed() { return false; }
// Mutate this object's map pointer to indicate that the object is
// overflowed.
- inline void SetOverflow();
+ inline void SetOverflow() {}
// Mutate this object's map pointer to remove the indication that the
// object is overflowed (ie, partially restore the map pointer).
- inline void ClearOverflow();
+ inline void ClearOverflow() {}
// Returns the field at offset in obj, as a read/write Object* reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
=======================================
--- /branches/experimental/gc/src/spaces.cc Thu Dec 23 08:04:56 2010
+++ /branches/experimental/gc/src/spaces.cc Fri Jan 7 06:11:14 2011
@@ -1564,7 +1564,7 @@
while (cur_addr != NULL) {
FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
cur_addr = cur_node->next();
- cur_node->SetMark();
+ IntrusiveMarking::SetMark(cur_node);
}
}
}
@@ -1634,7 +1634,7 @@
while (cur_addr != NULL && cur_addr != tail_) {
FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
cur_addr = cur_node->next();
- cur_node->SetMark();
+ IntrusiveMarking::SetMark(cur_node);
}
}
@@ -1697,6 +1697,7 @@
first->SetAllocationWatermark(first->ObjectAreaStart());
first->SetCachedAllocationWatermark(first->ObjectAreaStart());
first->SetRegionMarks(Page::kAllRegionsCleanMarks);
+ first->markbits()->Clear();
first = first->next_page();
} while (first != NULL);
}
@@ -2328,8 +2329,8 @@
LargePage* current = first_page_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- if (object->IsMarked()) {
- object->ClearMark();
+ if (Marking::IsMarked(object)) {
+ Marking::ClearMark(object);
MarkCompactCollector::tracer()->decrement_marked_count();
previous = current;
current = current->next_page();
=======================================
--- /branches/experimental/gc/src/spaces.h Tue Jan 4 06:22:51 2011
+++ /branches/experimental/gc/src/spaces.h Fri Jan 7 06:11:14 2011
@@ -113,6 +113,121 @@
class AllocationInfo;
class Space;
+
+// Bitmap is a sequence of cells, each containing a fixed number of bits.
+template<typename StorageDescriptor>
+class Bitmap {
+ public:
+ typedef uint32_t CellType;
+ static const uint32_t kBitsPerCell = 32;
+ static const uint32_t kBitsPerCellLog2 = 5;
+ static const uint32_t kBitIndexMask = kBitsPerCell - 1;
+
+ static int CellsForLength(int length) {
+ return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
+ }
+
+ int CellsCount() {
+ return StorageDescriptor::CellsCount(this->address());
+ }
+
+ static int SizeFor(int cells_count) {
+ return sizeof(CellType)*cells_count;
+ }
+
+ INLINE(CellType* cells()) {
+ return reinterpret_cast<CellType*>(this);
+ }
+
+ INLINE(Address address()) {
+ return reinterpret_cast<Address>(this);
+ }
+
+ INLINE(static Bitmap* FromAddress(Address addr)) {
+ return reinterpret_cast<Bitmap*>(addr);
+ }
+
+ INLINE(static Bitmap* FromAddress(uint32_t* addr)) {
+ return reinterpret_cast<Bitmap*>(addr);
+ }
+
+ INLINE(bool TestAndSet(const uint32_t index)) {
+ const uint32_t mask = 1 << (index & kBitIndexMask);
+ if (cells()[index >> kBitsPerCellLog2] & mask) {
+ return true;
+ } else {
+ cells()[index >> kBitsPerCellLog2] |= mask;
+ return false;
+ }
+ }
+
+ INLINE(bool Get(uint32_t index)) {
+ uint32_t mask = 1 << (index & kBitIndexMask);
+ return (this->cells()[index >> kBitsPerCellLog2] & mask) != 0;
+ }
+
+ INLINE(void Set(uint32_t index)) {
+ uint32_t mask = 1 << (index & kBitIndexMask);
+ cells()[index >> kBitsPerCellLog2] |= mask;
+ }
+
+ INLINE(void Clear(uint32_t index)) {
+ uint32_t mask = 1 << (index & kBitIndexMask);
+ cells()[index >> kBitsPerCellLog2] &= ~mask;
+ }
+
+ INLINE(void ClearRange(uint32_t start, uint32_t size)) {
+ const uint32_t end = start + size;
+ const uint32_t start_cell = start >> kBitsPerCellLog2;
+ const uint32_t end_cell = end >> kBitsPerCellLog2;
+
+ const uint32_t start_mask = (~0u) << (start & kBitIndexMask);
+ const uint32_t end_mask = (1u << (end & kBitIndexMask)) - 1;
+
+ ASSERT(static_cast<int>(start_cell) < CellsCount());
+ ASSERT(static_cast<int>(end_cell) < CellsCount());
+
+ if (start_cell == end_cell) {
+ cells()[start_cell] &= ~(start_mask & end_mask);
+ } else {
+ cells()[start_cell] &= ~start_mask;
+ if (end_mask != 0) cells()[end_cell] &= ~end_mask;
+
+ for (uint32_t cell = start_cell + 1, last_cell = end_cell - 1;
+ cell <= last_cell;
+ cell++) {
+ cells()[cell] = 0;
+ }
+ }
+ }
+
+ INLINE(void Clear()) {
+ for (int i = 0; i < CellsCount(); i++) cells()[i] = 0;
+ }
+
+ static void PrintWord(const uint32_t& word, const char* sep = " ") {
+ for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+ PrintF((mask & word) ? "1" : "0");
+ }
+ PrintF("%s", sep);
+ }
+
+ void Print() {
+ for (int i = 0; i < CellsCount(); i++) {
+ PrintWord(cells()[i]);
+ }
+ PrintF("\n");
+ }
+
+ bool IsClean() {
+ for (int i = 0; i < CellsCount(); i++) {
+ if (cells()[i] != 0) return false;
+ }
+ return true;
+ }
+};
+
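[Editor's note: a worked example of the ClearRange masks above, assuming 32-bit cells. ClearRange(3, 66) clears bits [3, 69) and touches cells 0 through 2:

  // start = 3, end = 69  =>  start_cell = 0, end_cell = 2
  // start_mask = 0xFFFFFFF8  (bits 3..31)              cells[0] &= ~start_mask
  // end_mask   = 0x0000001F  (bits 0..4; 69 & 31 == 5) cells[2] &= ~end_mask
  // cells[1] is zeroed wholesale by the interior loop
]
+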
+
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
@@ -161,14 +276,51 @@
static const size_t kHeaderSize =
kPointerSize + kPointerSize + kPointerSize + kPointerSize + kPointerSize;
+ static const size_t kMarksBitmapLength =
+ (1 << kPageSizeBits) >> (kPointerSizeLog2);
+
+ static const size_t kMarksBitmapSize =
+ (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
+
static const int kBodyOffset =
- CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize));
+ CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + kMarksBitmapSize));
size_t size() const { return size_; }
Executability executable() {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}
+
+ // ---------------------------------------------------------------------
+ // Markbits support
+ class BitmapStorageDescriptor {
+ public:
+ INLINE(static int CellsCount(Address addr)) {
+ return Bitmap<BitmapStorageDescriptor>::CellsForLength(
+ kMarksBitmapLength);
+ }
+ };
+
+ typedef Bitmap<BitmapStorageDescriptor> MarkbitsBitmap;
+
+ inline MarkbitsBitmap* markbits() {
+ return MarkbitsBitmap::FromAddress(address() + kHeaderSize);
+ }
+
+ inline uint32_t Address2Markbit(Address addr) {
+ return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
+ }
+
+ inline static uint32_t FastAddress2Markbit(Address addr) {
+ const intptr_t offset =
+ reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+
+ return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
+ }
+
+ inline Address Markbit2Address(uint32_t index) {
+ return this->address() + (index << kPointerSizeLog2);
+ }
protected:
MemoryChunk* next_chunk_;
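
[Editor's note: worked numbers for the two constants above, assuming 1 MB aligned chunks (kPageSizeBits == 20, matching the "1MB aligned" comment) and a 32-bit build: kMarksBitmapLength = 2^20 >> 2 = 262144 markbits, and kMarksBitmapSize = 2^20 >> (2 + 3) = 32 KB of bitmap, which is why kBodyOffset now reserves kHeaderSize + kMarksBitmapSize. FastAddress2Markbit leans on that power-of-two alignment, sketched here with assumed constants:

  #include <stdint.h>

  // Sketch: because chunks are aligned to a power-of-two boundary, the
  // offset within the chunk is just the low bits of the address, so no
  // chunk pointer needs to be loaded to compute a markbit index.
  static inline uint32_t FastOffsetToMarkbit(uintptr_t addr) {
    const uintptr_t kAlignmentMask = (1 << 20) - 1;  // assumed 1 MB alignment
    return static_cast<uint32_t>(addr & kAlignmentMask) >> 2;  // kPointerSizeLog2 == 2
  }
]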
@@ -189,6 +341,7 @@
chunk->size_ = size;
chunk->flags_ = 0;
chunk->owner_ = owner;
+ chunk->markbits()->Clear();
if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
@@ -1413,6 +1566,16 @@
// new space with the mask will result in the start address.
Address start() { return start_; }
uintptr_t mask() { return address_mask_; }
+
+ INLINE(uint32_t Address2MarkbitIndex(Address addr)) {
+ ASSERT(Contains(addr));
+ ASSERT(IsAligned(OffsetFrom(addr), kPointerSize));
+ return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
+ }
+
+ INLINE(Address MarkbitIndex2Address(uint32_t index)) {
+ return start_ + (index << kPointerSizeLog2);
+ }
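+
[Editor's note: the markbit index here is relative to the semispace start, so the two functions above should be exact inverses (the return statement above is amended accordingly from the posted patch, which omitted start_). A worked round trip with hypothetical values, assuming start_ == 0x08000000 and a 32-bit build:

  // Address2MarkbitIndex(0x08000040) == (0x08000040 - 0x08000000) >> 2 == 16
  // MarkbitIndex2Address(16)         == start_ + (16 << 2)             == 0x08000040
]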
// The allocation top and limit addresses.
Address* allocation_top_address() { return &allocation_info_.top; }