Revision: 8559
Author: vego...@chromium.org
Date: Wed Jul 6 13:56:48 2011
Log: Support slots recording for compaction during incremental marking.
Currently we still rely on a single big slots buffer which is not filtered
or canonicalized and can grow limitlessly.
R=erik.co...@gmail.com
Review URL: http://codereview.chromium.org/7302003
http://code.google.com/p/v8/source/detail?r=8559
Modified:
/branches/experimental/gc/src/assembler.cc
/branches/experimental/gc/src/assembler.h
/branches/experimental/gc/src/heap.cc
/branches/experimental/gc/src/ia32/assembler-ia32-inl.h
/branches/experimental/gc/src/ia32/code-stubs-ia32.cc
/branches/experimental/gc/src/ia32/code-stubs-ia32.h
/branches/experimental/gc/src/ia32/deoptimizer-ia32.cc
/branches/experimental/gc/src/ia32/full-codegen-ia32.cc
/branches/experimental/gc/src/ia32/macro-assembler-ia32.cc
/branches/experimental/gc/src/ia32/macro-assembler-ia32.h
/branches/experimental/gc/src/incremental-marking-inl.h
/branches/experimental/gc/src/incremental-marking.cc
/branches/experimental/gc/src/incremental-marking.h
/branches/experimental/gc/src/mark-compact.cc
/branches/experimental/gc/src/mark-compact.h
/branches/experimental/gc/src/objects-inl.h
/branches/experimental/gc/src/serialize.cc
/branches/experimental/gc/src/spaces.cc
/branches/experimental/gc/src/spaces.h
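For readers skimming the diff: while incremental marking runs with compaction enabled, the write barrier records the addresses of slots that point into evacuation-candidate pages; after objects are moved, each recorded slot is repaired through the forwarding address left in the old copy. A minimal sketch of that flow, with hypothetical types rather than the V8 classes touched below:

    // Minimal sketch of the slots-recording scheme (hypothetical types).
    #include <cstddef>
    #include <vector>

    struct HeapObject {
      HeapObject* forwarding;  // set to the new copy once the object moves
    };
    typedef HeapObject** ObjectSlot;

    class SlotsBuffer {
     public:
      // Write barrier: remember a slot whose target sits on an
      // evacuation-candidate page.
      void Add(ObjectSlot slot) { slots_.push_back(slot); }

      // After evacuation: point every recorded slot at the new copy.
      void Update() {
        for (size_t i = 0; i < slots_.size(); ++i) {
          HeapObject* target = *slots_[i];
          if (target != NULL && target->forwarding != NULL) {
            *slots_[i] = target->forwarding;
          }
        }
      }

     private:
      std::vector<ObjectSlot> slots_;  // unfiltered; can grow without bound
    };

As the log message notes, the buffer in this patch is still a single unfiltered, non-canonicalized array, so the same slot may be recorded and updated more than once.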
=======================================
--- /branches/experimental/gc/src/assembler.cc Tue May 24 05:03:26 2011
+++ /branches/experimental/gc/src/assembler.cc Wed Jul 6 13:56:48 2011
@@ -743,6 +743,14 @@
isolate,
FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}
+
+
+ExternalReference ExternalReference::
+ incremental_evacuation_record_write_function(Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate,
+ FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
+}
ExternalReference ExternalReference::
=======================================
--- /branches/experimental/gc/src/assembler.h Fri Jun 24 05:46:32 2011
+++ /branches/experimental/gc/src/assembler.h Wed Jul 6 13:56:48 2011
@@ -564,6 +564,8 @@
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
+ static ExternalReference incremental_evacuation_record_write_function(
+ Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference perform_gc_function(Isolate* isolate);
=======================================
--- /branches/experimental/gc/src/heap.cc Tue Jun 28 06:42:01 2011
+++ /branches/experimental/gc/src/heap.cc Wed Jul 6 13:56:48 2011
@@ -4306,13 +4306,23 @@
// the store buffer. These pages are scanned to find pointers that point
// to the new space. In that case we may hit newly promoted objects and
// fix the pointers before the promotion queue gets to them. Thus the 'if'.
- if (Heap::InFromSpace(object)) {
- callback(reinterpret_cast<HeapObject**>(slot), HeapObject::cast(object));
- if (InNewSpace(*slot)) {
- ASSERT(Heap::InToSpace(*slot));
- ASSERT((*slot)->IsHeapObject());
- store_buffer_.EnterDirectlyIntoStoreBuffer(
- reinterpret_cast<Address>(slot));
+ if (object->IsHeapObject()) {
+ if (Heap::InFromSpace(object)) {
+ callback(reinterpret_cast<HeapObject**>(slot),
+ HeapObject::cast(object));
+ Object* new_object = *slot;
+ if (InNewSpace(new_object)) {
+ ASSERT(Heap::InToSpace(new_object));
+ ASSERT(new_object->IsHeapObject());
+ store_buffer_.EnterDirectlyIntoStoreBuffer(
+ reinterpret_cast<Address>(slot));
+ }
+ ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+ } else if (MarkCompactCollector::IsOnEvacuationCandidate(object)) {
+ // We are not collecting slots on new space objects during mutation
+ // thus we have to scan for pointers to evacuation candidates when we
+ // promote objects.
+ mark_compact_collector()->RecordSlot(slot, slot, object);
}
}
slot_address += kPointerSize;
=======================================
--- /branches/experimental/gc/src/ia32/assembler-ia32-inl.h Tue May 24 05:03:26 2011
+++ /branches/experimental/gc/src/ia32/assembler-ia32-inl.h Wed Jul 6 13:56:48 2011
@@ -93,8 +93,10 @@
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
if (code != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
+
+ // TODO(gc) We are not compacting code space.
code->GetHeap()->incremental_marking()->RecordWrite(
- code, HeapObject::cast(target_code));
+ code, NULL, HeapObject::cast(target_code));
}
}
@@ -123,7 +125,7 @@
CPU::FlushICache(pc_, sizeof(Address));
if (code != NULL && target->IsHeapObject()) {
code->GetHeap()->incremental_marking()->RecordWrite(
- code, HeapObject::cast(target));
+ code, &Memory::Object_at(pc_), HeapObject::cast(target));
}
}
@@ -157,7 +159,9 @@
Memory::Address_at(pc_) = address;
CPU::FlushICache(pc_, sizeof(Address));
if (code != NULL) {
- code->GetHeap()->incremental_marking()->RecordWrite(code, cell);
+ // TODO(gc) We are not compacting cell space.
+ code->GetHeap()->incremental_marking()->RecordWrite(
+ code, NULL, cell);
}
}
=======================================
--- /branches/experimental/gc/src/ia32/code-stubs-ia32.cc Fri Jun 10 14:58:26 2011
+++ /branches/experimental/gc/src/ia32/code-stubs-ia32.cc Wed Jul 6 13:56:48 2011
@@ -6235,18 +6235,15 @@
// we keep the GC informed. The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
- Label skip_non_incremental_part;
-
- // The first instruction is generated as a label so as to get the offset
- // fixed up correctly by the bind(Label*) call. We patch it back and forth
- // between a 2-byte compare instruction (a nop in this position) and the real
- // branch when we start and stop incremental heap marking.
- __ jmp(&skip_non_incremental_part, Label::kNear);
- if (!masm->isolate()->heap()->incremental_marking()->IsMarking()) {
- ASSERT(masm->byte_at(masm->pc_offset() - 2) ==
- kSkipNonIncrementalPartInstruction);
- masm->set_byte_at(masm->pc_offset() - 2, kTwoByteNopInstruction);
- }
+ Label skip_to_incremental_noncompacting;
+ Label skip_to_incremental_compacting;
+
+ // The first two instructions are generated with labels so as to get the
+ // offset fixed up correctly by the bind(Label*) call. We patch it back and
+ // forth between a 2-byte compare instruction (a nop in this position) and the
+ // real branch when we start and stop incremental heap marking.
+ __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+ __ jmp(&skip_to_incremental_compacting, Label::kFar);
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
__ RememberedSetHelper(
@@ -6255,12 +6252,25 @@
__ ret(0);
}
- __ bind(&skip_non_incremental_part);
- GenerateIncremental(masm);
+ __ bind(&skip_to_incremental_noncompacting);
+ GenerateIncremental(masm, INCREMENTAL);
+
+ __ bind(&skip_to_incremental_compacting);
+ GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+ if (!masm->isolate()->heap()->incremental_marking()->IsMarking()) {
+ ASSERT(masm->byte_at(0) == kTwoByteJumpInstruction);
+ masm->set_byte_at(0, kTwoByteNopInstruction);
+ }
+
+ if (!masm->isolate()->heap()->incremental_marking()->IsMarking()) {
+ ASSERT(masm->byte_at(2) == kFiveByteJumpInstruction);
+ masm->set_byte_at(2, kFiveByteNopInstruction);
+ }
}
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm) {
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
@@ -6273,15 +6283,17 @@
__ CheckPageFlag(regs_.object(),
regs_.scratch0(),
- MemoryChunk::SCAN_ON_SCAVENGE,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
not_zero,
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
CheckNeedsToInformIncrementalMarker(
- masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker);
- InformIncrementalMarker(masm);
+ masm,
+ kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
+ mode);
+ InformIncrementalMarker(masm, mode);
regs_.Restore(masm);
__ RememberedSetHelper(
address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
@@ -6290,36 +6302,55 @@
}
CheckNeedsToInformIncrementalMarker(
- masm, kReturnOnNoNeedToInformIncrementalMarker);
- InformIncrementalMarker(masm);
+ masm,
+ kReturnOnNoNeedToInformIncrementalMarker,
+ mode);
+ InformIncrementalMarker(masm, mode);
regs_.Restore(masm);
__ ret(0);
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+void RecordWriteStub::InformIncrementalMarker(
+ MacroAssembler* masm,
+ RecordWriteStub::Mode mode) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
__ mov(Operand(esp, 0 * kPointerSize), regs_.object());
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
- __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value.
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value.
+ }
__ mov(Operand(esp, 2 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
+
// TODO(gc): Create a fast version of this C function that does not duplicate
// the checks done in the stub.
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ CallCFunction(
+ ExternalReference::incremental_evacuation_record_write_function(
+ masm->isolate()),
+ argument_count);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
+ }
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
- RecordWriteStub::OnNoNeedToInformIncrementalMarker on_no_need) {
- Label object_is_black, need_incremental;
+ RecordWriteStub::OnNoNeedToInformIncrementalMarker on_no_need,
+ RecordWriteStub::Mode mode) {
+ Label object_is_black, need_incremental, need_incremental_pop_object;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
@@ -6342,16 +6373,38 @@
// Get the value from the slot.
__ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+ if (mode == INCREMENTAL_COMPACTION) {
+ Label ensure_not_white;
+
+ __ CheckPageFlag(regs_.scratch0(), // Contains value.
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateMask,
+ zero,
+ &ensure_not_white,
+ Label::kNear);
+
+ __ CheckPageFlag(regs_.object(),
+ regs_.scratch1(), // Scratch.
+ MemoryChunk::kEvacuationCandidateOrNewSpaceMask,
+ not_zero,
+ &ensure_not_white,
+ Label::kNear);
+
+ __ jmp(&need_incremental);
+
+ __ bind(&ensure_not_white);
+ }
+
// We need an extra register for this, so we push the object register
// temporarily.
__ push(regs_.object());
__ EnsureNotWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
- &need_incremental,
+ &need_incremental_pop_object,
Label::kNear);
-
__ pop(regs_.object());
+
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(
@@ -6360,9 +6413,11 @@
__ ret(0);
}
- __ bind(&need_incremental);
+ __ bind(&need_incremental_pop_object);
__ pop(regs_.object());
+ __ bind(&need_incremental);
+
// Fall through when we need to inform the incremental marker.
}
=======================================
--- /branches/experimental/gc/src/ia32/code-stubs-ia32.h Fri Jun 10 14:58:26 2011
+++ /branches/experimental/gc/src/ia32/code-stubs-ia32.h Wed Jul 6 13:56:48 2011
@@ -544,22 +544,56 @@
value) { // One scratch reg.
}
- static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
- static const byte kSkipNonIncrementalPartInstruction = 0xeb; // Jmp #imm8.
-
- static byte GetInstruction(bool enable) {
- // Can't use ternary operator here, because gcc makes an undefined
- // reference to a static const int.
- if (enable) {
- return kSkipNonIncrementalPartInstruction;
- } else {
- return kTwoByteNopInstruction;
- }
+ enum Mode {
+ STORE_BUFFER_ONLY,
+ INCREMENTAL,
+ INCREMENTAL_COMPACTION
+ };
+
+ static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
+ static const byte kTwoByteJumpInstruction = 0xeb; // Jmp #imm8.
+
+ static const byte kFiveByteNopInstruction = 0x3d; // Cmpl eax, #imm32.
+ static const byte kFiveByteJumpInstruction = 0xe9; // Jmp #imm32.
+
+ static Mode GetMode(Code* stub) {
+ byte first_instruction = stub->instruction_start()[0];
+ byte second_instruction = stub->instruction_start()[2];
+
+ if (first_instruction == kTwoByteJumpInstruction) {
+ return INCREMENTAL;
+ }
+
+ ASSERT(first_instruction == kTwoByteNopInstruction);
+
+ if (second_instruction == kFiveByteJumpInstruction) {
+ return INCREMENTAL_COMPACTION;
+ }
+
+ ASSERT(second_instruction == kFiveByteNopInstruction);
+
+ return STORE_BUFFER_ONLY;
}
- static void Patch(Code* stub, bool enable) {
- ASSERT(*stub->instruction_start() == GetInstruction(!enable));
- *stub->instruction_start() = GetInstruction(enable);
+ static void Patch(Code* stub, Mode mode) {
+ switch (mode) {
+ case STORE_BUFFER_ONLY:
+ ASSERT(GetMode(stub) == INCREMENTAL ||
+ GetMode(stub) == INCREMENTAL_COMPACTION);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteNopInstruction;
+ break;
+ case INCREMENTAL:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteJumpInstruction;
+ break;
+ case INCREMENTAL_COMPACTION:
+ ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+ stub->instruction_start()[0] = kTwoByteNopInstruction;
+ stub->instruction_start()[2] = kFiveByteJumpInstruction;
+ break;
+ }
+ ASSERT(GetMode(stub) == mode);
}
private:
@@ -708,14 +742,15 @@
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
- };
-
+ }
+;
void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
- OnNoNeedToInformIncrementalMarker on_no_need);
- void InformIncrementalMarker(MacroAssembler* masm);
+ OnNoNeedToInformIncrementalMarker on_no_need,
+ Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
Major MajorKey() { return RecordWrite; }
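A note on the patching scheme in GetMode/Patch above: "cmp al, imm8" (0x3c) has the same two-byte width as a short jmp (0xeb), and "cmp eax, imm32" (0x3d) has the same five-byte width as a near jmp (0xe9), so rewriting a single opcode byte at offset 0 or 2 toggles the stub between its three modes without relocating any code. A standalone sketch of the mode decoding, mirroring GetMode (a reimplementation for illustration, not V8 code):

    #include <cassert>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // 'code' points at the stub's first instruction byte.
    Mode DecodeMode(const unsigned char* code) {
      if (code[0] == 0xeb) return INCREMENTAL;             // short jmp live
      assert(code[0] == 0x3c);                             // 2-byte nop (cmp al)
      if (code[2] == 0xe9) return INCREMENTAL_COMPACTION;  // near jmp live
      assert(code[2] == 0x3d);                             // 5-byte nop (cmp eax)
      return STORE_BUFFER_ONLY;
    }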
=======================================
--- /branches/experimental/gc/src/ia32/deoptimizer-ia32.cc Mon May 9 14:11:15 2011
+++ /branches/experimental/gc/src/ia32/deoptimizer-ia32.cc Wed Jul 6 13:56:48 2011
@@ -252,8 +252,10 @@
*(call_target_address - 2) = 0x90; // nop
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
- // TODO(gc) ISOLATES MERGE
- HEAP->incremental_marking()->RecordWrite(unoptimized_code, replacement_code);
+
+ // TODO(gc) we are not compacting code space.
+ unoptimized_code->GetHeap()->incremental_marking()->RecordWrite(
+ unoptimized_code, NULL, replacement_code);
}
=======================================
--- /branches/experimental/gc/src/ia32/full-codegen-ia32.cc Fri Jun 10 14:58:26 2011
+++ /branches/experimental/gc/src/ia32/full-codegen-ia32.cc Wed Jul 6 13:56:48 2011
@@ -3199,7 +3199,7 @@
Label no_remembered_set;
__ CheckPageFlag(elements,
temp,
- MemoryChunk::SCAN_ON_SCAVENGE,
+ 1 << MemoryChunk::SCAN_ON_SCAVENGE,
not_zero,
&no_remembered_set,
Label::kNear);
=======================================
--- /branches/experimental/gc/src/ia32/macro-assembler-ia32.cc Fri Jun 24 05:46:32 2011
+++ /branches/experimental/gc/src/ia32/macro-assembler-ia32.cc Wed Jul 6 13:56:48 2011
@@ -75,44 +75,6 @@
static_cast<uint8_t>(mask));
j(cc, condition_met, condition_met_distance);
}
-
-
-void MacroAssembler::IncrementalMarkingRecordWriteHelper(
- Register object,
- Register value,
- Register address) {
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
- ASSERT(!value.is(object));
-
- bool preserve[Register::kNumRegisters];
-
- for (int i = 0; i < Register::kNumRegisters; i++) preserve[i] = false;
-
- preserve[eax.code()] = true;
- preserve[ecx.code()] = true;
- preserve[edx.code()] = true;
- preserve[object.code()] = true;
- preserve[value.code()] = true;
- preserve[address.code()] = true;
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- if (preserve[i]) push(Register::from_code(i));
- }
-
- // TODO(gc) we are assuming that xmm registers are not modified by
- // the C function we are calling.
- PrepareCallCFunction(2, address);
- mov(Operand(esp, 0 * kPointerSize), object);
- mov(Operand(esp, 1 * kPointerSize), value);
- CallCFunction(
- ExternalReference::incremental_marking_record_write_function(isolate()),
- 2);
-
- for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
- if (preserve[i]) pop(Register::from_code(i));
- }
-}
void MacroAssembler::RememberedSetHelper(
@@ -314,13 +276,13 @@
CheckPageFlag(value,
value, // Used as scratch.
- MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING,
+ MemoryChunk::kPointersToHereAreInterestingMask,
zero,
&done,
Label::kNear);
CheckPageFlag(object,
value, // Used as scratch.
- MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING,
+ MemoryChunk::kPointersFromHereAreInterestingMask,
zero,
&done,
Label::kNear);
@@ -2270,7 +2232,7 @@
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch,
- MemoryChunk::MemoryChunkFlags flag,
+ int mask,
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance) {
@@ -2281,11 +2243,11 @@
mov(scratch, Immediate(~Page::kPageAlignmentMask));
and_(scratch, Operand(object));
}
- if (flag < kBitsPerByte) {
+ if (mask < (1 << kBitsPerByte)) {
test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(1u << flag));
+ static_cast<uint8_t>(mask));
} else {
- test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(1 << flag));
+ test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
}
j(cc, condition_met, condition_met_distance);
}
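The CheckPageFlag change above replaces the single MemoryChunk::MemoryChunkFlags argument with an int mask, so callers pass 1 << flag or one of the precomputed multi-flag masks added to spaces.h at the end of this patch; masks that fit in the low byte keep the short test_b encoding, while wider masks fall back to a full 32-bit test. A hedged usage sketch (register and label names are illustrative):

    // Branch to 'slow' if the page holding 'object' is an evacuation
    // candidate or lies in new space -- three flags, one test.
    __ CheckPageFlag(object,
                     scratch,  // clobbered
                     MemoryChunk::kEvacuationCandidateOrNewSpaceMask,
                     not_zero,
                     &slow,
                     Label::kNear);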
=======================================
--- /branches/experimental/gc/src/ia32/macro-assembler-ia32.h Fri Jun 24 05:46:32 2011
+++ /branches/experimental/gc/src/ia32/macro-assembler-ia32.h Wed Jul 6 13:56:48 2011
@@ -68,11 +68,6 @@
// ---------------------------------------------------------------------------
// GC Support
-
- void IncrementalMarkingRecordWriteHelper(Register object,
- Register value,
- Register address);
-
enum RememberedSetFinalAction {
kReturnAtEnd,
kFallThroughAtEnd
@@ -88,7 +83,7 @@
void CheckPageFlag(Register object,
Register scratch,
- MemoryChunk::MemoryChunkFlags flag,
+ int mask,
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
=======================================
--- /branches/experimental/gc/src/incremental-marking-inl.h Wed May 18 08:02:58 2011
+++ /branches/experimental/gc/src/incremental-marking-inl.h Wed Jul 6 13:56:48 2011
@@ -34,7 +34,9 @@
namespace internal {
-void IncrementalMarking::RecordWrite(HeapObject* obj, Object* value) {
+void IncrementalMarking::RecordWrite(HeapObject* obj,
+ Object** slot,
+ Object* value) {
if (IsMarking() && value->IsHeapObject()) {
MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
if (Marking::IsWhite(value_bit)) {
@@ -43,6 +45,18 @@
BlackToGreyAndUnshift(obj, obj_bit);
RestartIfNotMarking();
}
+
+ // Object is either grey or white; it will be scanned if it survives.
+ return;
+ }
+
+ if (is_compacting_ && slot != NULL) {
+ MarkBit obj_bit = Marking::MarkBitFrom(obj);
+ if (Marking::IsBlack(obj_bit)) {
+ // Object is not going to be rescanned; we need to record the slot.
+ heap_->mark_compact_collector()->RecordSlot(
+ HeapObject::RawField(obj, 0), slot, value);
+ }
}
}
}
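Spelled out, the barrier above now has two duties. If the written value is still white, it is greyed and a black holder is reverted to grey, so the holder will be rescanned and its slots picked up then; only when the holder is black and stays black must the slot itself be remembered for compaction. A condensed restatement (a paraphrase of the code above, not a drop-in replacement):

    void RecordWrite(HeapObject* obj, Object** slot, Object* value) {
      if (IsMarking() && value->IsHeapObject()) {
        if (IsWhite(value)) {
          WhiteToGreyAndPush(value);
          if (IsBlack(obj)) BlackToGreyAndUnshift(obj);
          return;  // obj is grey or white now; a rescan will visit its slots.
        }
        if (is_compacting_ && slot != NULL && IsBlack(obj)) {
          RecordSlot(slot);  // black obj won't be rescanned; remember the slot.
        }
      }
    }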
@@ -50,6 +64,8 @@
void IncrementalMarking::RecordWriteOf(HeapObject* value) {
if (IsMarking()) {
+ ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(value));
+
MarkBit value_bit = Marking::MarkBitFrom(value);
if (Marking::IsWhite(value_bit)) {
WhiteToGreyAndPush(value, value_bit);
=======================================
--- /branches/experimental/gc/src/incremental-marking.cc Tue Jun 28 06:42:01 2011
+++ /branches/experimental/gc/src/incremental-marking.cc Wed Jul 6 13:56:48 2011
@@ -57,7 +57,20 @@
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
Object* value,
Isolate* isolate) {
- isolate->heap()->incremental_marking()->RecordWrite(obj, value);
+ ASSERT(obj->IsHeapObject());
+
+ IncrementalMarking* marking = isolate->heap()->incremental_marking();
+ ASSERT(!marking->is_compacting_);
+ marking->RecordWrite(obj, NULL, value);
+}
+
+
+void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
+ Object** slot,
+ Isolate* isolate) {
+ IncrementalMarking* marking = isolate->heap()->incremental_marking();
+ ASSERT(marking->is_compacting_);
+ marking->RecordWrite(obj, slot, *slot);
}
@@ -70,22 +83,24 @@
}
void VisitPointer(Object** p) {
- MarkObjectByPointer(p);
+ MarkObjectByPointer(p, p);
}
void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(start, p);
}
private:
// Mark object pointed to by p.
- INLINE(void MarkObjectByPointer(Object** p)) {
+ INLINE(void MarkObjectByPointer(Object** anchor, Object** p)) {
Object* obj = *p;
// Since we can be sure that the object is not tagged as a failure we can
// inline a slightly more efficient tag check here than IsHeapObject() would
// produce.
if (obj->NonFailureIsHeapObject()) {
HeapObject* heap_object = HeapObject::cast(obj);
+
+ heap_->mark_compact_collector()->RecordSlot(anchor, p, obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
@@ -285,14 +300,14 @@
static const intptr_t kActivationThreshold = 0;
#endif
- // TODO(gc) ISOLATES MERGE
return FLAG_incremental_marking &&
heap_->PromotedSpaceSize() > kActivationThreshold;
}
-static void PatchIncrementalMarkingRecordWriteStubs(bool enable) {
- NumberDictionary* stubs = HEAP->code_stubs();
+static void PatchIncrementalMarkingRecordWriteStubs(
+ Heap* heap, RecordWriteStub::Mode mode) {
+ NumberDictionary* stubs = heap->code_stubs();
int capacity = stubs->Capacity();
for (int i = 0; i < capacity; i++) {
@@ -304,7 +319,7 @@
CodeStub::RecordWrite) {
Object* e = stubs->ValueAt(i);
if (e->IsCode()) {
- RecordWriteStub::Patch(Code::cast(e), enable);
+ RecordWriteStub::Patch(Code::cast(e), mode);
}
}
}
@@ -350,10 +365,16 @@
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start marking\n");
}
+
+ is_compacting_ = !FLAG_never_compact &&
+ heap_->mark_compact_collector()->StartCompaction();
state_ = MARKING;
- PatchIncrementalMarkingRecordWriteStubs(true);
+ RecordWriteStub::Mode mode = is_compacting_ ?
+ RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
+
+ PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
EnsureMarkingDequeIsCommitted();
@@ -404,6 +425,7 @@
while (current != limit) {
HeapObject* obj = array[current];
+ ASSERT(obj->IsHeapObject());
current = ((current + 1) & mask);
if (heap_->InNewSpace(obj)) {
MapWord map_word = obj->map_word();
@@ -473,21 +495,25 @@
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
if (IsMarking()) {
- PatchIncrementalMarkingRecordWriteStubs(false);
+ PatchIncrementalMarkingRecordWriteStubs(heap_,
+ RecordWriteStub::STORE_BUFFER_ONLY);
DeactivateIncrementalWriteBarrier();
}
heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
state_ = STOPPED;
+ is_compacting_ = false;
}
void IncrementalMarking::Finalize() {
Hurry();
state_ = STOPPED;
+ is_compacting_ = false;
heap_->new_space()->LowerInlineAllocationLimit(0);
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
- PatchIncrementalMarkingRecordWriteStubs(false);
+ PatchIncrementalMarkingRecordWriteStubs(heap_,
+ RecordWriteStub::STORE_BUFFER_ONLY);
DeactivateIncrementalWriteBarrier();
ASSERT(marking_deque_.IsEmpty());
heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
@@ -505,8 +531,7 @@
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Complete (normal).\n");
}
- // TODO(gc) ISOLATES
- ISOLATE->stack_guard()->RequestGC();
+ heap_->isolate()->stack_guard()->RequestGC();
}
=======================================
--- /branches/experimental/gc/src/incremental-marking.h Fri Jun 24 05:46:32 2011
+++ /branches/experimental/gc/src/incremental-marking.h Wed Jul 6 13:56:48 2011
@@ -110,7 +110,11 @@
Object* value,
Isolate* isolate);
- inline void RecordWrite(HeapObject* obj, Object* value);
+ static void RecordWriteForEvacuationFromCode(HeapObject* obj,
+ Object** slot,
+ Isolate* isolate);
+
+ inline void RecordWrite(HeapObject* obj, Object** slot, Object* value);
inline void RecordWriteOf(HeapObject* value);
inline void RecordWrites(HeapObject* obj);
@@ -193,6 +197,7 @@
Heap* heap_;
State state_;
+ bool is_compacting_;
VirtualMemory* marking_deque_memory_;
MarkingDeque marking_deque_;
=======================================
--- /branches/experimental/gc/src/mark-compact.cc Fri Jun 24 05:46:32 2011
+++ /branches/experimental/gc/src/mark-compact.cc Wed Jul 6 13:56:48 2011
@@ -57,6 +57,8 @@
#ifdef DEBUG
state_(IDLE),
#endif
+ sweep_precisely_(false),
+ compacting_(false),
tracer_(NULL),
#ifdef DEBUG
live_young_objects_size_(0),
@@ -154,7 +156,6 @@
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
if (MarkCompactCollector::IsOnEvacuationCandidate(object)) {
- HEAP->TracePathToObject(source_);
CHECK(false);
}
}
@@ -223,6 +224,28 @@
p->MarkEvacuationCandidate();
evacuation_candidates_.Add(p);
}
+
+
+bool MarkCompactCollector::StartCompaction() {
+ // Don't start compaction if we are in the middle of incremental
+ // marking cycle. We did not collect any slots.
+ if (!compacting_ && !heap_->incremental_marking()->IsMarking()) {
+ ASSERT(evacuation_candidates_.length() == 0);
+
+ // TODO(gc) Shrink slots buffer when we receive low memory notification.
+ slots_buffer_.Clear();
+
+ CollectEvacuationCandidates(heap()->old_pointer_space());
+ CollectEvacuationCandidates(heap()->old_data_space());
+
+ heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
+ heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
+
+ compacting_ = evacuation_candidates_.length() > 0;
+ }
+
+ return compacting_;
+}
void MarkCompactCollector::CollectGarbage() {
@@ -370,18 +393,6 @@
}
}
}
-
-
-static void ClearEvacuationCandidates(PagedSpace* space) {
- ASSERT(space->identity() == OLD_POINTER_SPACE ||
- space->identity() == OLD_DATA_SPACE);
-
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- p->ClearEvacuationCandidate();
- }
-}
void MarkCompactCollector::Prepare(GCTracer* tracer) {
@@ -412,18 +423,7 @@
}
#endif
- if (!FLAG_never_compact) {
- slots_buffer_.Clear();
- evacuation_candidates_.Rewind(0);
-
- if (!heap()->incremental_marking()->IsMarking()) {
- CollectEvacuationCandidates(heap()->old_pointer_space());
- CollectEvacuationCandidates(heap()->old_data_space());
- } else {
- ClearEvacuationCandidates(heap()->old_pointer_space());
- ClearEvacuationCandidates(heap()->old_data_space());
- }
- }
+ if (!FLAG_never_compact) StartCompaction();
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
@@ -2268,6 +2268,8 @@
void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
AlwaysAllocateScope always_allocate;
+ ASSERT(p->IsEvacuationCandidate() && !p->WasEvacuated());
+
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
MarkBit::CellType* cells = p->markbits()->cells();
@@ -2320,6 +2322,21 @@
}
+class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+ virtual Object* RetainAs(Object* object) {
+ if (object->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ MapWord map_word = heap_object->map_word();
+ if (map_word.IsForwardingAddress()) {
+ return map_word.ToForwardingAddress();
+ }
+ }
+ return object;
+ }
+};
+
+
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuateNewSpace();
EvacuatePages();
@@ -2349,7 +2366,7 @@
&Heap::ScavengeStoreBufferCallback);
heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
}
- slots_buffer_.Iterate(&updating_visitor);
+ slots_buffer_.Update();
// Update pointers from cells.
HeapObjectIterator cell_iterator(heap_->cell_space());
@@ -2376,6 +2393,9 @@
// Update JSFunction pointers from the runtime profiler.
heap_->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+ EvacuationWeakObjectRetainer evacuation_object_retainer;
+ heap()->ProcessWeakReferences(&evacuation_object_retainer);
+
#ifdef DEBUG
if (FLAG_verify_heap) {
VerifyEvacuation(heap_);
@@ -2383,16 +2403,19 @@
#endif
int npages = evacuation_candidates_.length();
+ ASSERT(compacting_ == (npages > 0));
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
+ ASSERT(p->IsEvacuationCandidate() && !p->WasEvacuated());
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
p->set_scan_on_scavenge(false);
-
- // We are not clearing evacuation candidate flag here
- // because it is required to notify lazy sweeper to skip
- // these pages.
- }
+ p->ClearEvacuationCandidate();
+ p->SetFlag(MemoryChunk::EVACUATED);
+ p->ClearFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY);
+ }
+ evacuation_candidates_.Rewind(0);
+ compacting_ = false;
}
@@ -2720,8 +2743,7 @@
// memory that can be ignored when scanning. Dead objects other than free
// spaces will not contain the free space map.
int MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
- // We might start advancing sweeper before evacuation happened.
- if (p->IsEvacuationCandidate()) return 0;
+ ASSERT(!p->IsEvacuationCandidate() && !p->WasEvacuated());
int freed_bytes = 0;
@@ -2809,6 +2831,7 @@
// over it. Map space is swept precisely, because it is not compacted.
static void SweepPrecisely(PagedSpace* space,
Page* p) {
+ ASSERT(!p->IsEvacuationCandidate() && !p->WasEvacuated());
MarkBit::CellType* cells = p->markbits()->cells();
p->ClearFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY);
@@ -3064,19 +3087,34 @@
}
-void SlotsBuffer::Iterate(ObjectVisitor* visitor) {
+static inline void UpdateSlot(Object** slot) {
+ Object* obj = *slot;
+ if (!obj->IsHeapObject()) return;
+
+ HeapObject* heap_obj = HeapObject::cast(obj);
+
+ MapWord map_word = heap_obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ ASSERT(MarkCompactCollector::IsOnEvacuationCandidate(*slot));
+ *slot = map_word.ToForwardingAddress();
+ ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot));
+ }
+}
+
+
+void SlotsBuffer::Update() {
if (buffer_idx_ < 0) return;
for (int buffer_index = 0; buffer_index < buffer_idx_; ++buffer_index) {
ObjectSlot* buffer = buffers_[buffer_index];
for (int slot_idx = 0; slot_idx < kBufferSize; ++slot_idx) {
- visitor->VisitPointer(buffer[slot_idx]);
+ UpdateSlot(buffer[slot_idx]);
}
}
ObjectSlot* last_buffer = buffers_[buffer_idx_];
for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- visitor->VisitPointer(last_buffer[slot_idx]);
+ UpdateSlot(last_buffer[slot_idx]);
}
}
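UpdateSlot above relies on the evacuation convention that a moved object's old copy has its map word overwritten with a forwarding pointer to the new location, so a stale slot is repaired with a single load. A toy model of that convention (hypothetical one-bit tagging; V8's real map-word encoding differs):

    #include <cstdint>

    struct Obj {
      uintptr_t map_word;  // a real map pointer, or a tagged forwarding address

      bool IsForwarded() const { return (map_word & 1) != 0; }
      Obj* ForwardingAddress() const {
        return reinterpret_cast<Obj*>(map_word & ~static_cast<uintptr_t>(1));
      }
    };

    void UpdateToySlot(Obj** slot) {
      Obj* target = *slot;
      if (target->IsForwarded()) {
        *slot = target->ForwardingAddress();  // redirect to the new copy
      }
    }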
=======================================
--- /branches/experimental/gc/src/mark-compact.h Fri Jun 24 05:46:32 2011
+++ /branches/experimental/gc/src/mark-compact.h Wed Jul 6 13:56:48 2011
@@ -276,7 +276,7 @@
void Clear();
void Add(ObjectSlot slot);
- void Iterate(ObjectVisitor* visitor);
+ void Update();
void Report();
private:
@@ -344,6 +344,8 @@
// Performs a global garbage collection.
void CollectGarbage();
+ bool StartCompaction();
+
// During a full GC, there is a stack-allocated GCTracer that is used for
// bookkeeping information. Return a pointer to that tracer.
GCTracer* tracer() { return tracer_; }
@@ -427,6 +429,10 @@
// heap.
bool sweep_precisely_;
+ // True if we are collecting slots to perform evacuation from evacuation
+ // candidates.
+ bool compacting_;
+
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
GCTracer* tracer_;
=======================================
--- /branches/experimental/gc/src/objects-inl.h Tue Jun 21 04:44:11 2011
+++ /branches/experimental/gc/src/objects-inl.h Wed Jul 6 13:56:48 2011
@@ -865,12 +865,12 @@
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-#define WRITE_BARRIER(heap, object, offset, value) \
- heap->incremental_marking()->RecordWrite(object, value); \
- if (HEAP->InNewSpace(value)) { \
- heap->RecordWrite(object->address(), offset); \
- }
-// TODO(gc) !!!
+#define WRITE_BARRIER(heap, object, offset, value) \
+ heap->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ if (heap->InNewSpace(value)) { \
+ heap->RecordWrite(object->address(), offset); \
+ }
#ifndef V8_TARGET_ARCH_MIPS
#define READ_DOUBLE_FIELD(p, offset) \
@@ -1118,7 +1118,10 @@
void HeapObject::set_map(Map* value) {
set_map_word(MapWord::FromMap(value));
if (value != NULL) {
- value->GetHeap()->incremental_marking()->RecordWrite(this, value);
+ // We are passing NULL as a slot because maps can never be on an evacuation
+ // candidate.
+ // TODO(gc) Maps are compacted by a separate (non-evacuation) algorithm.
+ value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
}
}
@@ -1252,8 +1255,8 @@
// The write barrier is not used for global property cells.
ASSERT(!val->IsJSGlobalPropertyCell());
WRITE_FIELD(this, kValueOffset, val);
- // TODO(gc) ISOLATES MERGE cell should heap accessor.
- GetHeap()->incremental_marking()->RecordWrite(this, val);
+ GetHeap()->incremental_marking()->RecordWrite(
+ this, HeapObject::RawField(this, kValueOffset), val);
}
@@ -1494,7 +1497,10 @@
ASSERT(index >= 0 && index < array->length());
ASSERT(!HEAP->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
- array->GetHeap()->incremental_marking()->RecordWrite(array, value);
+ array->GetHeap()->incremental_marking()->RecordWrite(
+ array,
+ HeapObject::RawField(array, kHeaderSize + index * kPointerSize),
+ value);
}
@@ -2906,10 +2912,10 @@
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
-ACCESSORS_GCSAFE(JSFunction,
- next_function_link,
- Object,
- kNextFunctionLinkOffset)
+ACCESSORS(JSFunction,
+ next_function_link,
+ Object,
+ kNextFunctionLinkOffset)
ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -3332,7 +3338,10 @@
ASSERT(!HEAP->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
- GetHeap()->incremental_marking()->RecordWrite(this, value);
+ GetHeap()->incremental_marking()->RecordWrite(
+ this,
+ HeapObject::RawField(this, kCodeEntryOffset),
+ value);
}
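All of the objects-inl.h changes follow one pattern: each barrier call now hands RecordWrite the exact slot address via HeapObject::RawField so the compaction path can record it, or NULL where the target space is never compacted (maps, cells, code). Roughly how a field setter expands under the new WRITE_BARRIER (hypothetical field and offset; the shape follows the macro above):

    void set_foo(Heap* heap, HeapObject* object, Object* value) {
      WRITE_FIELD(object, kFooOffset, value);
      heap->incremental_marking()->RecordWrite(
          object, HeapObject::RawField(object, kFooOffset), value);
      if (heap->InNewSpace(value)) {
        heap->RecordWrite(object->address(), kFooOffset);  // store buffer path
      }
    }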
=======================================
--- /branches/experimental/gc/src/serialize.cc Tue Jun 21 08:27:34 2011
+++ /branches/experimental/gc/src/serialize.cc Wed Jul 6 13:56:48 2011
@@ -351,6 +351,12 @@
RUNTIME_ENTRY,
6,
"StoreBuffer::StoreBufferOverflow");
+ Add(ExternalReference::
+ incremental_evacuation_record_write_function(isolate).address(),
+ RUNTIME_ENTRY,
+ 7,
+ "IncrementalMarking::RecordWrite");
+
// Miscellaneous
=======================================
--- /branches/experimental/gc/src/spaces.cc Wed Jun 29 04:36:32 2011
+++ /branches/experimental/gc/src/spaces.cc Wed Jul 6 13:56:48 2011
@@ -1628,7 +1628,6 @@
// If the block is too small (eg, one or two words), to hold both a size
// field and a next pointer, we give it a filler map that gives it the
// correct size.
- // TODO(gc) ISOLATES MERGE cleanup HEAP macro usage
if (size_in_bytes > FreeSpace::kHeaderSize) {
set_map(heap->raw_unchecked_free_space_map());
// Can't use FreeSpace::cast because it fails during deserialization.
@@ -1747,6 +1746,75 @@
ASSERT(IsVeryLong() || available_ == SumFreeLists());
return 0;
}
+
+
+FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
+ FreeListNode* node = *list;
+
+ if (node == NULL) return NULL;
+
+ while (node != NULL &&
+ Page::FromAddress(node->address())->IsEvacuationCandidate()) {
+ available_ -= node->Size();
+ node = node->next();
+ }
+
+ if (node != NULL) {
+ *node_size = node->Size();
+ *list = node->next();
+ } else {
+ *list = NULL;
+ }
+
+ return node;
+}
+
+
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+ FreeListNode* node = NULL;
+
+ if (size_in_bytes <= kSmallAllocationMax) {
+ node = PickNodeFromList(&small_list_, node_size);
+ if (node != NULL) return node;
+ }
+
+ if (size_in_bytes <= kMediumAllocationMax) {
+ node = PickNodeFromList(&medium_list_, node_size);
+ if (node != NULL) return node;
+ }
+
+ if (size_in_bytes <= kLargeAllocationMax) {
+ node = PickNodeFromList(&large_list_, node_size);
+ if (node != NULL) return node;
+ }
+
+ for (FreeListNode** cur = &huge_list_;
+ *cur != NULL;
+ cur = (*cur)->next_address()) {
+ FreeListNode* cur_node = *cur;
+ while (cur_node != NULL &&
+ Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
+ available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+ cur_node = cur_node->next();
+ }
+
+ *cur = cur_node;
+ if (cur_node == NULL) break;
+
+ ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
+ FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
+ int size = cur_as_free_space->Size();
+ if (size >= size_in_bytes) {
+ // Large enough node found. Unlink it from the list.
+ node = *cur;
+ *node_size = size;
+ *cur = node->next();
+ break;
+ }
+ }
+
+ return node;
+}
// Allocation on the old space free list. If it succeeds then a new linear
@@ -1760,51 +1828,23 @@
// Don't free list allocate if there is linear space available.
ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
- FreeListNode* new_node = NULL;
int new_node_size = 0;
-
- if (size_in_bytes <= kSmallAllocationMax && small_list_ != NULL) {
- new_node = small_list_;
- new_node_size = new_node->Size();
- small_list_ = new_node->next();
- } else if (size_in_bytes <= kMediumAllocationMax && medium_list_ != NULL) {
- new_node = medium_list_;
- new_node_size = new_node->Size();
- medium_list_ = new_node->next();
- } else if (size_in_bytes <= kLargeAllocationMax && large_list_ != NULL) {
- new_node = large_list_;
- new_node_size = new_node->Size();
- large_list_ = new_node->next();
- } else {
- for (FreeListNode** cur = &huge_list_;
- *cur != NULL;
- cur = (*cur)->next_address()) {
- ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
- int size = cur_as_free_space->Size();
- if (size >= size_in_bytes) {
- // Large enough node found. Unlink it from the list.
- new_node = *cur;
- new_node_size = size;
- *cur = new_node->next();
- break;
- }
- }
- if (new_node == NULL) return NULL;
- }
+ FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+ if (new_node == NULL) return NULL;
available_ -= new_node_size;
ASSERT(IsVeryLong() || available_ == SumFreeLists());
+ int bytes_left = new_node_size - size_in_bytes;
+ ASSERT(bytes_left >= 0);
+
int old_linear_size = owner_->limit() - owner_->top();
+
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
owner_->Free(owner_->top(), old_linear_size);
- // TODO(gc) ISOLATES MERGE
- HEAP->incremental_marking()->Step(size_in_bytes - old_linear_size);
-
- ASSERT(new_node_size - size_in_bytes >= 0); // New linear size.
+ owner_->heap()->incremental_marking()->Step(size_in_bytes - old_linear_size);
const int kThreshold = IncrementalMarking::kAllocatedThreshold;
@@ -1812,8 +1852,8 @@
// a little of this again immediately - see below.
owner_->Allocate(new_node_size);
- if (new_node_size - size_in_bytes > kThreshold &&
- HEAP->incremental_marking()->IsMarkingIncomplete() &&
+ if (bytes_left > kThreshold &&
+ owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
FLAG_incremental_marking_steps) {
int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
// We don't want to give too large linear areas to the allocator while
@@ -1823,11 +1863,15 @@
new_node_size - size_in_bytes - linear_size);
owner_->SetTop(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
- } else {
+ } else if (bytes_left > 0) {
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
owner_->SetTop(new_node->address() + size_in_bytes,
new_node->address() + new_node_size);
+ } else {
+ // TODO(gc) Try not freeing linear allocation region when bytes_left
+ // are zero.
+ owner_->SetTop(NULL, NULL);
}
return new_node;
@@ -1906,18 +1950,6 @@
// -----------------------------------------------------------------------------
// OldSpace implementation
-void OldSpace::PrepareForMarkCompact() {
- // Call prepare of the super class.
- PagedSpace::PrepareForMarkCompact();
-
- // Stop lazy sweeping for this space.
- first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
-
- // Clear the free list before a full GC---it will be rebuilt afterward.
- free_list_.Reset();
-}
-
-
bool NewSpace::ReserveSpace(int bytes) {
// We can't reliably unpack a partial snapshot that needs more new space
// space than the minimum NewSpace size.
@@ -1936,6 +1968,19 @@
int old_linear_size = limit() - top();
Free(top(), old_linear_size);
SetTop(NULL, NULL);
+
+ // Stop lazy sweeping for the space.
+ first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
+
+ // Clear the free list before a full GC---it will be rebuilt afterward.
+ free_list_.Reset();
+
+ // Clear EVACUATED flag from all pages.
+ PageIterator it(this);
+ while (it.has_next()) {
+ Page* page = it.next();
+ page->ClearFlag(MemoryChunk::EVACUATED);
+ }
}
@@ -1977,7 +2022,10 @@
Page* p = first_unswept_page_;
do {
Page* next_page = p->next_page();
- freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+ // Evacuation candidates were swept by evacuator.
+ if (!p->IsEvacuationCandidate() && !p->WasEvacuated()) {
+ freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+ }
p = next_page;
} while (p != last && freed_bytes < bytes_to_sweep);
@@ -1993,6 +2041,16 @@
return IsSweepingComplete();
}
+
+
+void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
+ if (allocation_info_.top >= allocation_info_.limit) return;
+
+ if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) {
+ allocation_info_.top = NULL;
+ allocation_info_.limit = NULL;
+ }
+}
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
=======================================
--- /branches/experimental/gc/src/spaces.h Tue Jun 28 06:42:01 2011
+++ /branches/experimental/gc/src/spaces.h Wed Jul 6 13:56:48 2011
@@ -375,9 +375,24 @@
NEW_SPACE_BELOW_AGE_MARK,
CONTAINS_ONLY_DATA,
EVACUATION_CANDIDATE,
+ EVACUATED,
NUM_MEMORY_CHUNK_FLAGS
};
+
+ static const int kPointersToHereAreInterestingMask =
+ 1 << POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static const int kPointersFromHereAreInterestingMask =
+ 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static const int kEvacuationCandidateMask =
+ 1 << EVACUATION_CANDIDATE;
+
+ static const int kEvacuationCandidateOrNewSpaceMask =
+ (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
+
+
void SetFlag(int flag) {
flags_ |= static_cast<uintptr_t>(1) << flag;
}
@@ -611,15 +626,18 @@
bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
bool IsEvacuationCandidateOrNewSpace() {
- intptr_t mask = (1 << EVACUATION_CANDIDATE) |
- (1 << IN_FROM_SPACE) |
- (1 << IN_TO_SPACE);
- return (flags_ & mask) != 0;
+ return (flags_ & kEvacuationCandidateOrNewSpaceMask) != 0;
}
void MarkEvacuationCandidate() { SetFlag(EVACUATION_CANDIDATE); }
void ClearEvacuationCandidate() { ClearFlag(EVACUATION_CANDIDATE); }
+
+ bool WasEvacuated() { return IsFlagSet(EVACUATED); }
+
+ void MarkEvacuated() { SetFlag(EVACUATED); }
+
+ void ClearEvacuated() { ClearFlag(EVACUATED); }
friend class MemoryAllocator;
};
@@ -1037,6 +1055,9 @@
// space.
class AllocationInfo {
public:
+ AllocationInfo() : top(NULL), limit(NULL) {
+ }
+
Address top; // Current allocation top.
Address limit; // Current allocation limit.
@@ -1230,6 +1251,10 @@
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+ FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
+
+ FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+
PagedSpace* owner_;
Heap* heap_;
@@ -1424,7 +1449,6 @@
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
-
bool IsFragmented(Page* p) {
intptr_t sizes[4];
@@ -1449,6 +1473,8 @@
return ratio > 15;
}
+
+ void EvictEvacuationCandidatesFromFreeLists();
protected:
// Maximum capacity of this space.
@@ -2181,10 +2207,6 @@
virtual Address PageAllocationLimit(Page* page) {
return page->ObjectAreaEnd();
}
-
- // Prepare for full garbage collection. Resets the relocation pointer and
- // clears the free list.
- virtual void PrepareForMarkCompact();
public:
TRACK_MEMORY("OldSpace")
--
v8-dev mailing list
v8-dev@googlegroups.com
http://groups.google.com/group/v8-dev