Revision: 10052
Author: mstarzin...@chromium.org
Date: Wed Nov 23 04:13:52 2011
Log: Version 3.7.10
Set maximum length of FixedArray in terms of elements instead of an
absolute number of bytes. (Chromium issue 103103)
Stability and performance improvements on all platforms.
http://code.google.com/p/v8/source/detail?r=10052
Modified:
/trunk/ChangeLog
/trunk/src/code-stubs.h
/trunk/src/compiler.cc
/trunk/src/d8.gyp
/trunk/src/ia32/code-stubs-ia32.cc
/trunk/src/ia32/full-codegen-ia32.cc
/trunk/src/ia32/lithium-codegen-ia32.cc
/trunk/src/mark-compact.cc
/trunk/src/mips/code-stubs-mips.cc
/trunk/src/mips/code-stubs-mips.h
/trunk/src/mips/frames-mips.h
/trunk/src/mips/full-codegen-mips.cc
/trunk/src/mips/lithium-codegen-mips.cc
/trunk/src/mips/lithium-codegen-mips.h
/trunk/src/mips/lithium-mips.cc
/trunk/src/mips/lithium-mips.h
/trunk/src/mips/macro-assembler-mips.cc
/trunk/src/mips/macro-assembler-mips.h
/trunk/src/objects-inl.h
/trunk/src/objects.h
/trunk/src/platform-posix.cc
/trunk/src/runtime.cc
/trunk/src/runtime.h
/trunk/src/version.cc
/trunk/src/x64/code-stubs-x64.cc
/trunk/src/x64/full-codegen-x64.cc
/trunk/src/x64/lithium-codegen-x64.cc
/trunk/test/cctest/test-lockers.cc
=======================================
--- /trunk/ChangeLog Thu Nov 17 00:34:43 2011
+++ /trunk/ChangeLog Wed Nov 23 04:13:52 2011
@@ -1,6 +1,22 @@
+2011-11-23: Version 3.7.10
+
+ Set maximum length of FixedArray in terms of elements instead an
+ absolute number of bytes.
+ (Chromium issue 103103)
+
+ Stability and performance improvements on all platforms.
+
+
+2011-11-21: Version 3.7.9
+
+ Removed exit-time destructors.
+
+ Stability and performance improvements on all platforms.
+
+
2011-11-17: Version 3.7.8
- Removed hidden prototype from builtins, i.e., deleting an
overridden
+ Removed hidden prototype from builtins, i.e., deleting an
overridden
function on builtins will not make the original function reappear.
Added NetBSD support for scons build.
=======================================
--- /trunk/src/code-stubs.h Mon Nov 21 05:51:57 2011
+++ /trunk/src/code-stubs.h Wed Nov 23 04:13:52 2011
@@ -58,6 +58,7 @@
V(FastNewContext) \
V(FastNewBlockContext) \
V(FastCloneShallowArray) \
+ V(FastCloneShallowObject) \
V(ToBoolean) \
V(ToNumber) \
V(ArgumentsAccess) \
@@ -362,8 +363,8 @@
FastCloneShallowArrayStub(Mode mode, int length)
: mode_(mode),
length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
- ASSERT(length_ >= 0);
- ASSERT(length_ <= kMaximumClonedLength);
+ ASSERT_GE(length_, 0);
+ ASSERT_LE(length_, kMaximumClonedLength);
}
void Generate(MacroAssembler* masm);
@@ -380,6 +381,26 @@
};
+class FastCloneShallowObjectStub : public CodeStub {
+ public:
+ // Maximum number of properties in copied object.
+ static const int kMaximumClonedProperties = 6;
+
+ explicit FastCloneShallowObjectStub(int length) : length_(length) {
+ ASSERT_GE(length_, 0);
+ ASSERT_LE(length_, kMaximumClonedProperties);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int length_;
+
+ Major MajorKey() { return FastCloneShallowObject; }
+ int MinorKey() { return length_; }
+};
+
+
class InstanceofStub: public CodeStub {
public:
enum Flags {
=======================================
--- /trunk/src/compiler.cc Thu Nov 17 00:34:43 2011
+++ /trunk/src/compiler.cc Wed Nov 23 04:13:52 2011
@@ -168,7 +168,11 @@
static bool MakeCrankshaftCode(CompilationInfo* info) {
// Test if we can optimize this function when asked to. We can only
// do this after the scopes are computed.
- if (!info->AllowOptimize()) info->DisableOptimization();
+ if (!info->AllowOptimize()) {
+ info->DisableOptimization();
+ } else if (info->IsOptimizable()) {
+ info->EnableDeoptimizationSupport();
+ }
// In case we are not optimizing simply return the code from
// the full code generator.
=======================================
--- /trunk/src/d8.gyp Mon Aug 29 03:41:00 2011
+++ /trunk/src/d8.gyp Wed Nov 23 04:13:52 2011
@@ -65,7 +65,7 @@
'sources': [ 'd8-readline.cc' ],
}],
[ '(OS=="linux" or OS=="mac" or OS=="freebsd" \
- or OS=="openbsd" or OS=="solaris")', {
+ or OS=="openbsd" or OS=="solaris" or OS=="android")', {
'sources': [ 'd8-posix.cc', ]
}],
[ 'OS=="win"', {
=======================================
--- /trunk/src/ia32/code-stubs-ia32.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/ia32/code-stubs-ia32.cc Wed Nov 23 04:13:52 2011
@@ -366,6 +366,52 @@
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
+
+
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [esp + kPointerSize]: object literal flags.
+ // [esp + (2 * kPointerSize)]: constant properties.
+ // [esp + (3 * kPointerSize)]: literal index.
+ // [esp + (4 * kPointerSize)]: literals array.
+
+ // Load boilerplate object into ecx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ mov(ecx, Operand(esp, 4 * kPointerSize));
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ STATIC_ASSERT(kPointerSize == 4);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Factory* factory = masm->isolate()->factory();
+ __ cmp(ecx, factory->undefined_value());
+ __ j(equal, &slow_case);
+
+ // Check that the boilerplate contains only fast properties and we can
+ // statically determine the instance size.
+ int size = JSObject::kHeaderSize + length_ * kPointerSize;
+ __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ cmp(eax, Immediate(size >> kPointerSizeLog2));
+ __ j(not_equal, &slow_case);
+
+ // Allocate the JS object and copy header together with all in-object
+ // properties from the boilerplate.
+ __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+ for (int i = 0; i < size; i += kPointerSize) {
+ __ mov(ebx, FieldOperand(ecx, i));
+ __ mov(FieldOperand(eax, i), ebx);
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(4 * kPointerSize);
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
// The stub expects its argument on the stack and returns its result in
tos_:
=======================================
--- /trunk/src/ia32/full-codegen-ia32.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/ia32/full-codegen-ia32.cc Wed Nov 23 04:13:52 2011
@@ -1374,10 +1374,11 @@
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Handle<FixedArray> constant_properties = expr->constant_properties();
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->constant_properties()));
+ __ push(Immediate(constant_properties));
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -1385,10 +1386,15 @@
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ push(Immediate(Smi::FromInt(flags)));
+ int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count >
FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
}
// If result_saved is true the result is on top of the stack. If
=======================================
--- /trunk/src/ia32/lithium-codegen-ia32.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/ia32/lithium-codegen-ia32.cc Wed Nov 23 04:13:52 2011
@@ -4163,11 +4163,14 @@
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
+ Handle<FixedArray> constant_properties =
+ instr->hydrogen()->constant_properties();
+
// Setup the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(instr->hydrogen()->constant_properties()));
+ __ push(Immediate(constant_properties));
int flags = instr->hydrogen()->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -4176,11 +4179,16 @@
: ObjectLiteral::kNoFlags;
__ push(Immediate(Smi::FromInt(flags)));
- // Pick the right runtime function to call.
+ // Pick the right runtime function or stub to call.
+ int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count >
FastCloneShallowObjectStub::kMaximumClonedProperties) {
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
=======================================
--- /trunk/src/mark-compact.cc Mon Nov 14 05:36:17 2011
+++ /trunk/src/mark-compact.cc Wed Nov 23 04:13:52 2011
@@ -1012,7 +1012,9 @@
MarkBit code_mark =
Marking::MarkBitFrom(function->unchecked_code());
if (code_mark.Get()) {
- shared_info->set_code_age(0);
+ if (!Marking::MarkBitFrom(shared_info).Get()) {
+ shared_info->set_code_age(0);
+ }
return false;
}
@@ -1030,7 +1032,6 @@
MarkBit code_mark =
Marking::MarkBitFrom(shared_info->unchecked_code());
if (code_mark.Get()) {
- shared_info->set_code_age(0);
return false;
}
=======================================
--- /trunk/src/mips/code-stubs-mips.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/mips/code-stubs-mips.cc Wed Nov 23 04:13:52 2011
@@ -3856,7 +3856,7 @@
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
+ Label invoke, handler_entry, exit;
Isolate* isolate = masm->isolate();
// Registers:
@@ -3933,14 +3933,15 @@
__ bind(&cont);
__ push(t0);
- // Call a faked try-block that does the invoke.
- __ bal(&invoke); // bal exposes branch delay slot.
- __ nop(); // Branch delay slot nop.
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+ handler_offset_ = handler_entry.pos();
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in
v0.
@@ -3948,9 +3949,10 @@
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
- // Invoke: Link this frame into the handler chain.
+ // Invoke: Link this frame into the handler chain. There's only one
+ // handler block in this code object, so its index is 0.
__ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER, 0);
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bal(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
@@ -5103,7 +5105,7 @@
}
-void CallFunctionStub::FinishCode(Code* code) {
+void CallFunctionStub::FinishCode(Handle<Code> code) {
code->set_has_function_cache(false);
}
=======================================
--- /trunk/src/mips/code-stubs-mips.h Fri Nov 11 04:00:53 2011
+++ /trunk/src/mips/code-stubs-mips.h Wed Nov 23 04:13:52 2011
@@ -137,7 +137,7 @@
return UnaryOpIC::ToState(operand_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_unary_op_type(operand_type_);
}
};
@@ -236,7 +236,7 @@
return BinaryOpIC::ToState(operands_type_);
}
- virtual void FinishCode(Code* code) {
+ virtual void FinishCode(Handle<Code> code) {
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
=======================================
--- /trunk/src/mips/frames-mips.h Thu Oct 13 02:49:59 2011
+++ /trunk/src/mips/frames-mips.h Wed Nov 23 04:13:52 2011
@@ -154,13 +154,13 @@
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kStateOffset = 1 * kPointerSize;
- static const int kContextOffset = 2 * kPointerSize;
- static const int kFPOffset = 3 * kPointerSize;
- static const int kPCOffset = 4 * kPointerSize;
-
- static const int kSize = kPCOffset + kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kCodeOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kContextOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
+
+ static const int kSize = kFPOffset + kPointerSize;
};
=======================================
--- /trunk/src/mips/full-codegen-mips.cc Mon Nov 14 05:36:17 2011
+++ /trunk/src/mips/full-codegen-mips.cc Wed Nov 23 04:13:52 2011
@@ -137,6 +137,8 @@
ASSERT(info_ == NULL);
info_ = info;
scope_ = info->scope();
+ handler_table_ =
+ isolate()->factory()->NewFixedArray(function()->handler_count(),
TENURED);
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
=======================================
--- /trunk/src/mips/lithium-codegen-mips.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/mips/lithium-codegen-mips.cc Wed Nov 23 04:13:52 2011
@@ -1675,6 +1675,32 @@
EmitBranch(true_block, false_block, true_cond, temp2,
Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string) {
+ __ JumpIfSmi(input, is_not_string);
+ __ GetObjectType(input, temp1, temp1);
+
+ return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp1 = ToRegister(instr->TempAt(0));
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond =
+ EmitIsString(reg, temp1, false_label);
+
+ EmitBranch(true_block, false_block, true_cond, temp1,
+ Operand(FIRST_NONSTRING_TYPE));
+}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
@@ -1700,6 +1726,40 @@
__ And(at, temp, Operand(1 << Map::kIsUndetectable));
EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ return eq;
+ case Token::LT:
+ return lt;
+ case Token::GT:
+ return gt;
+ case Token::LTE:
+ return le;
+ case Token::GTE:
+ return ge;
+ default:
+ UNREACHABLE();
+ return kNoCondition;
+ }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+ Token::Value op = instr->op();
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+ Handle<Code> ic = CompareIC::GetUninitialized(op);
+ CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+ Condition condition = ComputeCompareCondition(op);
+
+ EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
+}
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
@@ -2000,26 +2060,6 @@
// restore all registers.
__ StoreToSafepointRegisterSlot(result, result);
}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- return eq;
- case Token::LT:
- return lt;
- case Token::GT:
- return gt;
- case Token::LTE:
- return le;
- case Token::GTE:
- return ge;
- default:
- UNREACHABLE();
- return kNoCondition;
- }
-}
void LCodeGen::DoCmpT(LCmpT* instr) {
=======================================
--- /trunk/src/mips/lithium-codegen-mips.h Mon Nov 21 05:51:57 2011
+++ /trunk/src/mips/lithium-codegen-mips.h Wed Nov 23 04:13:52 2011
@@ -299,6 +299,13 @@
Label* is_not_object,
Label* is_object);
+ // Emits optimized code for %_IsString(x). Preserves input register.
+ // Returns the condition on which a final split to
+ // true and false label should be made, to optimize fallthrough.
+ Condition EmitIsString(Register input,
+ Register temp1,
+ Label* is_not_string);
+
// Emits optimized code for %_IsConstructCall().
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
=======================================
--- /trunk/src/mips/lithium-mips.cc Thu Nov 10 03:38:15 2011
+++ /trunk/src/mips/lithium-mips.cc Wed Nov 23 04:13:52 2011
@@ -226,6 +226,13 @@
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if is_string(");
+ InputAt(0)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
@@ -240,6 +247,14 @@
InputAt(0)->PrintTo(stream);
stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+ stream->Add("if string_compare(");
+ InputAt(0)->PrintTo(stream);
+ InputAt(1)->PrintTo(stream);
+ stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
@@ -1450,6 +1465,13 @@
LOperand* temp = TempRegister();
return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch*
instr) {
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* temp = TempRegister();
+ return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
@@ -1464,6 +1486,17 @@
return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
TempRegister());
}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+ HStringCompareAndBranch* instr) {
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ LOperand* left = UseFixed(instr->left(), a1);
+ LOperand* right = UseFixed(instr->right(), a0);
+ LStringCompareAndBranch* result = new LStringCompareAndBranch(left,
right);
+ return MarkAsCall(result, instr);
+}
LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
=======================================
--- /trunk/src/mips/lithium-mips.h Thu Nov 10 03:38:15 2011
+++ /trunk/src/mips/lithium-mips.h Wed Nov 23 04:13:52 2011
@@ -109,8 +109,10 @@
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
V(IsObjectAndBranch) \
+ V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
+ V(StringCompareAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
@@ -658,6 +660,20 @@
};
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+ LIsStringAndBranch(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -686,6 +702,23 @@
};
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+ LStringCompareAndBranch(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+ "string-compare-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+ Token::Value op() const { return hydrogen()->token(); }
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
=======================================
--- /trunk/src/mips/macro-assembler-mips.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/mips/macro-assembler-mips.cc Wed Nov 23 04:13:52 2011
@@ -2544,60 +2544,50 @@
// Exception handling.
void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type) {
+ HandlerType type,
+ int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-
- // The return address is passed in register ra.
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // For the JSEntry handler, we must preserve a0-a3 and s0.
+ // t1-t3 are available. We will build up the handler from the bottom by
+ // pushing on the stack. First compute the state.
+ unsigned state = StackHandler::OffsetField::encode(handler_index);
if (try_location == IN_JAVASCRIPT) {
- if (type == TRY_CATCH_HANDLER) {
- li(t0, Operand(StackHandler::TRY_CATCH));
- } else {
- li(t0, Operand(StackHandler::TRY_FINALLY));
- }
- // Save the current handler as the next handler.
- li(t2, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
- lw(t1, MemOperand(t2));
-
- addiu(sp, sp, -StackHandlerConstants::kSize);
- sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
- sw(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
- sw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
- sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
- // Link this handler as the new current one.
- sw(sp, MemOperand(t2));
-
+ state |= (type == TRY_CATCH_HANDLER)
+ ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
+ : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
} else {
- // Must preserve a0-a3, and s0 (argv).
ASSERT(try_location == IN_JS_ENTRY);
- // The frame pointer does not point to a JS frame so we save NULL
- // for fp. We expect the code throwing an exception to check fp
- // before dereferencing it to restore the context.
- li(t0, Operand(StackHandler::ENTRY));
-
- // Save the current handler as the next handler.
- li(t2, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
- lw(t1, MemOperand(t2));
-
- ASSERT(Smi::FromInt(0) == 0); // Used for no context.
-
- addiu(sp, sp, -StackHandlerConstants::kSize);
- sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
- sw(zero_reg, MemOperand(sp, StackHandlerConstants::kFPOffset));
- sw(zero_reg, MemOperand(sp, StackHandlerConstants::kContextOffset));
- sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
- sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
- // Link this handler as the new current one.
- sw(sp, MemOperand(t2));
- }
+ state |= StackHandler::KindField::encode(StackHandler::ENTRY);
+ }
+
+ // Set up the code object (t1) and the state (t2) for pushing.
+ li(t1, Operand(CodeObject()));
+ li(t2, Operand(state));
+
+ // Push the frame pointer, context, state, and code object.
+ if (try_location == IN_JAVASCRIPT) {
+ MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
+ } else {
+ ASSERT_EQ(Smi::FromInt(0), 0);
+ // The second zero_reg indicates no context.
+ // The first zero_reg is the NULL frame pointer.
+ // The operands are reversed to match the order of MultiPush/Pop.
+ Push(zero_reg, zero_reg, t2, t1);
+ }
+
+ // Link the current handler as the next handler.
+ li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+ lw(t1, MemOperand(t2));
+ push(t1);
+ // Set this new handler as the current one.
+ sw(sp, MemOperand(t2));
}
@@ -2610,19 +2600,36 @@
}
-void MacroAssembler::Throw(Register value) {
- // v0 is expected to hold the exception.
- Move(v0, value);
-
+void MacroAssembler::JumpToHandlerEntry() {
+ // Compute the handler entry address and jump to it. The handler table
is
+ // a fixed array of (smi-tagged) code offsets.
+ // v0 = exception, a1 = code object, a2 = state.
+ lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler
table.
+ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ srl(a2, a2, StackHandler::kKindWidth); // Handler index.
+ sll(a2, a2, kPointerSizeLog2);
+ Addu(a2, a3, a2);
+ lw(a2, MemOperand(a2)); // Smi-tagged offset.
+ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code
start.
+ sra(t9, a2, kSmiTagSize);
+ Addu(t9, t9, a1);
+ Jump(t9); // Jump.
+}
+
+
+void MacroAssembler::Throw(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
-
- // Drop the sp to the top of the handler.
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+ // The exception is expected in v0.
+ Move(v0, value);
+
+ // Drop the stack pointer to the top of the top handler.
li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
lw(sp, MemOperand(a3));
@@ -2631,44 +2638,19 @@
pop(a2);
sw(a2, MemOperand(a3));
- // Restore context and frame pointer, discard state (a3).
- MultiPop(a3.bit() | cp.bit() | fp.bit());
+ // Get the code object (a1) and state (a2). Restore the context and
frame
+ // pointer.
+ MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
// If the handler is a JS frame, restore the context to the frame.
- // (a3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
- // of them.
+ // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+ // or cp.
Label done;
- Branch(&done, eq, fp, Operand(zero_reg));
+ Branch(&done, eq, cp, Operand(zero_reg));
sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
bind(&done);
-#ifdef DEBUG
- // When emitting debug_code, set ra as return address for the jump.
- // 5 instructions: add: 1, pop: 2, jump: 2.
- const int kOffsetRaInstructions = 5;
- Label find_ra;
-
- if (emit_debug_code()) {
- // Compute ra for the Jump(t9).
- const int kOffsetRaBytes = kOffsetRaInstructions *
Assembler::kInstrSize;
-
- // This branch-and-link sequence is needed to get the current PC on
mips,
- // saved to the ra register. Then adjusted for instruction count.
- bal(&find_ra); // bal exposes branch-delay.
- nop(); // Branch delay slot nop.
- bind(&find_ra);
- addiu(ra, ra, kOffsetRaBytes);
- }
-#endif
-
- pop(t9); // 2 instructions: lw, add sp.
- Jump(t9); // 2 instructions: jr, nop (in delay slot).
-
- if (emit_debug_code()) {
- // Make sure that the expected number of instructions were generated.
- ASSERT_EQ(kOffsetRaInstructions,
- InstructionsGeneratedSince(&find_ra));
- }
+ JumpToHandlerEntry();
}
@@ -2677,10 +2659,10 @@
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in v0.
if (type == OUT_OF_MEMORY) {
@@ -2705,26 +2687,27 @@
li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
lw(sp, MemOperand(a3));
- // Unwind the handlers until the top ENTRY handler is found.
+ // Unwind the handlers until the ENTRY handler is found.
Label fetch_next, check_kind;
jmp(&check_kind);
bind(&fetch_next);
lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
+ STATIC_ASSERT(StackHandler::ENTRY == 0);
lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
- Branch(&fetch_next, ne, a2, Operand(StackHandler::ENTRY));
+ And(a2, a2, Operand(StackHandler::KindField::kMask));
+ Branch(&fetch_next, ne, a2, Operand(zero_reg));
// Set the top handler address to next handler past the top ENTRY
handler.
pop(a2);
sw(a2, MemOperand(a3));
- // Clear the context and frame pointer (0 was saved in the handler), and
- // discard the state (a2).
- MultiPop(a2.bit() | cp.bit() | fp.bit());
-
- pop(t9); // 2 instructions: lw, add sp.
- Jump(t9); // 2 instructions: jr, nop (in delay slot).
+ // Get the code object (a1) and state (a2). Clear the context and frame
+ // pointer (0 was saved in the handler).
+ MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
+
+ JumpToHandlerEntry();
}
=======================================
--- /trunk/src/mips/macro-assembler-mips.h Mon Nov 7 02:14:12 2011
+++ /trunk/src/mips/macro-assembler-mips.h Wed Nov 23 04:13:52 2011
@@ -843,9 +843,9 @@
// Exception handling.
// Push a new try handler and link into try handler chain.
- // The return address must be passed in register ra.
- // Clobber t0, t1, t2.
- void PushTryHandler(CodeLocation try_location, HandlerType type);
+ void PushTryHandler(CodeLocation try_location,
+ HandlerType type,
+ int handler_index);
// Unlink the stack handler on top of the stack from the try handler
chain.
// Must preserve the result register.
@@ -1381,6 +1381,10 @@
Register bitmap_reg,
Register mask_reg);
+ // Helper for throwing exceptions. Compute a handler address and jump to
+ // it. See the implementation for register usage.
+ void JumpToHandlerEntry();
+
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
=======================================
--- /trunk/src/objects-inl.h Mon Nov 21 05:51:57 2011
+++ /trunk/src/objects-inl.h Wed Nov 23 04:13:52 2011
@@ -3667,8 +3667,8 @@
void SharedFunctionInfo::set_code_age(int code_age) {
- set_compiler_hints(compiler_hints() |
- ((code_age & kCodeAgeMask) << kCodeAgeShift));
+ int hints = compiler_hints() & ~(kCodeAgeMask << kCodeAgeShift);
+ set_compiler_hints(hints | ((code_age & kCodeAgeMask) << kCodeAgeShift));
}
=======================================
--- /trunk/src/objects.h Mon Nov 21 05:51:57 2011
+++ /trunk/src/objects.h Wed Nov 23 04:13:52 2011
@@ -2098,7 +2098,7 @@
// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
// consumption.
- static const int kMaxSize = 512 * MB;
+ static const int kMaxSize = 128 * MB * kPointerSize;
// Maximally allowed length of a FixedArray.
static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
@@ -3339,9 +3339,6 @@
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
- // Maximal size of a single FreeSpace.
- static const int kMaxSize = 512 * MB;
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
};
=======================================
--- /trunk/src/platform-posix.cc Thu Nov 10 03:38:15 2011
+++ /trunk/src/platform-posix.cc Wed Nov 23 04:13:52 2011
@@ -46,7 +46,7 @@
#undef MAP_TYPE
-#if defined(ANDROID)
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
#define LOG_TAG "v8"
#include <android/log.h>
#endif
=======================================
--- /trunk/src/runtime.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/runtime.cc Wed Nov 23 04:13:52 2011
@@ -253,18 +253,6 @@
}
return copy;
}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneLiteralBoilerplate) {
- CONVERT_CHECKED(JSObject, boilerplate, args[0]);
- return DeepCopyBoilerplate(isolate, boilerplate);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneShallowLiteralBoilerplate) {
- CONVERT_CHECKED(JSObject, boilerplate, args[0]);
- return isolate->heap()->CopyJSObject(boilerplate);
-}
static Handle<Map> ComputeObjectLiteralMap(
@@ -543,28 +531,6 @@
return Handle<Object>::null();
}
}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralBoilerplate) {
- // Takes a FixedArray of elements containing the literal elements of
- // the array literal and produces JSArray with those elements.
- // Additionally takes the literals array of the surrounding function
- // which contains the context from which to get the Array function
- // to use for creating the array literal.
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_CHECKED(FixedArray, literals, 0);
- CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_CHECKED(FixedArray, elements, 2);
-
- Handle<Object> object =
- CreateArrayLiteralBoilerplate(isolate, literals, elements);
- if (object.is_null()) return Failure::Exception();
-
- // Update the functions literal and return the boilerplate.
- literals->set(literals_index, *object);
- return *object;
-}
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
=======================================
--- /trunk/src/runtime.h Thu Nov 17 00:34:43 2011
+++ /trunk/src/runtime.h Wed Nov 23 04:13:52 2011
@@ -278,9 +278,6 @@
\
/* Literals */ \
F(MaterializeRegExpLiteral, 4, 1)\
- F(CreateArrayLiteralBoilerplate, 4, 1) \
- F(CloneLiteralBoilerplate, 1, 1) \
- F(CloneShallowLiteralBoilerplate, 1, 1) \
F(CreateObjectLiteral, 4, 1) \
F(CreateObjectLiteralShallow, 4, 1) \
F(CreateArrayLiteral, 3, 1) \
=======================================
--- /trunk/src/version.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/version.cc Wed Nov 23 04:13:52 2011
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 7
-#define BUILD_NUMBER 9
+#define BUILD_NUMBER 10
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
=======================================
--- /trunk/src/x64/code-stubs-x64.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/x64/code-stubs-x64.cc Wed Nov 23 04:13:52 2011
@@ -352,6 +352,49 @@
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
+
+
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [rsp + kPointerSize]: object literal flags.
+ // [rsp + (2 * kPointerSize)]: constant properties.
+ // [rsp + (3 * kPointerSize)]: literal index.
+ // [rsp + (4 * kPointerSize)]: literals array.
+
+ // Load boilerplate object into ecx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ movq(rcx, Operand(rsp, 4 * kPointerSize));
+ __ movq(rax, Operand(rsp, 3 * kPointerSize));
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ movq(rcx,
+ FieldOperand(rcx, index.reg, index.scale,
FixedArray::kHeaderSize));
+ __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case);
+
+ // Check that the boilerplate contains only fast properties and we can
+ // statically determine the instance size.
+ int size = JSObject::kHeaderSize + length_ * kPointerSize;
+ __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
+ __ j(not_equal, &slow_case);
+
+ // Allocate the JS object and copy header together with all in-object
+ // properties from the boilerplate.
+ __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+ for (int i = 0; i < size; i += kPointerSize) {
+ __ movq(rbx, FieldOperand(rcx, i));
+ __ movq(FieldOperand(rax, i), rbx);
+ }
+
+ // Return and remove the on-stack parameters.
+ __ ret(4 * kPointerSize);
+
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+}
// The stub expects its argument on the stack and returns its result in
tos_:
=======================================
--- /trunk/src/x64/full-codegen-x64.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/x64/full-codegen-x64.cc Wed Nov 23 04:13:52 2011
@@ -1376,10 +1376,11 @@
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Handle<FixedArray> constant_properties = expr->constant_properties();
__ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->constant_properties());
+ __ Push(constant_properties);
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -1387,10 +1388,15 @@
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ Push(Smi::FromInt(flags));
+ int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count >
FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
}
// If result_saved is true the result is on top of the stack. If
=======================================
--- /trunk/src/x64/lithium-codegen-x64.cc Mon Nov 21 05:51:57 2011
+++ /trunk/src/x64/lithium-codegen-x64.cc Wed Nov 23 04:13:52 2011
@@ -3927,18 +3927,32 @@
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+ Handle<FixedArray> constant_properties =
+ instr->hydrogen()->constant_properties();
+
// Setup the parameters to the stub/runtime call.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->constant_properties());
- __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
+ __ Push(constant_properties);
+ int flags = instr->hydrogen()->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= instr->hydrogen()->has_function()
+ ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ Push(Smi::FromInt(flags));
// Pick the right runtime function to call.
+ int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count >
FastCloneShallowObjectStub::kMaximumClonedProperties) {
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
=======================================
--- /trunk/test/cctest/test-lockers.cc Thu Nov 17 00:34:43 2011
+++ /trunk/test/cctest/test-lockers.cc Wed Nov 23 04:13:52 2011
@@ -204,7 +204,11 @@
// Run many threads all locking on the same isolate
TEST(IsolateLockingStress) {
+#ifdef V8_TARGET_ARCH_MIPS
+ const int kNThreads = 50;
+#else
const int kNThreads = 100;
+#endif
i::List<JoinableThread*> threads(kNThreads);
v8::Isolate* isolate = v8::Isolate::New();
for (int i = 0; i < kNThreads; i++) {
@@ -237,7 +241,7 @@
// Run many threads each accessing its own isolate without locking
TEST(MultithreadedParallelIsolates) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
const int kNThreads = 10;
#else
const int kNThreads = 50;
@@ -275,7 +279,11 @@
// Run many threads with nested locks
TEST(IsolateNestedLocking) {
+#ifdef V8_TARGET_ARCH_MIPS
+ const int kNThreads = 50;
+#else
const int kNThreads = 100;
+#endif
v8::Isolate* isolate = v8::Isolate::New();
i::List<JoinableThread*> threads(kNThreads);
for (int i = 0; i < kNThreads; i++) {
@@ -311,7 +319,7 @@
// Run parallel threads that lock and access different isolates in parallel
TEST(SeparateIsolatesLocksNonexclusive) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
const int kNThreads = 50;
#else
const int kNThreads = 100;
@@ -385,7 +393,7 @@
// Use unlocker inside of a Locker, multiple threads.
TEST(LockerUnlocker) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
const int kNThreads = 50;
#else
const int kNThreads = 100;
@@ -438,7 +446,7 @@
// Use Unlocker inside two Lockers.
TEST(LockTwiceAndUnlock) {
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
const int kNThreads = 50;
#else
const int kNThreads = 100;
@@ -559,7 +567,11 @@
// Locker inside an Unlocker inside a Locker.
TEST(LockUnlockLockMultithreaded) {
+#ifdef V8_TARGET_ARCH_MIPS
+ const int kNThreads = 50;
+#else
const int kNThreads = 100;
+#endif
v8::Isolate* isolate = v8::Isolate::New();
Persistent<v8::Context> context;
{
@@ -606,7 +618,11 @@
// Locker inside an Unlocker inside a Locker for default isolate.
TEST(LockUnlockLockDefaultIsolateMultithreaded) {
+#ifdef V8_TARGET_ARCH_MIPS
+ const int kNThreads = 50;
+#else
const int kNThreads = 100;
+#endif
Persistent<v8::Context> context;
{
v8::Locker locker_;
@@ -674,7 +690,11 @@
// Test installing extensions in separate isolates concurrently.
// http://code.google.com/p/v8/issues/detail?id=1821
TEST(ExtensionsRegistration) {
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
+ const int kNThreads = 10;
+#else
const int kNThreads = 40;
+#endif
v8::RegisterExtension(new v8::Extension("test0",
kSimpleExtensionSource));
v8::RegisterExtension(new v8::Extension("test1",
--
v8-dev mailing list
v8-dev@googlegroups.com
http://groups.google.com/group/v8-dev