Revision: 22798
Author: [email protected]
Date: Mon Aug 4 06:49:33 2014 UTC
Log: Version 3.28.54 (based on bleeding_edge revision r22797)
Performance and stability improvements on all platforms.
http://code.google.com/p/v8/source/detail?r=22798
Modified:
/trunk/ChangeLog
/trunk/src/compiler/arm/code-generator-arm.cc
/trunk/src/compiler/arm/instruction-selector-arm.cc
/trunk/src/compiler/arm64/code-generator-arm64.cc
/trunk/src/compiler/arm64/instruction-selector-arm64.cc
/trunk/src/compiler/arm64/linkage-arm64.cc
/trunk/src/compiler/code-generator-impl.h
/trunk/src/compiler/ia32/code-generator-ia32.cc
/trunk/src/compiler/ia32/instruction-selector-ia32.cc
/trunk/src/compiler/instruction-codes.h
/trunk/src/compiler/instruction-selector-impl.h
/trunk/src/compiler/instruction-selector.cc
/trunk/src/compiler/instruction-selector.h
/trunk/src/compiler/instruction.cc
/trunk/src/compiler/js-generic-lowering.cc
/trunk/src/compiler/machine-node-factory.h
/trunk/src/compiler/machine-operator.h
/trunk/src/compiler/node.cc
/trunk/src/compiler/node.h
/trunk/src/compiler/opcodes.h
/trunk/src/compiler/pipeline.cc
/trunk/src/compiler/pipeline.h
/trunk/src/compiler/register-allocator.cc
/trunk/src/compiler/representation-change.h
/trunk/src/compiler/simplified-lowering.cc
/trunk/src/compiler/x64/code-generator-x64.cc
/trunk/src/compiler/x64/instruction-selector-x64.cc
/trunk/src/compiler.cc
/trunk/src/runtime.cc
/trunk/src/runtime.h
/trunk/src/version.cc
/trunk/test/cctest/compiler/test-instruction-selector-arm.cc
/trunk/test/cctest/compiler/test-machine-operator-reducer.cc
/trunk/test/cctest/compiler/test-representation-change.cc
/trunk/test/cctest/compiler/test-run-machops.cc
/trunk/test/mjsunit/mjsunit.status
/trunk/test/mozilla/mozilla.status
/trunk/test/test262/test262.status
/trunk/test/webkit/webkit.status
/trunk/tools/generate-runtime-tests.py
/trunk/tools/run-tests.py
/trunk/tools/whitespace.txt
=======================================
--- /trunk/ChangeLog Fri Aug 1 10:40:37 2014 UTC
+++ /trunk/ChangeLog Mon Aug 4 06:49:33 2014 UTC
@@ -1,3 +1,8 @@
+2014-08-04: Version 3.28.54
+
+ Performance and stability improvements on all platforms.
+
+
2014-08-01: Version 3.28.53
Performance and stability improvements on all platforms.
=======================================
--- /trunk/src/compiler/arm/code-generator-arm.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/arm/code-generator-arm.cc Mon Aug 4 06:49:33 2014 UTC
@@ -501,6 +501,12 @@
case kUnsignedGreaterThan:
__ b(hi, tlabel);
break;
+ case kOverflow:
+ __ b(vs, tlabel);
+ break;
+ case kNotOverflow:
+ __ b(vc, tlabel);
+ break;
}
if (!fallthru) __ b(flabel); // no fallthru to flabel.
__ bind(&done);
@@ -513,9 +519,11 @@
ArmOperandConverter i(this, instr);
Label done;
- // Materialize a full 32-bit 1 or 0 value.
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
Label check;
- Register reg = i.OutputRegister();
+ ASSERT_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
switch (condition) {
case kUnorderedEqual:
@@ -578,6 +586,12 @@
case kUnsignedGreaterThan:
cc = hi;
break;
+ case kOverflow:
+ cc = vs;
+ break;
+ case kNotOverflow:
+ cc = vc;
+ break;
}
__ bind(&check);
__ mov(reg, Operand(0));
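
The kOverflow/kNotOverflow cases and the switch to OutputRegister(instr->OutputCount() - 1) above reflect one convention: an overflow instruction keeps its arithmetic result in output 0, so the materialized 0/1 boolean goes into the last output. A minimal standalone model of that convention (toy struct, not V8's Instruction class):

    #include <cassert>

    // Toy instruction: outputs[0] may hold an arithmetic result; the
    // materialized 0/1 boolean always lands in the *last* output slot.
    struct ToyInstruction {
      int outputs[2];
      int output_count;
    };

    static void MaterializeBoolean(ToyInstruction* instr, bool condition) {
      assert(instr->output_count != 0);
      instr->outputs[instr->output_count - 1] = condition ? 1 : 0;
    }

    int main() {
      ToyInstruction adds = {{123, 0}, 2};  // e.g. an add-with-overflow
      MaterializeBoolean(&adds, true);
      assert(adds.outputs[0] == 123);    // value output untouched
      assert(adds.outputs[1] == 1);      // flag goes into the last output
      ToyInstruction cmp = {{0, 0}, 1};  // plain compare: flag is output 0
      MaterializeBoolean(&cmp, false);
      assert(cmp.outputs[0] == 0);
      return 0;
    }
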
=======================================
--- /trunk/src/compiler/arm/instruction-selector-arm.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/arm/instruction-selector-arm.cc Mon Aug 4 06:49:33 2014 UTC
@@ -215,6 +215,18 @@
}
return true;
}
+
+
+static inline bool TryMatchShift(InstructionSelector* selector,
+                                 InstructionCode* opcode_return, Node* node,
+                                 InstructionOperand** value_return,
+                                 InstructionOperand** shift_return) {
+  return (
+      TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchROR(selector, opcode_return, node, value_return, shift_return));
+}
static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
@@ -229,10 +241,7 @@
*input_count_return = 1;
return true;
}
- if (TryMatchASR(selector, opcode_return, node, &inputs[0], &inputs[1]) ||
- TryMatchLSL(selector, opcode_return, node, &inputs[0], &inputs[1]) ||
- TryMatchLSR(selector, opcode_return, node, &inputs[0], &inputs[1]) ||
- TryMatchROR(selector, opcode_return, node, &inputs[0], &inputs[1])) {
+  if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
*input_count_return = 2;
return true;
}
@@ -247,6 +256,8 @@
Int32BinopMatcher m(node);
InstructionOperand* inputs[3];
size_t input_count = 0;
+ InstructionOperand* outputs[1] = {g.DefineAsRegister(node)};
+ const size_t output_count = ARRAY_SIZE(outputs);
if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
&input_count, &inputs[1])) {
@@ -268,8 +279,54 @@
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
- InstructionOperand* outputs[1] = {g.DefineAsRegister(node)};
- const size_t output_count = ARRAY_SIZE(outputs);
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+}
+
+
+static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
+ InstructionCode opcode,
+ InstructionCode reverse_opcode) {
+ ArmOperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[3];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
+ m.left().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ opcode |= AddressingModeField::encode(kMode_Operand2_R);
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseRegister(m.right().node());
+ }
+
+ // Define outputs depending on the projections.
+ Node* projections[2];
+ node->CollectProjections(ARRAY_SIZE(projections), projections);
+ if (projections[0]) {
+ outputs[output_count++] = g.DefineAsRegister(projections[0]);
+ }
+ if (projections[1]) {
+ opcode |= FlagsModeField::encode(kFlags_set);
+ opcode |= FlagsConditionField::encode(kOverflow);
+ outputs[output_count++] = g.DefineAsRegister(projections[1]);
+ }
+
+ ASSERT_NE(0, input_count);
+ ASSERT_NE(0, output_count);
+ ASSERT_GE(ARRAY_SIZE(inputs), input_count);
+ ASSERT_GE(ARRAY_SIZE(outputs), output_count);
+ ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
+
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -377,23 +434,16 @@
Node* left, Node* right) {
ArmOperandGenerator g(selector);
InstructionCode opcode = kArmBic;
- InstructionOperand* inputs[3];
- size_t input_count = 0;
- InstructionOperand* outputs[1] = {g.DefineAsRegister(node)};
- const size_t output_count = ARRAY_SIZE(outputs);
-
- inputs[input_count++] = g.UseRegister(left);
- if (!TryMatchImmediateOrShift(selector, &opcode, right, &input_count,
- &inputs[input_count])) {
- opcode |= AddressingModeField::encode(kMode_Operand2_R);
- inputs[input_count++] = g.UseRegister(right);
+ InstructionOperand* value_operand;
+ InstructionOperand* shift_operand;
+  if (TryMatchShift(selector, &opcode, right, &value_operand,
+                    &shift_operand)) {
+ selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+ value_operand, shift_operand);
+ return;
}
-
- ASSERT_NE(0, input_count);
- ASSERT_GE(ARRAY_SIZE(inputs), input_count);
- ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
-
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
+ g.DefineAsRegister(node), g.UseRegister(left),
+ g.UseRegister(right));
}
@@ -464,11 +514,19 @@
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
if (m.right().Is(-1)) {
- Emit(kArmMvn | AddressingModeField::encode(kMode_Operand2_R),
- g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
- } else {
- VisitBinop(this, node, kArmEor, kArmEor);
+ InstructionCode opcode = kArmMvn;
+ InstructionOperand* value_operand;
+ InstructionOperand* shift_operand;
+ if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
+ &shift_operand)) {
+ Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
+ return;
+ }
+ Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+ return;
}
+ VisitBinop(this, node, kArmEor, kArmEor);
}
@@ -537,6 +595,11 @@
}
VisitBinop(this, node, kArmAdd, kArmAdd);
}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kArmAdd, kArmAdd);
+}
void InstructionSelector::VisitInt32Sub(Node* node) {
@@ -551,6 +614,11 @@
}
VisitBinop(this, node, kArmSub, kArmRsb);
}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kArmSub, kArmRsb);
+}
void InstructionSelector::VisitInt32Mul(Node* node) {
@@ -652,28 +720,28 @@
}
-void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVcvtF64S32, g.DefineAsDoubleRegister(node),
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitConvertUint32ToFloat64(Node* node) {
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVcvtF64U32, g.DefineAsDoubleRegister(node),
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
g.UseDoubleRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitConvertFloat64ToUint32(Node* node) {
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
g.UseDoubleRegister(node->InputAt(0)));
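
A word on the (kArmSub, kArmRsb) pair handed to VisitBinopWithOverflow above: subtraction is not commutative, so when only the *left* operand can be encoded as an immediate or shift, the selector swaps the operands and falls back to ARM's reverse-subtract. A tiny standalone check of the identity this relies on (plain C++, not selector code):

    #include <cassert>

    static int Sub(int op1, int op2) { return op1 - op2; }
    // ARM RSB computes operand2 - operand1, letting the immediate sit in
    // the flexible operand2 position.
    static int Rsb(int op1, int op2) { return op2 - op1; }

    int main() {
      int x = 42, imm = 100;
      // "imm - x" cannot encode imm as SUB's operand2, but RSB(x, imm)
      // can, and it produces the same value.
      assert(Sub(imm, x) == Rsb(x, imm));
      return 0;
    }
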
=======================================
--- /trunk/src/compiler/arm64/code-generator-arm64.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/arm64/code-generator-arm64.cc Mon Aug 4 06:49:33 2014 UTC
@@ -15,6 +15,8 @@
namespace internal {
namespace compiler {
+#if V8_TURBOFAN_TARGET
+
#define __ masm()->
@@ -129,8 +131,8 @@
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
-
- switch (ArchOpcodeField::decode(instr->opcode())) {
+ InstructionCode opcode = instr->opcode();
+ switch (ArchOpcodeField::decode(opcode)) {
case kArchJmp:
__ B(code_->GetLabel(i.InputBlock(0)));
break;
@@ -153,7 +155,12 @@
__ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kArm64Add32:
-      __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Adds(i.OutputRegister32(), i.InputRegister32(0),
+ i.InputOperand32(1));
+ } else {
+        __ Add(i.OutputRegister32(), i.InputRegister32(0),
+               i.InputOperand32(1));
+ }
break;
case kArm64And:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
@@ -238,7 +245,12 @@
__ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kArm64Sub32:
-      __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+ if (FlagsModeField::decode(opcode) != kFlags_none) {
+ __ Subs(i.OutputRegister32(), i.InputRegister32(0),
+ i.InputOperand32(1));
+ } else {
+        __ Sub(i.OutputRegister32(), i.InputRegister32(0),
+               i.InputOperand32(1));
+ }
break;
case kArm64Shl:
ASSEMBLE_SHIFT(Lsl, 64);
@@ -507,6 +519,12 @@
case kUnsignedGreaterThan:
__ B(hi, tlabel);
break;
+ case kOverflow:
+ __ B(vs, tlabel);
+ break;
+ case kNotOverflow:
+ __ B(vc, tlabel);
+ break;
}
if (!fallthru) __ B(flabel); // no fallthru to flabel.
__ Bind(&done);
@@ -519,9 +537,11 @@
Arm64OperandConverter i(this, instr);
Label done;
- // Materialize a full 64-bit 1 or 0 value.
+  // Materialize a full 64-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
Label check;
- Register reg = i.OutputRegister();
+ ASSERT_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = nv;
switch (condition) {
case kUnorderedEqual:
@@ -584,6 +604,12 @@
case kUnsignedGreaterThan:
cc = hi;
break;
+ case kOverflow:
+ cc = vs;
+ break;
+ case kNotOverflow:
+ cc = vc;
+ break;
}
__ bind(&check);
__ Cset(reg, cc);
@@ -826,6 +852,8 @@
#endif // DEBUG
+#endif // V8_TURBOFAN_TARGET
+
} // namespace compiler
} // namespace internal
} // namespace v8
=======================================
--- /trunk/src/compiler/arm64/instruction-selector-arm64.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/arm64/instruction-selector-arm64.cc Mon Aug 4 06:49:33 2014 UTC
@@ -9,6 +9,8 @@
namespace internal {
namespace compiler {
+#if V8_TURBOFAN_TARGET
+
enum ImmediateMode {
kArithimeticImm, // 12 bit unsigned immediate shifted left 0 or 12 bits
kShift32Imm, // 0 - 31
@@ -112,6 +114,39 @@
bool commutative) {
VisitRRO(selector, opcode, node, operand_mode);
}
+
+
+static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ Arm64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[2];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseRegister(m.right().node());
+
+ // Define outputs depending on the projections.
+ Node* projections[2];
+ node->CollectProjections(ARRAY_SIZE(projections), projections);
+ if (projections[0]) {
+ outputs[output_count++] = g.DefineAsRegister(projections[0]);
+ }
+ if (projections[1]) {
+ opcode |= FlagsModeField::encode(kFlags_set);
+ opcode |= FlagsConditionField::encode(kOverflow);
+ outputs[output_count++] = g.DefineAsRegister(projections[1]);
+ }
+
+ ASSERT_NE(0, input_count);
+ ASSERT_NE(0, output_count);
+ ASSERT_GE(ARRAY_SIZE(inputs), input_count);
+ ASSERT_GE(ARRAY_SIZE(outputs), output_count);
+
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+}
void InstructionSelector::VisitLoad(Node* node) {
@@ -297,6 +332,11 @@
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kArm64Add32, kArithimeticImm, true);
}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kArm64Add32);
+}
void InstructionSelector::VisitInt64Add(Node* node) {
@@ -321,6 +361,11 @@
void InstructionSelector::VisitInt32Sub(Node* node) {
VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kArm64Sub32);
+}
void InstructionSelector::VisitInt64Sub(Node* node) {
@@ -388,28 +433,28 @@
}
-void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Int32ToFloat64, g.DefineAsDoubleRegister(node),
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitConvertUint32ToFloat64(Node* node) {
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Uint32ToFloat64, g.DefineAsDoubleRegister(node),
g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
g.UseDoubleRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitConvertFloat64ToUint32(Node* node) {
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
Arm64OperandGenerator g(this);
Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
g.UseDoubleRegister(node->InputAt(0)));
@@ -614,6 +659,8 @@
Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL);
}
}
+
+#endif // V8_TURBOFAN_TARGET
} // namespace compiler
} // namespace internal
=======================================
--- /trunk/src/compiler/arm64/linkage-arm64.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/arm64/linkage-arm64.cc Mon Aug 4 06:49:33 2014 UTC
@@ -14,6 +14,8 @@
namespace internal {
namespace compiler {
+#if V8_TURBOFAN_TARGET
+
struct LinkageHelperTraits {
static Register ReturnValueReg() { return x0; }
static Register ReturnValue2Reg() { return x1; }
@@ -61,6 +63,8 @@
return LinkageHelper::GetSimplifiedCDescriptor<LinkageHelperTraits>(
zone, num_params, return_type, param_types);
}
+
+#endif // V8_TURBOFAN_TARGET
}
}
} // namespace v8::internal::compiler
=======================================
--- /trunk/src/compiler/code-generator-impl.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/code-generator-impl.h Mon Aug 4 06:49:33 2014 UTC
@@ -72,7 +72,9 @@
return gen_->schedule()->GetBlockById(block_id);
}
- Register OutputRegister() { return ToRegister(instr_->Output()); }
+ Register OutputRegister(int index = 0) {
+ return ToRegister(instr_->OutputAt(index));
+ }
DoubleRegister OutputDoubleRegister() {
return ToDoubleRegister(instr_->Output());
=======================================
--- /trunk/src/compiler/ia32/code-generator-ia32.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/ia32/code-generator-ia32.cc Mon Aug 4 06:49:33 2014 UTC
@@ -464,6 +464,12 @@
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
+ case kOverflow:
+ __ j(overflow, tlabel);
+ break;
+ case kNotOverflow:
+ __ j(no_overflow, tlabel);
+ break;
}
if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
__ bind(&done);
@@ -476,9 +482,11 @@
IA32OperandConverter i(this, instr);
Label done;
- // Materialize a full 32-bit 1 or 0 value.
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
Label check;
- Register reg = i.OutputRegister();
+ ASSERT_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
case kUnorderedEqual:
@@ -541,6 +549,12 @@
case kUnsignedGreaterThan:
cc = above;
break;
+ case kOverflow:
+ cc = overflow;
+ break;
+ case kNotOverflow:
+ cc = no_overflow;
+ break;
}
__ bind(&check);
if (reg.is_byte_register()) {
=======================================
--- /trunk/src/compiler/ia32/instruction-selector-ia32.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/ia32/instruction-selector-ia32.cc Mon Aug 4 06:49:33 2014 UTC
@@ -160,25 +160,64 @@
// Shared routine for multiple binary operations.
-static inline void VisitBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
+ Int32BinopMatcher m(node);
// TODO(turbofan): match complex addressing modes.
// TODO(turbofan): if commutative, pick the non-live-in operand as the left as
// this might be the last use and therefore its register can be reused.
- if (g.CanBeImmediate(right)) {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
- g.UseImmediate(right));
- } else if (g.CanBeImmediate(left) &&
- node->op()->HasProperty(Operator::kCommutative)) {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
- g.UseImmediate(left));
+ if (g.CanBeImmediate(m.right().node())) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(m.left().node()),
+ g.UseImmediate(m.right().node()));
} else {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.Use(right));
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+                   g.UseRegister(m.left().node()), g.Use(m.right().node()));
+ }
+}
+
+
+static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[2];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ // TODO(turbofan): match complex addressing modes.
+  // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
+ // this might be the last use and therefore its register can be reused.
+ if (g.CanBeImmediate(m.right().node())) {
+ inputs[input_count++] = g.Use(m.left().node());
+ inputs[input_count++] = g.UseImmediate(m.right().node());
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.Use(m.right().node());
+ }
+
+ // Define outputs depending on the projections.
+ Node* projections[2];
+ node->CollectProjections(ARRAY_SIZE(projections), projections);
+ if (projections[0]) {
+ outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
+ }
+ if (projections[1]) {
+ opcode |= FlagsModeField::encode(kFlags_set);
+ opcode |= FlagsConditionField::encode(kOverflow);
+ // TODO(turbofan): Use byte register here.
+ outputs[output_count++] =
+ (projections[0] ? g.DefineAsRegister(projections[1])
+ : g.DefineSameAsFirst(projections[1]));
}
+
+ ASSERT_NE(0, input_count);
+ ASSERT_NE(0, output_count);
+ ASSERT_GE(ARRAY_SIZE(inputs), input_count);
+ ASSERT_GE(ARRAY_SIZE(outputs), output_count);
+
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -246,6 +285,11 @@
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kIA32Add);
}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kIA32Add);
+}
void InstructionSelector::VisitInt32Sub(Node* node) {
@@ -257,6 +301,11 @@
VisitBinop(this, node, kIA32Sub);
}
}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kIA32Sub);
+}
void InstructionSelector::VisitInt32Mul(Node* node) {
@@ -319,14 +368,14 @@
}
-void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
IA32OperandGenerator g(this);
Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
g.Use(node->InputAt(0)));
}
-void InstructionSelector::VisitConvertUint32ToFloat64(Node* node) {
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
IA32OperandGenerator g(this);
// TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
Emit(kSSEUint32ToFloat64, g.DefineAsDoubleRegister(node),
@@ -334,13 +383,13 @@
}
-void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
IA32OperandGenerator g(this);
Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node),
g.Use(node->InputAt(0)));
}
-void InstructionSelector::VisitConvertFloat64ToUint32(Node* node) {
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
IA32OperandGenerator g(this);
// TODO(turbofan): IA32 SSE subsd() should take an operand.
Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node),
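
One ia32-specific wrinkle in VisitBinopWithOverflow above: x86 arithmetic is two-address, so the value projection must be defined same-as-first-input, and the overflow projection may only take that aliased slot when no value projection exists. A standalone sketch of just this output-selection decision (toy enum and hypothetical helper, not V8 code):

    #include <cassert>

    enum OperandPolicy { kDefineAsRegister, kDefineSameAsFirst };

    // Mirrors the projection handling above: the value (projection 0), if
    // used, must alias the first input; the overflow flag (projection 1)
    // takes the aliased slot only when the value projection is dead.
    static void SelectOutputs(bool value_used, bool ovf_used,
                              OperandPolicy* out, int* count) {
      *count = 0;
      if (value_used) out[(*count)++] = kDefineSameAsFirst;
      if (ovf_used)
        out[(*count)++] = value_used ? kDefineAsRegister : kDefineSameAsFirst;
    }

    int main() {
      OperandPolicy out[2];
      int count;
      SelectOutputs(true, true, out, &count);
      assert(count == 2 && out[0] == kDefineSameAsFirst &&
             out[1] == kDefineAsRegister);
      SelectOutputs(false, true, out, &count);
      assert(count == 1 && out[0] == kDefineSameAsFirst);
      return 0;
    }
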
=======================================
--- /trunk/src/compiler/instruction-codes.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/instruction-codes.h Mon Aug 4 06:49:33 2014 UTC
@@ -87,7 +87,9 @@
kUnorderedLessThan,
kUnorderedGreaterThanOrEqual,
kUnorderedLessThanOrEqual,
- kUnorderedGreaterThan
+ kUnorderedGreaterThan,
+ kOverflow,
+ kNotOverflow
};
OStream& operator<<(OStream& os, const FlagsCondition& fc);
@@ -105,7 +107,7 @@
typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
typedef BitField<AddressingMode, 7, 4> AddressingModeField;
typedef BitField<FlagsMode, 11, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 13, 4> FlagsConditionField;
+typedef BitField<FlagsCondition, 13, 5> FlagsConditionField;
typedef BitField<int, 13, 19> MiscField;
} // namespace compiler
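
Why the FlagsConditionField widening above is needed: assuming the sixteen pre-existing conditions occupy values 0 through 15, kOverflow lands on 16, the first value a 4-bit field cannot hold. A self-contained sketch of the bit-field encoding involved (simplified stand-in, not V8's actual BitField template; the enumerator values are illustrative):

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for v8::internal::BitField<T, shift, size>.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        assert(static_cast<uint32_t>(value) < (1u << kSize));  // must fit
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    enum FlagsCondition { kEqual = 0, kOverflow = 16, kNotOverflow = 17 };

    int main() {
      // 4 bits hold 0..15; kOverflow == 16 forces the widening to 5 bits.
      typedef BitField<FlagsCondition, 13, 5> FlagsConditionField;
      uint32_t opcode = FlagsConditionField::encode(kOverflow);
      assert(FlagsConditionField::decode(opcode) == kOverflow);
      return 0;
    }
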
=======================================
--- /trunk/src/compiler/instruction-selector-impl.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/instruction-selector-impl.h Mon Aug 4 06:49:33 2014 UTC
@@ -256,6 +256,8 @@
switch (condition_) {
case kEqual:
case kNotEqual:
+ case kOverflow:
+ case kNotOverflow:
return;
case kSignedLessThan:
condition_ = kSignedGreaterThan;
=======================================
--- /trunk/src/compiler/instruction-selector.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/instruction-selector.cc Mon Aug 4 06:49:33 2014 UTC
@@ -252,17 +252,8 @@
if (buffer->descriptor->ReturnCount() == 1) {
buffer->output_nodes[0] = call;
} else {
-      // Iterate over all uses of {call} and collect the projections into the
- // {result} buffer.
-      for (UseIter i = call->uses().begin(); i != call->uses().end(); ++i) {
- if ((*i)->opcode() == IrOpcode::kProjection) {
- int index = OpParameter<int32_t>(*i);
- ASSERT_GE(index, 0);
- ASSERT_LT(index, buffer->descriptor->ReturnCount());
- ASSERT_EQ(NULL, buffer->output_nodes[index]);
- buffer->output_nodes[index] = *i;
- }
- }
+ call->CollectProjections(buffer->descriptor->ReturnCount(),
+ buffer->output_nodes);
}
// Filter out the outputs that aren't live because no projection uses them.
@@ -447,13 +438,10 @@
case IrOpcode::kIfFalse:
case IrOpcode::kEffectPhi:
case IrOpcode::kMerge:
- case IrOpcode::kProjection:
case IrOpcode::kLazyDeoptimization:
case IrOpcode::kContinuation:
// No code needed for these graph artifacts.
return;
- case IrOpcode::kPhi:
- return VisitPhi(node);
case IrOpcode::kParameter: {
int index = OpParameter<int>(node);
MachineRepresentation rep = linkage()->GetParameterType(index);
MarkAsRepresentation(rep, node);
return VisitParameter(node);
}
+ case IrOpcode::kPhi:
+ return VisitPhi(node);
+ case IrOpcode::kProjection:
+ return VisitProjection(node);
case IrOpcode::kInt32Constant:
case IrOpcode::kInt64Constant:
case IrOpcode::kExternalConstant:
@@ -515,8 +507,12 @@
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
return VisitInt32Add(node);
+ case IrOpcode::kInt32AddWithOverflow:
+ return VisitInt32AddWithOverflow(node);
case IrOpcode::kInt32Sub:
return VisitInt32Sub(node);
+ case IrOpcode::kInt32SubWithOverflow:
+ return VisitInt32SubWithOverflow(node);
case IrOpcode::kInt32Mul:
return VisitInt32Mul(node);
case IrOpcode::kInt32Div:
@@ -557,14 +553,14 @@
return VisitConvertInt32ToInt64(node);
case IrOpcode::kConvertInt64ToInt32:
return VisitConvertInt64ToInt32(node);
- case IrOpcode::kConvertInt32ToFloat64:
- return MarkAsDouble(node), VisitConvertInt32ToFloat64(node);
- case IrOpcode::kConvertUint32ToFloat64:
- return MarkAsDouble(node), VisitConvertUint32ToFloat64(node);
- case IrOpcode::kConvertFloat64ToInt32:
- return VisitConvertFloat64ToInt32(node);
- case IrOpcode::kConvertFloat64ToUint32:
- return VisitConvertFloat64ToUint32(node);
+ case IrOpcode::kChangeInt32ToFloat64:
+ return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
+ case IrOpcode::kChangeUint32ToFloat64:
+ return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
+ case IrOpcode::kChangeFloat64ToInt32:
+ return VisitChangeFloat64ToInt32(node);
+ case IrOpcode::kChangeFloat64ToUint32:
+ return VisitChangeFloat64ToUint32(node);
case IrOpcode::kFloat64Add:
return MarkAsDouble(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
@@ -736,6 +732,13 @@
#endif // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_TARGET
+void InstructionSelector::VisitParameter(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
+ OpParameter<int>(node))));
+}
+
+
void InstructionSelector::VisitPhi(Node* node) {
// TODO(bmeurer): Emit a PhiInstruction here.
for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
@@ -744,10 +747,10 @@
}
-void InstructionSelector::VisitParameter(Node* node) {
- OperandGenerator g(this);
- Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetParameterLocation(
- OpParameter<int>(node))));
+void InstructionSelector::VisitProjection(Node* node) {
+  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+ MarkAsUsed(*i);
+ }
}
=======================================
--- /trunk/src/compiler/instruction-selector.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/instruction-selector.h Mon Aug 4 06:49:33 2014 UTC
@@ -129,8 +129,9 @@
void VisitWord64Compare(Node* node, FlagsContinuation* cont);
void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
+ void VisitParameter(Node* node);
void VisitPhi(Node* node);
- void VisitParameter(Node* node);
+ void VisitProjection(Node* node);
void VisitConstant(Node* node);
void VisitCall(Node* call, BasicBlock* continuation,
BasicBlock* deoptimization);
=======================================
--- /trunk/src/compiler/instruction.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/instruction.cc Mon Aug 4 06:49:33 2014 UTC
@@ -239,6 +239,10 @@
return os << "unordered less than or equal";
case kUnorderedGreaterThan:
return os << "unordered greater than";
+ case kOverflow:
+ return os << "overflow";
+ case kNotOverflow:
+ return os << "not overflow";
}
UNREACHABLE();
return os;
=======================================
--- /trunk/src/compiler/js-generic-lowering.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/js-generic-lowering.cc Mon Aug 4 06:49:33 2014 UTC
@@ -388,9 +388,19 @@
Node* JSGenericLowering::LowerJSStoreContext(Node* node) {
ContextAccess access = OpParameter<ContextAccess>(node);
- PatchInsertInput(node, 1, SmiConstant(access.depth()));
- PatchInsertInput(node, 2, SmiConstant(access.index()));
- ReplaceWithRuntimeCall(node, Runtime::kStoreContextRelative, 4);
+  // TODO(mstarzinger): Use simplified operators instead of machine operators
+ // here so that load/store optimization can be applied afterwards.
+ for (int i = 0; i < access.depth(); ++i) {
+ node->ReplaceInput(
+ 0, graph()->NewNode(
+ machine()->Load(kMachineTagged),
+ NodeProperties::GetValueInput(node, 0),
+ Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
+ NodeProperties::GetEffectInput(node)));
+ }
+ node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
+  node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
+ PatchOperator(node, machine()->Store(kMachineTagged, kFullWriteBarrier));
return node;
}
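
The rewritten LowerJSStoreContext above replaces the StoreContextRelative runtime call (deleted from runtime.cc further down) with plain machine operators: access.depth() loads of the previous-context slot, then one tagged store with a full write barrier. A toy model of the behaviour this preserves (plain structs and array slots; the real code walks heap objects via Context::SlotOffset):

    #include <cassert>
    #include <cstddef>

    struct ToyContext {
      ToyContext* previous;
      int slots[8];
    };

    // Same observable effect as the removed Runtime_StoreContextRelative:
    // hop 'depth' links up the context chain, then store into slot 'index'.
    static void StoreContextRelative(ToyContext* context, int depth,
                                     int index, int value) {
      for (int i = 0; i < depth; ++i) context = context->previous;  // loads
      context->slots[index] = value;  // store (write-barriered in real V8)
    }

    int main() {
      ToyContext outer = {NULL, {0}};
      ToyContext inner = {&outer, {0}};
      StoreContextRelative(&inner, 1, 3, 42);
      assert(outer.slots[3] == 42);
      return 0;
    }
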
=======================================
--- /trunk/src/compiler/machine-node-factory.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/machine-node-factory.h Mon Aug 4 06:49:33 2014 UTC
@@ -84,6 +84,10 @@
PrintableUnique<Object>::CreateUninitialized(ZONE(), object);
return NEW_NODE_0(COMMON()->HeapConstant(val));
}
+
+ Node* Projection(int index, Node* a) {
+ return NEW_NODE_1(COMMON()->Projection(index), a);
+ }
// Memory Operations.
Node* Load(MachineRepresentation rep, Node* base) {
@@ -195,9 +199,21 @@
Node* Int32Add(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Add(), a, b);
}
+ void Int32AddWithOverflow(Node* a, Node* b, Node** val_return,
+ Node** ovf_return) {
+ Node* add = NEW_NODE_2(MACHINE()->Int32AddWithOverflow(), a, b);
+ if (val_return) *val_return = Projection(0, add);
+ if (ovf_return) *ovf_return = Projection(1, add);
+ }
Node* Int32Sub(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Sub(), a, b);
}
+ void Int32SubWithOverflow(Node* a, Node* b, Node** val_return,
+ Node** ovf_return) {
+ Node* add = NEW_NODE_2(MACHINE()->Int32SubWithOverflow(), a, b);
+ if (val_return) *val_return = Projection(0, add);
+ if (ovf_return) *ovf_return = Projection(1, add);
+ }
Node* Int32Mul(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Mul(), a, b);
}
@@ -329,17 +345,17 @@
Node* ConvertInt64ToInt32(Node* a) {
return NEW_NODE_1(MACHINE()->ConvertInt64ToInt32(), a);
}
- Node* ConvertInt32ToFloat64(Node* a) {
- return NEW_NODE_1(MACHINE()->ConvertInt32ToFloat64(), a);
+ Node* ChangeInt32ToFloat64(Node* a) {
+ return NEW_NODE_1(MACHINE()->ChangeInt32ToFloat64(), a);
}
- Node* ConvertUint32ToFloat64(Node* a) {
- return NEW_NODE_1(MACHINE()->ConvertUint32ToFloat64(), a);
+ Node* ChangeUint32ToFloat64(Node* a) {
+ return NEW_NODE_1(MACHINE()->ChangeUint32ToFloat64(), a);
}
- Node* ConvertFloat64ToInt32(Node* a) {
- return NEW_NODE_1(MACHINE()->ConvertFloat64ToInt32(), a);
+ Node* ChangeFloat64ToInt32(Node* a) {
+ return NEW_NODE_1(MACHINE()->ChangeFloat64ToInt32(), a);
}
- Node* ConvertFloat64ToUint32(Node* a) {
- return NEW_NODE_1(MACHINE()->ConvertFloat64ToUint32(), a);
+ Node* ChangeFloat64ToUint32(Node* a) {
+ return NEW_NODE_1(MACHINE()->ChangeFloat64ToUint32(), a);
}
#ifdef MACHINE_ASSEMBLER_SUPPORTS_CALL_C
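
The new factory methods above hand results back through out-parameters because the node has two observable values: projection 0 is the (wrapping) 32-bit result and projection 1 is the overflow flag. A standalone model of those semantics, using the GCC/Clang __builtin_add_overflow intrinsic as a stand-in for the flag the hardware sets:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Models Int32AddWithOverflow's two projections: *val is the wrapped
    // 32-bit sum, *ovf the overflow bit (ARM 'adds' V flag, x86 OF).
    static void Int32AddWithOverflow(int32_t a, int32_t b, int32_t* val,
                                     bool* ovf) {
      int32_t result;
      *ovf = __builtin_add_overflow(a, b, &result);  // GCC/Clang builtin
      *val = result;
    }

    int main() {
      int32_t val;
      bool ovf;
      Int32AddWithOverflow(std::numeric_limits<int32_t>::max(), 1, &val, &ovf);
      assert(ovf);  // INT32_MAX + 1 overflows; val wrapped around
      Int32AddWithOverflow(1, 2, &val, &ovf);
      assert(!ovf && val == 3);
      return 0;
    }
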
=======================================
--- /trunk/src/compiler/machine-operator.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/machine-operator.h Mon Aug 4 06:49:33 2014 UTC
@@ -68,12 +68,17 @@
inputs, outputs, #name, pname)
#define BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
+#define BINOP_O(name) SIMPLE(name, Operator::kPure, 2, 2)
#define BINOP_C(name) \
  SIMPLE(name, Operator::kCommutative | Operator::kPure, 2, 1)
#define BINOP_AC(name)                                                         \
  SIMPLE(name,                                                                 \
         Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
         1)
+#define BINOP_ACO(name)                                                       \
+  SIMPLE(name,                                                                \
+         Operator::kAssociative | Operator::kCommutative | Operator::kPure, 2, \
+         2)
#define UNOP(name) SIMPLE(name, Operator::kPure, 1, 1)
#define WORD_SIZE(x) return is64() ? Word64##x() : Word32##x()
@@ -113,7 +118,9 @@
Operator* Word64Equal() { BINOP_C(Word64Equal); }
Operator* Int32Add() { BINOP_AC(Int32Add); }
+ Operator* Int32AddWithOverflow() { BINOP_ACO(Int32AddWithOverflow); }
Operator* Int32Sub() { BINOP(Int32Sub); }
+ Operator* Int32SubWithOverflow() { BINOP_O(Int32SubWithOverflow); }
Operator* Int32Mul() { BINOP_AC(Int32Mul); }
Operator* Int32Div() { BINOP(Int32Div); }
Operator* Int32UDiv() { BINOP(Int32UDiv); }
@@ -142,10 +149,10 @@
// defined for these operators, since they are intended only for use with
// integers.
// TODO(titzer): rename ConvertXXX to ChangeXXX in machine operators.
- Operator* ConvertInt32ToFloat64() { UNOP(ConvertInt32ToFloat64); }
- Operator* ConvertUint32ToFloat64() { UNOP(ConvertUint32ToFloat64); }
- Operator* ConvertFloat64ToInt32() { UNOP(ConvertFloat64ToInt32); }
- Operator* ConvertFloat64ToUint32() { UNOP(ConvertFloat64ToUint32); }
+ Operator* ChangeInt32ToFloat64() { UNOP(ChangeInt32ToFloat64); }
+ Operator* ChangeUint32ToFloat64() { UNOP(ChangeUint32ToFloat64); }
+ Operator* ChangeFloat64ToInt32() { UNOP(ChangeFloat64ToInt32); }
+ Operator* ChangeFloat64ToUint32() { UNOP(ChangeFloat64ToUint32); }
// Floating point operators always operate with IEEE 754
round-to-nearest.
Operator* Float64Add() { BINOP_C(Float64Add); }
=======================================
--- /trunk/src/compiler/node.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/node.cc Mon Aug 4 06:49:33 2014 UTC
@@ -4,10 +4,25 @@
#include "src/compiler/node.h"
+#include "src/compiler/generic-node-inl.h"
+
namespace v8 {
namespace internal {
namespace compiler {
+void Node::CollectProjections(int projection_count, Node** projections) {
+ for (int i = 0; i < projection_count; ++i) projections[i] = NULL;
+ for (UseIter i = uses().begin(); i != uses().end(); ++i) {
+ if ((*i)->opcode() != IrOpcode::kProjection) continue;
+ int32_t index = OpParameter<int32_t>(*i);
+ ASSERT_GE(index, 0);
+ ASSERT_LT(index, projection_count);
+ ASSERT_EQ(NULL, projections[index]);
+ projections[index] = *i;
+ }
+}
+
+
OStream& operator<<(OStream& os, const Operator& op) { return op.PrintTo(os); }
@@ -23,6 +38,7 @@
}
return os;
}
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
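
Node::CollectProjections, added above, centralizes the scan that the instruction selector previously open-coded for call results (see the instruction-selector.cc hunk below): walk the node's uses and file each kProjection use into the slot named by its index parameter, filling no slot twice. A standalone sketch of that contract with toy types (not V8's Node/UseIter):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct ToyNode {
      bool is_projection;
      int index;  // what OpParameter<int32_t> extracts in V8
    };

    // Same invariants as Node::CollectProjections: slots start out NULL,
    // each projection use lands in the slot named by its index, and a
    // slot is never filled twice.
    static void CollectProjections(const std::vector<ToyNode*>& uses,
                                   int projection_count,
                                   ToyNode** projections) {
      for (int i = 0; i < projection_count; ++i) projections[i] = NULL;
      for (size_t u = 0; u < uses.size(); ++u) {
        if (!uses[u]->is_projection) continue;
        int index = uses[u]->index;
        assert(0 <= index && index < projection_count);
        assert(projections[index] == NULL);
        projections[index] = uses[u];
      }
    }

    int main() {
      ToyNode value = {true, 0}, ovf = {true, 1};
      std::vector<ToyNode*> uses;
      uses.push_back(&ovf);  // use order does not matter
      uses.push_back(&value);
      ToyNode* projections[2];
      CollectProjections(uses, 2, projections);
      assert(projections[0] == &value && projections[1] == &ovf);
      return 0;
    }
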
=======================================
--- /trunk/src/compiler/node.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/node.h Mon Aug 4 06:49:33 2014 UTC
@@ -53,6 +53,8 @@
: GenericNode<NodeData, Node>(graph, input_count) {}
void Initialize(Operator* op) { set_op(op); }
+
+ void CollectProjections(int projection_count, Node** projections);
};
OStream& operator<<(OStream& os, const Node& n);
=======================================
--- /trunk/src/compiler/opcodes.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/opcodes.h Mon Aug 4 06:49:33 2014 UTC
@@ -155,56 +155,58 @@
V(StoreElement)
// Opcodes for Machine-level operators.
-#define MACHINE_OP_LIST(V) \
- V(Load) \
- V(Store) \
- V(Word32And) \
- V(Word32Or) \
- V(Word32Xor) \
- V(Word32Shl) \
- V(Word32Shr) \
- V(Word32Sar) \
- V(Word32Equal) \
- V(Word64And) \
- V(Word64Or) \
- V(Word64Xor) \
- V(Word64Shl) \
- V(Word64Shr) \
- V(Word64Sar) \
- V(Word64Equal) \
- V(Int32Add) \
- V(Int32Sub) \
- V(Int32Mul) \
- V(Int32Div) \
- V(Int32UDiv) \
- V(Int32Mod) \
- V(Int32UMod) \
- V(Int32LessThan) \
- V(Int32LessThanOrEqual) \
- V(Uint32LessThan) \
- V(Uint32LessThanOrEqual) \
- V(Int64Add) \
- V(Int64Sub) \
- V(Int64Mul) \
- V(Int64Div) \
- V(Int64UDiv) \
- V(Int64Mod) \
- V(Int64UMod) \
- V(Int64LessThan) \
- V(Int64LessThanOrEqual) \
- V(ConvertInt64ToInt32) \
- V(ConvertInt32ToInt64) \
- V(ConvertInt32ToFloat64) \
- V(ConvertUint32ToFloat64) \
- V(ConvertFloat64ToInt32) \
- V(ConvertFloat64ToUint32) \
- V(Float64Add) \
- V(Float64Sub) \
- V(Float64Mul) \
- V(Float64Div) \
- V(Float64Mod) \
- V(Float64Equal) \
- V(Float64LessThan) \
+#define MACHINE_OP_LIST(V) \
+ V(Load) \
+ V(Store) \
+ V(Word32And) \
+ V(Word32Or) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar) \
+ V(Word32Equal) \
+ V(Word64And) \
+ V(Word64Or) \
+ V(Word64Xor) \
+ V(Word64Shl) \
+ V(Word64Shr) \
+ V(Word64Sar) \
+ V(Word64Equal) \
+ V(Int32Add) \
+ V(Int32AddWithOverflow) \
+ V(Int32Sub) \
+ V(Int32SubWithOverflow) \
+ V(Int32Mul) \
+ V(Int32Div) \
+ V(Int32UDiv) \
+ V(Int32Mod) \
+ V(Int32UMod) \
+ V(Int32LessThan) \
+ V(Int32LessThanOrEqual) \
+ V(Uint32LessThan) \
+ V(Uint32LessThanOrEqual) \
+ V(Int64Add) \
+ V(Int64Sub) \
+ V(Int64Mul) \
+ V(Int64Div) \
+ V(Int64UDiv) \
+ V(Int64Mod) \
+ V(Int64UMod) \
+ V(Int64LessThan) \
+ V(Int64LessThanOrEqual) \
+ V(ConvertInt64ToInt32) \
+ V(ConvertInt32ToInt64) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Mod) \
+ V(Float64Equal) \
+ V(Float64LessThan) \
V(Float64LessThanOrEqual)
#define VALUE_OP_LIST(V) \
=======================================
--- /trunk/src/compiler/pipeline.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/pipeline.cc Mon Aug 4 06:49:33 2014 UTC
@@ -170,23 +170,23 @@
}
}
- {
- // Lower any remaining generic JSOperators.
- PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
- "generic lowering");
- MachineOperatorBuilder machine(zone());
-    JSGenericLowering lowering(info(), &jsgraph, &machine, &source_positions);
- lowering.LowerAllNodes();
+ Handle<Code> code = Handle<Code>::null();
+ if (SupportedTarget()) {
+ {
+ // Lower any remaining generic JSOperators.
+ PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+ "generic lowering");
+ MachineOperatorBuilder machine(zone());
+      JSGenericLowering lowering(info(), &jsgraph, &machine, &source_positions);
+ lowering.LowerAllNodes();
- VerifyAndPrintGraph(&graph, "Lowered generic");
- }
+ VerifyAndPrintGraph(&graph, "Lowered generic");
+ }
- // Compute a schedule.
- Schedule* schedule = ComputeSchedule(&graph);
- TraceSchedule(schedule);
+ // Compute a schedule.
+ Schedule* schedule = ComputeSchedule(&graph);
+ TraceSchedule(schedule);
- Handle<Code> code = Handle<Code>::null();
- if (SupportedTarget()) {
{
// Generate optimized code.
PhaseStats codegen_stats(info(), PhaseStats::CODEGEN, "codegen");
@@ -194,6 +194,7 @@
code = GenerateCode(&linkage, &graph, schedule, &source_positions);
info()->SetCode(code);
}
+
// Print optimized code.
v8::internal::CodeGenerator::PrintCode(code, info());
}
=======================================
--- /trunk/src/compiler/pipeline.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/pipeline.h Mon Aug 4 06:49:33 2014 UTC
@@ -12,8 +12,7 @@
// Note: TODO(turbofan) implies a performance improvement opportunity,
// and TODO(name) implies an incomplete implementation
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM
#ifndef _WIN64
#define V8_TURBOFAN_TARGET 1
#else
=======================================
--- /trunk/src/compiler/register-allocator.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/register-allocator.cc Mon Aug 4 06:49:33 2014 UTC
@@ -819,11 +819,11 @@
// Handle "output same as input" for second instruction.
for (size_t i = 0; i < second->OutputCount(); i++) {
- InstructionOperand* output = second->Output();
+ InstructionOperand* output = second->OutputAt(i);
if (!output->IsUnallocated()) continue;
UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
if (second_output->HasSameAsInputPolicy()) {
- ASSERT(second->OutputCount() == 1); // Only valid for one output.
+ ASSERT(i == 0); // Only valid for first output.
UnallocatedOperand* cur_input =
UnallocatedOperand::cast(second->InputAt(0));
int output_vreg = second_output->virtual_register();
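
The weakened assertion above tracks the new multi-output instructions: an add-with-overflow may define two outputs, but the same-as-input policy only makes sense on output 0, which it ties to input 0's register. A toy statement of the invariant the loop now checks per output:

    #include <cassert>

    struct ToyOperand {
      bool same_as_input;  // models the same-as-input operand policy
    };

    // The per-output check: only the first output may alias an input.
    static void CheckOutputs(const ToyOperand* outputs, int count) {
      for (int i = 0; i < count; ++i) {
        if (outputs[i].same_as_input) assert(i == 0);
      }
    }

    int main() {
      // e.g. ia32 AddWithOverflow: value aliases input 0, flag does not.
      ToyOperand outputs[2] = {{true}, {false}};
      CheckOutputs(outputs, 2);
      return 0;
    }
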
=======================================
--- /trunk/src/compiler/representation-change.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/representation-change.h Mon Aug 4 06:49:33 2014 UTC
@@ -150,9 +150,9 @@
Operator* op;
if (output_type & rWord32) {
if (output_type & tUint32) {
- op = machine()->ConvertUint32ToFloat64();
+ op = machine()->ChangeUint32ToFloat64();
} else if (output_type & tInt32) {
- op = machine()->ConvertInt32ToFloat64();
+ op = machine()->ChangeInt32ToFloat64();
} else {
return TypeError(node, output_type, rFloat64);
}
@@ -188,12 +188,10 @@
// Select the correct X -> Word32 operator.
Operator* op = NULL;
if (output_type & rFloat64) {
-    // TODO(turbofan): could have cheaper float64 conversions that don't do
- // the full JavaScript truncation here.
if (output_type & tUint32) {
- op = machine()->ConvertFloat64ToUint32();
+ op = machine()->ChangeFloat64ToUint32();
} else if (output_type & tInt32) {
- op = machine()->ConvertFloat64ToInt32();
+ op = machine()->ChangeFloat64ToInt32();
} else {
return TypeError(node, output_type, rWord32);
}
=======================================
--- /trunk/src/compiler/simplified-lowering.cc Fri Aug 1 10:40:37 2014 UTC
+++ /trunk/src/compiler/simplified-lowering.cc Mon Aug 4 06:49:33 2014 UTC
@@ -66,8 +66,8 @@
Node* loaded = graph()->NewNode(
machine()->Load(kMachineFloat64), val,
OffsetMinusTagConstant(HeapNumber::kValueOffset), effect);
- Operator* op = is_signed ? machine()->ConvertFloat64ToInt32()
- : machine()->ConvertFloat64ToUint32();
+ Operator* op = is_signed ? machine()->ChangeFloat64ToInt32()
+ : machine()->ChangeFloat64ToUint32();
Node* converted = graph()->NewNode(op, loaded);
// false branch.
@@ -100,7 +100,7 @@
Node* fbranch = graph()->NewNode(common()->IfFalse(), branch);
Node* untagged = Untag(val);
Node* converted =
- graph()->NewNode(machine()->ConvertInt32ToFloat64(), untagged);
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), untagged);
// merge.
Node* merge = graph()->NewNode(common()->Merge(2), tbranch, fbranch);
=======================================
--- /trunk/src/compiler/x64/code-generator-x64.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/x64/code-generator-x64.cc Mon Aug 4 06:49:33 2014 UTC
@@ -667,6 +667,12 @@
case kUnsignedGreaterThan:
__ j(above, tlabel);
break;
+ case kOverflow:
+ __ j(overflow, tlabel);
+ break;
+ case kNotOverflow:
+ __ j(no_overflow, tlabel);
+ break;
}
if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
__ bind(&done);
@@ -679,9 +685,11 @@
X64OperandConverter i(this, instr);
Label done;
- // Materialize a full 32-bit 1 or 0 value.
+  // Materialize a full 64-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
Label check;
- Register reg = i.OutputRegister();
+ ASSERT_NE(0, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
case kUnorderedEqual:
@@ -744,6 +752,12 @@
case kUnsignedGreaterThan:
cc = above;
break;
+ case kOverflow:
+ cc = overflow;
+ break;
+ case kNotOverflow:
+ cc = no_overflow;
+ break;
}
__ bind(&check);
__ setcc(cc, reg);
=======================================
--- /trunk/src/compiler/x64/instruction-selector-x64.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler/x64/instruction-selector-x64.cc Mon Aug 4 06:49:33 2014 UTC
@@ -194,6 +194,49 @@
g.Use(right));
}
}
+
+
+static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ X64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand* inputs[2];
+ size_t input_count = 0;
+ InstructionOperand* outputs[2];
+ size_t output_count = 0;
+
+ // TODO(turbofan): match complex addressing modes.
+  // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
+ // this might be the last use and therefore its register can be reused.
+ if (g.CanBeImmediate(m.right().node())) {
+ inputs[input_count++] = g.Use(m.left().node());
+ inputs[input_count++] = g.UseImmediate(m.right().node());
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.Use(m.right().node());
+ }
+
+ // Define outputs depending on the projections.
+ Node* projections[2];
+ node->CollectProjections(ARRAY_SIZE(projections), projections);
+ if (projections[0]) {
+ outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
+ }
+ if (projections[1]) {
+ opcode |= FlagsModeField::encode(kFlags_set);
+ opcode |= FlagsConditionField::encode(kOverflow);
+ outputs[output_count++] =
+ (projections[0] ? g.DefineAsRegister(projections[1])
+ : g.DefineSameAsFirst(projections[1]));
+ }
+
+ ASSERT_NE(0, input_count);
+ ASSERT_NE(0, output_count);
+ ASSERT_GE(ARRAY_SIZE(inputs), input_count);
+ ASSERT_GE(ARRAY_SIZE(outputs), output_count);
+
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
+}
void InstructionSelector::VisitWord32And(Node* node) {
@@ -325,6 +368,11 @@
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kX64Add32, true);
}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kX64Add32);
+}
void InstructionSelector::VisitInt64Add(Node* node) {
@@ -349,6 +397,11 @@
void InstructionSelector::VisitInt32Sub(Node* node) {
VisitSub<int32_t>(this, node, kX64Sub32, kX64Neg32);
}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ VisitBinopWithOverflow(this, node, kX64Sub32);
+}
void InstructionSelector::VisitInt64Sub(Node* node) {
@@ -445,14 +498,14 @@
}
-void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
g.Use(node->InputAt(0)));
}
-void InstructionSelector::VisitConvertUint32ToFloat64(Node* node) {
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
X64OperandGenerator g(this);
// TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
Emit(kSSEUint32ToFloat64, g.DefineAsDoubleRegister(node),
@@ -460,13 +513,13 @@
}
-void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node),
g.Use(node->InputAt(0)));
}
-void InstructionSelector::VisitConvertFloat64ToUint32(Node* node) {
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
X64OperandGenerator g(this);
// TODO(turbofan): X64 SSE cvttsd2siq should support operands.
Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node),
=======================================
--- /trunk/src/compiler.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/compiler.cc Mon Aug 4 06:49:33 2014 UTC
@@ -416,7 +416,9 @@
!info()->is_osr()) {
compiler::Pipeline pipeline(info());
pipeline.GenerateCode();
- return SetLastStatus(SUCCEEDED);
+ if (!info()->code().is_null()) {
+ return SetLastStatus(SUCCEEDED);
+ }
}
if (FLAG_trace_hydrogen) {
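
With the pipeline now skipping codegen on unsupported targets (see pipeline.cc above), GenerateCode() can legitimately leave info()->code() empty; the added null check makes the job fall through to the existing Crankshaft path below instead of reporting success without code. A toy model of that control flow (hypothetical names, not V8 API):

    #include <cassert>

    struct ToyInfo {
      bool has_code;  // stands in for !info()->code().is_null()
    };

    // "Succeed" only when the pipeline really produced code; otherwise the
    // caller keeps going and tries the older compiler instead.
    static bool TryTurbofanPipeline(ToyInfo* info, bool supported_target) {
      info->has_code = supported_target;
      return info->has_code;
    }

    int main() {
      ToyInfo info = {false};
      assert(!TryTurbofanPipeline(&info, false));  // fall back, don't succeed
      assert(TryTurbofanPipeline(&info, true));    // SUCCEEDED with real code
      return 0;
    }
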
=======================================
--- /trunk/src/runtime.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/runtime.cc Mon Aug 4 06:49:33 2014 UTC
@@ -9406,22 +9406,6 @@
return *value;
}
-
-
-RUNTIME_FUNCTION(Runtime_StoreContextRelative) {
- SealHandleScope shs(isolate);
- ASSERT(args.length() == 4);
- CONVERT_ARG_CHECKED(Context, context, 0);
- CONVERT_SMI_ARG_CHECKED(depth, 1);
- CONVERT_SMI_ARG_CHECKED(index, 2);
- CONVERT_ARG_CHECKED(Object, value, 3);
- while (depth-- > 0) {
- context = context->previous();
- ASSERT(context->IsContext());
- }
- context->set(index, value);
- return isolate->heap()->undefined_value();
-}
RUNTIME_FUNCTION(Runtime_Throw) {
=======================================
--- /trunk/src/runtime.h Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/src/runtime.h Mon Aug 4 06:49:33 2014 UTC
@@ -481,7 +481,6 @@
F(LoadLookupSlot, 2, 2) \
F(LoadLookupSlotNoReferenceError, 2, 2) \
F(StoreLookupSlot, 4, 1) \
- F(StoreContextRelative, 4, 1) /* TODO(turbofan): Only temporary */ \
\
/* Declarations and initialization */ \
F(DeclareGlobals, 3, 1) \
=======================================
--- /trunk/src/version.cc Fri Aug 1 10:40:37 2014 UTC
+++ /trunk/src/version.cc Mon Aug 4 06:49:33 2014 UTC
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 28
-#define BUILD_NUMBER 53
+#define BUILD_NUMBER 54
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
=======================================
--- /trunk/test/cctest/compiler/test-instruction-selector-arm.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/test/cctest/compiler/test-instruction-selector-arm.cc Mon Aug 4 06:49:33 2014 UTC
@@ -41,6 +41,26 @@
};
+struct ODPI {
+ Operator* op;
+ ArchOpcode arch_opcode;
+ ArchOpcode reverse_arch_opcode;
+};
+
+
+// ARM data processing instructions with overflow.
+class ODPIs V8_FINAL : public std::list<ODPI>, private HandleAndZoneScope {
+ public:
+ ODPIs() {
+ MachineOperatorBuilder machine(main_zone());
+ ODPI add = {machine.Int32AddWithOverflow(), kArmAdd, kArmAdd};
+ push_back(add);
+ ODPI sub = {machine.Int32SubWithOverflow(), kArmSub, kArmRsb};
+ push_back(sub);
+ }
+};
+
+
// ARM immediates.
class Immediates V8_FINAL : public std::list<int32_t> {
public:
@@ -223,6 +243,351 @@
}
}
}
+
+
+TEST(InstructionSelectorODPIP) {
+ ODPIs odpis;
+ for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
+ ODPI odpi = *i;
+ {
+ InstructionSelectorTester m;
+ m.Return(
+          m.Projection(1, m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(
+          m.Projection(0, m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
+      m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ }
+}
+
+
+TEST(InstructionSelectorODPIImm) {
+ ODPIs odpis;
+ Immediates immediates;
+ for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
+ ODPI odpi = *i;
+ for (Immediates::const_iterator j = immediates.begin();
+ j != immediates.end(); ++j) {
+ int32_t imm = *j;
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(
+ 1, m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(
+ 1, m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(
+ 0, m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(
+ 0, m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+      Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm));
+      m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+      Node* node = m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0));
+      m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ }
+ }
+}
+
+
+TEST(InstructionSelectorODPIAndShiftP) {
+ ODPIs odpis;
+ Shifts shifts;
+ for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
+ ODPI odpi = *i;
+    for (Shifts::const_iterator j = shifts.begin(); j != shifts.end(); ++j) {
+ Shift shift = *j;
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(
+ 1, m.NewNode(odpi.op, m.Parameter(0),
+                       m.NewNode(shift.op, m.Parameter(1),
+                                 m.Parameter(2)))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(
+ 1, m.NewNode(odpi.op,
+ m.NewNode(shift.op, m.Parameter(0),
m.Parameter(1)),
+ m.Parameter(2))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(
+ 0, m.NewNode(odpi.op, m.Parameter(0),
+ m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(
+ 0, m.NewNode(odpi.op,
+ m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
+ m.Parameter(2))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* node =
+ m.NewNode(odpi.op, m.Parameter(0),
+ m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)));
+ m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* node = m.NewNode(
+ odpi.op, m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
+ m.Parameter(2));
+ m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ }
+ }
+}
+
+
+TEST(InstructionSelectorODPIAndShiftImm) {
+ ODPIs odpis;
+ Shifts shifts;
+ for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
+ ODPI odpi = *i;
+ for (Shifts::const_iterator j = shifts.end(); j != shifts.end(); ++j) {
+ Shift shift = *j;
+ for (int32_t imm = shift.i_low; imm <= shift.i_high; ++imm) {
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(1, m.NewNode(odpi.op, m.Parameter(0),
+ m.NewNode(shift.op, m.Parameter(1),
+ m.Int32Constant(imm)))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(
+ 1, m.NewNode(odpi.op, m.NewNode(shift.op, m.Parameter(0),
+ m.Int32Constant(imm)),
+ m.Parameter(1))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(0, m.NewNode(odpi.op, m.Parameter(0),
+ m.NewNode(shift.op, m.Parameter(1),
+ m.Int32Constant(imm)))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Projection(
+ 0, m.NewNode(odpi.op, m.NewNode(shift.op, m.Parameter(0),
+ m.Int32Constant(imm)),
+ m.Parameter(1))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(1, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* node = m.NewNode(
+ odpi.op, m.Parameter(0),
+ m.NewNode(shift.op, m.Parameter(1), m.Int32Constant(imm)));
+ m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ {
+ InstructionSelectorTester m;
+ Node* node = m.NewNode(odpi.op, m.NewNode(shift.op, m.Parameter(0),
+ m.Int32Constant(imm)),
+ m.Parameter(1));
+ m.Return(m.Word32Equal(m.Projection(0, node), m.Projection(1, node)));
+ m.SelectInstructions();
+ CHECK_LE(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.i_mode, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_EQ(3, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
+ CHECK_EQ(2, m.code[0]->OutputCount());
+ }
+ }
+ }
+ }
+}
TEST(InstructionSelectorWord32AndAndWord32XorWithMinus1P) {
@@ -263,6 +628,58 @@
CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
}
}
+
+
+TEST(InstructionSelectorWord32AndAndWord32XorWithMinus1AndShiftP) {
+ Shifts shifts;
+ for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
+ Shift shift = *i;
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Word32And(
+ m.Parameter(0),
+ m.Word32Xor(m.Int32Constant(-1),
+ m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Word32And(
+ m.Parameter(0),
+ m.Word32Xor(m.NewNode(shift.op, m.Parameter(1), m.Parameter(2)),
+ m.Int32Constant(-1))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Word32And(
+ m.Word32Xor(m.Int32Constant(-1),
+ m.NewNode(shift.op, m.Parameter(0), m.Parameter(1))),
+ m.Parameter(2)));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Word32And(
+ m.Word32Xor(m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
+ m.Int32Constant(-1)),
+ m.Parameter(2)));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmBic, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ }
+ }
+}
TEST(InstructionSelectorWord32XorWithMinus1P) {
@@ -283,6 +700,33 @@
CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
}
}
+
+
+TEST(InstructionSelectorWord32XorWithMinus1AndShiftP) {
+ Shifts shifts;
+ for (Shifts::const_iterator i = shifts.begin(); i != shifts.end(); ++i) {
+ Shift shift = *i;
+ {
+ InstructionSelectorTester m;
+ m.Return(
+ m.Word32Xor(m.Int32Constant(-1),
+ m.NewNode(shift.op, m.Parameter(0), m.Parameter(1))));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmMvn, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ }
+ {
+ InstructionSelectorTester m;
+ m.Return(m.Word32Xor(m.NewNode(shift.op, m.Parameter(0), m.Parameter(1)),
+ m.Int32Constant(-1)));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(kArmMvn, m.code[0]->arch_opcode());
+ CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
+ }
+ }
+}
TEST(InstructionSelectorShiftP) {
=======================================
--- /trunk/test/cctest/compiler/test-machine-operator-reducer.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/test/cctest/compiler/test-machine-operator-reducer.cc Mon Aug 4 06:49:33 2014 UTC
@@ -771,6 +771,6 @@
// TODO(titzer): test MachineOperatorReducer for Int64Mod
// TODO(titzer): test MachineOperatorReducer for Int64UMod
// TODO(titzer): test MachineOperatorReducer for Int64Neg
-// TODO(titzer): test MachineOperatorReducer for ConvertInt32ToFloat64
-// TODO(titzer): test MachineOperatorReducer for ConvertFloat64ToInt32
+// TODO(titzer): test MachineOperatorReducer for ChangeInt32ToFloat64
+// TODO(titzer): test MachineOperatorReducer for ChangeFloat64ToInt32
// TODO(titzer): test MachineOperatorReducer for Float64Compare
=======================================
--- /trunk/test/cctest/compiler/test-representation-change.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/test/cctest/compiler/test-representation-change.cc Mon Aug 4 06:49:33 2014 UTC
@@ -180,10 +180,10 @@
CheckChange(IrOpcode::kChangeTaggedToFloat64, rTagged, rFloat64);
// Int32,Uint32 <-> Float64 are actually machine conversions.
- CheckChange(IrOpcode::kConvertInt32ToFloat64, rWord32 | tInt32, rFloat64);
- CheckChange(IrOpcode::kConvertUint32ToFloat64, rWord32 | tUint32, rFloat64);
- CheckChange(IrOpcode::kConvertFloat64ToInt32, rFloat64 | tInt32, rWord32);
- CheckChange(IrOpcode::kConvertFloat64ToUint32, rFloat64 | tUint32, rWord32);
+ CheckChange(IrOpcode::kChangeInt32ToFloat64, rWord32 | tInt32, rFloat64);
+ CheckChange(IrOpcode::kChangeUint32ToFloat64, rWord32 | tUint32, rFloat64);
+ CheckChange(IrOpcode::kChangeFloat64ToInt32, rFloat64 | tInt32, rWord32);
+ CheckChange(IrOpcode::kChangeFloat64ToUint32, rFloat64 | tUint32, rWord32);
}
@@ -200,8 +200,8 @@
// CheckChange(IrOpcode::kChangeTaggedToInt32, rTagged, rWord32 | tInt32);
// CheckChange(IrOpcode::kChangeTaggedToUint32, rTagged, rWord32 | tUint32);
- // CheckChange(IrOpcode::kConvertInt32ToFloat64, rWord32, rFloat64);
- // CheckChange(IrOpcode::kConvertFloat64ToInt32, rFloat64, rWord32);
+ // CheckChange(IrOpcode::kChangeInt32ToFloat64, rWord32, rFloat64);
+ // CheckChange(IrOpcode::kChangeFloat64ToInt32, rFloat64, rWord32);
}
=======================================
--- /trunk/test/cctest/compiler/test-run-machops.cc Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/test/cctest/compiler/test-run-machops.cc Mon Aug 4 06:49:33 2014 UTC
@@ -482,7 +482,7 @@
m.Goto(&header);
m.Bind(end);
- m.Return(m.ConvertFloat64ToInt32(phi));
+ m.Return(m.ChangeFloat64ToInt32(phi));
CHECK_EQ(10, m.Call());
}
@@ -2858,12 +2858,12 @@
}
-TEST(RunConvertInt32ToFloat64_A) {
+TEST(RunChangeInt32ToFloat64_A) {
RawMachineAssemblerTester<int32_t> m;
int32_t magic = 0x986234;
double result = 0;
- Node* convert = m.ConvertInt32ToFloat64(m.Int32Constant(magic));
+ Node* convert = m.ChangeInt32ToFloat64(m.Int32Constant(magic));
m.Store(kMachineFloat64, m.PointerConstant(&result), m.Int32Constant(0),
convert);
m.Return(m.Int32Constant(magic));
@@ -2873,11 +2873,11 @@
}
-TEST(RunConvertInt32ToFloat64_B) {
+TEST(RunChangeInt32ToFloat64_B) {
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
double output = 0;
- Node* convert = m.ConvertInt32ToFloat64(m.Parameter(0));
+ Node* convert = m.ChangeInt32ToFloat64(m.Parameter(0));
m.Store(kMachineFloat64, m.PointerConstant(&output), m.Int32Constant(0),
convert);
m.Return(m.Parameter(0));
@@ -2890,11 +2890,11 @@
}
-TEST(RunConvertUint32ToFloat64_B) {
+TEST(RunChangeUint32ToFloat64_B) {
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
double output = 0;
- Node* convert = m.ConvertUint32ToFloat64(m.Parameter(0));
+ Node* convert = m.ChangeUint32ToFloat64(m.Parameter(0));
m.Store(kMachineFloat64, m.PointerConstant(&output), m.Int32Constant(0),
convert);
m.Return(m.Parameter(0));
@@ -2907,14 +2907,14 @@
}
-TEST(RunConvertFloat64ToInt32_A) {
+TEST(RunChangeFloat64ToInt32_A) {
RawMachineAssemblerTester<int32_t> m;
int32_t magic = 0x786234;
double input = 11.1;
int32_t result = 0;
m.Store(kMachineWord32, m.PointerConstant(&result), m.Int32Constant(0),
- m.ConvertFloat64ToInt32(m.Float64Constant(input)));
+ m.ChangeFloat64ToInt32(m.Float64Constant(input)));
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
@@ -2922,14 +2922,14 @@
}
-TEST(RunConvertFloat64ToInt32_B) {
+TEST(RunChangeFloat64ToInt32_B) {
RawMachineAssemblerTester<int32_t> m;
double input = 0;
int32_t output = 0;
Node* load =
m.Load(kMachineFloat64, m.PointerConstant(&input), m.Int32Constant(0));
- Node* convert = m.ConvertFloat64ToInt32(load);
+ Node* convert = m.ChangeFloat64ToInt32(load);
m.Store(kMachineWord32, m.PointerConstant(&output), m.Int32Constant(0),
convert);
m.Return(convert);
@@ -2964,14 +2964,14 @@
}
-TEST(RunConvertFloat64ToUint32_B) {
+TEST(RunChangeFloat64ToUint32_B) {
RawMachineAssemblerTester<int32_t> m;
double input = 0;
int32_t output = 0;
Node* load =
m.Load(kMachineFloat64, m.PointerConstant(&input), m.Int32Constant(0));
- Node* convert = m.ConvertFloat64ToUint32(load);
+ Node* convert = m.ChangeFloat64ToUint32(load);
m.Store(kMachineWord32, m.PointerConstant(&output), m.Int32Constant(0),
convert);
m.Return(convert);
@@ -3007,7 +3007,7 @@
}
-TEST(RunConvertFloat64ToInt32_spilled) {
+TEST(RunChangeFloat64ToInt32_spilled) {
RawMachineAssemblerTester<int32_t> m;
const int kNumInputs = 32;
int32_t magic = 0x786234;
@@ -3022,7 +3022,7 @@
for (int i = 0; i < kNumInputs; i++) {
m.Store(kMachineWord32, m.PointerConstant(&result), m.Int32Constant(i * 4),
- m.ConvertFloat64ToInt32(input_node[i]));
+ m.ChangeFloat64ToInt32(input_node[i]));
}
m.Return(m.Int32Constant(magic));
@@ -3039,19 +3039,19 @@
}
-TEST(RunDeadConvertFloat64ToInt32) {
+TEST(RunDeadChangeFloat64ToInt32) {
RawMachineAssemblerTester<int32_t> m;
const int magic = 0x88abcda4;
- m.ConvertFloat64ToInt32(m.Float64Constant(999.78));
+ m.ChangeFloat64ToInt32(m.Float64Constant(999.78));
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
}
-TEST(RunDeadConvertInt32ToFloat64) {
+TEST(RunDeadChangeInt32ToFloat64) {
RawMachineAssemblerTester<int32_t> m;
const int magic = 0x8834abcd;
- m.ConvertInt32ToFloat64(m.Int32Constant(magic - 6888));
+ m.ChangeInt32ToFloat64(m.Int32Constant(magic - 6888));
m.Return(m.Int32Constant(magic));
CHECK_EQ(magic, m.Call());
}
@@ -3882,4 +3882,184 @@
#endif // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-#endif
+
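+// Reference implementation for the tests below: computes x + y with
+// wraparound (in uint32 to avoid undefined behavior) and reports signed
+// overflow, which occurs exactly when the sign of the result differs from
+// the signs of both operands.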
+static bool sadd_overflow(int32_t x, int32_t y, int32_t* val) {
+ int32_t v =
+ static_cast<int32_t>(static_cast<uint32_t>(x) + static_cast<uint32_t>(y));
+ *val = v;
+ return (((v ^ x) & (v ^ y)) >> 31) & 1;
+}
+
+
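+// Reference implementation: computes x - y with wraparound and reports
+// signed overflow, which occurs exactly when the sign of the result differs
+// from the sign of x and matches the sign of y.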
+static bool ssub_overflow(int32_t x, int32_t y, int32_t* val) {
+ int32_t v =
+ static_cast<int32_t>(static_cast<uint32_t>(x) - static_cast<uint32_t>(y));
+ *val = v;
+ return (((v ^ x) & (v ^ ~y)) >> 31) & 1;
+}
+
+
+TEST(RunInt32AddWithOverflowP) {
+ int32_t actual_val = -1;
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ bt.AddReturn(ovf);
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected_val;
+ int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+
+TEST(RunInt32AddWithOverflowImm) {
+ int32_t actual_val = -1, expected_val = 0;
+ FOR_INT32_INPUTS(i) {
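+ // Cover the immediate on the left operand, the right operand, and both.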
+ {
+ RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0), &val, &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.Return(ovf);
+ FOR_INT32_INPUTS(j) {
+ int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i), &val, &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.Return(ovf);
+ FOR_INT32_INPUTS(j) {
+ int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ FOR_INT32_INPUTS(j) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j), &val,
+ &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.Return(ovf);
+ int expected_ovf = sadd_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call());
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+
+TEST(RunInt32AddWithOverflowInBranchP) {
+ MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ Node* val, *ovf;
+ m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
+ m.Branch(ovf, &blocka, &blockb);
+ m.Bind(&blocka);
+ bt.AddReturn(m.Word32Not(val));
+ m.Bind(&blockb);
+ bt.AddReturn(val);
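+ // The generated code returns ~(x + y) when the addition overflows and
+ // x + y otherwise.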
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ int32_t expected;
+ if (sadd_overflow(*i, *j, &expected)) expected = ~expected;
+ CHECK_EQ(expected, bt.call(*i, *j));
+ }
+ }
+}
+
+
+TEST(RunInt32SubWithOverflowP) {
+ int32_t actual_val = -1;
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ Node* val, *ovf;
+ m.Int32SubWithOverflow(bt.param0, bt.param1, &val, &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ bt.AddReturn(ovf);
+ FOR_INT32_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ int32_t expected_val;
+ int expected_ovf = ssub_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, bt.call(*i, *j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+
+TEST(RunInt32SubWithOverflowImm) {
+ int32_t actual_val = -1, expected_val = 0;
+ FOR_INT32_INPUTS(i) {
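+ // Cover the immediate on the left operand, the right operand, and both.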
+ {
+ RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ Node* val, *ovf;
+ m.Int32SubWithOverflow(m.Int32Constant(*i), m.Parameter(0), &val, &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.Return(ovf);
+ FOR_INT32_INPUTS(j) {
+ int expected_ovf = ssub_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ {
+ RawMachineAssemblerTester<int32_t> m(kMachineWord32);
+ Node* val, *ovf;
+ m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(*i), &val, &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.Return(ovf);
+ FOR_INT32_INPUTS(j) {
+ int expected_ovf = ssub_overflow(*j, *i, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call(*j));
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+ FOR_INT32_INPUTS(j) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* val, *ovf;
+ m.Int32SubWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j), &val,
+ &ovf);
+ m.StoreToPointer(&actual_val, kMachineWord32, val);
+ m.Return(ovf);
+ int expected_ovf = ssub_overflow(*i, *j, &expected_val);
+ CHECK_EQ(expected_ovf, m.Call());
+ CHECK_EQ(expected_val, actual_val);
+ }
+ }
+}
+
+
+TEST(RunInt32SubWithOverflowInBranchP) {
+ MLabel blocka, blockb;
+ RawMachineAssemblerTester<int32_t> m;
+ Int32BinopTester bt(&m);
+ Node* val, *ovf;
+ m.Int32SubWithOverflow(bt.param0, bt.param1, &val, &ovf);
+ m.Branch(ovf, &blocka, &blockb);
+ m.Bind(&blocka);
+ bt.AddReturn(m.Word32Not(val));
+ m.Bind(&blockb);
+ bt.AddReturn(val);
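+ // The generated code returns ~(x - y) when the subtraction overflows and
+ // x - y otherwise.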
+ FOR_UINT32_INPUTS(i) {
+ FOR_UINT32_INPUTS(j) {
+ int32_t expected;
+ if (ssub_overflow(*i, *j, &expected)) expected = ~expected;
+ CHECK_EQ(expected, bt.call(*i, *j));
+ }
+ }
+}
+
+#endif // V8_TURBOFAN_TARGET
=======================================
--- /trunk/test/mjsunit/mjsunit.status Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/test/mjsunit/mjsunit.status Mon Aug 4 06:49:33 2014 UTC
@@ -181,9 +181,6 @@
# No need to waste time for this test.
'd8-performance-now': [PASS, NO_VARIANTS],
-
##############################################################################
- 'big-object-literal': [PASS, ['arch == arm or arch == android_arm or arch == android_arm64', SKIP]],
-
# Issue 488: this test sometimes times out.
'array-constructor': [PASS, TIMEOUT],
@@ -310,6 +307,7 @@
# Pass but take too long to run. Skip.
# Some similar tests (with fewer iterations) may be included in arm64-js
# tests.
+ 'big-object-literal': [SKIP],
'compiler/regress-arguments': [SKIP],
'compiler/regress-gvn': [SKIP],
'compiler/regress-max-locals-for-osr': [SKIP],
@@ -413,6 +411,7 @@
# Long running tests. Skipping because having them timeout takes too long on
# the buildbot.
+ 'big-object-literal': [SKIP],
'compiler/alloc-number': [SKIP],
'regress/regress-490': [SKIP],
'regress/regress-634': [SKIP],
=======================================
--- /trunk/test/mozilla/mozilla.status Tue Jul 15 00:04:47 2014 UTC
+++ /trunk/test/mozilla/mozilla.status Mon Aug 4 06:49:33 2014 UTC
@@ -51,6 +51,19 @@
'ecma_3/Number/15.7.4.3-02': [PASS, FAIL],
'ecma_3/Date/15.9.5.5-02': [PASS, FAIL],
+ ################## TURBO-FAN FAILURES ###################
+
+ # TODO(turbofan): These are all covered by mjsunit as well. Enable them once
+ # we pass 'mjsunit' and 'webkit' with TurboFan.
+ 'js1_4/Functions/function-001': [PASS, NO_VARIANTS],
+ 'js1_5/Regress/regress-104077': [PASS, NO_VARIANTS],
+ 'js1_5/Regress/regress-396684': [PASS, NO_VARIANTS],
+ 'js1_5/Regress/regress-80981': [PASS, NO_VARIANTS],
+
+ # TODO(turbofan): Large switch statements crash.
+ 'js1_5/Regress/regress-366601': [PASS, NO_VARIANTS],
+ 'js1_5/Regress/regress-398085-01': [PASS, NO_VARIANTS],
+
##################### SKIPPED TESTS #####################
# This test checks that we behave properly in an out-of-memory
@@ -170,7 +183,7 @@
'js1_5/String/regress-56940-02': [PASS, FAIL],
'js1_5/String/regress-157334-01': [PASS, FAIL],
'js1_5/String/regress-322772': [PASS, FAIL],
- 'js1_5/Array/regress-99120-01': [PASS, FAIL],
+ 'js1_5/Array/regress-99120-01': [PASS, FAIL, NO_VARIANTS],
'js1_5/Array/regress-99120-02': [PASS, FAIL],
'js1_5/Regress/regress-347306-01': [PASS, FAIL],
'js1_5/Regress/regress-416628': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
=======================================
--- /trunk/test/test262/test262.status Wed Jul 23 00:04:36 2014 UTC
+++ /trunk/test/test262/test262.status Mon Aug 4 06:49:33 2014 UTC
@@ -31,6 +31,9 @@
'15.5.4.9_CE': [['no_i18n', SKIP]],
+ # TODO(turbofan): Timeouts on TurboFan need investigation.
+ '10.1.1_13': [PASS, NO_VARIANTS],
+
# BUG(v8:3455)
'11.2.3_b': [FAIL],
'12.2.3_b': [FAIL],
@@ -44,7 +47,7 @@
'9.2.1_2': [FAIL],
'9.2.6_2': [FAIL],
'10.1.1_a': [FAIL],
- '10.1.1_19_c': [PASS, FAIL],
+ '10.1.1_19_c': [PASS, FAIL, NO_VARIANTS],
'10.1.2.1_4': [FAIL],
'10.2.3_b': [PASS, FAIL],
'10.3_a': [FAIL],
=======================================
--- /trunk/test/webkit/webkit.status Thu Jul 24 00:04:58 2014 UTC
+++ /trunk/test/webkit/webkit.status Mon Aug 4 06:49:33 2014 UTC
@@ -33,6 +33,12 @@
'dfg-inline-arguments-become-int32': [PASS, FAIL],
'dfg-inline-arguments-reset': [PASS, FAIL],
'dfg-inline-arguments-reset-changetype': [PASS, FAIL],
+ # TODO(turbofan): Sometimes the try-catch blacklist fails.
'exception-with-handler-inside-eval-with-dynamic-scope': [PASS, NO_VARIANTS],
+ # TODO(turbofan): We run out of stack earlier on 64-bit for now.
+ 'fast/js/deep-recursion-test': [PASS, NO_VARIANTS],
+ # TODO(turbofan): Some tests just timeout for now.
+ 'array-iterate-backwards': [PASS, NO_VARIANTS],
}], # ALWAYS
['mode == debug', {
# Too slow in debug mode.
=======================================
--- /trunk/tools/generate-runtime-tests.py Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/tools/generate-runtime-tests.py Mon Aug 4 06:49:33 2014 UTC
@@ -47,9 +47,9 @@
# that the parser doesn't bit-rot. Change the values as needed when you add,
# remove or change runtime functions, but make sure we don't lose our ability
# to parse them!
-EXPECTED_FUNCTION_COUNT = 426
+EXPECTED_FUNCTION_COUNT = 425
EXPECTED_FUZZABLE_COUNT = 338
-EXPECTED_CCTEST_COUNT = 10
+EXPECTED_CCTEST_COUNT = 9
EXPECTED_UNKNOWN_COUNT = 4
EXPECTED_BUILTINS_COUNT = 816
=======================================
--- /trunk/tools/run-tests.py Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/tools/run-tests.py Mon Aug 4 06:49:33 2014 UTC
@@ -62,7 +62,7 @@
"turbofan": ["--turbo-filter=*", "--always-opt"],
"nocrankshaft": ["--nocrankshaft"]}
-VARIANTS = ["default", "stress", "nocrankshaft"]
+VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
MODE_FLAGS = {
"debug" : ["--nohard-abort", "--nodead-code-elimination",
=======================================
--- /trunk/tools/whitespace.txt Thu Jul 31 18:45:14 2014 UTC
+++ /trunk/tools/whitespace.txt Mon Aug 4 06:49:33 2014 UTC
@@ -5,4 +5,4 @@
A Smi walks into a bar and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them and............
+The Smi looked at them and...............