Revision: 22800
Author: [email protected]
Date: Mon Aug 4 08:18:37 2014 UTC
Log: [turbofan] Support for combining branches with
<Operation>WithOverflow.
Also unify the handling of binops in the InstructionSelector
backends.
TEST=cctest/test-run-machops,cctest/test-instruction-selector-arm
[email protected]
Review URL: https://codereview.chromium.org/415403005
http://code.google.com/p/v8/source/detail?r=22800
Modified:
/branches/bleeding_edge/src/compiler/arm/instruction-selector-arm.cc
/branches/bleeding_edge/src/compiler/arm64/instruction-selector-arm64.cc
/branches/bleeding_edge/src/compiler/ia32/instruction-selector-ia32.cc
/branches/bleeding_edge/src/compiler/instruction-selector-impl.h
/branches/bleeding_edge/src/compiler/instruction-selector.cc
/branches/bleeding_edge/src/compiler/instruction-selector.h
/branches/bleeding_edge/src/compiler/machine-node-factory.h
/branches/bleeding_edge/src/compiler/node.cc
/branches/bleeding_edge/src/compiler/node.h
/branches/bleeding_edge/src/compiler/x64/instruction-selector-x64.cc
/branches/bleeding_edge/test/cctest/compiler/test-instruction-selector-arm.cc
/branches/bleeding_edge/test/cctest/compiler/test-run-machops.cc
=======================================
--- /branches/bleeding_edge/src/compiler/arm/instruction-selector-arm.cc
Fri Aug 1 12:18:20 2014 UTC
+++ /branches/bleeding_edge/src/compiler/arm/instruction-selector-arm.cc
Mon Aug 4 08:18:37 2014 UTC
@@ -249,46 +249,12 @@
}
-// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
- InstructionCode opcode, InstructionCode
reverse_opcode) {
+ InstructionCode opcode, InstructionCode
reverse_opcode,
+ FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[3];
- size_t input_count = 0;
- InstructionOperand* outputs[1] = {g.DefineAsRegister(node)};
- const size_t output_count = ARRAY_SIZE(outputs);
-
- if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
- &input_count, &inputs[1])) {
- inputs[0] = g.UseRegister(m.left().node());
- input_count++;
- } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
- m.left().node(), &input_count,
- &inputs[1])) {
- inputs[0] = g.UseRegister(m.right().node());
- opcode = reverse_opcode;
- input_count++;
- } else {
- opcode |= AddressingModeField::encode(kMode_Operand2_R);
- inputs[input_count++] = g.UseRegister(m.left().node());
- inputs[input_count++] = g.UseRegister(m.right().node());
- }
-
- ASSERT_NE(0, input_count);
- ASSERT_GE(ARRAY_SIZE(inputs), input_count);
- ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
-
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
-}
-
-
-static void VisitBinopWithOverflow(InstructionSelector* selector, Node*
node,
- InstructionCode opcode,
- InstructionCode reverse_opcode) {
- ArmOperandGenerator g(selector);
- Int32BinopMatcher m(node);
- InstructionOperand* inputs[3];
+ InstructionOperand* inputs[5];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
@@ -309,16 +275,14 @@
inputs[input_count++] = g.UseRegister(m.right().node());
}
- // Define outputs depending on the projections.
- Node* projections[2];
- node->CollectProjections(ARRAY_SIZE(projections), projections);
- if (projections[0]) {
- outputs[output_count++] = g.DefineAsRegister(projections[0]);
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
}
- if (projections[1]) {
- opcode |= FlagsModeField::encode(kFlags_set);
- opcode |= FlagsConditionField::encode(kOverflow);
- outputs[output_count++] = g.DefineAsRegister(projections[1]);
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
}
ASSERT_NE(0, input_count);
@@ -327,7 +291,16 @@
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
ASSERT_NE(kMode_None, AddressingModeField::decode(opcode));
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, InstructionCode
reverse_opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, reverse_opcode, &cont);
}
@@ -595,11 +568,6 @@
}
VisitBinop(this, node, kArmAdd, kArmAdd);
}
-
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- VisitBinopWithOverflow(this, node, kArmAdd, kArmAdd);
-}
void InstructionSelector::VisitInt32Sub(Node* node) {
@@ -614,11 +582,6 @@
}
VisitBinop(this, node, kArmSub, kArmRsb);
}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- VisitBinopWithOverflow(this, node, kArmSub, kArmRsb);
-}
void InstructionSelector::VisitInt32Mul(Node* node) {
@@ -865,12 +828,24 @@
Emit(kArmDrop | MiscField::encode(buffer.pushed_count), NULL);
}
}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation*
cont) {
+ VisitBinop(this, node, kArmAdd, kArmAdd, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation*
cont) {
+ VisitBinop(this, node, kArmSub, kArmRsb, cont);
+}
// Shared routine for multiple compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation*
cont,
- bool commutative, bool requires_output) {
+ bool commutative) {
ArmOperandGenerator g(selector);
Int32BinopMatcher m(node);
InstructionOperand* inputs[5];
@@ -894,9 +869,6 @@
}
if (cont->IsBranch()) {
- if (requires_output) {
- outputs[output_count++] = g.DefineAsRegister(node);
- }
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
} else {
@@ -917,15 +889,15 @@
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation*
cont) {
switch (node->opcode()) {
case IrOpcode::kInt32Add:
- return VisitWordCompare(this, node, kArmCmn, cont, true, false);
+ return VisitWordCompare(this, node, kArmCmn, cont, true);
case IrOpcode::kInt32Sub:
- return VisitWordCompare(this, node, kArmCmp, cont, false, false);
+ return VisitWordCompare(this, node, kArmCmp, cont, false);
case IrOpcode::kWord32And:
- return VisitWordCompare(this, node, kArmTst, cont, true, false);
+ return VisitWordCompare(this, node, kArmTst, cont, true);
case IrOpcode::kWord32Or:
- return VisitWordCompare(this, node, kArmOrr, cont, true, true);
+ return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
case IrOpcode::kWord32Xor:
- return VisitWordCompare(this, node, kArmTeq, cont, true, false);
+ return VisitWordCompare(this, node, kArmTeq, cont, true);
default:
break;
}
@@ -946,7 +918,7 @@
void InstructionSelector::VisitWord32Compare(Node* node,
FlagsContinuation* cont) {
- VisitWordCompare(this, node, kArmCmp, cont, false, false);
+ VisitWordCompare(this, node, kArmCmp, cont, false);
}
=======================================
---
/branches/bleeding_edge/src/compiler/arm64/instruction-selector-arm64.cc
Fri Aug 1 13:51:23 2014 UTC
+++
/branches/bleeding_edge/src/compiler/arm64/instruction-selector-arm64.cc
Mon Aug 4 08:18:37 2014 UTC
@@ -110,34 +110,26 @@
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode, ImmediateMode operand_mode,
- bool commutative) {
- VisitRRO(selector, opcode, node, operand_mode);
-}
-
-
-static void VisitBinopWithOverflow(InstructionSelector* selector, Node*
node,
- InstructionCode opcode) {
+ InstructionCode opcode, ImmediateMode operand_mode,
+ FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[2];
+ InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
inputs[input_count++] = g.UseRegister(m.left().node());
- inputs[input_count++] = g.UseRegister(m.right().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
- // Define outputs depending on the projections.
- Node* projections[2];
- node->CollectProjections(ARRAY_SIZE(projections), projections);
- if (projections[0]) {
- outputs[output_count++] = g.DefineAsRegister(projections[0]);
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
}
- if (projections[1]) {
- opcode |= FlagsModeField::encode(kFlags_set);
- opcode |= FlagsConditionField::encode(kOverflow);
- outputs[output_count++] = g.DefineAsRegister(projections[1]);
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
}
ASSERT_NE(0, input_count);
@@ -145,7 +137,17 @@
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, ImmediateMode operand_mode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, operand_mode, &cont);
}
@@ -256,22 +258,22 @@
void InstructionSelector::VisitWord32And(Node* node) {
- VisitBinop(this, node, kArm64And32, kLogical32Imm, true);
+ VisitBinop(this, node, kArm64And32, kLogical32Imm);
}
void InstructionSelector::VisitWord64And(Node* node) {
- VisitBinop(this, node, kArm64And, kLogical64Imm, true);
+ VisitBinop(this, node, kArm64And, kLogical64Imm);
}
void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBinop(this, node, kArm64Or32, kLogical32Imm, true);
+ VisitBinop(this, node, kArm64Or32, kLogical32Imm);
}
void InstructionSelector::VisitWord64Or(Node* node) {
- VisitBinop(this, node, kArm64Or, kLogical64Imm, true);
+ VisitBinop(this, node, kArm64Or, kLogical64Imm);
}
@@ -284,7 +286,7 @@
selector->Emit(not_opcode, g.DefineAsRegister(node),
g.UseRegister(m.left().node()));
} else {
- VisitBinop(selector, node, xor_opcode, kLogical32Imm, true);
+ VisitBinop(selector, node, xor_opcode, kLogical32Imm);
}
}
@@ -330,17 +332,12 @@
void InstructionSelector::VisitInt32Add(Node* node) {
- VisitBinop(this, node, kArm64Add32, kArithimeticImm, true);
-}
-
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- VisitBinopWithOverflow(this, node, kArm64Add32);
+ VisitBinop(this, node, kArm64Add32, kArithimeticImm);
}
void InstructionSelector::VisitInt64Add(Node* node) {
- VisitBinop(this, node, kArm64Add, kArithimeticImm, true);
+ VisitBinop(this, node, kArm64Add, kArithimeticImm);
}
@@ -353,7 +350,7 @@
selector->Emit(neg_opcode, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
} else {
- VisitBinop(selector, node, sub_opcode, kArithimeticImm, false);
+ VisitBinop(selector, node, sub_opcode, kArithimeticImm);
}
}
@@ -361,11 +358,6 @@
void InstructionSelector::VisitInt32Sub(Node* node) {
VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- VisitBinopWithOverflow(this, node, kArm64Sub32);
-}
void InstructionSelector::VisitInt64Sub(Node* node) {
@@ -487,6 +479,18 @@
g.UseFixedDouble(node->InputAt(0), d0),
g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation*
cont) {
+ VisitBinop(this, node, kArm64Add32, kArithimeticImm, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation*
cont) {
+ VisitBinop(this, node, kArm64Sub32, kArithimeticImm, cont);
+}
// Shared routine for multiple compare operations.
=======================================
--- /branches/bleeding_edge/src/compiler/ia32/instruction-selector-ia32.cc
Fri Aug 1 12:18:20 2014 UTC
+++ /branches/bleeding_edge/src/compiler/ia32/instruction-selector-ia32.cc
Mon Aug 4 08:18:37 2014 UTC
@@ -161,27 +161,10 @@
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ InstructionCode opcode, FlagsContinuation* cont) {
IA32OperandGenerator g(selector);
Int32BinopMatcher m(node);
- // TODO(turbofan): match complex addressing modes.
- // TODO(turbofan): if commutative, pick the non-live-in operand as the
left as
- // this might be the last use and therefore its register can be reused.
- if (g.CanBeImmediate(m.right().node())) {
- selector->Emit(opcode, g.DefineSameAsFirst(node),
g.Use(m.left().node()),
- g.UseImmediate(m.right().node()));
- } else {
- selector->Emit(opcode, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()),
g.Use(m.right().node()));
- }
-}
-
-
-static void VisitBinopWithOverflow(InstructionSelector* selector, Node*
node,
- InstructionCode opcode) {
- IA32OperandGenerator g(selector);
- Int32BinopMatcher m(node);
- InstructionOperand* inputs[2];
+ InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
@@ -197,19 +180,15 @@
inputs[input_count++] = g.Use(m.right().node());
}
- // Define outputs depending on the projections.
- Node* projections[2];
- node->CollectProjections(ARRAY_SIZE(projections), projections);
- if (projections[0]) {
- outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
}
- if (projections[1]) {
- opcode |= FlagsModeField::encode(kFlags_set);
- opcode |= FlagsConditionField::encode(kOverflow);
+
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ if (cont->IsSet()) {
// TODO(turbofan): Use byte register here.
- outputs[output_count++] =
- (projections[0] ? g.DefineAsRegister(projections[1])
- : g.DefineSameAsFirst(projections[1]));
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
}
ASSERT_NE(0, input_count);
@@ -217,7 +196,17 @@
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, &cont);
}
@@ -285,11 +274,6 @@
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop(this, node, kIA32Add);
}
-
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- VisitBinopWithOverflow(this, node, kIA32Add);
-}
void InstructionSelector::VisitInt32Sub(Node* node) {
@@ -301,11 +285,6 @@
VisitBinop(this, node, kIA32Sub);
}
}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- VisitBinopWithOverflow(this, node, kIA32Sub);
-}
void InstructionSelector::VisitInt32Mul(Node* node) {
@@ -436,6 +415,18 @@
g.UseDoubleRegister(node->InputAt(0)),
g.UseDoubleRegister(node->InputAt(1)), 1, temps);
}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation*
cont) {
+ VisitBinop(this, node, kIA32Add, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation*
cont) {
+ VisitBinop(this, node, kIA32Sub, cont);
+}
// Shared routine for multiple compare operations.
=======================================
--- /branches/bleeding_edge/src/compiler/instruction-selector-impl.h Fri
Aug 1 09:32:58 2014 UTC
+++ /branches/bleeding_edge/src/compiler/instruction-selector-impl.h Mon
Aug 4 08:18:37 2014 UTC
@@ -48,6 +48,7 @@
}
InstructionOperand* DefineAsConstant(Node* node) {
+ selector()->MarkAsDefined(node);
sequence()->AddConstant(node->id(), ToConstant(node));
return ConstantOperand::Create(node->id(), zone());
}
@@ -179,12 +180,16 @@
ASSERT_NOT_NULL(node);
ASSERT_NOT_NULL(operand);
operand->set_virtual_register(node->id());
+ selector()->MarkAsDefined(node);
return operand;
}
UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
- selector_->MarkAsUsed(node);
- return Define(node, operand);
+ ASSERT_NOT_NULL(node);
+ ASSERT_NOT_NULL(operand);
+ operand->set_virtual_register(node->id());
+ selector()->MarkAsUsed(node);
+ return operand;
}
UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location) {
@@ -215,6 +220,8 @@
// instruction and the branch or set it should be combined with.
class FlagsContinuation V8_FINAL {
public:
+ FlagsContinuation() : mode_(kFlags_none) {}
+
// Creates a new flags continuation from the given condition and
true/false
// blocks.
FlagsContinuation(FlagsCondition condition, BasicBlock* true_block,
@@ -236,7 +243,10 @@
bool IsNone() const { return mode_ == kFlags_none; }
bool IsBranch() const { return mode_ == kFlags_branch; }
bool IsSet() const { return mode_ == kFlags_set; }
- FlagsCondition condition() const { return condition_; }
+ FlagsCondition condition() const {
+ ASSERT(!IsNone());
+ return condition_;
+ }
Node* result() const {
ASSERT(IsSet());
return result_;
@@ -250,9 +260,13 @@
return false_block_;
}
- void Negate() { condition_ = static_cast<FlagsCondition>(condition_ ^
1); }
+ void Negate() {
+ ASSERT(!IsNone());
+ condition_ = static_cast<FlagsCondition>(condition_ ^ 1);
+ }
void Commute() {
+ ASSERT(!IsNone());
switch (condition_) {
case kEqual:
case kNotEqual:
@@ -312,8 +326,11 @@
// Encodes this flags continuation into the given opcode.
InstructionCode Encode(InstructionCode opcode) {
- return opcode | FlagsModeField::encode(mode_) |
- FlagsConditionField::encode(condition_);
+ opcode |= FlagsModeField::encode(mode_);
+ if (mode_ != kFlags_none) {
+ opcode |= FlagsConditionField::encode(condition_);
+ }
+ return opcode;
}
private:
=======================================
--- /branches/bleeding_edge/src/compiler/instruction-selector.cc Fri Aug 1
12:18:20 2014 UTC
+++ /branches/bleeding_edge/src/compiler/instruction-selector.cc Mon Aug 4
08:18:37 2014 UTC
@@ -20,6 +20,7 @@
source_positions_(source_positions),
current_block_(NULL),
instructions_(InstructionDeque::allocator_type(zone())),
+ defined_(graph()->NodeCount(), false,
BoolVector::allocator_type(zone())),
used_(graph()->NodeCount(), false,
BoolVector::allocator_type(zone())) {}
@@ -147,6 +148,24 @@
return node->OwnedBy(user) &&
schedule()->block(node) == schedule()->block(user);
}
+
+
+bool InstructionSelector::IsDefined(Node* node) const {
+ ASSERT_NOT_NULL(node);
+ NodeId id = node->id();
+ ASSERT(id >= 0);
+ ASSERT(id < static_cast<NodeId>(defined_.size()));
+ return defined_[id];
+}
+
+
+void InstructionSelector::MarkAsDefined(Node* node) {
+ ASSERT_NOT_NULL(node);
+ NodeId id = node->id();
+ ASSERT(id >= 0);
+ ASSERT(id < static_cast<NodeId>(defined_.size()));
+ defined_[id] = true;
+}
bool InstructionSelector::IsUsed(Node* node) const {
@@ -347,7 +366,8 @@
for (BasicBlock::reverse_iterator i = block->rbegin(); i !=
block->rend();
++i) {
Node* node = *i;
- if (!IsUsed(node)) continue;
+ // Skip nodes that are unused or already defined.
+ if (!IsUsed(node) || IsDefined(node)) continue;
// Generate code for this node "top down", but schedule the
code "bottom
// up".
size_t current_node_end = instructions_.size();
@@ -628,6 +648,26 @@
}
VisitWord64Compare(node, &cont);
}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitInt32AddWithOverflow(node, &cont);
+ }
+ FlagsContinuation cont;
+ VisitInt32AddWithOverflow(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = node->FindProjection(1)) {
+ FlagsContinuation cont(kOverflow, ovf);
+ return VisitInt32SubWithOverflow(node, &cont);
+ }
+ FlagsContinuation cont;
+ VisitInt32SubWithOverflow(node, &cont);
+}
void InstructionSelector::VisitInt64LessThan(Node* node) {
@@ -748,8 +788,20 @@
void InstructionSelector::VisitProjection(Node* node) {
- for (InputIter i = node->inputs().begin(); i != node->inputs().end();
++i) {
- MarkAsUsed(*i);
+ OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ switch (value->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ case IrOpcode::kInt32SubWithOverflow:
+ if (OpParameter<int32_t>(node) == 0) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+ } else {
+ ASSERT_EQ(1, OpParameter<int32_t>(node));
+ MarkAsUsed(value);
+ }
+ break;
+ default:
+ break;
}
}
@@ -849,6 +901,31 @@
case IrOpcode::kFloat64LessThanOrEqual:
cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
return VisitFloat64Compare(value, &cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (OpParameter<int32_t>(value) == 1) {
+ // We cannot combine the <Operation>WithOverflow with this branch
+ // unless the 0th projection (the use of the actual value of the
+ // <Operation> is either NULL, which means there's no use of the
+ // actual value, or was already defined, which means it is
scheduled
+ // *AFTER* this branch).
+ Node* node = value->InputAt(0);
+ Node* result = node->FindProjection(0);
+ if (result == NULL || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitInt32AddWithOverflow(node, &cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont.OverwriteAndNegateIfEqual(kOverflow);
+ return VisitInt32SubWithOverflow(node, &cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
default:
break;
}
@@ -884,6 +961,7 @@
int deoptimization_id = sequence()->AddDeoptimizationEntry(descriptor);
Emit(kArchDeoptimize | MiscField::encode(deoptimization_id), NULL);
}
+
#if !V8_TURBOFAN_TARGET
@@ -891,6 +969,18 @@
void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
#undef DECLARE_UNIMPLEMENTED_SELECTOR
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation*
cont) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation*
cont) {
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation*
cont) {
@@ -913,7 +1003,7 @@
void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
BasicBlock* deoptimization) {}
-#endif
+#endif // !V8_TURBOFAN_TARGET
} // namespace compiler
} // namespace internal
=======================================
--- /branches/bleeding_edge/src/compiler/instruction-selector.h Fri Aug 1
09:32:58 2014 UTC
+++ /branches/bleeding_edge/src/compiler/instruction-selector.h Mon Aug 4
08:18:37 2014 UTC
@@ -71,6 +71,13 @@
// edge and the two are in the same basic block.
bool CanCover(Node* user, Node* node) const;
+ // Checks if {node} was already defined, and therefore code was already
+ // generated for it.
+ bool IsDefined(Node* node) const;
+
+ // Inform the instruction selection that {node} was just defined.
+ void MarkAsDefined(Node* node);
+
// Checks if {node} has any uses, and therefore code has to be generated
for
// it.
bool IsUsed(Node* node) const;
@@ -123,6 +130,9 @@
MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
+ void VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont);
+ void VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont);
+
void VisitWord32Test(Node* node, FlagsContinuation* cont);
void VisitWord64Test(Node* node, FlagsContinuation* cont);
void VisitWord32Compare(Node* node, FlagsContinuation* cont);
@@ -160,6 +170,7 @@
SourcePositionTable* source_positions_;
BasicBlock* current_block_;
Instructions instructions_;
+ BoolVector defined_;
BoolVector used_;
};
=======================================
--- /branches/bleeding_edge/src/compiler/machine-node-factory.h Fri Aug 1
12:18:20 2014 UTC
+++ /branches/bleeding_edge/src/compiler/machine-node-factory.h Mon Aug 4
08:18:37 2014 UTC
@@ -199,20 +199,14 @@
Node* Int32Add(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Add(), a, b);
}
- void Int32AddWithOverflow(Node* a, Node* b, Node** val_return,
- Node** ovf_return) {
- Node* add = NEW_NODE_2(MACHINE()->Int32AddWithOverflow(), a, b);
- if (val_return) *val_return = Projection(0, add);
- if (ovf_return) *ovf_return = Projection(1, add);
+ Node* Int32AddWithOverflow(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32AddWithOverflow(), a, b);
}
Node* Int32Sub(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Sub(), a, b);
}
- void Int32SubWithOverflow(Node* a, Node* b, Node** val_return,
- Node** ovf_return) {
- Node* add = NEW_NODE_2(MACHINE()->Int32SubWithOverflow(), a, b);
- if (val_return) *val_return = Projection(0, add);
- if (ovf_return) *ovf_return = Projection(1, add);
+ Node* Int32SubWithOverflow(Node* a, Node* b) {
+ return NEW_NODE_2(MACHINE()->Int32SubWithOverflow(), a, b);
}
Node* Int32Mul(Node* a, Node* b) {
return NEW_NODE_2(MACHINE()->Int32Mul(), a, b);
=======================================
--- /branches/bleeding_edge/src/compiler/node.cc Fri Aug 1 09:32:58 2014
UTC
+++ /branches/bleeding_edge/src/compiler/node.cc Mon Aug 4 08:18:37 2014
UTC
@@ -21,6 +21,17 @@
projections[index] = *i;
}
}
+
+
+Node* Node::FindProjection(int32_t projection_index) {
+ for (UseIter i = uses().begin(); i != uses().end(); ++i) {
+ if ((*i)->opcode() == IrOpcode::kProjection &&
+ OpParameter<int32_t>(*i) == projection_index) {
+ return *i;
+ }
+ }
+ return NULL;
+}
OStream& operator<<(OStream& os, const Operator& op) { return
op.PrintTo(os); }
=======================================
--- /branches/bleeding_edge/src/compiler/node.h Fri Aug 1 09:32:58 2014 UTC
+++ /branches/bleeding_edge/src/compiler/node.h Mon Aug 4 08:18:37 2014 UTC
@@ -55,6 +55,7 @@
void Initialize(Operator* op) { set_op(op); }
void CollectProjections(int projection_count, Node** projections);
+ Node* FindProjection(int32_t projection_index);
};
OStream& operator<<(OStream& os, const Node& n);
=======================================
--- /branches/bleeding_edge/src/compiler/x64/instruction-selector-x64.cc
Fri Aug 1 12:18:20 2014 UTC
+++ /branches/bleeding_edge/src/compiler/x64/instruction-selector-x64.cc
Mon Aug 4 08:18:37 2014 UTC
@@ -176,31 +176,10 @@
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode, bool commutative) {
- X64OperandGenerator g(selector);
- Node* left = node->InputAt(0);
- Node* right = node->InputAt(1);
- // TODO(turbofan): match complex addressing modes.
- // TODO(turbofan): if commutative, pick the non-live-in operand as the
left as
- // this might be the last use and therefore its register can be reused.
- if (g.CanBeImmediate(right)) {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
- g.UseImmediate(right));
- } else if (commutative && g.CanBeImmediate(left)) {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
- g.UseImmediate(left));
- } else {
- selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
- g.Use(right));
- }
-}
-
-
-static void VisitBinopWithOverflow(InstructionSelector* selector, Node*
node,
- InstructionCode opcode) {
+ InstructionCode opcode, FlagsContinuation* cont) {
X64OperandGenerator g(selector);
Int32BinopMatcher m(node);
- InstructionOperand* inputs[2];
+ InstructionOperand* inputs[4];
size_t input_count = 0;
InstructionOperand* outputs[2];
size_t output_count = 0;
@@ -216,18 +195,14 @@
inputs[input_count++] = g.Use(m.right().node());
}
- // Define outputs depending on the projections.
- Node* projections[2];
- node->CollectProjections(ARRAY_SIZE(projections), projections);
- if (projections[0]) {
- outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
}
- if (projections[1]) {
- opcode |= FlagsModeField::encode(kFlags_set);
- opcode |= FlagsConditionField::encode(kOverflow);
- outputs[output_count++] =
- (projections[0] ? g.DefineAsRegister(projections[1])
- : g.DefineSameAsFirst(projections[1]));
+
+ outputs[output_count++] = g.DefineSameAsFirst(node);
+ if (cont->IsSet()) {
+ outputs[output_count++] = g.DefineAsRegister(cont->result());
}
ASSERT_NE(0, input_count);
@@ -235,27 +210,37 @@
ASSERT_GE(ARRAY_SIZE(inputs), input_count);
ASSERT_GE(ARRAY_SIZE(outputs), output_count);
- selector->Emit(opcode, output_count, outputs, input_count, inputs);
+ Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+ outputs, input_count, inputs);
+ if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, &cont);
}
void InstructionSelector::VisitWord32And(Node* node) {
- VisitBinop(this, node, kX64And32, true);
+ VisitBinop(this, node, kX64And32);
}
void InstructionSelector::VisitWord64And(Node* node) {
- VisitBinop(this, node, kX64And, true);
+ VisitBinop(this, node, kX64And);
}
void InstructionSelector::VisitWord32Or(Node* node) {
- VisitBinop(this, node, kX64Or32, true);
+ VisitBinop(this, node, kX64Or32);
}
void InstructionSelector::VisitWord64Or(Node* node) {
- VisitBinop(this, node, kX64Or, true);
+ VisitBinop(this, node, kX64Or);
}
@@ -268,7 +253,7 @@
selector->Emit(not_opcode, g.DefineSameAsFirst(node),
g.Use(m.left().node()));
} else {
- VisitBinop(selector, node, xor_opcode, true);
+ VisitBinop(selector, node, xor_opcode);
}
}
@@ -366,17 +351,12 @@
void InstructionSelector::VisitInt32Add(Node* node) {
- VisitBinop(this, node, kX64Add32, true);
-}
-
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
- VisitBinopWithOverflow(this, node, kX64Add32);
+ VisitBinop(this, node, kX64Add32);
}
void InstructionSelector::VisitInt64Add(Node* node) {
- VisitBinop(this, node, kX64Add, true);
+ VisitBinop(this, node, kX64Add);
}
@@ -389,7 +369,7 @@
selector->Emit(neg_opcode, g.DefineSameAsFirst(node),
g.Use(m.right().node()));
} else {
- VisitBinop(selector, node, sub_opcode, false);
+ VisitBinop(selector, node, sub_opcode);
}
}
@@ -397,11 +377,6 @@
void InstructionSelector::VisitInt32Sub(Node* node) {
VisitSub<int32_t>(this, node, kX64Sub32, kX64Neg32);
}
-
-
-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
- VisitBinopWithOverflow(this, node, kX64Sub32);
-}
void InstructionSelector::VisitInt64Sub(Node* node) {
@@ -582,6 +557,18 @@
Emit(kX64Int32ToInt64, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+ FlagsContinuation*
cont) {
+ VisitBinop(this, node, kX64Add32, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+ FlagsContinuation*
cont) {
+ VisitBinop(this, node, kX64Sub32, cont);
+}
// Shared routine for multiple compare operations.
=======================================
---
/branches/bleeding_edge/test/cctest/compiler/test-instruction-selector-arm.cc
Fri Aug 1 12:18:20 2014 UTC
+++
/branches/bleeding_edge/test/cctest/compiler/test-instruction-selector-arm.cc
Mon Aug 4 08:18:37 2014 UTC
@@ -260,7 +260,7 @@
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -272,7 +272,7 @@
CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(2, m.code[0]->InputCount());
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -311,7 +311,7 @@
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -325,7 +325,7 @@
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -338,7 +338,7 @@
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -351,7 +351,7 @@
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(2, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -405,7 +405,7 @@
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -420,7 +420,7 @@
CHECK_EQ(kFlags_set, m.code[0]->flags_mode());
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -433,7 +433,7 @@
CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -447,7 +447,7 @@
CHECK_EQ(shift.r_mode, m.code[0]->addressing_mode());
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -505,7 +505,7 @@
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -521,7 +521,7 @@
CHECK_EQ(kOverflow, m.code[0]->flags_condition());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -535,7 +535,7 @@
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -550,7 +550,7 @@
CHECK_EQ(kFlags_none, m.code[0]->flags_mode());
CHECK_EQ(3, m.code[0]->InputCount());
CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(2)));
- CHECK_EQ(1, m.code[0]->OutputCount());
+ CHECK_LE(1, m.code[0]->OutputCount());
}
{
InstructionSelectorTester m;
@@ -1753,3 +1753,148 @@
}
}
}
+
+
+TEST(InstructionSelectorBranchWithODPIP) {
+ ODPIs odpis;
+ for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
+ ODPI odpi = *i;
+ {
+ InstructionSelectorTester m;
+ MLabel blocka, blockb;
+ Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
+ m.Branch(m.Projection(1, node), &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&blockb);
+ m.Return(m.Projection(0, node));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ }
+ {
+ InstructionSelectorTester m;
+ MLabel blocka, blockb;
+ Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
+ m.Branch(m.Word32Equal(m.Projection(1, node), m.Int32Constant(0)),
+ &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&blockb);
+ m.Return(m.Projection(0, node));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+ CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
+ }
+ {
+ InstructionSelectorTester m;
+ MLabel blocka, blockb;
+ Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Parameter(1));
+ m.Branch(m.Word32Equal(m.Int32Constant(0), m.Projection(1, node)),
+ &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&blockb);
+ m.Return(m.Projection(0, node));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_R, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+ CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
+ }
+ }
+}
+
+
+TEST(InstructionSelectorBranchWithODPIImm) {
+ ODPIs odpis;
+ Immediates immediates;
+ for (ODPIs::const_iterator i = odpis.begin(); i != odpis.end(); ++i) {
+ ODPI odpi = *i;
+ for (Immediates::const_iterator j = immediates.begin();
+ j != immediates.end(); ++j) {
+ int32_t imm = *j;
+ {
+ InstructionSelectorTester m;
+ MLabel blocka, blockb;
+      Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm));
+ m.Branch(m.Projection(1, node), &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&blockb);
+ m.Return(m.Projection(0, node));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_LE(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ }
+ {
+ InstructionSelectorTester m;
+ MLabel blocka, blockb;
+      Node* node = m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0));
+ m.Branch(m.Projection(1, node), &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&blockb);
+ m.Return(m.Projection(0, node));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+ CHECK_EQ(kOverflow, m.code[0]->flags_condition());
+ CHECK_LE(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ }
+ {
+ InstructionSelectorTester m;
+ MLabel blocka, blockb;
+      Node* node = m.NewNode(odpi.op, m.Parameter(0), m.Int32Constant(imm));
+ m.Branch(m.Word32Equal(m.Projection(1, node), m.Int32Constant(0)),
+ &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&blockb);
+ m.Return(m.Projection(0, node));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+ CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
+ CHECK_LE(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ }
+ {
+ InstructionSelectorTester m;
+ MLabel blocka, blockb;
+      Node* node = m.NewNode(odpi.op, m.Int32Constant(imm), m.Parameter(0));
+ m.Branch(m.Word32Equal(m.Projection(1, node), m.Int32Constant(0)),
+ &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(0));
+ m.Bind(&blockb);
+ m.Return(m.Projection(0, node));
+ m.SelectInstructions();
+ CHECK_EQ(1, m.code.size());
+ CHECK_EQ(odpi.reverse_arch_opcode, m.code[0]->arch_opcode());
+ CHECK_EQ(kMode_Operand2_I, m.code[0]->addressing_mode());
+ CHECK_EQ(kFlags_branch, m.code[0]->flags_mode());
+ CHECK_EQ(kNotOverflow, m.code[0]->flags_condition());
+ CHECK_LE(2, m.code[0]->InputCount());
+ CHECK_EQ(imm, m.ToInt32(m.code[0]->InputAt(1)));
+ }
+ }
+ }
+}
=======================================
--- /branches/bleeding_edge/test/cctest/compiler/test-run-machops.cc Fri
Aug 1 12:18:20 2014 UTC
+++ /branches/bleeding_edge/test/cctest/compiler/test-run-machops.cc Mon
Aug 4 08:18:37 2014 UTC
@@ -3903,8 +3903,9 @@
int32_t actual_val = -1;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- Node* val, *ovf;
- m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
+ Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
@@ -3923,8 +3924,9 @@
FOR_INT32_INPUTS(i) {
{
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
- Node* val, *ovf;
- m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0), &val, &ovf);
+ Node* add = m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
@@ -3935,8 +3937,9 @@
}
{
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
- Node* val, *ovf;
- m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i), &val, &ovf);
+ Node* add = m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
@@ -3947,9 +3950,10 @@
}
FOR_INT32_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m;
- Node* val, *ovf;
- m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j), &val, &ovf);
+ Node* add =
+ m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
int expected_ovf = sadd_overflow(*i, *j, &expected_val);
@@ -3961,20 +3965,22 @@
TEST(RunInt32AddWithOverflowInBranchP) {
+ int constant = 911777;
MLabel blocka, blockb;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- Node* val, *ovf;
- m.Int32AddWithOverflow(bt.param0, bt.param1, &val, &ovf);
+ Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);
+ Node* ovf = m.Projection(1, add);
m.Branch(ovf, &blocka, &blockb);
m.Bind(&blocka);
- bt.AddReturn(m.Word32Not(val));
+ bt.AddReturn(m.Int32Constant(constant));
m.Bind(&blockb);
+ Node* val = m.Projection(0, add);
bt.AddReturn(val);
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
int32_t expected;
- if (sadd_overflow(*i, *j, &expected)) expected = ~expected;
+ if (sadd_overflow(*i, *j, &expected)) expected = constant;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
@@ -3985,8 +3991,9 @@
int32_t actual_val = -1;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- Node* val, *ovf;
- m.Int32SubWithOverflow(bt.param0, bt.param1, &val, &ovf);
+ Node* add = m.Int32SubWithOverflow(bt.param0, bt.param1);
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
bt.AddReturn(ovf);
FOR_INT32_INPUTS(i) {
@@ -4005,8 +4012,9 @@
FOR_INT32_INPUTS(i) {
{
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
- Node* val, *ovf;
- m.Int32SubWithOverflow(m.Int32Constant(*i), m.Parameter(0), &val, &ovf);
+ Node* add = m.Int32SubWithOverflow(m.Int32Constant(*i), m.Parameter(0));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
@@ -4017,8 +4025,9 @@
}
{
RawMachineAssemblerTester<int32_t> m(kMachineWord32);
- Node* val, *ovf;
- m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(*i), &val, &ovf);
+ Node* add = m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(*i));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
FOR_INT32_INPUTS(j) {
@@ -4029,9 +4038,10 @@
}
FOR_INT32_INPUTS(j) {
RawMachineAssemblerTester<int32_t> m;
- Node* val, *ovf;
- m.Int32SubWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j), &val, &ovf);
+ Node* add =
+ m.Int32SubWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
+ Node* val = m.Projection(0, add);
+ Node* ovf = m.Projection(1, add);
m.StoreToPointer(&actual_val, kMachineWord32, val);
m.Return(ovf);
int expected_ovf = ssub_overflow(*i, *j, &expected_val);
@@ -4043,20 +4053,22 @@
TEST(RunInt32SubWithOverflowInBranchP) {
+ int constant = 911999;
MLabel blocka, blockb;
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
- Node* val, *ovf;
- m.Int32SubWithOverflow(bt.param0, bt.param1, &val, &ovf);
+ Node* sub = m.Int32SubWithOverflow(bt.param0, bt.param1);
+ Node* ovf = m.Projection(1, sub);
m.Branch(ovf, &blocka, &blockb);
m.Bind(&blocka);
- bt.AddReturn(m.Word32Not(val));
+ bt.AddReturn(m.Int32Constant(constant));
m.Bind(&blockb);
+ Node* val = m.Projection(0, sub);
bt.AddReturn(val);
FOR_UINT32_INPUTS(i) {
FOR_UINT32_INPUTS(j) {
int32_t expected;
- if (ssub_overflow(*i, *j, &expected)) expected = ~expected;
+ if (ssub_overflow(*i, *j, &expected)) expected = constant;
CHECK_EQ(expected, bt.call(*i, *j));
}
}
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/d/optout.