Revision: 6344
Author: [email protected]
Date: Mon Jan 17 03:25:36 2011
Log: Change the algorithm and generated code for parallel moves on IA32.
Instead of spilling and then immediately restoring eax to resolve
memory-to-memory moves, the gap move resolver now tracks registers
that are known to be free and uses one if available. If not, it spills
but restores lazily when the spilled value is needed or at the end of
the algorithm.
Instead of using esi for resolving cycles and assuming it is free to
overwrite because it can be rematerialized, the gap move resolver now
resolves cycles using swaps, possibly using a free register as above.
The algorithm is also changed to be simpler: a recursive depth-first
traversal of the move dependence graph. It uses a list of moves to be
performed (because it mutates the moves themselves), but does not use
any auxiliary structure other than the control stack. It does not
build up a separate list of scheduled moves to be interpreted by the
code generator, but emits code on the fly.
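
A rough, self-contained sketch of the recursive resolution described above,
using plain integers as operand locations (hypothetical names; not the
committed implementation, which operates on LOperands and emits IA32 code):

  // Minimal model of the gap resolver: perform a set of parallel moves,
  // resolving dependencies depth-first and cycles with swaps, in place.
  #include <cstdio>
  #include <string>
  #include <vector>

  struct Move {
    int src;  // -1 once the move has been eliminated
    int dst;  // -1 while the move is "pending" (on the traversal stack)
  };

  static std::vector<Move> moves;

  static void Emit(const std::string& s) { std::printf("%s\n", s.c_str()); }

  static void EmitSwap(Move& m) {
    Emit("swap " + std::to_string(m.src) + " <-> " + std::to_string(m.dst));
    int a = m.src, b = m.dst;
    m.src = -1;  // the swap also performs this move
    // Remaining moves that read a swapped location now read the other one.
    for (Move& other : moves) {
      if (other.src == a) other.src = b;
      else if (other.src == b) other.src = a;
    }
  }

  static void PerformMove(size_t index) {
    // Mark this move pending by clearing its destination (kept on the side).
    int destination = moves[index].dst;
    moves[index].dst = -1;

    // Depth-first: perform every unperformed, non-pending move that reads
    // this move's destination before that value is overwritten.
    for (size_t i = 0; i < moves.size(); ++i) {
      if (moves[i].src == destination && moves[i].dst != -1) PerformMove(i);
    }
    moves[index].dst = destination;

    // Swaps above may have made this move trivial.
    if (moves[index].src == destination) { moves[index].src = -1; return; }

    // A remaining reader of the destination must be pending: a cycle.
    for (const Move& other : moves) {
      if (other.src == destination && other.dst == -1) {
        EmitSwap(moves[index]);
        return;
      }
    }
    Emit("mov " + std::to_string(moves[index].dst) +
         " <- " + std::to_string(moves[index].src));
    moves[index].src = -1;
  }

  int main() {
    // A two-element cycle (0 <-> 1) plus a dependent move (1 -> 2).
    moves = {{0, 1}, {1, 0}, {1, 2}};
    for (size_t i = 0; i < moves.size(); ++i) {
      if (moves[i].src != -1 && moves[i].src != moves[i].dst) PerformMove(i);
    }
    return 0;
  }

For the example it prints a single swap for the 0/1 cycle followed by one
move, mirroring how the new resolver emits code on the fly instead of
building a schedule for the code generator.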
Review URL: http://codereview.chromium.org/6263005
http://code.google.com/p/v8/source/detail?r=6344
Added:
/branches/bleeding_edge/src/ia32/lithium-gap-resolver-ia32.cc
/branches/bleeding_edge/src/ia32/lithium-gap-resolver-ia32.h
Modified:
/branches/bleeding_edge/src/SConscript
/branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
/branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc
/branches/bleeding_edge/src/ia32/lithium-codegen-ia32.h
/branches/bleeding_edge/src/ia32/lithium-ia32.cc
/branches/bleeding_edge/src/lithium-allocator.cc
/branches/bleeding_edge/src/lithium-allocator.h
/branches/bleeding_edge/src/lithium.cc
/branches/bleeding_edge/src/lithium.h
/branches/bleeding_edge/src/x64/lithium-codegen-x64.cc
/branches/bleeding_edge/tools/gyp/v8.gyp
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/ia32/lithium-gap-resolver-ia32.cc Mon Jan 17 03:25:36 2011
@@ -0,0 +1,461 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "ia32/lithium-gap-resolver-ia32.h"
+#include "ia32/lithium-codegen-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32), spilled_register_(-1) {
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ source_uses_[i] = 0;
+ destination_uses_[i] = 0;
+ }
+}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(HasBeenReset());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ PerformMove(i);
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ Finish();
+ ASSERT(HasBeenReset());
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) AddMove(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph. We use operand swaps to resolve cycles,
+ // which means that a call to PerformMove could change any source operand
+ // in the move graph.
+
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved on the side.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ // Though PerformMove can change any source operand in the move graph,
+ // this call cannot create a blocking move via a swap (this loop does
+ // not miss any). Assume there is a non-blocking move with source A
+ // and this move is blocked on source B and there is a swap of A and
+ // B. Then A and B must be involved in the same cycle (or they would
+ // not be swapped). Since this move's destination is B and there is
+ // only a single incoming edge to an operand, this move must also be
+ // involved in the same cycle. In that case, the blocking move will
+ // be created but will be "pending" when we return from PerformMove.
+ PerformMove(i);
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // This move's source may have changed due to swaps to resolve cycles and
+ // so it may now be the last move in the cycle. If so remove it.
+ if (moves_[index].source()->Equals(destination)) {
+ RemoveMove(index);
+ return;
+ }
+
+ // The move may be blocked on a (at most one) pending move, in which case
+ // we have a cycle. Search for such a blocking move and perform a swap to
+ // resolve it.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ EmitSwap(index);
+ return;
+ }
+ }
+
+ // This move is not blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::AddMove(LMoveOperands move) {
+ LOperand* source = move.source();
+ if (source->IsRegister()) ++source_uses_[source->index()];
+
+ LOperand* destination = move.destination();
+ if (destination->IsRegister()) ++destination_uses_[destination->index()];
+
+ moves_.Add(move);
+}
+
+
+void LGapResolver::RemoveMove(int index) {
+ LOperand* source = moves_[index].source();
+ if (source->IsRegister()) {
+ --source_uses_[source->index()];
+ ASSERT(source_uses_[source->index()] >= 0);
+ }
+
+ LOperand* destination = moves_[index].destination();
+ if (destination->IsRegister()) {
+ --destination_uses_[destination->index()];
+ ASSERT(destination_uses_[destination->index()] >= 0);
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+int LGapResolver::CountSourceUses(LOperand* operand) {
+ int count = 0;
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
+ ++count;
+ }
+ }
+ return count;
+}
+
+
+Register LGapResolver::GetFreeRegisterNot(Register reg) {
+ int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
+ return Register::FromAllocationIndex(i);
+ }
+ }
+ return no_reg;
+}
+
+
+bool LGapResolver::HasBeenReset() {
+ if (!moves_.is_empty()) return false;
+ if (spilled_register_ >= 0) return false;
+
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ if (source_uses_[i] != 0) return false;
+ if (destination_uses_[i] != 0) return false;
+ }
+ return true;
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::Finish() {
+ if (spilled_register_ >= 0) {
+ __ pop(Register::FromAllocationIndex(spilled_register_));
+ spilled_register_ = -1;
+ }
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::EnsureRestored(LOperand* operand) {
+ if (operand->IsRegister() && operand->index() == spilled_register_) {
+ __ pop(Register::FromAllocationIndex(spilled_register_));
+ spilled_register_ = -1;
+ }
+}
+
+
+Register LGapResolver::EnsureTempRegister() {
+ // 1. We may have already spilled to create a temp register.
+ if (spilled_register_ >= 0) {
+ return Register::FromAllocationIndex(spilled_register_);
+ }
+
+ // 2. We may have a free register that we can use without spilling.
+ Register free = GetFreeRegisterNot(no_reg);
+ if (!free.is(no_reg)) return free;
+
+ // 3. Prefer to spill a register that is not used in any remaining move
+ // because it will not need to be restored until the end.
+ for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
+ Register scratch = Register::FromAllocationIndex(i);
+ __ push(scratch);
+ spilled_register_ = i;
+ return scratch;
+ }
+ }
+
+ // 4. Use an arbitrary register. Register 0 is as arbitrary as any other.
+ Register scratch = Register::FromAllocationIndex(0);
+ __ push(scratch);
+ spilled_register_ = 0;
+ return scratch;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+ EnsureRestored(source);
+ EnsureRestored(destination);
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Register src = cgen_->ToRegister(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ mov(dst, src);
+
+ } else if (source->IsStackSlot()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ mov(dst, src);
+ } else {
+ // Spill on demand to use a temporary register for memory-to-memory
+ // moves.
+ Register tmp = EnsureTempRegister();
+ Operand dst = cgen_->ToOperand(destination);
+ __ mov(tmp, src);
+ __ mov(dst, tmp);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ ASSERT(destination->IsRegister() || destination->IsStackSlot());
+ Immediate src = cgen_->ToImmediate(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ mov(dst, src);
+
+ } else if (source->IsDoubleRegister()) {
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(dst, src);
+
+ } else if (source->IsDoubleStackSlot()) {
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movdbl(dst, src);
+ } else {
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(xmm0, src);
+ __ movdbl(dst, xmm0);
+ }
+
+ } else {
+ UNREACHABLE();
+ }
+
+ RemoveMove(index);
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+ EnsureRestored(source);
+ EnsureRestored(destination);
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Register-register.
+ Register src = cgen_->ToRegister(source);
+ Register dst = cgen_->ToRegister(destination);
+ __ xchg(dst, src);
+
+ } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+ (source->IsStackSlot() && destination->IsRegister())) {
+ // Register-memory. Use a free register as a temp if possible. Do not
+ // spill on demand because the simple spill implementation cannot avoid
+ // spilling src at this point.
+ Register tmp = GetFreeRegisterNot(no_reg);
+ Register reg =
+ cgen_->ToRegister(source->IsRegister() ? source : destination);
+ Operand mem =
+ cgen_->ToOperand(source->IsRegister() ? destination : source);
+ if (tmp.is(no_reg)) {
+ __ xor_(reg, mem);
+ __ xor_(mem, reg);
+ __ xor_(reg, mem);
+ } else {
+ __ mov(tmp, mem);
+ __ mov(mem, reg);
+ __ mov(reg, tmp);
+ }
+
+ } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+ // Memory-memory. Spill on demand to use a temporary. If there is a
+ // free register after that, use it as a second temporary.
+ Register tmp0 = EnsureTempRegister();
+ Register tmp1 = GetFreeRegisterNot(tmp0);
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ if (tmp1.is(no_reg)) {
+ // Only one temp register available to us.
+ __ mov(tmp0, dst);
+ __ xor_(tmp0, src);
+ __ xor_(src, tmp0);
+ __ xor_(tmp0, src);
+ __ mov(dst, tmp0);
+ } else {
+ __ mov(tmp0, dst);
+ __ mov(tmp1, src);
+ __ mov(dst, tmp1);
+ __ mov(src, tmp0);
+ }
+
+ } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
+ // XMM register-register or register-memory. We rely on having xmm0
+ // available as a fixed scratch register.
+ ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
+ ? source
+ : destination);
+ Operand other =
+ cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
+ __ movdbl(xmm0, other);
+ __ movdbl(other, reg);
+ __ movdbl(reg, Operand(xmm0));
+
+ } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ // Double-width memory-to-memory. Spill on demand to use a general
+ // purpose temporary register and also rely on having xmm0 available as
+ // a fixed scratch register.
+ Register tmp = EnsureTempRegister();
+ Operand src0 = cgen_->ToOperand(source);
+ Operand src1 = cgen_->HighOperand(source);
+ Operand dst0 = cgen_->ToOperand(destination);
+ Operand dst1 = cgen_->HighOperand(destination);
+ __ movdbl(xmm0, dst0); // Save destination in xmm0.
+ __ mov(tmp, src0); // Then use tmp to copy source to destination.
+ __ mov(dst0, tmp);
+ __ mov(tmp, src1);
+ __ mov(dst1, tmp);
+ __ movdbl(src0, xmm0);
+
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+
+ // The swap of source and destination has executed a move from source to
+ // destination.
+ RemoveMove(index);
+
+ // Any unperformed (including pending) move with a source of either
+ // this move's source or destination needs to have its source
+ // changed to reflect the state of affairs after the swap.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(source)) {
+ moves_[i].set_source(destination);
+ } else if (other_move.Blocks(destination)) {
+ moves_[i].set_source(source);
+ }
+ }
+
+ // In addition to swapping the actual uses as sources, we need to update
+ // the use counts.
+ if (source->IsRegister() && destination->IsRegister()) {
+ int temp = source_uses_[source->index()];
+ source_uses_[source->index()] = source_uses_[destination->index()];
+ source_uses_[destination->index()] = temp;
+ } else if (source->IsRegister()) {
+ // We don't have use counts for non-register operands like destination.
+ // Compute those counts now.
+ source_uses_[source->index()] = CountSourceUses(source);
+ } else if (destination->IsRegister()) {
+ source_uses_[destination->index()] = CountSourceUses(destination);
+ }
+}
+
+#undef __
+
+} } // namespace v8::internal
=======================================
--- /dev/null
+++ /branches/bleeding_edge/src/ia32/lithium-gap-resolver-ia32.h Mon Jan 17 03:25:36 2011
@@ -0,0 +1,110 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+#define V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+
+#include "v8.h"
+
+#include "lithium-allocator.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // Emit any code necessary at the end of a gap move.
+ void Finish();
+
+ // Add or delete a move from the move graph without emitting any code.
+ // Used to build up the graph and remove trivial moves.
+ void AddMove(LMoveOperands move);
+ void RemoveMove(int index);
+
+ // Report the count of uses of operand as a source in a not-yet-performed
+ // move. Used to rebuild use counts.
+ int CountSourceUses(LOperand* operand);
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Execute a move by emitting a swap of two operands. The move from
+ // source to destination is removed from the move graph.
+ void EmitSwap(int index);
+
+ // Ensure that the given operand is not spilled.
+ void EnsureRestored(LOperand* operand);
+
+ // Return a register that can be used as a temp register, spilling
+ // something if necessary.
+ Register EnsureTempRegister();
+
+ // Return a known free register different from the given one (which could
+ // be no_reg---returning any free register), or no_reg if there is no such
+ // register.
+ Register GetFreeRegisterNot(Register reg);
+
+ // Verify that the state is the initial one, ready to resolve a single
+ // parallel move.
+ bool HasBeenReset();
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+
+ // Source and destination use counts for the general purpose registers.
+ int source_uses_[Register::kNumAllocatableRegisters];
+ int destination_uses_[Register::kNumAllocatableRegisters];
+
+ // If we had to spill on demand, the currently spilled register's
+ // allocation index.
+ int spilled_register_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
=======================================
--- /branches/bleeding_edge/src/SConscript Tue Jan 11 04:24:18 2011
+++ /branches/bleeding_edge/src/SConscript Mon Jan 17 03:25:36 2011
@@ -190,6 +190,7 @@
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/lithium-codegen-ia32.cc
+ ia32/lithium-gap-resolver-ia32.cc
ia32/lithium-ia32.cc
ia32/macro-assembler-ia32.cc
ia32/regexp-macro-assembler-ia32.cc
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Mon Jan 17 00:11:03 2011
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Mon Jan 17 03:25:36 2011
@@ -172,13 +172,13 @@
void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.from()->IsConstantOperand()) {
+ if (move.source()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
- AddResultMove(move.from(), move.to());
+ AddResultMove(move.source(), move.destination());
} else {
- LGapNode* from = LookupNode(move.from());
- LGapNode* to = LookupNode(move.to());
+ LGapNode* from = LookupNode(move.source());
+ LGapNode* to = LookupNode(move.destination());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
@@ -816,8 +816,8 @@
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
- LOperand* from = move.from();
- LOperand* to = move.to();
+ LOperand* from = move.source();
+ LOperand* to = move.destination();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(dbl_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
=======================================
--- /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc Mon Jan 17 00:11:03 2011
+++ /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc Mon Jan 17 03:25:36 2011
@@ -58,157 +58,6 @@
};
-class LGapNode: public ZoneObject {
- public:
- explicit LGapNode(LOperand* operand)
- : operand_(operand), resolved_(false), visited_id_(-1) { }
-
- LOperand* operand() const { return operand_; }
- bool IsResolved() const { return !IsAssigned() || resolved_; }
- void MarkResolved() {
- ASSERT(!IsResolved());
- resolved_ = true;
- }
- int visited_id() const { return visited_id_; }
- void set_visited_id(int id) {
- ASSERT(id > visited_id_);
- visited_id_ = id;
- }
-
- bool IsAssigned() const { return assigned_from_.is_set(); }
- LGapNode* assigned_from() const { return assigned_from_.get(); }
- void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
- LOperand* operand_;
- SetOncePointer<LGapNode> assigned_from_;
- bool resolved_;
- int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
- : nodes_(32),
- identified_cycles_(4),
- result_(16),
- next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
- const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand) {
- nodes_.Rewind(0);
- identified_cycles_.Rewind(0);
- result_.Rewind(0);
- next_visited_id_ = 0;
-
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) RegisterMove(move);
- }
-
- for (int i = 0; i < identified_cycles_.length(); ++i) {
- ResolveCycle(identified_cycles_[i], marker_operand);
- }
-
- int unresolved_nodes;
- do {
- unresolved_nodes = 0;
- for (int j = 0; j < nodes_.length(); j++) {
- LGapNode* node = nodes_[j];
- if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
- AddResultMove(node->assigned_from(), node);
- node->MarkResolved();
- }
- if (!node->IsResolved()) ++unresolved_nodes;
- }
- } while (unresolved_nodes > 0);
- return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
- AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
- result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
- ZoneList<LOperand*> cycle_operands(8);
- cycle_operands.Add(marker_operand);
- LGapNode* cur = start;
- do {
- cur->MarkResolved();
- cycle_operands.Add(cur->operand());
- cur = cur->assigned_from();
- } while (cur != start);
- cycle_operands.Add(marker_operand);
-
- for (int i = cycle_operands.length() - 1; i > 0; --i) {
- LOperand* from = cycle_operands[i];
- LOperand* to = cycle_operands[i - 1];
- AddResultMove(from, to);
- }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
- ASSERT(a != b);
- LGapNode* cur = a;
- while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
- cur->set_visited_id(visited_id);
- cur = cur->assigned_from();
- }
-
- return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
- ASSERT(a != b);
- return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.from()->IsConstantOperand()) {
- // Constant moves should be last in the machine code. Therefore add them
- // first to the result set.
- AddResultMove(move.from(), move.to());
- } else {
- LGapNode* from = LookupNode(move.from());
- LGapNode* to = LookupNode(move.to());
- if (to->IsAssigned() && to->assigned_from() == from) {
- move.Eliminate();
- return;
- }
- ASSERT(!to->IsAssigned());
- if (CanReach(from, to)) {
- // This introduces a cycle. Save.
- identified_cycles_.Add(from);
- }
- to->set_assigned_from(from);
- }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
- for (int i = 0; i < nodes_.length(); ++i) {
- if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
- }
-
- // No node found => create a new one.
- LGapNode* result = new LGapNode(operand);
- nodes_.Add(result);
- return result;
-}
-
-
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -425,6 +274,14 @@
return Operand(ebp, -(index - 1) * kPointerSize);
}
}
+
+
+Operand LCodeGen::HighOperand(LOperand* op) {
+ ASSERT(op->IsDoubleStackSlot());
+ int index = op->index();
+ int offset = (index >= 0) ? index + 3 : index - 1;
+ return Operand(ebp, -offset * kPointerSize);
+}
void LCodeGen::WriteTranslation(LEnvironment* environment,
@@ -762,66 +619,7 @@
void LCodeGen::DoParallelMove(LParallelMove* move) {
- // xmm0 must always be a scratch register.
- XMMRegister xmm_scratch = xmm0;
- LUnallocated marker_operand(LUnallocated::NONE);
-
- Register cpu_scratch = esi;
- bool destroys_cpu_scratch = false;
-
- const ZoneList<LMoveOperands>* moves =
- resolver_.Resolve(move->move_operands(), &marker_operand);
- for (int i = moves->length() - 1; i >= 0; --i) {
- LMoveOperands move = moves->at(i);
- LOperand* from = move.from();
- LOperand* to = move.to();
- ASSERT(!from->IsDoubleRegister() ||
- !ToDoubleRegister(from).is(xmm_scratch));
- ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
- ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
- ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
- if (from->IsConstantOperand()) {
- __ mov(ToOperand(to), ToImmediate(from));
- } else if (from == &marker_operand) {
- if (to->IsRegister() || to->IsStackSlot()) {
- __ mov(ToOperand(to), cpu_scratch);
- ASSERT(destroys_cpu_scratch);
- } else {
- ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
- __ movdbl(ToOperand(to), xmm_scratch);
- }
- } else if (to == &marker_operand) {
- if (from->IsRegister() || from->IsStackSlot()) {
- __ mov(cpu_scratch, ToOperand(from));
- destroys_cpu_scratch = true;
- } else {
- ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
- __ movdbl(xmm_scratch, ToOperand(from));
- }
- } else if (from->IsRegister()) {
- __ mov(ToOperand(to), ToRegister(from));
- } else if (to->IsRegister()) {
- __ mov(ToRegister(to), ToOperand(from));
- } else if (from->IsStackSlot()) {
- ASSERT(to->IsStackSlot());
- __ push(eax);
- __ mov(eax, ToOperand(from));
- __ mov(ToOperand(to), eax);
- __ pop(eax);
- } else if (from->IsDoubleRegister()) {
- __ movdbl(ToOperand(to), ToDoubleRegister(from));
- } else if (to->IsDoubleRegister()) {
- __ movdbl(ToDoubleRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
- __ movdbl(xmm_scratch, ToOperand(from));
- __ movdbl(ToOperand(to), xmm_scratch);
- }
- }
-
- if (destroys_cpu_scratch) {
- __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
- }
+ resolver_.Resolve(move);
}
=======================================
--- /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.h Mon Jan 17 00:11:03 2011
+++ /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.h Mon Jan 17 03:25:36 2011
@@ -34,6 +34,7 @@
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
+#include "ia32/lithium-gap-resolver-ia32.h"
namespace v8 {
namespace internal {
@@ -43,28 +44,6 @@
class LGapNode;
class SafepointGenerator;
-class LGapResolver BASE_EMBEDDED {
- public:
- LGapResolver();
- const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand);
-
- private:
- LGapNode* LookupNode(LOperand* operand);
- bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
- bool CanReach(LGapNode* a, LGapNode* b);
- void RegisterMove(LMoveOperands move);
- void AddResultMove(LOperand* from, LOperand* to);
- void AddResultMove(LGapNode* from, LGapNode* to);
- void ResolveCycle(LGapNode* start, LOperand* marker_operand);
-
- ZoneList<LGapNode*> nodes_;
- ZoneList<LGapNode*> identified_cycles_;
- ZoneList<LMoveOperands> result_;
- int next_visited_id_;
-};
-
-
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -80,9 +59,23 @@
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
- osr_pc_offset_(-1) {
+ osr_pc_offset_(-1),
+ resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
+
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+
+ // Support for converting LOperands to assembler types.
+ Operand ToOperand(LOperand* op) const;
+ Register ToRegister(LOperand* op) const;
+ XMMRegister ToDoubleRegister(LOperand* op) const;
+ Immediate ToImmediate(LOperand* op);
+
+ // The operand denoting the second word (the one with a higher address) of
+ // a double stack slot.
+ Operand HighOperand(LOperand* op);
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
@@ -129,7 +122,6 @@
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
- MacroAssembler* masm() const { return masm_; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -191,11 +183,7 @@
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
int ToInteger32(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op) const;
- Immediate ToImmediate(LOperand* op);
// Specific math operations - used from DoUnaryMathOperation.
void DoMathAbs(LUnaryMathOperation* instr);
=======================================
--- /branches/bleeding_edge/src/ia32/lithium-ia32.cc Mon Jan 17 00:11:03 2011
+++ /branches/bleeding_edge/src/ia32/lithium-ia32.cc Mon Jan 17 03:25:36 2011
@@ -316,7 +316,7 @@
}
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
+LOperand* LChunk::GetNextSpillSlot(bool is_double) {
int index = GetNextSpillIndex(is_double);
if (is_double) {
return LDoubleStackSlot::Create(index);
=======================================
--- /branches/bleeding_edge/src/lithium-allocator.cc Mon Jan 17 02:08:58 2011
+++ /branches/bleeding_edge/src/lithium-allocator.cc Mon Jan 17 03:25:36 2011
@@ -745,10 +745,10 @@
const ZoneList<LMoveOperands>* move_operands = move->move_operands();
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands cur = move_operands->at(i);
- LOperand* cur_to = cur.to();
+ LOperand* cur_to = cur.destination();
if (cur_to->IsUnallocated()) {
if (cur_to->VirtualRegister() == from->VirtualRegister()) {
- move->AddMove(cur.from(), to);
+ move->AddMove(cur.source(), to);
return;
}
}
@@ -896,8 +896,8 @@
for (int i = 0; i < move_operands->length(); ++i) {
LMoveOperands* cur = &move_operands->at(i);
if (cur->IsIgnored()) continue;
- LOperand* from = cur->from();
- LOperand* to = cur->to();
+ LOperand* from = cur->source();
+ LOperand* to = cur->destination();
HPhi* phi = LookupPhi(to);
LOperand* hint = to;
if (phi != NULL) {
@@ -1217,9 +1217,9 @@
LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
for (int j = 0; j < move->move_operands()->length(); ++j) {
- LOperand* to = move->move_operands()->at(j).to();
+ LOperand* to = move->move_operands()->at(j).destination();
if (to->IsUnallocated() && to->VirtualRegister() == phi->id()) {
- hint = move->move_operands()->at(j).from();
+ hint = move->move_operands()->at(j).source();
phi_operand = to;
break;
}
=======================================
--- /branches/bleeding_edge/src/lithium-allocator.h Fri Jan 14 05:16:48 2011
+++ /branches/bleeding_edge/src/lithium-allocator.h Mon Jan 17 03:25:36 2011
@@ -321,27 +321,49 @@
class LMoveOperands BASE_EMBEDDED {
public:
- LMoveOperands(LOperand* from, LOperand* to) : from_(from), to_(to) { }
-
- LOperand* from() const { return from_; }
- LOperand* to() const { return to_; }
+ LMoveOperands(LOperand* source, LOperand* destination)
+ : source_(source), destination_(destination) {
+ }
+
+ LOperand* source() const { return source_; }
+ void set_source(LOperand* operand) { source_ = operand; }
+
+ LOperand* destination() const { return destination_; }
+ void set_destination(LOperand* operand) { destination_ = operand; }
+
+ // The gap resolver marks moves as "in-progress" by clearing the
+ // destination (but not the source).
+ bool IsPending() const {
+ return destination_ == NULL && source_ != NULL;
+ }
+
+ // True if this move blocks a move into the given destination operand.
+ bool Blocks(LOperand* operand) const {
+ return !IsEliminated() && source()->Equals(operand);
+ }
+
+ // A move is redundant if it's been eliminated, if its source and
+ // destination are the same, or if its destination is unneeded.
bool IsRedundant() const {
- return IsEliminated() || from_->Equals(to_) || IsIgnored();
- }
- bool IsEliminated() const { return from_ == NULL; }
+ return IsEliminated() || source_->Equals(destination_) || IsIgnored();
+ }
+
bool IsIgnored() const {
- if (to_ != NULL && to_->IsUnallocated() &&
- LUnallocated::cast(to_)->HasIgnorePolicy()) {
- return true;
- }
- return false;
+ return destination_ != NULL &&
+ destination_->IsUnallocated() &&
+ LUnallocated::cast(destination_)->HasIgnorePolicy();
}
- void Eliminate() { from_ = to_ = NULL; }
+ // We clear both operands to indicate a move that's been eliminated.
+ void Eliminate() { source_ = destination_ = NULL; }
+ bool IsEliminated() const {
+ ASSERT(source_ != NULL || destination_ == NULL);
+ return source_ == NULL;
+ }
private:
- LOperand* from_;
- LOperand* to_;
+ LOperand* source_;
+ LOperand* destination_;
};
=======================================
--- /branches/bleeding_edge/src/lithium.cc Fri Jan 14 04:50:03 2011
+++ /branches/bleeding_edge/src/lithium.cc Mon Jan 17 03:25:36 2011
@@ -39,18 +39,21 @@
void LParallelMove::PrintDataTo(StringStream* stream) const {
- for (int i = move_operands_.length() - 1; i >= 0; --i) {
+ bool first = true;
+ for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsEliminated()) {
- LOperand* from = move_operands_[i].from();
- LOperand* to = move_operands_[i].to();
- if (from->Equals(to)) {
- to->PrintTo(stream);
+ LOperand* source = move_operands_[i].source();
+ LOperand* destination = move_operands_[i].destination();
+ if (!first) stream->Add(" ");
+ first = false;
+ if (source->Equals(destination)) {
+ destination->PrintTo(stream);
} else {
- to->PrintTo(stream);
+ destination->PrintTo(stream);
stream->Add(" = ");
- from->PrintTo(stream);
- }
- stream->Add("; ");
+ source->PrintTo(stream);
+ }
+ stream->Add(";");
}
}
}
=======================================
--- /branches/bleeding_edge/src/lithium.h Fri Jan 14 04:50:03 2011
+++ /branches/bleeding_edge/src/lithium.h Mon Jan 17 03:25:36 2011
@@ -35,9 +35,6 @@
namespace v8 {
namespace internal {
-class LCodeGen;
-class Translation;
-
class LParallelMove : public ZoneObject {
public:
LParallelMove() : move_operands_(4) { }
=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Mon Jan 17 00:11:03 2011
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Mon Jan 17 03:25:36 2011
@@ -155,13 +155,13 @@
void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.from()->IsConstantOperand()) {
+ if (move.source()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
- AddResultMove(move.from(), move.to());
+ AddResultMove(move.source(), move.destination());
} else {
- LGapNode* from = LookupNode(move.from());
- LGapNode* to = LookupNode(move.to());
+ LGapNode* from = LookupNode(move.source());
+ LGapNode* to = LookupNode(move.destination());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
@@ -651,8 +651,8 @@
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
- LOperand* from = move.from();
- LOperand* to = move.to();
+ LOperand* from = move.source();
+ LOperand* to = move.destination();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(xmm_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
=======================================
--- /branches/bleeding_edge/tools/gyp/v8.gyp Tue Jan 11 04:24:18 2011
+++ /branches/bleeding_edge/tools/gyp/v8.gyp Mon Jan 17 03:25:36 2011
@@ -581,10 +581,10 @@
'../../src/arm/full-codegen-arm.cc',
'../../src/arm/ic-arm.cc',
'../../src/arm/jump-target-arm.cc',
- '../../src/arm/lithium-codegen-arm.cc',
- '../../src/arm/lithium-codegen-arm.h',
'../../src/arm/lithium-arm.cc',
'../../src/arm/lithium-arm.h',
+ '../../src/arm/lithium-codegen-arm.cc',
+ '../../src/arm/lithium-codegen-arm.h',
'../../src/arm/macro-assembler-arm.cc',
'../../src/arm/macro-assembler-arm.h',
'../../src/arm/regexp-macro-assembler-arm.cc',
@@ -634,6 +634,8 @@
'../../src/ia32/jump-target-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.cc',
'../../src/ia32/lithium-codegen-ia32.h',
+ '../../src/ia32/lithium-gap-resolver-ia32.cc',
+ '../../src/ia32/lithium-gap-resolver-ia32.h',
'../../src/ia32/lithium-ia32.cc',
'../../src/ia32/lithium-ia32.h',
'../../src/ia32/macro-assembler-ia32.cc',