Revision: 21161
Author:   m.m.capew...@googlemail.com
Date:     Tue May  6 11:00:28 2014 UTC
Log: ARM64: Use the shifter operand to merge in previous shift instructions.

When possible, we transform sequences of code of the form
    lsl x8, x9, #imm
    add x0, x1, x8
into
    add x0, x1, x9 LSL #imm

R=u...@chromium.org

Review URL: https://codereview.chromium.org/257203002
http://code.google.com/p/v8/source/detail?r=21161

Modified:
 /branches/bleeding_edge/src/arm64/assembler-arm64.h
 /branches/bleeding_edge/src/arm64/constants-arm64.h
 /branches/bleeding_edge/src/arm64/lithium-arm64.cc
 /branches/bleeding_edge/src/arm64/lithium-arm64.h
 /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc
 /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.h
 /branches/bleeding_edge/src/arm64/simulator-arm64.cc
 /branches/bleeding_edge/src/hydrogen-instructions.h

=======================================
--- /branches/bleeding_edge/src/arm64/assembler-arm64.h Wed Apr 30 09:50:58 2014 UTC +++ /branches/bleeding_edge/src/arm64/assembler-arm64.h Tue May 6 11:00:28 2014 UTC
@@ -1748,6 +1748,13 @@
   inline static Instr ImmCondCmp(unsigned imm);
   inline static Instr Nzcv(StatusFlags nzcv);

+  static bool IsImmAddSub(int64_t immediate);
+  static bool IsImmLogical(uint64_t value,
+                           unsigned width,
+                           unsigned* n,
+                           unsigned* imm_s,
+                           unsigned* imm_r);
+
   // MemOperand offset encoding.
   inline static Instr ImmLSUnsigned(int imm12);
   inline static Instr ImmLS(int imm9);
@@ -1861,11 +1868,6 @@
                         unsigned imm_s,
                         unsigned imm_r,
                         LogicalOp op);
-  static bool IsImmLogical(uint64_t value,
-                           unsigned width,
-                           unsigned* n,
-                           unsigned* imm_s,
-                           unsigned* imm_r);

   void ConditionalCompare(const Register& rn,
                           const Operand& operand,
@@ -1896,7 +1898,6 @@
               const Operand& operand,
               FlagsUpdate S,
               AddSubOp op);
-  static bool IsImmAddSub(int64_t immediate);

   static bool IsImmFP32(float imm);
   static bool IsImmFP64(double imm);
=======================================
--- /branches/bleeding_edge/src/arm64/constants-arm64.h Wed Apr 30 13:38:00 2014 UTC +++ /branches/bleeding_edge/src/arm64/constants-arm64.h Tue May 6 11:00:28 2014 UTC
@@ -89,6 +89,8 @@
 const unsigned kJSSPCode = 28;
 const unsigned kSPRegInternalCode = 63;
 const unsigned kRegCodeMask = 0x1f;
+const unsigned kShiftAmountWRegMask = 0x1f;
+const unsigned kShiftAmountXRegMask = 0x3f;
 // Standard machine types defined by AAPCS64.
 const unsigned kByteSize = 8;
 const unsigned kByteSizeInBytes = kByteSize >> 3;
=======================================
--- /branches/bleeding_edge/src/arm64/lithium-arm64.cc Mon May 5 11:03:14 2014 UTC +++ /branches/bleeding_edge/src/arm64/lithium-arm64.cc Tue May 6 11:00:28 2014 UTC
@@ -826,6 +826,12 @@
   if (instr->representation().IsSmiOrInteger32()) {
    ASSERT(instr->left()->representation().Equals(instr->representation()));
    ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }
+
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right =
         UseRegisterOrConstantAtStart(instr->BetterRightOperand());
@@ -905,6 +911,11 @@
    ASSERT(instr->left()->representation().Equals(instr->representation()));
    ASSERT(instr->right()->representation().Equals(instr->representation()));
     ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+ LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }

     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right =
@@ -2025,6 +2036,117 @@
       new(zone()) LSeqStringSetChar(context, string, index, value, temp);
   return DefineAsRegister(result);
 }
+
+
+HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val,
+                                                                HValue** left) {
+  if (!val->representation().IsInteger32()) return NULL;
+  if (!(val->IsBitwise() || val->IsAdd() || val->IsSub())) return NULL;
+
+  HBinaryOperation* hinstr = HBinaryOperation::cast(val);
+  HValue* hleft = hinstr->left();
+  HValue* hright = hinstr->right();
+  ASSERT(hleft->representation().Equals(hinstr->representation()));
+  ASSERT(hright->representation().Equals(hinstr->representation()));
+
+  if ((hright->IsConstant() &&
+       LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
+      (hinstr->IsCommutative() && hleft->IsConstant() &&
+       LikelyFitsImmField(hinstr, HConstant::cast(hleft)->Integer32Value()))) {
+    // The constant operand will likely fit in the immediate field. We are
+    // better off with
+    //     lsl x8, x9, #imm
+    //     add x0, x8, #imm2
+    // than with
+    //     mov x16, #imm2
+    //     add x0, x16, x9 LSL #imm
+    return NULL;
+  }
+
+  HBitwiseBinaryOperation* shift = NULL;
+ // TODO(aleram): We will miss situations where a shift operation is used by
+  // different instructions both as a left and right operands.
+  if (hright->IsBitwiseBinaryShift() &&
+      HBitwiseBinaryOperation::cast(hright)->right()->IsConstant()) {
+    shift = HBitwiseBinaryOperation::cast(hright);
+    if (left != NULL) {
+      *left = hleft;
+    }
+  } else if (hinstr->IsCommutative() &&
+             hleft->IsBitwiseBinaryShift() &&
+             HBitwiseBinaryOperation::cast(hleft)->right()->IsConstant()) {
+    shift = HBitwiseBinaryOperation::cast(hleft);
+    if (left != NULL) {
+      *left = hright;
+    }
+  } else {
+    return NULL;
+  }
+
+ if ((JSShiftAmountFromHConstant(shift->right()) == 0) && shift->IsShr()) {
+    // Shifts right by zero can deoptimize.
+    return NULL;
+  }
+
+  return shift;
+}
+
+
+bool LChunkBuilder::ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift) {
+  if (!shift->representation().IsInteger32()) {
+    return false;
+  }
+  for (HUseIterator it(shift->uses()); !it.Done(); it.Advance()) {
+    if (shift != CanTransformToShiftedOp(it.value())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand(
+    HBinaryOperation* instr) {
+  HValue* left;
+  HBitwiseBinaryOperation* shift = CanTransformToShiftedOp(instr, &left);
+
+  if ((shift != NULL) && ShiftCanBeOptimizedAway(shift)) {
+    return DoShiftedBinaryOp(instr, left, shift);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoShiftedBinaryOp(
+    HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) {
+  ASSERT(hshift->IsBitwiseBinaryShift());
+  ASSERT(!hshift->IsShr() ||
+         (JSShiftAmountFromHConstant(hshift->right()) > 0));
+
+  LTemplateResultInstruction<1>* res;
+  LOperand* left = UseRegisterAtStart(hleft);
+  LOperand* right = UseRegisterAtStart(hshift->left());
+  LOperand* shift_amount = UseConstant(hshift->right());
+  Shift shift_op;
+  switch (hshift->opcode()) {
+    case HValue::kShl: shift_op = LSL; break;
+    case HValue::kShr: shift_op = LSR; break;
+    case HValue::kSar: shift_op = ASR; break;
+    default: UNREACHABLE(); shift_op = NO_SHIFT;
+  }
+
+  if (hinstr->IsBitwise()) {
+    res = new(zone()) LBitI(left, right, shift_op, shift_amount);
+  } else if (hinstr->IsAdd()) {
+    res = new(zone()) LAddI(left, right, shift_op, shift_amount);
+  } else {
+    ASSERT(hinstr->IsSub());
+    res = new(zone()) LSubI(left, right, shift_op, shift_amount);
+  }
+  if (hinstr->CheckFlag(HValue::kCanOverflow)) {
+    AssignEnvironment(res);
+  }
+  return DefineAsRegister(res);
+}


 LInstruction* LChunkBuilder::DoShift(Token::Value op,
@@ -2038,6 +2160,10 @@
   ASSERT(instr->left()->representation().Equals(instr->representation()));
   ASSERT(instr->right()->representation().Equals(instr->representation()));

+  if (ShiftCanBeOptimizedAway(instr)) {
+    return NULL;
+  }
+
   LOperand* left = instr->representation().IsSmi()
       ? UseRegister(instr->left())
       : UseRegisterAtStart(instr->left());
@@ -2048,8 +2174,7 @@
   int constant_value = 0;
   if (right_value->IsConstant()) {
     right = UseConstant(right_value);
-    HConstant* constant = HConstant::cast(right_value);
-    constant_value = constant->Integer32Value() & 0x1f;
+    constant_value = JSShiftAmountFromHConstant(right_value);
   } else {
     right = UseRegisterAtStart(right_value);
     if (op == Token::ROR) {
@@ -2311,6 +2436,12 @@
   if (instr->representation().IsSmiOrInteger32()) {
    ASSERT(instr->left()->representation().Equals(instr->representation()));
    ASSERT(instr->right()->representation().Equals(instr->representation()));
+
+ LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }
+
     LOperand *left;
     if (instr->left()->IsConstant() &&
         (HConstant::cast(instr->left())->Integer32Value() == 0)) {
=======================================
--- /branches/bleeding_edge/src/arm64/lithium-arm64.h Tue May 6 08:05:27 2014 UTC +++ /branches/bleeding_edge/src/arm64/lithium-arm64.h Tue May 6 11:00:28 2014 UTC
@@ -565,16 +565,30 @@

 class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
-  LAddI(LOperand* left, LOperand* right) {
+  LAddI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0)  {
     inputs_[0] = left;
     inputs_[1] = right;
   }
+
+ LAddI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }

   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
+
+  Shift shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }

   DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
   DECLARE_HYDROGEN_ACCESSOR(Add)
+
+ protected:
+  Shift shift_;
+  LOperand* shift_amount_;
 };


@@ -734,18 +748,32 @@

 class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(LOperand* left, LOperand* right) {
+  LBitI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+ LBitI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount)  {
     inputs_[0] = left;
     inputs_[1] = right;
   }

   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
+
+  Shift shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }

   Token::Value op() const { return hydrogen()->op(); }

   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
   DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ protected:
+  Shift shift_;
+  LOperand* shift_amount_;
 };


@@ -2730,16 +2758,30 @@

 class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
-  LSubI(LOperand* left, LOperand* right) {
+  LSubI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+ LSubI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount)  {
     inputs_[0] = left;
     inputs_[1] = right;
   }

   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
+
+  Shift shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }

   DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
   DECLARE_HYDROGEN_ACCESSOR(Sub)
+
+ protected:
+  Shift shift_;
+  LOperand* shift_amount_;
 };


@@ -3080,6 +3122,39 @@
   void VisitInstruction(HInstruction* current);
   void DoBasicBlock(HBasicBlock* block);

+  int JSShiftAmountFromHConstant(HValue* constant) {
+    return HConstant::cast(constant)->Integer32Value() & 0x1f;
+  }
+  bool LikelyFitsImmField(HInstruction* instr, int imm) {
+    if (instr->IsAdd() || instr->IsSub()) {
+      return Assembler::IsImmAddSub(imm) || Assembler::IsImmAddSub(-imm);
+    } else {
+      ASSERT(instr->IsBitwise());
+      unsigned unused_n, unused_imm_s, unused_imm_r;
+      return Assembler::IsImmLogical(imm, kWRegSizeInBits,
+ &unused_n, &unused_imm_s, &unused_imm_r);
+    }
+  }
+
+  // Indicates if a sequence of the form
+  //   lsl x8, x9, #imm
+  //   add x0, x1, x8
+  // can be replaced with:
+  //   add x0, x1, x9 LSL #imm
+ // If this is not possible, the function returns NULL. Otherwise it returns a
+  // pointer to the shift instruction that would be optimized away.
+  HBitwiseBinaryOperation* CanTransformToShiftedOp(HValue* val,
+                                                   HValue** left = NULL);
+  // Checks if all uses of the shift operation can optimize it away.
+  bool ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift);
+  // Attempts to merge the binary operation and an eventual previous shift
+  // operation into a single operation. Returns the merged instruction on
+  // success, and NULL otherwise.
+  LInstruction* TryDoOpWithShiftedRightOperand(HBinaryOperation* op);
+  LInstruction* DoShiftedBinaryOp(HBinaryOperation* instr,
+                                  HValue* left,
+                                  HBitwiseBinaryOperation* shift);
+
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
=======================================
--- /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc Tue May 6 08:05:27 2014 UTC +++ /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.cc Tue May 6 11:00:28 2014 UTC
@@ -1274,6 +1274,21 @@
   ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
   return constant->handle(isolate());
 }
+
+
+template<class LI>
+Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info,
+                                          IntegerSignedness signedness) {
+  if (shift_info->shift() == NO_SHIFT) {
+    return (signedness == SIGNED_INT32) ? ToOperand32I(right)
+                                        : ToOperand32U(right);
+  } else {
+    return Operand(
+        ToRegister32(right),
+        shift_info->shift(),
+        JSShiftAmountFromLConstant(shift_info->shift_amount()));
+  }
+}


 bool LCodeGen::IsSmi(LConstantOperand* op) const {
@@ -1472,7 +1487,8 @@
   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   Register result = ToRegister32(instr->result());
   Register left = ToRegister32(instr->left());
-  Operand right = ToOperand32I(instr->right());
+  Operand right = ToShiftedRightOperand32I(instr->right(), instr);
+
   if (can_overflow) {
     __ Adds(result, left, right);
     DeoptimizeIf(vs, instr->environment());
@@ -1750,7 +1766,7 @@
 void LCodeGen::DoBitI(LBitI* instr) {
   Register result = ToRegister32(instr->result());
   Register left = ToRegister32(instr->left());
-  Operand right = ToOperand32U(instr->right());
+  Operand right = ToShiftedRightOperand32U(instr->right(), instr);

   switch (instr->op()) {
     case Token::BIT_AND: __ And(result, left, right); break;
@@ -4827,7 +4843,7 @@
     }
   } else {
     ASSERT(right_op->IsConstantOperand());
-    int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
+    int shift_count = JSShiftAmountFromLConstant(right_op);
     if (shift_count == 0) {
       if ((instr->op() == Token::SHR) && instr->can_deopt()) {
         DeoptimizeIfNegative(left, instr->environment());
@@ -4890,7 +4906,7 @@
     }
   } else {
     ASSERT(right_op->IsConstantOperand());
-    int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
+    int shift_count = JSShiftAmountFromLConstant(right_op);
     if (shift_count == 0) {
       if ((instr->op() == Token::SHR) && instr->can_deopt()) {
         DeoptimizeIfNegative(left, instr->environment());
@@ -5483,7 +5499,8 @@
   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   Register result = ToRegister32(instr->result());
   Register left = ToRegister32(instr->left());
-  Operand right = ToOperand32I(instr->right());
+  Operand right = ToShiftedRightOperand32I(instr->right(), instr);
+
   if (can_overflow) {
     __ Subs(result, left, right);
     DeoptimizeIf(vs, instr->environment());
=======================================
--- /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.h Fri May 2 08:08:23 2014 UTC +++ /branches/bleeding_edge/src/arm64/lithium-codegen-arm64.h Tue May 6 11:00:28 2014 UTC
@@ -81,6 +81,7 @@
   // information on it.
   void FinishCode(Handle<Code> code);

+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
   // Support for converting LOperands to assembler types.
   // LOperand must be a register.
   Register ToRegister(LOperand* op) const;
@@ -92,6 +93,25 @@
   MemOperand ToMemOperand(LOperand* op,
StackMode stack_mode = kCanUseStackPointer) const;
   Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  template<class LI>
+  Operand ToShiftedRightOperand32I(LOperand* right,
+                                   LI* shift_info) {
+    return ToShiftedRightOperand32(right, shift_info, SIGNED_INT32);
+  }
+  template<class LI>
+  Operand ToShiftedRightOperand32U(LOperand* right,
+                                   LI* shift_info) {
+    return ToShiftedRightOperand32(right, shift_info, UNSIGNED_INT32);
+  }
+  template<class LI>
+  Operand ToShiftedRightOperand32(LOperand* right,
+                                  LI* shift_info,
+                                  IntegerSignedness signedness);
+
+  int JSShiftAmountFromLConstant(LOperand* constant) {
+    return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
+  }

   // TODO(jbramley): Examine these helpers and check that they make sense.
   // IsInteger32Constant returns true for smi constants, for example.
@@ -122,7 +142,6 @@
                                Label* exit,
                                Label* allocation_entry);

-  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
   void DoDeferredNumberTagU(LInstruction* instr,
                             LOperand* value,
                             LOperand* temp1,
=======================================
--- /branches/bleeding_edge/src/arm64/simulator-arm64.cc Wed Apr 30 12:25:18 2014 UTC +++ /branches/bleeding_edge/src/arm64/simulator-arm64.cc Tue May 6 11:00:28 2014 UTC
@@ -1979,7 +1979,8 @@
   if (shift_op != NO_SHIFT) {
     // Shift distance encoded in the least-significant five/six bits of the
     // register.
-    int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f;
+    int mask = (instr->SixtyFourBits() == 1) ? kShiftAmountXRegMask
+                                             : kShiftAmountWRegMask;
     unsigned shift = wreg(instr->Rm()) & mask;
     result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
                           shift);
=======================================
--- /branches/bleeding_edge/src/hydrogen-instructions.h Tue May 6 07:05:07 2014 UTC +++ /branches/bleeding_edge/src/hydrogen-instructions.h Tue May 6 11:00:28 2014 UTC
@@ -655,6 +655,10 @@
     HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE)
   #undef DECLARE_PREDICATE

+  bool IsBitwiseBinaryShift() {
+    return IsShl() || IsShr() || IsSar();
+  }
+
   HValue(HType type = HType::Tagged())
       : block_(NULL),
         id_(kNoNumber),

--
--
v8-dev mailing list
v8-dev@googlegroups.com
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to v8-dev+unsubscr...@googlegroups.com.
For more options, visit https://groups.google.com/d/optout.

Reply via email to