Revision: 4507
Author: [email protected]
Date: Tue Apr 27 02:09:51 2010
Log: Avoid constant pool blocking for too long

The generation of the deferred code for named property load where the load was inlined did a constant pool blocking for the whole deferred code. Having large numbers of this type of deferred code generated one after the other effectively blocked the constant pool for all the deferred code, causing the constant pool emission to be delayed for too long.

Removed the BeforeGenerate/AfterGenerate for the deferred code and made the macro assembler StartBlockConstPool/EndBlockConstPool non-public. Re-introduced BlockConstPoolFor instead, to be used together with BlockConstPoolScope to block a few more instructions across function calls.

Also handle the use of native code counters for inlined named property load.
Review URL: http://codereview.chromium.org/1787005
http://code.google.com/p/v8/source/detail?r=4507

Modified:
 /branches/bleeding_edge/src/arm/assembler-arm.cc
 /branches/bleeding_edge/src/arm/assembler-arm.h
 /branches/bleeding_edge/src/arm/codegen-arm.cc
 /branches/bleeding_edge/src/codegen.cc
 /branches/bleeding_edge/src/codegen.h

=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.cc Fri Apr 23 00:42:45 2010 +++ /branches/bleeding_edge/src/arm/assembler-arm.cc Tue Apr 27 02:09:51 2010
@@ -1782,6 +1782,11 @@
   uint32_t dummy2;
   return fits_shifter(imm32, &dummy1, &dummy2, NULL);
 }
+
+
+void Assembler::BlockConstPoolFor(int instructions) {
+  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
+}


 // Debugging.
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.h     Fri Apr 23 00:42:45 2010
+++ /branches/bleeding_edge/src/arm/assembler-arm.h     Tue Apr 27 02:09:51 2010
@@ -941,6 +941,10 @@
     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
   };

+ // Postpone the generation of the constant pool for the specified number of
+  // instructions.
+  void BlockConstPoolFor(int instructions);
+
   // Debugging

   // Mark address of the ExitJSFrame code.
@@ -957,13 +961,6 @@
   int pc_offset() const { return pc_ - buffer_; }
   int current_position() const { return current_position_; }
int current_statement_position() const { return current_statement_position_; }
-
-  void StartBlockConstPool() {
-    const_pool_blocked_nesting_++;
-  }
-  void EndBlockConstPool() {
-    const_pool_blocked_nesting_--;
-  }

   // Read/patch instructions
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
@@ -1000,6 +997,13 @@
   void BlockConstPoolBefore(int pc_offset) {
if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
   }
+
+  void StartBlockConstPool() {
+    const_pool_blocked_nesting_++;
+  }
+  void EndBlockConstPool() {
+    const_pool_blocked_nesting_--;
+  }

  private:
   // Code buffer:
=======================================
--- /branches/bleeding_edge/src/arm/codegen-arm.cc      Mon Apr 26 07:25:29 2010
+++ /branches/bleeding_edge/src/arm/codegen-arm.cc      Tue Apr 27 02:09:51 2010
@@ -351,17 +351,17 @@
       int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
       masm_->add(sp, sp, Operand(sp_delta));
       masm_->Jump(lr);
-    }

 #ifdef DEBUG
-    // Check that the size of the code used for returning matches what is
- // expected by the debugger. If the sp_delts above cannot be encoded in the
-    // add instruction the add will generate two instructions.
-    int return_sequence_length =
-        masm_->InstructionsGeneratedSince(&check_exit_codesize);
-    CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
- return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
+      // Check that the size of the code used for returning matches what is
+ // expected by the debugger. If the sp_delts above cannot be encoded in the
+      // add instruction the add will generate two instructions.
+      int return_sequence_length =
+          masm_->InstructionsGeneratedSince(&check_exit_codesize);
+      CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
+ return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
 #endif
+    }
   }

   // Adjust for function-level loop nesting.
@@ -5230,34 +5230,34 @@
     set_comment("[ DeferredReferenceGetNamedValue");
   }

-  virtual void BeforeGenerate();
   virtual void Generate();
-  virtual void AfterGenerate();

  private:
   Handle<String> name_;
 };


-void DeferredReferenceGetNamedValue::BeforeGenerate() {
-  __ StartBlockConstPool();
-}
-
-
 void DeferredReferenceGetNamedValue::Generate() {
+  __ DecrementCounter(&Counters::named_load_inline, 1, r1, r2);
   __ IncrementCounter(&Counters::named_load_inline_miss, 1, r1, r2);
+
   // Setup the name register and call load IC.
   __ mov(r2, Operand(name_));
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The call must be followed by a nop(1) instruction to indicate that the
-  // inobject has been inlined.
-  __ nop(NAMED_PROPERTY_LOAD_INLINED);
-}
-
-
-void DeferredReferenceGetNamedValue::AfterGenerate() {
-  __ EndBlockConstPool();
+
+  // The rest of the instructions in the deferred code must be together.
+  { Assembler::BlockConstPoolScope block_const_pool(masm_);
+
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+ // The call must be followed by a nop(1) instruction to indicate that the
+    // in-object has been inlined.
+    __ nop(NAMED_PROPERTY_LOAD_INLINED);
+
+    // Block the constant pool for one more instruction after leaving this
+ // constant pool block scope to include the branch instruction ending the
+    // deferred code.
+    __ BlockConstPoolFor(1);
+  }
 }


@@ -5276,6 +5276,11 @@
     DeferredReferenceGetNamedValue* deferred =
         new DeferredReferenceGetNamedValue(name);

+ // Counter will be decremented in the deferred code. Placed here to avoid
+    // having it in the instruction stream below where patching will occur.
+    __ IncrementCounter(&Counters::named_load_inline, 1,
+                        frame_->scratch0(), frame_->scratch1());
+
// The following instructions are the inlined load of an in-object property. // Parts of this code is patched, so the exact instructions generated needs // to be fixed. Therefore the instruction pool is blocked when generating
@@ -5303,13 +5308,12 @@
// Use initially use an invalid index. The index will be patched by the
       // inline cache code.
       __ ldr(r0, MemOperand(r1, 0));
+
+      // Make sure that the expected number of instructions are generated.
+      ASSERT_EQ(kInlinedNamedLoadInstructions,
+ masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }

-    // Make sure that the expected number of instructions are generated.
-    ASSERT_EQ(kInlinedNamedLoadInstructions,
-              masm_->InstructionsGeneratedSince(&check_inlined_codesize));
-
-    __ IncrementCounter(&Counters::named_load_inline, 1, r1, r2);
     deferred->BindExit();
   }
 }
=======================================
--- /branches/bleeding_edge/src/codegen.cc      Fri Apr 23 00:42:45 2010
+++ /branches/bleeding_edge/src/codegen.cc      Tue Apr 27 02:09:51 2010
@@ -77,13 +77,11 @@
     }
     // Generate the code.
     Comment cmnt(masm_, code->comment());
-    code->BeforeGenerate();
     masm_->bind(code->entry_label());
     code->SaveRegisters();
     code->Generate();
     code->RestoreRegisters();
     masm_->jmp(code->exit_label());
-    code->AfterGenerate();
   }
 }

=======================================
--- /branches/bleeding_edge/src/codegen.h       Fri Apr 23 00:42:45 2010
+++ /branches/bleeding_edge/src/codegen.h       Tue Apr 27 02:09:51 2010
@@ -211,9 +211,6 @@

   void SaveRegisters();
   void RestoreRegisters();
-
-  virtual void BeforeGenerate() { }
-  virtual void AfterGenerate() { }

  protected:
   MacroAssembler* masm_;

--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to