Please use commit messages where you don't have to click a link to see what the change is about.

On Tue, Jun 12, 2012 at 7:26 PM, <codesite-nore...@google.com> wrote:
> Revision: 11784
> Author: svenpa...@chromium.org
> Date: Tue Jun 12 10:26:28 2012
> Log: Martyn Capewell <m.m.capew...@googlemail.com>
>
> Review URL: https://chromiumcodereview.appspot.com/10451037
> http://code.google.com/p/v8/source/detail?r=11784
>
> Modified:
>  /branches/bleeding_edge/src/arm/code-stubs-arm.cc
>  /branches/bleeding_edge/src/arm/full-codegen-arm.cc
>  /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
>  /branches/bleeding_edge/test/cctest/test-disasm-arm.cc
>
> =======================================
> --- /branches/bleeding_edge/src/arm/code-stubs-arm.cc  Wed May 23 07:24:29 2012
> +++ /branches/bleeding_edge/src/arm/code-stubs-arm.cc  Tue Jun 12 10:26:28 2012
> @@ -3737,9 +3737,13 @@
>    // Compute the return address in lr to return to after the jump below. Pc is
>    // already at '+ 8' from the current instruction but return is after three
>    // instructions so add another 4 to pc to get the return address.
> -  masm->add(lr, pc, Operand(4));
> -  __ str(lr, MemOperand(sp, 0));
> -  masm->Jump(r5);
> +  {
> +    // Prevent literal pool emission before return address.
> +    Assembler::BlockConstPoolScope block_const_pool(masm);
> +    masm->add(lr, pc, Operand(4));
> +    __ str(lr, MemOperand(sp, 0));
> +    masm->Jump(r5);
> +  }
>
>    if (always_allocate) {
>      // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
> @@ -3956,14 +3960,21 @@
>    // Jump to a faked try block that does the invoke, with a faked catch
>    // block that sets the pending exception.
>    __ jmp(&invoke);
> -  __ bind(&handler_entry);
> -  handler_offset_ = handler_entry.pos();
> -  // Caught exception: Store result (exception) in the pending exception
> -  // field in the JSEnv and return a failure sentinel. Coming in here the
> -  // fp will be invalid because the PushTryHandler below sets it to 0 to
> -  // signal the existence of the JSEntry frame.
> -  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
> -                                       isolate)));
> +
> +  // Block literal pool emission whilst taking the position of the handler
> +  // entry. This avoids making the assumption that literal pools are always
> +  // emitted after an instruction is emitted, rather than before.
> +  {
> +    Assembler::BlockConstPoolScope block_const_pool(masm);
> +    __ bind(&handler_entry);
> +    handler_offset_ = handler_entry.pos();
> +    // Caught exception: Store result (exception) in the pending exception
> +    // field in the JSEnv and return a failure sentinel. Coming in here the
> +    // fp will be invalid because the PushTryHandler below sets it to 0 to
> +    // signal the existence of the JSEntry frame.
> +    __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
> +                                         isolate)));
> +  }
>    __ str(r0, MemOperand(ip));
>    __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
>    __ b(&exit);
> @@ -4006,9 +4017,13 @@
>
>    // Branch and link to JSEntryTrampoline. We don't use the double underscore
>    // macro for the add instruction because we don't want the coverage tool
> -  // inserting instructions here after we read the pc.
> -  __ mov(lr, Operand(pc));
> -  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
> +  // inserting instructions here after we read the pc. We block literal pool
> +  // emission for the same reason.
> +  {
> +    Assembler::BlockConstPoolScope block_const_pool(masm);
> +    __ mov(lr, Operand(pc));
> +    masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
> +  }
>
>    // Unlink this frame from the handler chain.
>    __ PopTryHandler();
> @@ -6812,6 +6827,10 @@
>                                               Register target) {
>    __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
>                       RelocInfo::CODE_TARGET));
> +
> +  // Prevent literal pool emission during calculation of return address.
> +  Assembler::BlockConstPoolScope block_const_pool(masm);
> +
>    // Push return address (accessible to GC through exit frame pc).
>    // Note that using pc with str is deprecated.
>    Label start;
> @@ -7172,8 +7191,13 @@
>    // forth between a compare instructions (a nop in this position) and the
>    // real branch when we start and stop incremental heap marking.
>    // See RecordWriteStub::Patch for details.
> -  __ b(&skip_to_incremental_noncompacting);
> -  __ b(&skip_to_incremental_compacting);
> +  {
> +    // Block literal pool emission, as the position of these two instructions
> +    // is assumed by the patching code.
> +    Assembler::BlockConstPoolScope block_const_pool(masm);
> +    __ b(&skip_to_incremental_noncompacting);
> +    __ b(&skip_to_incremental_compacting);
> +  }
>
>    if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
>      __ RememberedSetHelper(object_,
> =======================================
> --- /branches/bleeding_edge/src/arm/full-codegen-arm.cc  Mon Jun 11 06:18:05 2012
> +++ /branches/bleeding_edge/src/arm/full-codegen-arm.cc  Tue Jun 12 10:26:28 2012
> @@ -73,9 +73,6 @@
>      Assembler::BlockConstPoolScope block_const_pool(masm_);
>      __ bind(&patch_site_);
>      __ cmp(reg, Operand(reg));
> -    // Don't use b(al, ...) as that might emit the constant pool right after the
> -    // branch. After patching when the branch is no longer unconditional
> -    // execution can continue into the constant pool.
>      __ b(eq, target);  // Always taken before patched.
>    }
>
> @@ -90,6 +87,8 @@
>    }
>
>    void EmitPatchInfo() {
> +    // Block literal pool emission whilst recording patch site information.
> +    Assembler::BlockConstPoolScope block_const_pool(masm_);
>      if (patch_site_.is_bound()) {
>        int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
>        Register reg;
> @@ -344,6 +343,8 @@
>  void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
>                                         Label* back_edge_target) {
>    Comment cmnt(masm_, "[ Stack check");
> +  // Block literal pools whilst emitting stack check code.
> +  Assembler::BlockConstPoolScope block_const_pool(masm_);
>    Label ok;
>
>    if (FLAG_count_based_interrupts) {
> =======================================
> --- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc  Tue Jun 12 08:44:12 2012
> +++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc  Tue Jun 12 10:26:28 2012
> @@ -571,6 +571,9 @@
>                                 LInstruction* instr,
>                                 SafepointMode safepoint_mode) {
>    ASSERT(instr != NULL);
> +  // Block literal pool emission to ensure nop indicating no inlined smi code
> +  // is in the correct position.
> +  Assembler::BlockConstPoolScope block_const_pool(masm());
>    LPointerMap* pointers = instr->pointer_map();
>    RecordPosition(pointers->position());
>    __ Call(code, mode);
> @@ -1685,6 +1688,9 @@
>    ASSERT(ToRegister(instr->result()).is(r0));
>
>    BinaryOpStub stub(instr->op(), NO_OVERWRITE);
> +  // Block literal pool emission to ensure nop indicating no inlined smi code
> +  // is in the correct position.
> +  Assembler::BlockConstPoolScope block_const_pool(masm());
>    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
>    __ nop();  // Signals no inlined code.
>  }
> @@ -2316,20 +2322,25 @@
>    Label cache_miss;
>    Register map = temp;
>    __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
> -  __ bind(deferred->map_check());  // Label for calculating code patching.
> -  // We use Factory::the_hole_value() on purpose instead of loading from the
> -  // root array to force relocation to be able to later patch with
> -  // the cached map.
> -  Handle<JSGlobalPropertyCell> cell =
> -      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
> -  __ mov(ip, Operand(Handle<Object>(cell)));
> -  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
> -  __ cmp(map, Operand(ip));
> -  __ b(ne, &cache_miss);
> -  // We use Factory::the_hole_value() on purpose instead of loading from the
> -  // root array to force relocation to be able to later patch
> -  // with true or false.
> -  __ mov(result, Operand(factory()->the_hole_value()));
> +  {
> +    // Block constant pool emission to ensure the positions of instructions are
> +    // as expected by the patcher. See InstanceofStub::Generate().
> +    Assembler::BlockConstPoolScope block_const_pool(masm());
> +    __ bind(deferred->map_check());  // Label for calculating code patching.
> +    // We use Factory::the_hole_value() on purpose instead of loading from the
> +    // root array to force relocation to be able to later patch with
> +    // the cached map.
> +    Handle<JSGlobalPropertyCell> cell =
> +        factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
> +    __ mov(ip, Operand(Handle<Object>(cell)));
> +    __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
> +    __ cmp(map, Operand(ip));
> +    __ b(ne, &cache_miss);
> +    // We use Factory::the_hole_value() on purpose instead of loading from the
> +    // root array to force relocation to be able to later patch
> +    // with true or false.
> +    __ mov(result, Operand(factory()->the_hole_value()));
> +  }
>    __ b(&done);
>
>    // The inlined call site cache did not match. Check null and string before
> @@ -5178,6 +5189,8 @@
>    int current_pc = masm()->pc_offset();
>    int patch_size = Deoptimizer::patch_size();
>    if (current_pc < last_lazy_deopt_pc_ + patch_size) {
> +    // Block literal pool emission for duration of padding.
> +    Assembler::BlockConstPoolScope block_const_pool(masm());
>      int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
>      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
>      while (padding_size > 0) {
> =======================================
> --- /branches/bleeding_edge/test/cctest/test-disasm-arm.cc  Fri Jan 13 05:09:52 2012
> +++ /branches/bleeding_edge/test/cctest/test-disasm-arm.cc  Tue Jun 12 10:26:28 2012
> @@ -92,6 +92,10 @@
>      if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
>    }
>
> +// Force emission of any pending literals into a pool.
> +#define EMIT_PENDING_LITERALS() \
> +  assm.CheckConstPool(true, false)
> +
>
>  // Verify that all invocations of the COMPARE macro passed successfully.
>  // Exit with a failure if at least one of the tests failed.
> @@ -280,6 +284,10 @@
>    // is pretty strange anyway.
>    COMPARE(mov(r5, Operand(0x01234), SetCC, ne),
>            "159fc000 ldrne ip, [pc, #+0]");
> +  // Emit a literal pool now, otherwise this could be dumped later, in the
> +  // middle of a different test.
> +  EMIT_PENDING_LITERALS();
> +
>    // We only disassemble one instruction so the eor instruction is not here.
>    // The eor does the setcc so we get a movw here.
>    COMPARE(eor(r5, r4, Operand(0x1234), SetCC, ne),

--
Erik Corry, Software Engineer
Google Denmark ApS - Frederiksborggade 20B, 1 sal, 1360 København K
- Denmark - CVR nr. 28 86 69 84
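
The pattern applied in every hunk of the quoted change is the same: any instruction sequence whose layout is position-sensitive (a pc-relative return-address computation, a pair of patchable branches, deopt padding, a patch site) is wrapped in an Assembler::BlockConstPoolScope so the ARM assembler cannot flush its pending literal (constant) pool into the middle of the sequence. Below is a minimal, self-contained sketch of that RAII idea; it is not the real v8 Assembler. Only the BlockConstPoolScope name and the add lr, pc / str / jump sequence are taken from the diff; Emit(), AddPendingLiteral() and MaybeEmitConstPool() are invented here purely for illustration.

// Minimal sketch of the RAII pattern the patch relies on (not v8 code).
#include <cstdio>

class Assembler {
 public:
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assm) : assm_(assm) {
      assm_->const_pool_blocked_nesting_++;
    }
    ~BlockConstPoolScope() { assm_->const_pool_blocked_nesting_--; }
   private:
    Assembler* assm_;
  };

  void Emit(const char* instr) {
    // A real assembler would consider flushing the literal pool around each
    // emitted instruction; that flush is suppressed while any
    // BlockConstPoolScope is alive.
    MaybeEmitConstPool();
    std::printf("%4d  %s\n", pc_offset_, instr);
    pc_offset_ += 4;  // Fixed-width ARM instructions.
  }

  void MaybeEmitConstPool() {
    if (const_pool_blocked_nesting_ > 0) return;  // Emission is blocked.
    if (pending_literals_ == 0) return;
    std::printf("%4d  <literal pool: %d entries>\n",
                pc_offset_, pending_literals_);
    pc_offset_ += 4 * pending_literals_;
    pending_literals_ = 0;
  }

  void AddPendingLiteral() { pending_literals_++; }

 private:
  int pc_offset_ = 0;
  int pending_literals_ = 0;
  int const_pool_blocked_nesting_ = 0;
};

int main() {
  Assembler masm;
  masm.AddPendingLiteral();  // Something earlier queued a literal.
  {
    // As in the first code-stubs-arm.cc hunk: keep the pool out of a
    // pc-relative return-address sequence, whose three instructions must
    // stay adjacent for the computed lr to be correct.
    Assembler::BlockConstPoolScope block_const_pool(&masm);
    masm.Emit("add lr, pc, #4");
    masm.Emit("str lr, [sp]");
    masm.Emit("bx  r5");
  }
  masm.MaybeEmitConstPool();  // Safe to flush once the scope has closed.
  return 0;
}

The test change goes the other way: the new EMIT_PENDING_LITERALS() macro calls assm.CheckConstPool(true, false) to force any queued literals out immediately, so they cannot be dumped later in the middle of an unrelated COMPARE.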