Modified: trunk/Source/_javascript_Core/ChangeLog (148898 => 148899)
--- trunk/Source/_javascript_Core/ChangeLog 2013-04-22 18:23:33 UTC (rev 148898)
+++ trunk/Source/_javascript_Core/ChangeLog 2013-04-22 18:40:58 UTC (rev 148899)
@@ -1,3 +1,24 @@
+2013-04-22 Mark Lam <mark....@apple.com>
+
+ Fix broken 32-bit build to green the bots.
+ https://bugs.webkit.org/show_bug.cgi?id=114968.
+
+ Unreviewed.
+
+ Basically, I moved JIT::emit_op_loop_hint() and JIT::emitSlow_op_loop_hint()
+ into common code where they belong, instead of the 64-bit specific section.
+
+ Also fixed some SH4 assertion failures which were also caused by
+ https://bugs.webkit.org/show_bug.cgi?id=114963. Thanks to Julien Brianceau
+ for pointing this out.
+
+ * assembler/MacroAssemblerSH4.h:
+ (JSC::MacroAssemblerSH4::branchAdd32):
+ * jit/JITOpcodes.cpp:
+ (JSC):
+ (JSC::JIT::emit_op_loop_hint):
+ (JSC::JIT::emitSlow_op_loop_hint):
+
2013-04-22 Oliver Hunt <oli...@apple.com>
Perform null check before trying to use the result of readline()
Modified: trunk/Source/_javascript_Core/assembler/MacroAssemblerSH4.h (148898 => 148899)
--- trunk/Source/_javascript_Core/assembler/MacroAssemblerSH4.h 2013-04-22 18:23:33 UTC (rev 148898)
+++ trunk/Source/_javascript_Core/assembler/MacroAssemblerSH4.h 2013-04-22 18:40:58 UTC (rev 148899)
@@ -1907,7 +1907,7 @@
Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
if (cond == Overflow) {
m_assembler.addvlRegReg(src, dest);
@@ -1937,7 +1937,7 @@
Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
move(imm, scratchReg3);
return branchAdd32(cond, scratchReg3, dest);
@@ -1945,7 +1945,7 @@
Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
{
- ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
+ ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
if (src != dest)
move(src, dest);
Modified: trunk/Source/_javascript_Core/jit/JITOpcodes.cpp (148898 => 148899)
--- trunk/Source/_javascript_Core/jit/JITOpcodes.cpp 2013-04-22 18:23:33 UTC (rev 148898)
+++ trunk/Source/_javascript_Core/jit/JITOpcodes.cpp 2013-04-22 18:40:58 UTC (rev 148899)
@@ -481,52 +481,6 @@
isZero.link(this);
}
-void JIT::emit_op_loop_hint(Instruction*)
-{
- // Emit the watchdog timer check:
- if (m_vm->watchdog.isEnabled())
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress())));
-
- // Emit the JIT optimization check:
- if (canBeOptimized())
- addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
- AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
-}
-
-void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
-{
- // Emit the slow path of the watchdog timer check:
- if (m_vm->watchdog.isEnabled()) {
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_handle_watchdog_timer);
- stubCall.call();
-
-#if ENABLE(DFG_JIT)
- if (canBeOptimized()) {
- Jump doOptimize = branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
- AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
- doOptimize.link(this);
- } else
-#endif
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
- }
-
-#if ENABLE(DFG_JIT)
- // Emit the slow path for the JIT optimization check:
- if (canBeOptimized()) {
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_optimize);
- stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
- stubCall.call();
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
- }
-#endif
-}
-
void JIT::emit_op_neq(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
@@ -1323,6 +1277,52 @@
#endif // USE(JSVALUE64)
+void JIT::emit_op_loop_hint(Instruction*)
+{
+ // Emit the watchdog timer check:
+ if (m_vm->watchdog.isEnabled())
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress())));
+
+ // Emit the JIT optimization check:
+ if (canBeOptimized())
+ addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
+ AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+}
+
+void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
+{
+ // Emit the slow path of the watchdog timer check:
+ if (m_vm->watchdog.isEnabled()) {
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_handle_watchdog_timer);
+ stubCall.call();
+
+#if ENABLE(DFG_JIT)
+ if (canBeOptimized()) {
+ Jump doOptimize = branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
+ AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
+ doOptimize.link(this);
+ } else
+#endif
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
+ }
+
+#if ENABLE(DFG_JIT)
+ // Emit the slow path for the JIT optimization check:
+ if (canBeOptimized()) {
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_optimize);
+ stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
+ stubCall.call();
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
+ }
+#endif
+}
+
void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const int* baseVR, const int* valueVR)
{