Diff
Modified: trunk/Source/_javascript_Core/ChangeLog (192949 => 192950)
--- trunk/Source/_javascript_Core/ChangeLog 2015-12-02 19:15:33 UTC (rev 192949)
+++ trunk/Source/_javascript_Core/ChangeLog 2015-12-02 19:20:12 UTC (rev 192950)
@@ -1,5 +1,42 @@
2015-12-02 Mark Lam <mark....@apple.com>
+ Use the JITAddGenerator snippet in the FTL.
+ https://bugs.webkit.org/show_bug.cgi?id=151519
+
+ Reviewed by Geoffrey Garen.
+
+ One detail about how we're choosing to handle operands to the binary snippets
+ that may be constant: the slow path call to a C++ function still needs the
+ constant operand loaded in a register. To simplify things, we're choosing to
+ always tell LLVM to load the operands into registers even if they may be
+ constant. However, even though a constant operand is preloaded in a register,
+ the snippet generator will not be made aware of it. It will continue to load
+ the constant as an immediate.
+
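To make that convention concrete, here is a minimal sketch (not part of this patch, mirroring the shape of compileValueAdd() further down in this diff): even when the right operand is a constant int32, it is still lowered into a register so the slow-path C++ call can use it, while the SnippetOperand records the constant so the generator emits it as an immediate on the fast path.

    LValue left = lowJSValue(leftChild);   // operands are always materialized in registers
    LValue right = lowJSValue(rightChild); // so the slow path C++ call has them available

    SnippetOperand leftOperand(abstractValue(leftChild).resultType());
    SnippetOperand rightOperand(abstractValue(rightChild).resultType());
    if (rightChild->isInt32Constant())
        rightOperand.setConstInt32(rightChild->asInt32()); // fast path emits this as an immediate,
                                                           // ignoring the preloaded register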
+ * ftl/FTLCompile.cpp:
+ * ftl/FTLCompileBinaryOp.cpp:
+ (JSC::FTL::generateArithSubFastPath):
+ (JSC::FTL::generateValueAddFastPath):
+ - generateValueAddFastPath() is currently an exact copy of generateArithSubFastPath(),
+ except that it uses JITAddGenerator instead of JITSubGenerator. When we add
+ support for JITMulGenerator later, the code will start to vary. We'll refactor
+ these functions then, when we have more insight into what needs to vary between
+ the implementations (a purely illustrative sketch of one possible shared helper
+ follows this entry).
+
+ * ftl/FTLCompileBinaryOp.h:
+ * ftl/FTLInlineCacheDescriptor.h:
+ * ftl/FTLInlineCacheDescriptorInlines.h:
+ (JSC::FTL::ValueAddDescriptor::ValueAddDescriptor):
+ (JSC::FTL::ValueAddDescriptor::icSize):
+ * ftl/FTLInlineCacheSize.cpp:
+ (JSC::FTL::sizeOfValueAdd):
+ * ftl/FTLInlineCacheSize.h:
+ * ftl/FTLLowerDFGToLLVM.cpp:
+ (JSC::FTL::DFG::LowerDFGToLLVM::lower):
+ (JSC::FTL::DFG::LowerDFGToLLVM::compileValueAdd):
+
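Purely as an illustration of the refactoring alluded to above, and not part of this patch, the two fast-path generators could eventually collapse into one helper parameterized on the snippet generator type. The name generateBinaryOpFastPath below is hypothetical; the body mirrors generateValueAddFastPath() from this diff.

    // Hypothetical shared helper: today the Add and Sub variants differ only in
    // which JIT*Generator they instantiate.
    template<typename SnippetGenerator>
    static void generateBinaryOpFastPath(BinaryOpDescriptor& ic, CCallHelpers& jit,
        GPRReg result, GPRReg left, GPRReg right, RegisterSet usedRegisters,
        CCallHelpers::Jump& done, CCallHelpers::Jump& slowPathStart)
    {
        ScratchRegisterAllocator allocator(usedRegisters);
        BinarySnippetRegisterContext context(allocator, result, left, right);

        GPRReg scratchGPR = allocator.allocateScratchGPR();
        FPRReg leftFPR = allocator.allocateScratchFPR();
        FPRReg rightFPR = allocator.allocateScratchFPR();

        SnippetGenerator gen(ic.leftOperand(), ic.rightOperand(), JSValueRegs(result),
            JSValueRegs(left), JSValueRegs(right), leftFPR, rightFPR, scratchGPR, InvalidFPRReg);

        auto bytesPushed = allocator.preserveReusedRegistersByPushing(
            jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);

        context.initializeRegisters(jit);
        gen.generateFastPath(jit);
        ASSERT(gen.didEmitFastPath());

        // Fast path done: restore registers and jump past the IC.
        gen.endJumpList().link(&jit);
        context.restoreRegisters(jit);
        allocator.restoreReusedRegistersByPopping(jit, bytesPushed,
            ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
        done = jit.jump();

        // Slow path: restore registers and hand off to the out-of-line call.
        gen.slowPathJumpList().link(&jit);
        context.restoreRegisters(jit);
        allocator.restoreReusedRegistersByPopping(jit, bytesPushed,
            ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
        slowPathStart = jit.jump();
    }

generateValueAddFastPath() and generateArithSubFastPath() would then reduce to one-line wrappers instantiating this helper with JITAddGenerator and JITSubGenerator respectively.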
+2015-12-02 Mark Lam <mark....@apple.com>
+
Teach DFG that ArithSub can now clobber the heap (and other things).
https://bugs.webkit.org/show_bug.cgi?id=151733
Modified: trunk/Source/_javascript_Core/ftl/FTLCompile.cpp (192949 => 192950)
--- trunk/Source/_javascript_Core/ftl/FTLCompile.cpp 2015-12-02 19:15:33 UTC (rev 192949)
+++ trunk/Source/_javascript_Core/ftl/FTLCompile.cpp 2015-12-02 19:20:12 UTC (rev 192950)
@@ -340,6 +340,9 @@
case ArithSub:
generateArithSubFastPath(ic, fastPathJIT, result, left, right, usedRegisters, done, slowPathStart);
break;
+ case ValueAdd:
+ generateValueAddFastPath(ic, fastPathJIT, result, left, right, usedRegisters, done, slowPathStart);
+ break;
default:
RELEASE_ASSERT_NOT_REACHED();
}
Modified: trunk/Source/_javascript_Core/ftl/FTLCompileBinaryOp.cpp (192949 => 192950)
--- trunk/Source/_javascript_Core/ftl/FTLCompileBinaryOp.cpp 2015-12-02 19:15:33 UTC (rev 192949)
+++ trunk/Source/_javascript_Core/ftl/FTLCompileBinaryOp.cpp 2015-12-02 19:20:12 UTC (rev 192950)
@@ -31,6 +31,7 @@
#include "DFGNodeType.h"
#include "FTLInlineCacheDescriptor.h"
#include "GPRInfo.h"
+#include "JITAddGenerator.h"
#include "JITSubGenerator.h"
#include "ScratchRegisterAllocator.h"
@@ -188,6 +189,43 @@
slowPathStart = jit.jump();
}
+void generateValueAddFastPath(BinaryOpDescriptor& ic, CCallHelpers& jit,
+ GPRReg result, GPRReg left, GPRReg right, RegisterSet usedRegisters,
+ CCallHelpers::Jump& done, CCallHelpers::Jump& slowPathStart)
+{
+ ASSERT(ic.nodeType() == ValueAdd);
+ ScratchRegisterAllocator allocator(usedRegisters);
+
+ BinarySnippetRegisterContext context(allocator, result, left, right);
+
+ GPRReg scratchGPR = allocator.allocateScratchGPR();
+ FPRReg leftFPR = allocator.allocateScratchFPR();
+ FPRReg rightFPR = allocator.allocateScratchFPR();
+ FPRReg scratchFPR = InvalidFPRReg;
+
+ JITAddGenerator gen(ic.leftOperand(), ic.rightOperand(), JSValueRegs(result),
+ JSValueRegs(left), JSValueRegs(right), leftFPR, rightFPR, scratchGPR, scratchFPR);
+
+ auto numberOfBytesUsedToPreserveReusedRegisters =
+ allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);
+
+ context.initializeRegisters(jit);
+ gen.generateFastPath(jit);
+
+ ASSERT(gen.didEmitFastPath());
+ gen.endJumpList().link(&jit);
+ context.restoreRegisters(jit);
+ allocator.restoreReusedRegistersByPopping(jit, numberOfBytesUsedToPreserveReusedRegisters,
+ ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+ done = jit.jump();
+
+ gen.slowPathJumpList().link(&jit);
+ context.restoreRegisters(jit);
+ allocator.restoreReusedRegistersByPopping(jit, numberOfBytesUsedToPreserveReusedRegisters,
+ ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
+ slowPathStart = jit.jump();
+}
+
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
Modified: trunk/Source/_javascript_Core/ftl/FTLCompileBinaryOp.h (192949 => 192950)
--- trunk/Source/_javascript_Core/ftl/FTLCompileBinaryOp.h 2015-12-02 19:15:33 UTC (rev 192949)
+++ trunk/Source/_javascript_Core/ftl/FTLCompileBinaryOp.h 2015-12-02 19:20:12 UTC (rev 192950)
@@ -39,6 +39,10 @@
GPRReg result, GPRReg left, GPRReg right, RegisterSet usedRegisters,
CCallHelpers::Jump& done, CCallHelpers::Jump& slowPathStart);
+void generateValueAddFastPath(BinaryOpDescriptor&, CCallHelpers&,
+ GPRReg result, GPRReg left, GPRReg right, RegisterSet usedRegisters,
+ CCallHelpers::Jump& done, CCallHelpers::Jump& slowPathStart);
+
} // namespace FTL
} // namespace JSC
Modified: trunk/Source/_javascript_Core/ftl/FTLInlineCacheDescriptor.h (192949 => 192950)
--- trunk/Source/_javascript_Core/ftl/FTLInlineCacheDescriptor.h 2015-12-02 19:15:33 UTC (rev 192949)
+++ trunk/Source/_javascript_Core/ftl/FTLInlineCacheDescriptor.h 2015-12-02 19:20:12 UTC (rev 192950)
@@ -171,6 +171,12 @@
static size_t icSize();
};
+class ValueAddDescriptor : public BinaryOpDescriptor {
+public:
+ ValueAddDescriptor(unsigned stackmapID, CodeOrigin, const SnippetOperand& leftOperand, const SnippetOperand& rightOperand);
+ static size_t icSize();
+};
+
// You can create a lazy slow path call in lowerDFGToLLVM by doing:
// m_ftlState.lazySlowPaths.append(
// LazySlowPathDescriptor(
Modified: trunk/Source/_javascript_Core/ftl/FTLInlineCacheDescriptorInlines.h (192949 => 192950)
--- trunk/Source/_javascript_Core/ftl/FTLInlineCacheDescriptorInlines.h 2015-12-02 19:15:33 UTC (rev 192949)
+++ trunk/Source/_javascript_Core/ftl/FTLInlineCacheDescriptorInlines.h 2015-12-02 19:20:12 UTC (rev 192950)
@@ -47,6 +47,18 @@
return sizeOfArithSub();
}
+ValueAddDescriptor::ValueAddDescriptor(unsigned stackmapID, CodeOrigin codeOrigin,
+ const SnippetOperand& leftOperand, const SnippetOperand& rightOperand)
+ : BinaryOpDescriptor(DFG::ValueAdd, stackmapID, codeOrigin, icSize(),
+ "ValueAdd", "ValueAdd IC fast path", DFG::operationValueAdd, leftOperand, rightOperand)
+{
+}
+
+size_t ValueAddDescriptor::icSize()
+{
+ return sizeOfValueAdd();
+}
+
} } // namespace JSC::FTL
#endif // ENABLE(FTL_JIT)
Modified: trunk/Source/_javascript_Core/ftl/FTLInlineCacheSize.cpp (192949 => 192950)
--- trunk/Source/_javascript_Core/ftl/FTLInlineCacheSize.cpp 2015-12-02 19:15:33 UTC (rev 192949)
+++ trunk/Source/_javascript_Core/ftl/FTLInlineCacheSize.cpp 2015-12-02 19:20:12 UTC (rev 192950)
@@ -145,6 +145,23 @@
#endif
}
+size_t sizeOfValueAdd()
+{
+#if CPU(ARM64)
+#ifdef NDEBUG
+ return 180; // ARM64 release.
+#else
+ return 276; // ARM64 debug.
+#endif
+#else // CPU(X86_64)
+#ifdef NDEBUG
+ return 199; // X86_64 release.
+#else
+ return 286; // X86_64 debug.
+#endif
+#endif
+}
+
#if ENABLE(MASM_PROBE)
size_t sizeOfProbe()
{
Modified: trunk/Source/_javascript_Core/ftl/FTLInlineCacheSize.h (192949 => 192950)
--- trunk/Source/_javascript_Core/ftl/FTLInlineCacheSize.h 2015-12-02 19:15:33 UTC (rev 192949)
+++ trunk/Source/_javascript_Core/ftl/FTLInlineCacheSize.h 2015-12-02 19:20:12 UTC (rev 192950)
@@ -47,6 +47,7 @@
size_t sizeOfConstructForwardVarargs();
size_t sizeOfIn();
size_t sizeOfArithSub();
+size_t sizeOfValueAdd();
#if ENABLE(MASM_PROBE)
size_t sizeOfProbe();
#endif
Modified: trunk/Source/_javascript_Core/ftl/FTLLowerDFGToLLVM.cpp (192949 => 192950)
--- trunk/Source/_javascript_Core/ftl/FTLLowerDFGToLLVM.cpp 2015-12-02 19:15:33 UTC (rev 192949)
+++ trunk/Source/_javascript_Core/ftl/FTLLowerDFGToLLVM.cpp 2015-12-02 19:20:12 UTC (rev 192950)
@@ -234,7 +234,8 @@
}
case ArithSub:
case GetById:
- case GetByIdFlush: {
+ case GetByIdFlush:
+ case ValueAdd: {
// We may have to flush one thing for GetByIds/ArithSubs when the base and result or the left/right and the result
// are assigned the same register. For a more comprehensive overview, look at the comment in FTLCompile.cpp
if (node->op() == ArithSub && node->binaryUseKind() != UntypedUse)
@@ -243,8 +244,8 @@
HandlerInfo* exceptionHandler;
bool willCatchException = m_graph.willCatchExceptionInMachineFrame(node->origin.forExit, opCatchOrigin, exceptionHandler);
if (willCatchException) {
- static const size_t numberOfGetByIdOrSubSpills = 1;
- maxNumberOfCatchSpills = std::max(maxNumberOfCatchSpills, numberOfGetByIdOrSubSpills);
+ static const size_t numberOfGetByIdOrBinaryOpSpills = 1;
+ maxNumberOfCatchSpills = std::max(maxNumberOfCatchSpills, numberOfGetByIdOrBinaryOpSpills);
}
break;
}
@@ -1484,15 +1485,56 @@
void compileValueAdd()
{
- J_JITOperation_EJJ operation;
- if (!(provenType(m_node->child1()) & SpecFullNumber)
- && !(provenType(m_node->child2()) & SpecFullNumber))
- operation = operationValueAddNotNumber;
- else
- operation = operationValueAdd;
- setJSValue(vmCall(
- m_out.int64, m_out.operation(operation), m_callFrame,
- lowJSValue(m_node->child1()), lowJSValue(m_node->child2())));
+ auto leftChild = m_node->child1();
+ auto rightChild = m_node->child2();
+
+ if (!(provenType(leftChild) & SpecFullNumber) || !(provenType(rightChild) & SpecFullNumber)) {
+ setJSValue(vmCall(m_out.int64, m_out.operation(operationValueAddNotNumber), m_callFrame,
+ lowJSValue(leftChild), lowJSValue(rightChild)));
+ return;
+ }
+
+ unsigned stackmapID = m_stackmapIDs++;
+
+ if (Options::verboseCompilation())
+ dataLog(" Emitting ValueAdd patchpoint with stackmap #", stackmapID, "\n");
+
+#if FTL_USES_B3
+ CRASH();
+#else
+ LValue left = lowJSValue(leftChild);
+ LValue right = lowJSValue(rightChild);
+
+ SnippetOperand leftOperand(abstractValue(leftChild).resultType());
+ SnippetOperand rightOperand(abstractValue(rightChild).resultType());
+
+ // The DFG does not always fold the sum of 2 constant int operands together.
+ // Because the snippet does not support both operands being constant, if the left
+ // operand is already a constant, we'll just pretend the right operand is not.
+ if (leftChild->isInt32Constant())
+ leftOperand.setConstInt32(leftChild->asInt32());
+ if (!leftOperand.isConst() && rightChild->isInt32Constant())
+ rightOperand.setConstInt32(rightChild->asInt32());
+
+ // Arguments: id, bytes, target, numArgs, args...
+ StackmapArgumentList arguments;
+ arguments.append(m_out.constInt64(stackmapID));
+ arguments.append(m_out.constInt32(ValueAddDescriptor::icSize()));
+ arguments.append(constNull(m_out.ref8));
+ arguments.append(m_out.constInt32(2));
+ arguments.append(left);
+ arguments.append(right);
+
+ appendOSRExitArgumentsForPatchpointIfWillCatchException(arguments,
+ ExceptionType::BinaryOpGenerator, 3); // left, right, and result show up in the stackmap locations.
+
+ LValue call = m_out.call(m_out.int64, m_out.patchpointInt64Intrinsic(), arguments);
+ setInstructionCallingConvention(call, LLVMAnyRegCallConv);
+
+ m_ftlState.binaryOps.append(ValueAddDescriptor(stackmapID, m_node->origin.semantic, leftOperand, rightOperand));
+
+ setJSValue(call);
+#endif
}
void compileStrCat()