Diff
Modified: trunk/Source/_javascript_Core/ChangeLog (240892 => 240893)
--- trunk/Source/_javascript_Core/ChangeLog 2019-02-02 04:05:55 UTC (rev 240892)
+++ trunk/Source/_javascript_Core/ChangeLog 2019-02-02 07:15:47 UTC (rev 240893)
@@ -1,3 +1,91 @@
+2019-02-01 Yusuke Suzuki <ysuz...@apple.com>
+
+ [JSC] Decouple JIT related data from CodeBlock
+ https://bugs.webkit.org/show_bug.cgi?id=194187
+
+ Reviewed by Saam Barati.
+
+ CodeBlock holds bunch of data which is only used after JIT starts compiling it.
+ We have three types of data in CodeBlock.
+
+ 1. The data which is always used. CodeBlock needs to hold it.
+ 2. The data which is touched even in LLInt, but it is only meaningful in JIT tiers. The example is profiling.
+ 3. The data which is used after the JIT compiler starts running for the given CodeBlock.
+
+ This patch decouples (3) from CodeBlock as CodeBlock::JITData. Even if we have bunch of CodeBlocks, only small
+ number of them gets JIT compilation. Always allocating (3) data enlarges the size of CodeBlock, leading to the
+ memory waste. Potentially we can decouple (2) in another data structure, but we first do (3) since (3) is beneficial
+ in both non-JIT and *JIT* modes.
+
+ JITData is created only when JIT compiler wants to use it. So it can be concurrently created and used, so it is guarded
+ by the lock of CodeBlock.
+
+ The size of CodeBlock is reduced from 512 to 352.
+
+ This patch improves memory footprint and gets 1.1% improvement in RAMification.
+
+ Footprint geomean: 36696503 (34.997 MB)
+ Peak Footprint geomean: 38595988 (36.808 MB)
+ Score: 37634263 (35.891 MB)
+
+ Footprint geomean: 37172768 (35.451 MB)
+ Peak Footprint geomean: 38978288 (37.173 MB)
+ Score: 38064824 (36.301 MB)
+
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::~CodeBlock):
+ (JSC::CodeBlock::propagateTransitions):
+ (JSC::CodeBlock::ensureJITDataSlow):
+ (JSC::CodeBlock::finalizeBaselineJITInlineCaches):
+ (JSC::CodeBlock::getICStatusMap):
+ (JSC::CodeBlock::addStubInfo):
+ (JSC::CodeBlock::addJITAddIC):
+ (JSC::CodeBlock::addJITMulIC):
+ (JSC::CodeBlock::addJITSubIC):
+ (JSC::CodeBlock::addJITNegIC):
+ (JSC::CodeBlock::findStubInfo):
+ (JSC::CodeBlock::addByValInfo):
+ (JSC::CodeBlock::addCallLinkInfo):
+ (JSC::CodeBlock::getCallLinkInfoForBytecodeIndex):
+ (JSC::CodeBlock::addRareCaseProfile):
+ (JSC::CodeBlock::rareCaseProfileForBytecodeOffset):
+ (JSC::CodeBlock::rareCaseProfileCountForBytecodeOffset):
+ (JSC::CodeBlock::resetJITData):
+ (JSC::CodeBlock::stronglyVisitStrongReferences):
+ (JSC::CodeBlock::shrinkToFit):
+ (JSC::CodeBlock::linkIncomingCall):
+ (JSC::CodeBlock::linkIncomingPolymorphicCall):
+ (JSC::CodeBlock::unlinkIncomingCalls):
+ (JSC::CodeBlock::updateAllPredictionsAndCountLiveness):
+ (JSC::CodeBlock::dumpValueProfiles):
+ (JSC::CodeBlock::setPCToCodeOriginMap):
+ (JSC::CodeBlock::findPC):
+ (JSC::CodeBlock::dumpMathICStats):
+ * bytecode/CodeBlock.h:
+ (JSC::CodeBlock::ensureJITData):
+ (JSC::CodeBlock::setJITCodeMap):
+ (JSC::CodeBlock::jitCodeMap):
+ (JSC::CodeBlock::likelyToTakeSlowCase):
+ (JSC::CodeBlock::couldTakeSlowCase):
+ (JSC::CodeBlock::lazyOperandValueProfiles):
+ (JSC::CodeBlock::stubInfoBegin): Deleted.
+ (JSC::CodeBlock::stubInfoEnd): Deleted.
+ (JSC::CodeBlock::callLinkInfosBegin): Deleted.
+ (JSC::CodeBlock::callLinkInfosEnd): Deleted.
+ (JSC::CodeBlock::jitCodeMap const): Deleted.
+ (JSC::CodeBlock::numberOfRareCaseProfiles): Deleted.
+ * bytecode/MethodOfGettingAValueProfile.cpp:
+ (JSC::MethodOfGettingAValueProfile::emitReportValue const):
+ (JSC::MethodOfGettingAValueProfile::reportValue):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
+ * jit/JIT.h:
+ * jit/JITOperations.cpp:
+ (JSC::tryGetByValOptimize):
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::privateCompileGetByVal):
+ (JSC::JIT::privateCompilePutByVal):
+
2018-12-16 Darin Adler <da...@apple.com>
Convert additional String::format clients to alternative approaches
Modified: trunk/Source/_javascript_Core/bytecode/CodeBlock.cpp (240892 => 240893)
--- trunk/Source/_javascript_Core/bytecode/CodeBlock.cpp 2019-02-02 04:05:55 UTC (rev 240892)
+++ trunk/Source/_javascript_Core/bytecode/CodeBlock.cpp 2019-02-02 07:15:47 UTC (rev 240893)
@@ -861,10 +861,11 @@
// destructors.
#if ENABLE(JIT)
- for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
- StructureStubInfo* stub = *iter;
- stub->aboutToDie();
- stub->deref();
+ if (auto* jitData = m_jitData.get()) {
+ for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
+ stubInfo->aboutToDie();
+ stubInfo->deref();
+ }
}
#endif // ENABLE(JIT)
}
@@ -1112,8 +1113,10 @@
#if ENABLE(JIT)
if (JITCode::isJIT(jitType())) {
- for (auto iter = m_stubInfos.begin(); !!iter; ++iter)
- (*iter)->propagateTransitions(visitor);
+ if (auto* jitData = m_jitData.get()) {
+ for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
+ stubInfo->propagateTransitions(visitor);
+ }
}
#endif // ENABLE(JIT)
@@ -1350,18 +1353,25 @@
});
}
+#if ENABLE(JIT)
+CodeBlock::JITData& CodeBlock::ensureJITDataSlow(const ConcurrentJSLocker&)
+{
+ ASSERT(!m_jitData);
+ m_jitData = std::make_unique<JITData>();
+ return *m_jitData;
+}
+
void CodeBlock::finalizeBaselineJITInlineCaches()
{
-#if ENABLE(JIT)
- for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
- (*iter)->visitWeak(*vm());
+ if (auto* jitData = m_jitData.get()) {
+ for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
+ callLinkInfo->visitWeak(*vm());
- for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
- StructureStubInfo& stubInfo = **iter;
- stubInfo.visitWeakReferences(this);
+ for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
+ stubInfo->visitWeakReferences(this);
}
+}
#endif
-}
void CodeBlock::finalizeUnconditionally(VM&)
{
@@ -1394,12 +1404,14 @@
{
#if ENABLE(JIT)
if (JITCode::isJIT(jitType())) {
- for (StructureStubInfo* stubInfo : m_stubInfos)
- result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
- for (CallLinkInfo* callLinkInfo : m_callLinkInfos)
- result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
- for (ByValInfo* byValInfo : m_byValInfos)
- result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
+ if (auto* jitData = m_jitData.get()) {
+ for (StructureStubInfo* stubInfo : jitData->m_stubInfos)
+ result.add(stubInfo->codeOrigin, ICStatus()).iterator->value.stubInfo = stubInfo;
+ for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos)
+ result.add(callLinkInfo->codeOrigin(), ICStatus()).iterator->value.callLinkInfo = callLinkInfo;
+ for (ByValInfo* byValInfo : jitData->m_byValInfos)
+ result.add(CodeOrigin(byValInfo->bytecodeIndex), ICStatus()).iterator->value.byValInfo = byValInfo;
+ }
#if ENABLE(DFG_JIT)
if (JITCode::isOptimizingJIT(jitType())) {
DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
@@ -1429,34 +1441,41 @@
StructureStubInfo* CodeBlock::addStubInfo(AccessType accessType)
{
ConcurrentJSLocker locker(m_lock);
- return m_stubInfos.add(accessType);
+ return ensureJITData(locker).m_stubInfos.add(accessType);
}
JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction)
{
- return m_addICs.add(arithProfile, instruction);
+ ConcurrentJSLocker locker(m_lock);
+ return ensureJITData(locker).m_addICs.add(arithProfile, instruction);
}
JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction)
{
- return m_mulICs.add(arithProfile, instruction);
+ ConcurrentJSLocker locker(m_lock);
+ return ensureJITData(locker).m_mulICs.add(arithProfile, instruction);
}
JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction)
{
- return m_subICs.add(arithProfile, instruction);
+ ConcurrentJSLocker locker(m_lock);
+ return ensureJITData(locker).m_subICs.add(arithProfile, instruction);
}
JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const Instruction* instruction)
{
- return m_negICs.add(arithProfile, instruction);
+ ConcurrentJSLocker locker(m_lock);
+ return ensureJITData(locker).m_negICs.add(arithProfile, instruction);
}
StructureStubInfo* CodeBlock::findStubInfo(CodeOrigin codeOrigin)
{
- for (StructureStubInfo* stubInfo : m_stubInfos) {
- if (stubInfo->codeOrigin == codeOrigin)
- return stubInfo;
+ ConcurrentJSLocker locker(m_lock);
+ if (auto* jitData = m_jitData.get()) {
+ for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
+ if (stubInfo->codeOrigin == codeOrigin)
+ return stubInfo;
+ }
}
return nullptr;
}
@@ -1464,40 +1483,70 @@
ByValInfo* CodeBlock::addByValInfo()
{
ConcurrentJSLocker locker(m_lock);
- return m_byValInfos.add();
+ return ensureJITData(locker).m_byValInfos.add();
}
CallLinkInfo* CodeBlock::addCallLinkInfo()
{
ConcurrentJSLocker locker(m_lock);
- return m_callLinkInfos.add();
+ return ensureJITData(locker).m_callLinkInfos.add();
}
CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
{
- for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
- if ((*iter)->codeOrigin() == CodeOrigin(index))
- return *iter;
+ ConcurrentJSLocker locker(m_lock);
+ if (auto* jitData = m_jitData.get()) {
+ for (CallLinkInfo* callLinkInfo : jitData->m_callLinkInfos) {
+ if (callLinkInfo->codeOrigin() == CodeOrigin(index))
+ return callLinkInfo;
+ }
}
return nullptr;
}
+RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
+{
+ ConcurrentJSLocker locker(m_lock);
+ auto& jitData = ensureJITData(locker);
+ jitData.m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
+ return &jitData.m_rareCaseProfiles.last();
+}
+
+RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset)
+{
+ if (auto* jitData = m_jitData.get()) {
+ return tryBinarySearch<RareCaseProfile, int>(
+ jitData->m_rareCaseProfiles, jitData->m_rareCaseProfiles.size(), bytecodeOffset,
+ getRareCaseProfileBytecodeOffset);
+ }
+ return nullptr;
+}
+
+unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset)
+{
+ RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(locker, bytecodeOffset);
+ if (profile)
+ return profile->m_counter;
+ return 0;
+}
+
void CodeBlock::resetJITData()
{
RELEASE_ASSERT(!JITCode::isJIT(jitType()));
ConcurrentJSLocker locker(m_lock);
- // We can clear these because no other thread will have references to any stub infos, call
- // link infos, or by val infos if we don't have JIT code. Attempts to query these data
- // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
- // don't have JIT code.
- m_stubInfos.clear();
- m_callLinkInfos.clear();
- m_byValInfos.clear();
-
- // We can clear this because the DFG's queries to these data structures are guarded by whether
- // there is JIT code.
- m_rareCaseProfiles.clear();
+ if (auto* jitData = m_jitData.get()) {
+ // We can clear these because no other thread will have references to any stub infos, call
+ // link infos, or by val infos if we don't have JIT code. Attempts to query these data
+ // structures using the concurrent API (getICStatusMap and friends) will return nothing if we
+ // don't have JIT code.
+ jitData->m_stubInfos.clear();
+ jitData->m_callLinkInfos.clear();
+ jitData->m_byValInfos.clear();
+ // We can clear this because the DFG's queries to these data structures are guarded by whether
+ // there is JIT code.
+ jitData->m_rareCaseProfiles.clear();
+ }
}
#endif
@@ -1540,8 +1589,10 @@
});
#if ENABLE(JIT)
- for (ByValInfo* byValInfo : m_byValInfos)
- visitor.append(byValInfo->cachedSymbol);
+ if (auto* jitData = m_jitData.get()) {
+ for (ByValInfo* byValInfo : jitData->m_byValInfos)
+ visitor.append(byValInfo->cachedSymbol);
+ }
#endif
#if ENABLE(DFG_JIT)
@@ -1786,7 +1837,10 @@
{
ConcurrentJSLocker locker(m_lock);
- m_rareCaseProfiles.shrinkToFit();
+#if ENABLE(JIT)
+ if (auto* jitData = m_jitData.get())
+ jitData->m_rareCaseProfiles.shrinkToFit();
+#endif
if (shrinkMode == EarlyShrink) {
m_constantRegisters.shrinkToFit();
@@ -1803,13 +1857,17 @@
void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
{
noticeIncomingCall(callerFrame);
- m_incomingCalls.push(incoming);
+ ConcurrentJSLocker locker(m_lock);
+ ensureJITData(locker).m_incomingCalls.push(incoming);
}
void CodeBlock::linkIncomingPolymorphicCall(ExecState* callerFrame, PolymorphicCallNode* incoming)
{
noticeIncomingCall(callerFrame);
- m_incomingPolymorphicCalls.push(incoming);
+ {
+ ConcurrentJSLocker locker(m_lock);
+ ensureJITData(locker).m_incomingPolymorphicCalls.push(incoming);
+ }
}
#endif // ENABLE(JIT)
@@ -1818,10 +1876,17 @@
while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
m_incomingLLIntCalls.begin()->unlink();
#if ENABLE(JIT)
- while (m_incomingCalls.begin() != m_incomingCalls.end())
- m_incomingCalls.begin()->unlink(*vm());
- while (m_incomingPolymorphicCalls.begin() != m_incomingPolymorphicCalls.end())
- m_incomingPolymorphicCalls.begin()->unlink(*vm());
+ JITData* jitData = nullptr;
+ {
+ ConcurrentJSLocker locker(m_lock);
+ jitData = m_jitData.get();
+ }
+ if (jitData) {
+ while (jitData->m_incomingCalls.begin() != jitData->m_incomingCalls.end())
+ jitData->m_incomingCalls.begin()->unlink(*vm());
+ while (jitData->m_incomingPolymorphicCalls.begin() != jitData->m_incomingPolymorphicCalls.end())
+ jitData->m_incomingPolymorphicCalls.begin()->unlink(*vm());
+ }
#endif // ENABLE(JIT)
}
@@ -2573,7 +2638,7 @@
}
#if ENABLE(DFG_JIT)
- m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
+ lazyOperandValueProfiles(locker).computeUpdatedPredictions(locker);
#endif
}
@@ -2730,9 +2795,9 @@
dataLogF("\n");
});
dataLog("RareCaseProfile for ", *this, ":\n");
- for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
- RareCaseProfile* profile = &m_rareCaseProfiles[i];
- dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+ if (auto* jitData = m_jitData.get()) {
+ for (RareCaseProfile* profile : jitData->m_rareCaseProfiles)
+ dataLogF(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
}
}
#endif // ENABLE(VERBOSE_VALUE_PROFILE)
@@ -2939,27 +3004,6 @@
return m_instructions->at(offset + target).ptr();
}
-RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset)
-{
- m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
- return &m_rareCaseProfiles.last();
-}
-
-RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
-{
- return tryBinarySearch<RareCaseProfile, int>(
- m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
- getRareCaseProfileBytecodeOffset);
-}
-
-unsigned CodeBlock::rareCaseProfileCountForBytecodeOffset(int bytecodeOffset)
-{
- RareCaseProfile* profile = rareCaseProfileForBytecodeOffset(bytecodeOffset);
- if (profile)
- return profile->m_counter;
- return 0;
-}
-
ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset)
{
return arithProfileForPC(m_instructions->at(bytecodeOffset).ptr());
@@ -3080,20 +3124,25 @@
#if ENABLE(JIT)
void CodeBlock::setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&& map)
{
- m_pcToCodeOriginMap = WTFMove(map);
+ ConcurrentJSLocker locker(m_lock);
+ ensureJITData(locker).m_pcToCodeOriginMap = WTFMove(map);
}
Optional<CodeOrigin> CodeBlock::findPC(void* pc)
{
- if (m_pcToCodeOriginMap) {
- if (Optional<CodeOrigin> codeOrigin = m_pcToCodeOriginMap->findPC(pc))
- return codeOrigin;
- }
+ {
+ ConcurrentJSLocker locker(m_lock);
+ if (auto* jitData = m_jitData.get()) {
+ if (jitData->m_pcToCodeOriginMap) {
+ if (Optional<CodeOrigin> codeOrigin = jitData->m_pcToCodeOriginMap->findPC(pc))
+ return codeOrigin;
+ }
- for (auto iter = m_stubInfos.begin(); !!iter; ++iter) {
- StructureStubInfo* stub = *iter;
- if (stub->containsPC(pc))
- return Optional<CodeOrigin>(stub->codeOrigin);
+ for (StructureStubInfo* stubInfo : jitData->m_stubInfos) {
+ if (stubInfo->containsPC(pc))
+ return Optional<CodeOrigin>(stubInfo->codeOrigin);
+ }
+ }
}
if (Optional<CodeOrigin> codeOrigin = m_jitCode->findPC(this, pc))
@@ -3195,24 +3244,26 @@
double totalSubSize = 0.0;
auto countICs = [&] (CodeBlock* codeBlock) {
- for (JITAddIC* addIC : codeBlock->m_addICs) {
- numAdds++;
- totalAddSize += addIC->codeSize();
- }
+ if (auto* jitData = codeBlock->m_jitData.get()) {
+ for (JITAddIC* addIC : jitData->m_addICs) {
+ numAdds++;
+ totalAddSize += addIC->codeSize();
+ }
- for (JITMulIC* mulIC : codeBlock->m_mulICs) {
- numMuls++;
- totalMulSize += mulIC->codeSize();
- }
+ for (JITMulIC* mulIC : jitData->m_mulICs) {
+ numMuls++;
+ totalMulSize += mulIC->codeSize();
+ }
- for (JITNegIC* negIC : codeBlock->m_negICs) {
- numNegs++;
- totalNegSize += negIC->codeSize();
- }
+ for (JITNegIC* negIC : jitData->m_negICs) {
+ numNegs++;
+ totalNegSize += negIC->codeSize();
+ }
- for (JITSubIC* subIC : codeBlock->m_subICs) {
- numSubs++;
- totalSubSize += subIC->codeSize();
+ for (JITSubIC* subIC : jitData->m_subICs) {
+ numSubs++;
+ totalSubSize += subIC->codeSize();
+ }
}
};
heap()->forEachCodeBlock(countICs);
Modified: trunk/Source/_javascript_Core/bytecode/CodeBlock.h (240892 => 240893)
--- trunk/Source/_javascript_Core/bytecode/CodeBlock.h 2019-02-02 04:05:55 UTC (rev 240892)
+++ trunk/Source/_javascript_Core/bytecode/CodeBlock.h 2019-02-02 07:15:47 UTC (rev 240893)
@@ -249,6 +249,31 @@
void getICStatusMap(ICStatusMap& result);
#if ENABLE(JIT)
+ struct JITData {
+ WTF_MAKE_STRUCT_FAST_ALLOCATED;
+
+ Bag<StructureStubInfo> m_stubInfos;
+ Bag<JITAddIC> m_addICs;
+ Bag<JITMulIC> m_mulICs;
+ Bag<JITNegIC> m_negICs;
+ Bag<JITSubIC> m_subICs;
+ Bag<ByValInfo> m_byValInfos;
+ Bag<CallLinkInfo> m_callLinkInfos;
+ SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
+ SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
+ SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
+ std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
+ JITCodeMap m_jitCodeMap;
+ };
+
+ JITData& ensureJITData(const ConcurrentJSLocker& locker)
+ {
+ if (LIKELY(m_jitData))
+ return *m_jitData;
+ return ensureJITDataSlow(locker);
+ }
+ JITData& ensureJITDataSlow(const ConcurrentJSLocker&);
+
JITAddIC* addJITAddIC(ArithProfile*, const Instruction*);
JITMulIC* addJITMulIC(ArithProfile*, const Instruction*);
JITNegIC* addJITNegIC(ArithProfile*, const Instruction*);
@@ -267,8 +292,6 @@
JITSubIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITSubIC(profile, instruction); }
StructureStubInfo* addStubInfo(AccessType);
- auto stubInfoBegin() { return m_stubInfos.begin(); }
- auto stubInfoEnd() { return m_stubInfos.end(); }
// O(n) operation. Use getStubInfoMap() unless you really only intend to get one
// stub info.
@@ -277,8 +300,6 @@
ByValInfo* addByValInfo();
CallLinkInfo* addCallLinkInfo();
- auto callLinkInfosBegin() { return m_callLinkInfos.begin(); }
- auto callLinkInfosEnd() { return m_callLinkInfos.end(); }
// This is a slow function call used primarily for compiling OSR exits in the case
// that there had been inlining. Chances are if you want to use this, you're really
@@ -285,6 +306,42 @@
// looking for a CallLinkInfoMap to amortize the cost of calling this.
CallLinkInfo* getCallLinkInfoForBytecodeIndex(unsigned bytecodeIndex);
+ void setJITCodeMap(JITCodeMap&& jitCodeMap)
+ {
+ ConcurrentJSLocker locker(m_lock);
+ ensureJITData(locker).m_jitCodeMap = WTFMove(jitCodeMap);
+ }
+ const JITCodeMap& jitCodeMap()
+ {
+ ConcurrentJSLocker locker(m_lock);
+ return ensureJITData(locker).m_jitCodeMap;
+ }
+
+ void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
+ Optional<CodeOrigin> findPC(void* pc);
+
+ RareCaseProfile* addRareCaseProfile(int bytecodeOffset);
+ RareCaseProfile* rareCaseProfileForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);
+ unsigned rareCaseProfileCountForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset);
+
+ bool likelyToTakeSlowCase(int bytecodeOffset)
+ {
+ if (!hasBaselineJITProfiling())
+ return false;
+ ConcurrentJSLocker locker(m_lock);
+ unsigned value = rareCaseProfileCountForBytecodeOffset(locker, bytecodeOffset);
+ return value >= Options::likelyToTakeSlowCaseMinimumCount();
+ }
+
+ bool couldTakeSlowCase(int bytecodeOffset)
+ {
+ if (!hasBaselineJITProfiling())
+ return false;
+ ConcurrentJSLocker locker(m_lock);
+ unsigned value = rareCaseProfileCountForBytecodeOffset(locker, bytecodeOffset);
+ return value >= Options::couldTakeSlowCaseMinimumCount();
+ }
+
// We call this when we want to reattempt compiling something with the baseline JIT. Ideally
// the baseline JIT would not add data to CodeBlock, but instead it would put its data into
// a newly created JITCode, which could be thrown away if we bail on JIT compilation. Then we
@@ -302,17 +359,6 @@
void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
-#if ENABLE(JIT)
- void setJITCodeMap(JITCodeMap&& jitCodeMap)
- {
- m_jitCodeMap = WTFMove(jitCodeMap);
- }
- const JITCodeMap& jitCodeMap() const
- {
- return m_jitCodeMap;
- }
-#endif
-
const Instruction* outOfLineJumpTarget(const Instruction* pc);
int outOfLineJumpOffset(const Instruction* pc);
int outOfLineJumpOffset(const InstructionStream::Ref& instruction)
@@ -442,27 +488,6 @@
template<typename Functor> void forEachObjectAllocationProfile(const Functor&);
template<typename Functor> void forEachLLIntCallLinkInfo(const Functor&);
- RareCaseProfile* addRareCaseProfile(int bytecodeOffset);
- unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
- RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset);
- unsigned rareCaseProfileCountForBytecodeOffset(int bytecodeOffset);
-
- bool likelyToTakeSlowCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
- return value >= Options::likelyToTakeSlowCaseMinimumCount();
- }
-
- bool couldTakeSlowCase(int bytecodeOffset)
- {
- if (!hasBaselineJITProfiling())
- return false;
- unsigned value = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);
- return value >= Options::couldTakeSlowCaseMinimumCount();
- }
-
ArithProfile* arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset);
ArithProfile* arithProfileForPC(const Instruction*);
@@ -499,7 +524,7 @@
return codeOrigins()[index.bits()];
}
- CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
+ CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles(const ConcurrentJSLocker&)
{
return m_lazyOperandValueProfiles;
}
@@ -847,11 +872,6 @@
void ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset);
-#if ENABLE(JIT)
- void setPCToCodeOriginMap(std::unique_ptr<PCToCodeOriginMap>&&);
- Optional<CodeOrigin> findPC(void* pc);
-#endif
-
bool hasTailCalls() const { return m_unlinkedCode->hasTailCalls(); }
template<typename Metadata>
@@ -868,8 +888,9 @@
protected:
void finalizeLLIntInlineCaches();
+#if ENABLE(JIT)
void finalizeBaselineJITInlineCaches();
-
+#endif
#if ENABLE(DFG_JIT)
void tallyFrequentExitSites();
#else
@@ -965,17 +986,7 @@
std::unique_ptr<RegisterAtOffsetList> m_calleeSaveRegisters;
#endif
#if ENABLE(JIT)
- Bag<StructureStubInfo> m_stubInfos;
- Bag<JITAddIC> m_addICs;
- Bag<JITMulIC> m_mulICs;
- Bag<JITNegIC> m_negICs;
- Bag<JITSubIC> m_subICs;
- Bag<ByValInfo> m_byValInfos;
- Bag<CallLinkInfo> m_callLinkInfos;
- SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo>> m_incomingCalls;
- SentinelLinkedList<PolymorphicCallNode, BasicRawSentinelNode<PolymorphicCallNode>> m_incomingPolymorphicCalls;
- std::unique_ptr<PCToCodeOriginMap> m_pcToCodeOriginMap;
- JITCodeMap m_jitCodeMap;
+ std::unique_ptr<JITData> m_jitData;
#endif
#if ENABLE(DFG_JIT)
// This is relevant to non-DFG code blocks that serve as the profiled code block
@@ -984,7 +995,6 @@
#endif
RefCountedArray<ValueProfile> m_argumentValueProfiles;
Vector<std::unique_ptr<ValueProfileAndOperandBuffer>> m_catchProfiles;
- SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
// Constant Pool
COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
Modified: trunk/Source/_javascript_Core/bytecode/MethodOfGettingAValueProfile.cpp (240892 => 240893)
--- trunk/Source/_javascript_Core/bytecode/MethodOfGettingAValueProfile.cpp 2019-02-02 04:05:55 UTC (rev 240892)
+++ trunk/Source/_javascript_Core/bytecode/MethodOfGettingAValueProfile.cpp 2019-02-02 07:15:47 UTC (rev 240893)
@@ -61,7 +61,7 @@
ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
LazyOperandValueProfile* profile =
- u.lazyOperand.codeBlock->lazyOperandValueProfiles().add(locker, key);
+ u.lazyOperand.codeBlock->lazyOperandValueProfiles(locker).add(locker, key);
jit.storeValue(regs, profile->specFailBucket(0));
return;
}
@@ -89,7 +89,7 @@
ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
LazyOperandValueProfile* profile =
- u.lazyOperand.codeBlock->lazyOperandValueProfiles().add(locker, key);
+ u.lazyOperand.codeBlock->lazyOperandValueProfiles(locker).add(locker, key);
*profile->specFailBucket(0) = JSValue::encode(value);
return;
}
Modified: trunk/Source/_javascript_Core/dfg/DFGByteCodeParser.cpp (240892 => 240893)
--- trunk/Source/_javascript_Core/dfg/DFGByteCodeParser.cpp 2019-02-02 04:05:55 UTC (rev 240892)
+++ trunk/Source/_javascript_Core/dfg/DFGByteCodeParser.cpp 2019-02-02 07:15:47 UTC (rev 240893)
@@ -6978,7 +6978,7 @@
m_exitProfile.initialize(m_profiledBlock->unlinkedCodeBlock());
ConcurrentJSLocker locker(m_profiledBlock->m_lock);
- m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles());
+ m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles(locker));
// We do this while holding the lock because we want to encourage StructureStubInfo's
// to be potentially added to operations and because the profiled block could be in the
Modified: trunk/Source/_javascript_Core/jit/JIT.h (240892 => 240893)
--- trunk/Source/_javascript_Core/jit/JIT.h 2019-02-02 04:05:55 UTC (rev 240892)
+++ trunk/Source/_javascript_Core/jit/JIT.h 2019-02-02 07:15:47 UTC (rev 240893)
@@ -205,11 +205,11 @@
return JIT(vm, codeBlock, bytecodeOffset).privateCompile(effort);
}
- static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ static void compileGetByVal(const ConcurrentJSLocker& locker, VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
- jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
+ jit.privateCompileGetByVal(locker, byValInfo, returnAddress, arrayMode);
}
static void compileGetByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
@@ -219,18 +219,18 @@
jit.privateCompileGetByValWithCachedId(byValInfo, returnAddress, propertyName);
}
- static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ static void compilePutByVal(const ConcurrentJSLocker& locker, VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
- jit.privateCompilePutByVal<OpPutByVal>(byValInfo, returnAddress, arrayMode);
+ jit.privateCompilePutByVal<OpPutByVal>(locker, byValInfo, returnAddress, arrayMode);
}
- static void compileDirectPutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ static void compileDirectPutByVal(const ConcurrentJSLocker& locker, VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
- jit.privateCompilePutByVal<OpPutByValDirect>(byValInfo, returnAddress, arrayMode);
+ jit.privateCompilePutByVal<OpPutByValDirect>(locker, byValInfo, returnAddress, arrayMode);
}
template<typename Op>
@@ -260,10 +260,10 @@
void privateCompileSlowCases();
CompilationResult privateCompile(JITCompilationEffort);
- void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
+ void privateCompileGetByVal(const ConcurrentJSLocker&, ByValInfo*, ReturnAddressPtr, JITArrayMode);
void privateCompileGetByValWithCachedId(ByValInfo*, ReturnAddressPtr, const Identifier&);
template<typename Op>
- void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
+ void privateCompilePutByVal(const ConcurrentJSLocker&, ByValInfo*, ReturnAddressPtr, JITArrayMode);
template<typename Op>
void privateCompilePutByValWithCachedId(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&);
Modified: trunk/Source/_javascript_Core/jit/JITOperations.cpp (240892 => 240893)
--- trunk/Source/_javascript_Core/jit/JITOperations.cpp 2019-02-02 04:05:55 UTC (rev 240892)
+++ trunk/Source/_javascript_Core/jit/JITOperations.cpp 2019-02-02 07:15:47 UTC (rev 240893)
@@ -737,8 +737,7 @@
CodeBlock* codeBlock = exec->codeBlock();
ConcurrentJSLocker locker(codeBlock->m_lock);
byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
-
- JIT::compilePutByVal(&vm, codeBlock, byValInfo, returnAddress, arrayMode);
+ JIT::compilePutByVal(locker, &vm, codeBlock, byValInfo, returnAddress, arrayMode);
optimizationResult = OptimizationResult::Optimized;
}
}
@@ -822,7 +821,7 @@
ConcurrentJSLocker locker(codeBlock->m_lock);
byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
- JIT::compileDirectPutByVal(&vm, codeBlock, byValInfo, returnAddress, arrayMode);
+ JIT::compileDirectPutByVal(locker, &vm, codeBlock, byValInfo, returnAddress, arrayMode);
optimizationResult = OptimizationResult::Optimized;
}
}
@@ -1889,7 +1888,7 @@
ConcurrentJSLocker locker(codeBlock->m_lock);
byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
- JIT::compileGetByVal(&vm, codeBlock, byValInfo, returnAddress, arrayMode);
+ JIT::compileGetByVal(locker, &vm, codeBlock, byValInfo, returnAddress, arrayMode);
optimizationResult = OptimizationResult::Optimized;
}
}
Modified: trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp (240892 => 240893)
--- trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp 2019-02-02 04:05:55 UTC (rev 240892)
+++ trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp 2019-02-02 07:15:47 UTC (rev 240893)
@@ -1293,7 +1293,7 @@
}
}
-void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+void JIT::privateCompileGetByVal(const ConcurrentJSLocker&, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
@@ -1380,7 +1380,7 @@
}
template<typename Op>
-void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+void JIT::privateCompilePutByVal(const ConcurrentJSLocker&, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr();
auto bytecode = currentInstruction->as<Op>();