Diff
Modified: trunk/Source/_javascript_Core/ChangeLog (230516 => 230517)
--- trunk/Source/_javascript_Core/ChangeLog 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/_javascript_Core/ChangeLog 2018-04-11 07:44:58 UTC (rev 230517)
@@ -1,3 +1,73 @@
+2018-04-11 Yusuke Suzuki <utatane....@gmail.com>
+
+ [DFG] Remove duplicate 32bit code more
+ https://bugs.webkit.org/show_bug.cgi?id=184236
+
+ Reviewed by Mark Lam.
+
+ Remove duplicate 32bit code more aggressively part 2.
+
+ * _javascript_Core.xcodeproj/project.pbxproj:
+ * dfg/DFGCompareSlowPathGenerator.h: Added.
+ (JSC::DFG::CompareSlowPathGenerator::CompareSlowPathGenerator):
+ Drop boxing part. Use unblessedBooleanResult in DFGSpeculativeJIT side instead.
+
+ * dfg/DFGOperations.cpp:
+ * dfg/DFGOperations.h:
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compileOverridesHasInstance):
+ (JSC::DFG::SpeculativeJIT::compileLoadVarargs):
+ (JSC::DFG::SpeculativeJIT::compileIsObject):
+ (JSC::DFG::SpeculativeJIT::compileCheckNotEmpty):
+ (JSC::DFG::SpeculativeJIT::compilePutByIdFlush):
+ (JSC::DFG::SpeculativeJIT::compilePutById):
+ (JSC::DFG::SpeculativeJIT::compilePutByIdDirect):
+ (JSC::DFG::SpeculativeJIT::compileNewArrayWithSize):
+ (JSC::DFG::SpeculativeJIT::compileMiscStrictEq):
+ (JSC::DFG::SpeculativeJIT::emitInitializeButterfly):
+ (JSC::DFG::SpeculativeJIT::compileAllocateNewArrayWithSize):
+ (JSC::DFG::SpeculativeJIT::compileHasIndexedProperty):
+ (JSC::DFG::SpeculativeJIT::compileGetDirectPname):
+ (JSC::DFG::SpeculativeJIT::compileExtractCatchLocal):
+ (JSC::DFG::SpeculativeJIT::cachedPutById):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompare):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranch):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeCompare): Deleted.
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::selectScratchGPR): Deleted.
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ (JSC::DFG::SpeculativeJIT::cachedPutById): Deleted.
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranch): Deleted.
+ (JSC::DFG::CompareAndBoxBooleanSlowPathGenerator::CompareAndBoxBooleanSlowPathGenerator): Deleted.
+ (JSC::DFG::CompareAndBoxBooleanSlowPathGenerator::generateInternal): Deleted.
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompare): Deleted.
+ (JSC::DFG::SpeculativeJIT::compileMiscStrictEq): Deleted.
+ (JSC::DFG::SpeculativeJIT::emitInitializeButterfly): Deleted.
+ (JSC::DFG::SpeculativeJIT::compileAllocateNewArrayWithSize): Deleted.
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq):
+ (JSC::DFG::SpeculativeJIT::compile):
+ (JSC::DFG::SpeculativeJIT::cachedPutById): Deleted.
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranch): Deleted.
+ (JSC::DFG::CompareAndBoxBooleanSlowPathGenerator::CompareAndBoxBooleanSlowPathGenerator): Deleted.
+ (): Deleted.
+ (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompare): Deleted.
+ (JSC::DFG::SpeculativeJIT::compileMiscStrictEq): Deleted.
+ (JSC::DFG::SpeculativeJIT::emitInitializeButterfly): Deleted.
+ (JSC::DFG::SpeculativeJIT::compileAllocateNewArrayWithSize): Deleted.
+ * ftl/FTLLowerDFGToB3.cpp:
+ (JSC::FTL::DFG::LowerDFGToB3::compileHasIndexedProperty):
+ operationHasIndexedPropertyByInt starts returning unblessed boolean with size_t.
+
+ * jit/AssemblyHelpers.h:
+ (JSC::AssemblyHelpers::loadValue):
+ (JSC::AssemblyHelpers::selectScratchGPR):
+ (JSC::AssemblyHelpers::constructRegisterSet):
+ * jit/RegisterSet.h:
+ (JSC::RegisterSet::setAny):
+ Clean up selectScratchGPR code to pass JSValueRegs.
+
2018-04-10 Caio Lima <ticaiol...@gmail.com>
[ESNext][BigInt] Add support for BigInt in SpeculatedType
Modified: trunk/Source/_javascript_Core/_javascript_Core.xcodeproj/project.pbxproj (230516 => 230517)
--- trunk/Source/_javascript_Core/_javascript_Core.xcodeproj/project.pbxproj 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/_javascript_Core/_javascript_Core.xcodeproj/project.pbxproj 2018-04-11 07:44:58 UTC (rev 230517)
@@ -1706,6 +1706,7 @@
E354622B1B6065D100545386 /* ConstructAbility.h in Headers */ = {isa = PBXBuildFile; fileRef = E354622A1B6065D100545386 /* ConstructAbility.h */; settings = {ATTRIBUTES = (Private, ); }; };
E3555B8A1DAE03A500F36921 /* DOMJITCallDOMGetterSnippet.h in Headers */ = {isa = PBXBuildFile; fileRef = E3555B891DAE03A200F36921 /* DOMJITCallDOMGetterSnippet.h */; settings = {ATTRIBUTES = (Private, ); }; };
E355F3531B7DC85300C50DC5 /* ModuleLoaderPrototype.h in Headers */ = {isa = PBXBuildFile; fileRef = E355F3511B7DC85300C50DC5 /* ModuleLoaderPrototype.h */; };
+ E356D86420728381005AC750 /* DFGCompareSlowPathGenerator.h in Headers */ = {isa = PBXBuildFile; fileRef = E356D86320728381005AC750 /* DFGCompareSlowPathGenerator.h */; };
E35CA1541DBC3A5C00F83516 /* DOMJITHeapRange.h in Headers */ = {isa = PBXBuildFile; fileRef = E35CA1521DBC3A5600F83516 /* DOMJITHeapRange.h */; settings = {ATTRIBUTES = (Private, ); }; };
E35CA1561DBC3A5F00F83516 /* DOMJITAbstractHeap.h in Headers */ = {isa = PBXBuildFile; fileRef = E35CA1501DBC3A5600F83516 /* DOMJITAbstractHeap.h */; settings = {ATTRIBUTES = (Private, ); }; };
E35E03601B7AB43E0073AD2A /* InspectorInstrumentationObject.h in Headers */ = {isa = PBXBuildFile; fileRef = E35E035E1B7AB43E0073AD2A /* InspectorInstrumentationObject.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -4555,6 +4556,7 @@
E3555B891DAE03A200F36921 /* DOMJITCallDOMGetterSnippet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DOMJITCallDOMGetterSnippet.h; sourceTree = "<group>"; };
E355F3501B7DC85300C50DC5 /* ModuleLoaderPrototype.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ModuleLoaderPrototype.cpp; sourceTree = "<group>"; };
E355F3511B7DC85300C50DC5 /* ModuleLoaderPrototype.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ModuleLoaderPrototype.h; sourceTree = "<group>"; };
+ E356D86320728381005AC750 /* DFGCompareSlowPathGenerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCompareSlowPathGenerator.h; path = dfg/DFGCompareSlowPathGenerator.h; sourceTree = "<group>"; };
E35CA14F1DBC3A5600F83516 /* DOMJITAbstractHeap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DOMJITAbstractHeap.cpp; sourceTree = "<group>"; };
E35CA1501DBC3A5600F83516 /* DOMJITAbstractHeap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DOMJITAbstractHeap.h; sourceTree = "<group>"; };
E35CA1511DBC3A5600F83516 /* DOMJITHeapRange.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DOMJITHeapRange.cpp; sourceTree = "<group>"; };
@@ -7160,6 +7162,7 @@
0FC0977E1469EBC400CF2442 /* DFGCommon.h */,
0FEA0A2D170D40BF00BB722C /* DFGCommonData.cpp */,
0FEA0A2E170D40BF00BB722C /* DFGCommonData.h */,
+ E356D86320728381005AC750 /* DFGCompareSlowPathGenerator.h */,
0F38B01317CFE75500B144D3 /* DFGCompilationKey.cpp */,
0F38B01417CFE75500B144D3 /* DFGCompilationKey.h */,
0F38B01517CFE75500B144D3 /* DFGCompilationMode.cpp */,
@@ -8509,6 +8512,7 @@
0F04396E1B03DC0B009598B7 /* DFGCombinedLiveness.h in Headers */,
0F7B294D14C3CD4C007C3DB1 /* DFGCommon.h in Headers */,
0FEA0A32170D40BF00BB722C /* DFGCommonData.h in Headers */,
+ E356D86420728381005AC750 /* DFGCompareSlowPathGenerator.h in Headers */,
0F38B01817CFE75500B144D3 /* DFGCompilationKey.h in Headers */,
0F38B01A17CFE75500B144D3 /* DFGCompilationMode.h in Headers */,
0F3B3A1B153E68F4003ED0FF /* DFGConstantFoldingPhase.h in Headers */,
@@ -9034,7 +9038,6 @@
A503FA1E188E0FB000110F14 /* JSJavaScriptCallFramePrototype.h in Headers */,
7013CA8C1B491A9400CAE613 /* JSJob.h in Headers */,
BC18C4160E16F5CD00B34460 /* JSLexicalEnvironment.h in Headers */,
- 7A9774A8206B82E4008D03D0 /* JSWeakValue.h in Headers */,
BC18C4230E16F5CD00B34460 /* JSLock.h in Headers */,
C25D709C16DE99F400FCA6BC /* JSManagedValue.h in Headers */,
2A4BB7F318A41179008A0FCD /* JSManagedValueInternal.h in Headers */,
@@ -9106,6 +9109,7 @@
A7482B9311671147003B0712 /* JSWeakObjectMapRefPrivate.h in Headers */,
0F0B286B1EB8E6CF000EB5D2 /* JSWeakPrivate.h in Headers */,
709FB8681AE335C60039D069 /* JSWeakSet.h in Headers */,
+ 7A9774A8206B82E4008D03D0 /* JSWeakValue.h in Headers */,
AD5C36EB1F75AD73000BCAAF /* JSWebAssembly.h in Headers */,
AD9E852F1E8A0C7C008DE39E /* JSWebAssemblyCodeBlock.h in Headers */,
79EFD4841EBC045C00F3DFEA /* JSWebAssemblyCodeBlockHeapCellType.h in Headers */,
Added: trunk/Source/_javascript_Core/dfg/DFGCompareSlowPathGenerator.h (0 => 230517)
--- trunk/Source/_javascript_Core/dfg/DFGCompareSlowPathGenerator.h (rev 0)
+++ trunk/Source/_javascript_Core/dfg/DFGCompareSlowPathGenerator.h 2018-04-11 07:44:58 UTC (rev 230517)
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2018 Yusuke Suzuki <utatane....@gmail.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGSlowPathGenerator.h"
+
+namespace JSC { namespace DFG {
+
+template<typename JumpType>
+class CompareSlowPathGenerator
+ : public CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg> {
+public:
+ CompareSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit,
+ S_JITOperation_EJJ function, GPRReg result, JSValueRegs arg1, JSValueRegs arg2)
+ : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>(
+ from, jit, function, NeedToSpill, ExceptionCheckRequirement::CheckNeeded, result)
+ , m_arg1(arg1)
+ , m_arg2(arg2)
+ {
+ }
+
+protected:
+ void generateInternal(SpeculativeJIT* jit) override
+ {
+ this->setUp(jit);
+ this->recordCall(jit->callOperation(this->m_function, this->m_result, m_arg1, m_arg2));
+ this->tearDown(jit);
+ }
+
+private:
+ JSValueRegs m_arg1;
+ JSValueRegs m_arg2;
+};
+
+} } // namespace JSC::DFG
+
+#endif
Modified: trunk/Source/_javascript_Core/dfg/DFGOperations.cpp (230516 => 230517)
--- trunk/Source/_javascript_Core/dfg/DFGOperations.cpp 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/_javascript_Core/dfg/DFGOperations.cpp 2018-04-11 07:44:58 UTC (rev 230517)
@@ -1897,7 +1897,7 @@
return JSValue::encode(jsBoolean(base->hasPropertyGeneric(exec, asString(propertyName)->toIdentifier(exec), PropertySlot::InternalMethodType::GetOwnProperty)));
}
-EncodedJSValue JIT_OPERATION operationHasIndexedPropertyByInt(ExecState* exec, JSCell* baseCell, int32_t subscript, int32_t internalMethodType)
+size_t JIT_OPERATION operationHasIndexedPropertyByInt(ExecState* exec, JSCell* baseCell, int32_t subscript, int32_t internalMethodType)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
@@ -1904,9 +1904,9 @@
JSObject* object = baseCell->toObject(exec, exec->lexicalGlobalObject());
if (UNLIKELY(subscript < 0)) {
// Go the slowest way possible because negative indices don't use indexed storage.
- return JSValue::encode(jsBoolean(object->hasPropertyGeneric(exec, Identifier::from(exec, subscript), static_cast<PropertySlot::InternalMethodType>(internalMethodType))));
+ return object->hasPropertyGeneric(exec, Identifier::from(exec, subscript), static_cast<PropertySlot::InternalMethodType>(internalMethodType));
}
- return JSValue::encode(jsBoolean(object->hasPropertyGeneric(exec, subscript, static_cast<PropertySlot::InternalMethodType>(internalMethodType))));
+ return object->hasPropertyGeneric(exec, subscript, static_cast<PropertySlot::InternalMethodType>(internalMethodType));
}
JSCell* JIT_OPERATION operationGetPropertyEnumerator(ExecState* exec, EncodedJSValue encodedBase)
Modified: trunk/Source/_javascript_Core/dfg/DFGOperations.h (230516 => 230517)
--- trunk/Source/_javascript_Core/dfg/DFGOperations.h 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/_javascript_Core/dfg/DFGOperations.h 2018-04-11 07:44:58 UTC (rev 230517)
@@ -80,7 +80,7 @@
EncodedJSValue JIT_OPERATION operationGetPrototypeOf(ExecState*, EncodedJSValue) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationGetPrototypeOfObject(ExecState*, JSObject*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationHasGenericProperty(ExecState*, EncodedJSValue, JSCell*);
-EncodedJSValue JIT_OPERATION operationHasIndexedPropertyByInt(ExecState*, JSCell*, int32_t, int32_t);
+size_t JIT_OPERATION operationHasIndexedPropertyByInt(ExecState*, JSCell*, int32_t, int32_t);
JSCell* JIT_OPERATION operationGetPropertyEnumerator(ExecState*, EncodedJSValue);
JSCell* JIT_OPERATION operationGetPropertyEnumeratorCell(ExecState*, JSCell*);
JSCell* JIT_OPERATION operationToIndexString(ExecState*, int32_t);
Modified: trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT.cpp (230516 => 230517)
--- trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT.cpp 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/_javascript_Core/dfg/DFGSpeculativeJIT.cpp 2018-04-11 07:44:58 UTC (rev 230517)
@@ -34,6 +34,7 @@
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
#include "DFGCapabilities.h"
+#include "DFGCompareSlowPathGenerator.h"
#include "DFGMayExit.h"
#include "DFGOSRExitFuzz.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
@@ -1182,27 +1183,6 @@
cellResult(resultGPR, node);
}
-bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
-{
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock != UINT_MAX) {
- Node* branchNode = m_block->at(branchIndexInBlock);
-
- ASSERT(node->adjustedRefCount() == 1);
-
- nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
-
- m_indexInBlock = branchIndexInBlock;
- m_currentNode = branchNode;
-
- return true;
- }
-
- nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
-
- return false;
-}
-
bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
unsigned branchIndexInBlock = detectPeepHoleBranch();
@@ -3466,6 +3446,45 @@
jsValueResult(resultRegs, node);
}
+void SpeculativeJIT::compileOverridesHasInstance(Node* node)
+{
+ Node* hasInstanceValueNode = node->child2().node();
+ JSFunction* defaultHasInstanceFunction = jsCast<JSFunction*>(node->cellOperand()->value());
+
+ MacroAssembler::JumpList notDefault;
+ SpeculateCellOperand base(this, node->child1());
+ JSValueOperand hasInstanceValue(this, node->child2());
+ GPRTemporary result(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ // It would be great if constant folding handled automatically the case where we knew the hasInstance function
+ // was a constant. Unfortunately, the folding rule for OverridesHasInstance is in the strength reduction phase
+ // since it relies on OSR information. https://bugs.webkit.org/show_bug.cgi?id=154832
+ if (!hasInstanceValueNode->isCellConstant() || defaultHasInstanceFunction != hasInstanceValueNode->asCell()) {
+ JSValueRegs hasInstanceValueRegs = hasInstanceValue.jsValueRegs();
+#if USE(JSVALUE64)
+ notDefault.append(m_jit.branchPtr(MacroAssembler::NotEqual, hasInstanceValueRegs.gpr(), TrustedImmPtr(node->cellOperand())));
+#else
+ notDefault.append(m_jit.branchIfNotCell(hasInstanceValueRegs));
+ notDefault.append(m_jit.branchPtr(MacroAssembler::NotEqual, hasInstanceValueRegs.payloadGPR(), TrustedImmPtr(node->cellOperand())));
+#endif
+ }
+
+ // Check that base 'ImplementsDefaultHasInstance'.
+ m_jit.test8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance), resultGPR);
+ MacroAssembler::Jump done = m_jit.jump();
+
+ if (!notDefault.empty()) {
+ notDefault.link(&m_jit);
+ m_jit.move(TrustedImm32(1), resultGPR);
+ }
+
+ done.link(&m_jit);
+ unblessedBooleanResult(resultGPR, node);
+}
+
void SpeculativeJIT::compileInstanceOf(Node* node)
{
if (node->child1().useKind() == UntypedUse) {
@@ -6924,6 +6943,59 @@
noResult(node);
}
+void SpeculativeJIT::compileLoadVarargs(Node* node)
+{
+ LoadVarargsData* data = node->loadVarargsData();
+
+ JSValueRegs argumentsRegs;
+ {
+ JSValueOperand arguments(this, node->child1());
+ argumentsRegs = arguments.jsValueRegs();
+ flushRegisters();
+ }
+
+ callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, argumentsRegs, data->offset);
+ m_jit.exceptionCheck();
+
+ lock(GPRInfo::returnValueGPR);
+ {
+ JSValueOperand arguments(this, node->child1());
+ argumentsRegs = arguments.jsValueRegs();
+ flushRegisters();
+ }
+ unlock(GPRInfo::returnValueGPR);
+
+ // FIXME: There is a chance that we will call an effectful length property twice. This is safe
+ // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
+ // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
+ // past the sizing.
+ // https://bugs.webkit.org/show_bug.cgi?id=141448
+
+ GPRReg argCountIncludingThisGPR =
+ JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR, argumentsRegs);
+
+ m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR);
+
+ speculationCheck(
+ VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
+ MacroAssembler::Above,
+ GPRInfo::returnValueGPR,
+ argCountIncludingThisGPR));
+
+ speculationCheck(
+ VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
+ MacroAssembler::Above,
+ argCountIncludingThisGPR,
+ TrustedImm32(data->limit)));
+
+ m_jit.store32(argCountIncludingThisGPR, JITCompiler::payloadFor(data->machineCount));
+
+ callOperation(operationLoadVarargs, data->machineStart.offset(), argumentsRegs, data->offset, GPRInfo::returnValueGPR, data->mandatoryMinimum);
+ m_jit.exceptionCheck();
+
+ noResult(node);
+}
+
void SpeculativeJIT::compileForwardVarargs(Node* node)
{
LoadVarargsData* data = node->loadVarargsData();
@@ -8509,6 +8581,33 @@
noResult(node);
}
+void SpeculativeJIT::compileIsObject(Node* node)
+{
+ JSValueOperand value(this, node->child1());
+#if USE(JSVALUE64)
+ GPRTemporary result(this, Reuse, value);
+#else
+ GPRTemporary result(this, Reuse, value, TagWord);
+#endif
+
+ JSValueRegs valueRegs = value.jsValueRegs();
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
+
+ m_jit.compare8(JITCompiler::AboveOrEqual,
+ JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
+ TrustedImm32(ObjectType),
+ resultGPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ isNotCell.link(&m_jit);
+ m_jit.move(TrustedImm32(0), resultGPR);
+
+ done.link(&m_jit);
+ unblessedBooleanResult(resultGPR, node);
+}
+
void SpeculativeJIT::compileIsObjectOrNull(Node* node)
{
JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
@@ -8667,6 +8766,14 @@
noResult(node);
}
+void SpeculativeJIT::compileCheckNotEmpty(Node* node)
+{
+ JSValueOperand operand(this, node->child1());
+ JSValueRegs regs = operand.jsValueRegs();
+ speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branchIfEmpty(regs));
+ noResult(node);
+}
+
void SpeculativeJIT::compileCheckStructure(Node* node)
{
switch (node->child1().useKind()) {
@@ -11556,6 +11663,52 @@
cellResult(resultGPR, node);
}
+void SpeculativeJIT::compilePutByIdFlush(Node* node)
+{
+ SpeculateCellOperand base(this, node->child1());
+ JSValueOperand value(this, node->child2());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ JSValueRegs valueRegs = value.jsValueRegs();
+ GPRReg scratchGPR = scratch.gpr();
+ flushRegisters();
+
+ cachedPutById(node->origin.semantic, baseGPR, valueRegs, scratchGPR, node->identifierNumber(), NotDirect, MacroAssembler::Jump(), DontSpill);
+
+ noResult(node);
+}
+
+void SpeculativeJIT::compilePutById(Node* node)
+{
+ SpeculateCellOperand base(this, node->child1());
+ JSValueOperand value(this, node->child2());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ JSValueRegs valueRegs = value.jsValueRegs();
+ GPRReg scratchGPR = scratch.gpr();
+
+ cachedPutById(node->origin.semantic, baseGPR, valueRegs, scratchGPR, node->identifierNumber(), NotDirect);
+
+ noResult(node);
+}
+
+void SpeculativeJIT::compilePutByIdDirect(Node* node)
+{
+ SpeculateCellOperand base(this, node->child1());
+ JSValueOperand value(this, node->child2());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ JSValueRegs valueRegs = value.jsValueRegs();
+ GPRReg scratchGPR = scratch.gpr();
+
+ cachedPutById(node->origin.semantic, baseGPR, valueRegs, scratchGPR, node->identifierNumber(), Direct);
+
+ noResult(node);
+}
+
void SpeculativeJIT::compilePutByIdWithThis(Node* node)
{
JSValueOperand base(this, node->child1());
@@ -11860,7 +12013,7 @@
flushRegisters();
GPRFlushedCallResult result(this);
GPRReg resultGPR = result.gpr();
- GPRReg structureGPR = selectScratchGPR(sizeGPR);
+ GPRReg structureGPR = AssemblyHelpers::selectScratchGPR(sizeGPR);
MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
m_jit.move(TrustedImmPtr(m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()))), structureGPR);
MacroAssembler::Jump done = m_jit.jump();
@@ -12409,7 +12562,420 @@
}
}
+void SpeculativeJIT::compileMiscStrictEq(Node* node)
+{
+ JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
+ JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
+ GPRTemporary result(this);
+ if (node->child1().useKind() == MiscUse)
+ speculateMisc(node->child1(), op1.jsValueRegs());
+ if (node->child2().useKind() == MiscUse)
+ speculateMisc(node->child2(), op2.jsValueRegs());
+
+#if USE(JSVALUE64)
+ m_jit.compare64(JITCompiler::Equal, op1.gpr(), op2.gpr(), result.gpr());
+#else
+ m_jit.move(TrustedImm32(0), result.gpr());
+ JITCompiler::Jump notEqual = m_jit.branch32(JITCompiler::NotEqual, op1.tagGPR(), op2.tagGPR());
+ m_jit.compare32(JITCompiler::Equal, op1.payloadGPR(), op2.payloadGPR(), result.gpr());
+ notEqual.link(&m_jit);
+#endif
+ unblessedBooleanResult(result.gpr(), node);
+}
+
+void SpeculativeJIT::emitInitializeButterfly(GPRReg storageGPR, GPRReg sizeGPR, JSValueRegs emptyValueRegs, GPRReg scratchGPR)
+{
+ m_jit.zeroExtend32ToPtr(sizeGPR, scratchGPR);
+ MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratchGPR);
+ MacroAssembler::Label loop = m_jit.label();
+ m_jit.sub32(TrustedImm32(1), scratchGPR);
+ m_jit.storeValue(emptyValueRegs, MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight));
+ m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit);
+ done.link(&m_jit);
+}
+
+void SpeculativeJIT::compileAllocateNewArrayWithSize(JSGlobalObject* globalObject, GPRReg resultGPR, GPRReg sizeGPR, IndexingType indexingType, bool shouldConvertLargeSizeToArrayStorage)
+{
+ GPRTemporary storage(this);
+ GPRTemporary scratch(this);
+ GPRTemporary scratch2(this);
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+ GPRReg scratch2GPR = scratch2.gpr();
+
+ m_jit.move(TrustedImmPtr(nullptr), storageGPR);
+
+ MacroAssembler::JumpList slowCases;
+ if (shouldConvertLargeSizeToArrayStorage)
+ slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)));
+
+ // We can use resultGPR as a scratch right now.
+ emitAllocateButterfly(storageGPR, sizeGPR, scratchGPR, scratch2GPR, resultGPR, slowCases);
+
+#if USE(JSVALUE64)
+ JSValueRegs emptyValueRegs(scratchGPR);
+ if (hasDouble(indexingType))
+ m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), emptyValueRegs.gpr());
+ else
+ m_jit.move(TrustedImm64(JSValue::encode(JSValue())), emptyValueRegs.gpr());
+#else
+ JSValueRegs emptyValueRegs(scratchGPR, scratch2GPR);
+ if (hasDouble(indexingType))
+ m_jit.moveValue(JSValue(JSValue::EncodeAsDouble, PNaN), emptyValueRegs);
+ else
+ m_jit.moveValue(JSValue(), emptyValueRegs);
+#endif
+ emitInitializeButterfly(storageGPR, sizeGPR, emptyValueRegs, resultGPR);
+
+ RegisteredStructure structure = m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType));
+
+ emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
+
+ m_jit.mutatorFence(*m_jit.vm());
+
+ addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>(
+ slowCases, this, operationNewArrayWithSize, resultGPR,
+ structure,
+ shouldConvertLargeSizeToArrayStorage ? m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)) : structure,
+ sizeGPR, storageGPR));
+}
+
+void SpeculativeJIT::compileHasIndexedProperty(Node* node)
+{
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateStrictInt32Operand index(this, node->child2());
+ GPRTemporary result(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg indexGPR = index.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ MacroAssembler::JumpList slowCases;
+ ArrayMode mode = node->arrayMode();
+ switch (mode.type()) {
+ case Array::Int32:
+ case Array::Contiguous: {
+ ASSERT(!!node->child3());
+ StorageOperand storage(this, node->child3());
+ GPRTemporary scratch(this);
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ if (mode.isInBounds())
+ speculationCheck(OutOfBounds, JSValueRegs(), nullptr, outOfBounds);
+ else
+ slowCases.append(outOfBounds);
+
+#if USE(JSVALUE64)
+ m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchGPR);
+ slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, scratchGPR));
+#else
+ m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR);
+ slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag)));
+#endif
+ m_jit.move(TrustedImm32(1), resultGPR);
+ break;
+ }
+ case Array::Double: {
+ ASSERT(!!node->child3());
+ StorageOperand storage(this, node->child3());
+ FPRTemporary scratch(this);
+ FPRReg scratchFPR = scratch.fpr();
+ GPRReg storageGPR = storage.gpr();
+
+ MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ if (mode.isInBounds())
+ speculationCheck(OutOfBounds, JSValueRegs(), nullptr, outOfBounds);
+ else
+ slowCases.append(outOfBounds);
+
+ m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR);
+ slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, scratchFPR, scratchFPR));
+ m_jit.move(TrustedImm32(1), resultGPR);
+ break;
+ }
+ case Array::ArrayStorage: {
+ ASSERT(!!node->child3());
+ StorageOperand storage(this, node->child3());
+ GPRTemporary scratch(this);
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
+ if (mode.isInBounds())
+ speculationCheck(OutOfBounds, JSValueRegs(), nullptr, outOfBounds);
+ else
+ slowCases.append(outOfBounds);
+
+#if USE(JSVALUE64)
+ m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), scratchGPR);
+ slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, scratchGPR));
+#else
+ m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR);
+ slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag)));
+#endif
+ m_jit.move(TrustedImm32(1), resultGPR);
+ break;
+ }
+ default: {
+ slowCases.append(m_jit.jump());
+ break;
+ }
+ }
+
+ addSlowPathGenerator(slowPathCall(slowCases, this, operationHasIndexedPropertyByInt, HasPropertyPtrTag, resultGPR, baseGPR, indexGPR, static_cast<int32_t>(node->internalMethodType())));
+
+ unblessedBooleanResult(resultGPR, node);
+}
+
+void SpeculativeJIT::compileGetDirectPname(Node* node)
+{
+ Edge& baseEdge = m_jit.graph().varArgChild(node, 0);
+ Edge& propertyEdge = m_jit.graph().varArgChild(node, 1);
+
+ SpeculateCellOperand base(this, baseEdge);
+ SpeculateCellOperand property(this, propertyEdge);
+ GPRReg baseGPR = base.gpr();
+ GPRReg propertyGPR = property.gpr();
+
+#if CPU(X86)
+ // Not enough registers on X86 for this code, so always use the slow path.
+ flushRegisters();
+ JSValueRegsFlushedCallResult result(this);
+ JSValueRegs resultRegs = result.regs();
+ callOperation(operationGetByValCell, resultRegs, baseGPR, JSValue::JSCellType, propertyGPR);
+ m_jit.exceptionCheck();
+ jsValueResult(resultRegs, node);
+#else
+ Edge& indexEdge = m_jit.graph().varArgChild(node, 2);
+ Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3);
+ SpeculateStrictInt32Operand index(this, indexEdge);
+ SpeculateCellOperand enumerator(this, enumeratorEdge);
+ GPRTemporary scratch(this);
+ JSValueRegsTemporary result(this);
+
+ GPRReg indexGPR = index.gpr();
+ GPRReg enumeratorGPR = enumerator.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+ JSValueRegs resultRegs = result.regs();
+
+ MacroAssembler::JumpList slowPath;
+
+ // Check the structure
+ m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratchGPR);
+ slowPath.append(
+ m_jit.branch32(
+ MacroAssembler::NotEqual,
+ scratchGPR,
+ MacroAssembler::Address(
+ enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+ // Compute the offset
+ // If index is less than the enumerator's cached inline storage, then it's an inline access
+ MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual,
+ indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
+
+ m_jit.loadValue(MacroAssembler::BaseIndex(baseGPR, indexGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage()), resultRegs);
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ // Otherwise it's out of line
+ outOfLineAccess.link(&m_jit);
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultRegs.gpr());
+ m_jit.move(indexGPR, scratchGPR);
+ m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratchGPR);
+ m_jit.neg32(scratchGPR);
+ m_jit.signExtend32ToPtr(scratchGPR, scratchGPR);
+ int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+ m_jit.loadValue(MacroAssembler::BaseIndex(resultRegs.gpr(), scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty), resultRegs);
+
+ done.link(&m_jit);
+
+#if USE(JSVALUE64)
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationGetByValCell, GetPropertyPtrTag, resultRegs, baseGPR, propertyGPR));
+#else
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationGetByValCell, GetPropertyPtrTag, resultRegs, baseGPR, JSValue::JSCellType, propertyGPR));
+#endif
+
+ jsValueResult(resultRegs, node);
+#endif
+}
+
+void SpeculativeJIT::compileExtractCatchLocal(Node* node)
+{
+ JSValueRegsTemporary result(this);
+ JSValueRegs resultRegs = result.regs();
+
+ JSValue* ptr = &reinterpret_cast<JSValue*>(m_jit.jitCode()->common.catchOSREntryBuffer->dataBuffer())[node->catchOSREntryIndex()];
+ m_jit.loadValue(ptr, resultRegs);
+ jsValueResult(resultRegs, node);
+}
+
+void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, JSValueRegs valueRegs, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
+{
+ RegisterSet usedRegisters = this->usedRegisters();
+ if (spillMode == DontSpill) {
+ // We've already flushed registers to the stack, we don't need to spill these.
+ usedRegisters.set(baseGPR, false);
+ usedRegisters.set(valueRegs, false);
+ }
+ CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
+ JITPutByIdGenerator gen(
+ m_jit.codeBlock(), codeOrigin, callSite, usedRegisters,
+ JSValueRegs::payloadOnly(baseGPR), valueRegs,
+ scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind);
+
+ gen.generateFastPath(m_jit);
+
+ JITCompiler::JumpList slowCases;
+ if (slowPathTarget.isSet())
+ slowCases.append(slowPathTarget);
+ slowCases.append(gen.slowPathJump());
+
+#if USE(JSVALUE64)
+ auto slowPath = slowPathCall(
+ slowCases, this, gen.slowPathFunction(), PutPropertyPtrTag, NoResult, gen.stubInfo(), valueRegs,
+ baseGPR, identifierUID(identifierNumber));
+#else
+ auto slowPath = slowPathCall(
+ slowCases, this, gen.slowPathFunction(), PutPropertyPtrTag, NoResult, gen.stubInfo(), valueRegs,
+ JSValue::JSCellType, baseGPR, identifierUID(identifierNumber));
+#endif
+
+ m_jit.addPutById(gen, slowPath.get());
+ addSlowPathGenerator(WTFMove(slowPath));
+}
+
+void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
+{
+ ASSERT(node->isBinaryUseKind(UntypedUse));
+ JSValueOperand arg1(this, node->child1());
+ JSValueOperand arg2(this, node->child2());
+
+ JSValueRegs arg1Regs = arg1.jsValueRegs();
+ JSValueRegs arg2Regs = arg2.jsValueRegs();
+
+ JITCompiler::JumpList slowPath;
+
+ if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
+ GPRFlushedCallResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ flushRegisters();
+ callOperation(helperFunction, resultGPR, arg1Regs, arg2Regs);
+ m_jit.exceptionCheck();
+
+ unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
+ return;
+ }
+
+#if USE(JSVALUE64)
+ GPRTemporary result(this, Reuse, arg1);
+#else
+ GPRTemporary result(this, Reuse, arg1, TagWord);
+#endif
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (!isKnownInteger(node->child1().node()))
+ slowPath.append(m_jit.branchIfNotInt32(arg1Regs));
+ if (!isKnownInteger(node->child2().node()))
+ slowPath.append(m_jit.branchIfNotInt32(arg2Regs));
+
+ m_jit.compare32(cond, arg1Regs.payloadGPR(), arg2Regs.payloadGPR(), resultGPR);
+
+ if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
+ addSlowPathGenerator(std::make_unique<CompareSlowPathGenerator<JITCompiler::JumpList>>(
+ slowPath, this, helperFunction, resultGPR, arg1Regs, arg2Regs));
+ }
+
+ unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
+}
+
+void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
+{
+ BasicBlock* taken = branchNode->branchData()->taken.block;
+ BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+
+ JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == nextBlock()) {
+ cond = JITCompiler::invert(cond);
+ callResultCondition = JITCompiler::Zero;
+ BasicBlock* tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ JSValueOperand arg1(this, node->child1());
+ JSValueOperand arg2(this, node->child2());
+ JSValueRegs arg1Regs = arg1.jsValueRegs();
+ JSValueRegs arg2Regs = arg2.jsValueRegs();
+
+ JITCompiler::JumpList slowPath;
+
+ if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
+ GPRFlushedCallResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ flushRegisters();
+ callOperation(helperFunction, resultGPR, arg1Regs, arg2Regs);
+ m_jit.exceptionCheck();
+
+ branchTest32(callResultCondition, resultGPR, taken);
+ } else {
+#if USE(JSVALUE64)
+ GPRTemporary result(this, Reuse, arg2);
+#else
+ GPRTemporary result(this, Reuse, arg2, TagWord);
+#endif
+ GPRReg resultGPR = result.gpr();
+
+ arg1.use();
+ arg2.use();
+
+ if (!isKnownInteger(node->child1().node()))
+ slowPath.append(m_jit.branchIfNotInt32(arg1Regs));
+ if (!isKnownInteger(node->child2().node()))
+ slowPath.append(m_jit.branchIfNotInt32(arg2Regs));
+
+ branch32(cond, arg1Regs.payloadGPR(), arg2Regs.payloadGPR(), taken);
+
+ if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
+ jump(notTaken, ForceJump);
+
+ slowPath.link(&m_jit);
+
+ silentSpillAllRegisters(resultGPR);
+ callOperation(helperFunction, resultGPR, arg1Regs, arg2Regs);
+ silentFillAllRegisters();
+ m_jit.exceptionCheck();
+
+ branchTest32(callResultCondition, resultGPR, taken);
+ }
+ }
+
+ jump(notTaken);
+
+ m_indexInBlock = m_block->size() - 1;
+ m_currentNode = branchNode;
+}
+
} } // namespace JSC::DFG
#endif
Modified: trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h (230516 => 230517)
--- trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h 2018-04-11 07:44:58 UTC (rev 230517)
@@ -345,11 +345,6 @@
void compileStoreBarrier(Node*);
- static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
- {
- return AssemblyHelpers::selectScratchGPR(preserve1, preserve2, preserve3, preserve4);
- }
-
// Called by the speculative operand types, below, to fill operand to
// machine registers, implicitly generating speculation checks as needed.
GPRReg fillSpeculateInt32(Edge, DataFormat& returnFormat);
@@ -725,14 +720,13 @@
void compileMovHintAndCheck(Node*);
void cachedGetById(CodeOrigin, JSValueRegs base, JSValueRegs result, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode, AccessType);
+ void cachedPutById(CodeOrigin, GPRReg baseGPR, JSValueRegs valueRegs, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
#if USE(JSVALUE64)
void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode, AccessType);
- void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
void cachedGetByIdWithThis(CodeOrigin, GPRReg baseGPR, GPRReg thisGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::JumpList slowPathTarget = JITCompiler::JumpList());
#elif USE(JSVALUE32_64)
void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode, AccessType);
- void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
void cachedGetByIdWithThis(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg thisTagGPROrNone, GPRReg thisPayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::JumpList slowPathTarget = JITCompiler::JumpList());
#endif
@@ -748,7 +742,6 @@
void nonSpeculativePeepholeBranch(Node*, Node* branchNode, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction);
void nonSpeculativeNonPeepholeCompare(Node*, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction);
- bool nonSpeculativeCompare(Node*, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction);
void nonSpeculativePeepholeStrictEq(Node*, Node* branchNode, bool invert = false);
void nonSpeculativeNonPeepholeStrictEq(Node*, bool invert = false);
@@ -757,6 +750,7 @@
void compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg, GPRReg scratch2Reg, GPRReg scratch3Reg);
void compileInstanceOf(Node*);
void compileInstanceOfCustom(Node*);
+ void compileOverridesHasInstance(Node*);
void compileIsCellWithType(Node*);
void compileIsTypedArrayView(Node*);
@@ -1442,6 +1436,7 @@
void compileSetFunctionName(Node*);
void compileNewRegexp(Node*);
void compileForwardVarargs(Node*);
+ void compileLoadVarargs(Node*);
void compileCreateActivation(Node*);
void compileCreateDirectArguments(Node*);
void compileGetFromArguments(Node*);
@@ -1464,10 +1459,12 @@
void compileRegExpMatchFastGlobal(Node*);
void compileRegExpTest(Node*);
void compileStringReplace(Node*);
+ void compileIsObject(Node*);
void compileIsObjectOrNull(Node*);
void compileIsFunction(Node*);
void compileTypeOf(Node*);
void compileCheckCell(Node*);
+ void compileCheckNotEmpty(Node*);
void compileCheckStructure(Node*);
void emitStructureCheck(Node*, GPRReg cellGPR, GPRReg tempGPR);
void compilePutAccessorById(Node*);
@@ -1497,8 +1494,12 @@
void compileGetEnumerableLength(Node*);
void compileHasGenericProperty(Node*);
void compileToIndexString(Node*);
+ void compilePutByIdFlush(Node*);
+ void compilePutById(Node*);
+ void compilePutByIdDirect(Node*);
void compilePutByIdWithThis(Node*);
void compileHasStructureProperty(Node*);
+ void compileGetDirectPname(Node*);
void compileGetPropertyEnumerator(Node*);
void compileGetEnumeratorPname(Node*);
void compileGetExecutable(Node*);
@@ -1517,6 +1518,8 @@
void compileToPrimitive(Node*);
void compileLogShadowChickenPrologue(Node*);
void compileLogShadowChickenTail(Node*);
+ void compileHasIndexedProperty(Node*);
+ void compileExtractCatchLocal(Node*);
void moveTrueTo(GPRReg);
void moveFalseTo(GPRReg);
Modified: trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp (230516 => 230517)
--- trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp 2018-04-11 07:44:58 UTC (rev 230517)
@@ -265,35 +265,6 @@
addSlowPathGenerator(WTFMove(slowPath));
}
-void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
-{
- RegisterSet usedRegisters = this->usedRegisters();
- if (spillMode == DontSpill) {
- // We've already flushed registers to the stack, we don't need to spill these.
- usedRegisters.set(basePayloadGPR, false);
- usedRegisters.set(JSValueRegs(valueTagGPR, valuePayloadGPR), false);
- }
- CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
- JITPutByIdGenerator gen(
- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters,
- JSValueRegs::payloadOnly(basePayloadGPR), JSValueRegs(valueTagGPR, valuePayloadGPR),
- scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind);
-
- gen.generateFastPath(m_jit);
-
- JITCompiler::JumpList slowCases;
- if (slowPathTarget.isSet())
- slowCases.append(slowPathTarget);
- slowCases.append(gen.slowPathJump());
-
- auto slowPath = slowPathCall(
- slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), JSValueRegs(valueTagGPR, valuePayloadGPR),
- JSValue::JSCellType, basePayloadGPR, identifierUID(identifierNumber));
-
- m_jit.addPutById(gen, slowPath.get());
- addSlowPathGenerator(WTFMove(slowPath));
-}
-
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand)
{
JSValueOperand arg(this, operand, ManualOperandSpeculation);
@@ -412,162 +383,6 @@
jump(notTaken);
}
-void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
-{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
-
- JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
-
- // The branch instruction will branch to the taken block.
- // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == nextBlock()) {
- cond = JITCompiler::invert(cond);
- callResultCondition = JITCompiler::Zero;
- BasicBlock* tmp = taken;
- taken = notTaken;
- notTaken = tmp;
- }
-
- JSValueOperand arg1(this, node->child1());
- JSValueOperand arg2(this, node->child2());
- JSValueRegs arg1Regs = arg1.jsValueRegs();
- JSValueRegs arg2Regs = arg2.jsValueRegs();
- GPRReg arg1TagGPR = arg1.tagGPR();
- GPRReg arg1PayloadGPR = arg1.payloadGPR();
- GPRReg arg2TagGPR = arg2.tagGPR();
- GPRReg arg2PayloadGPR = arg2.payloadGPR();
-
- JITCompiler::JumpList slowPath;
-
- if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- arg1.use();
- arg2.use();
-
- flushRegisters();
- callOperation(helperFunction, resultGPR, arg1Regs, arg2Regs);
- m_jit.exceptionCheck();
-
- branchTest32(callResultCondition, resultGPR, taken);
- } else {
- GPRTemporary result(this);
- GPRReg resultGPR = result.gpr();
-
- arg1.use();
- arg2.use();
-
- if (!isKnownInteger(node->child1().node()))
- slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
- if (!isKnownInteger(node->child2().node()))
- slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
-
- branch32(cond, arg1PayloadGPR, arg2PayloadGPR, taken);
-
- if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
- jump(notTaken, ForceJump);
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultGPR);
- callOperation(helperFunction, resultGPR, arg1Regs, arg2Regs);
- m_jit.exceptionCheck();
- silentFillAllRegisters();
-
- branchTest32(callResultCondition, resultGPR, taken);
- }
- }
-
- jump(notTaken);
-
- m_indexInBlock = m_block->size() - 1;
- m_currentNode = branchNode;
-}
-
-template<typename JumpType>
-class CompareAndBoxBooleanSlowPathGenerator
- : public CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg> {
-public:
- CompareAndBoxBooleanSlowPathGenerator(
- JumpType from, SpeculativeJIT* jit,
- S_JITOperation_EJJ function, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload,
- GPRReg arg2Tag, GPRReg arg2Payload)
- : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>(
- from, jit, function, NeedToSpill, ExceptionCheckRequirement::CheckNeeded, result)
- , m_arg1Tag(arg1Tag)
- , m_arg1Payload(arg1Payload)
- , m_arg2Tag(arg2Tag)
- , m_arg2Payload(arg2Payload)
- {
- }
-
-protected:
- virtual void generateInternal(SpeculativeJIT* jit)
- {
- this->setUp(jit);
- this->recordCall(
- jit->callOperation(
- this->m_function, this->m_result, JSValueRegs(m_arg1Tag, m_arg1Payload), JSValueRegs(m_arg2Tag, m_arg2Payload)));
- jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
- this->tearDown(jit);
- }
-
-private:
- GPRReg m_arg1Tag;
- GPRReg m_arg1Payload;
- GPRReg m_arg2Tag;
- GPRReg m_arg2Payload;
-};
-
-void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
-{
- JSValueOperand arg1(this, node->child1());
- JSValueOperand arg2(this, node->child2());
- GPRReg arg1TagGPR = arg1.tagGPR();
- GPRReg arg1PayloadGPR = arg1.payloadGPR();
- GPRReg arg2TagGPR = arg2.tagGPR();
- GPRReg arg2PayloadGPR = arg2.payloadGPR();
-
- JITCompiler::JumpList slowPath;
-
- if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
- GPRFlushedCallResult result(this);
- GPRReg resultPayloadGPR = result.gpr();
-
- arg1.use();
- arg2.use();
-
- flushRegisters();
- callOperation(helperFunction, resultPayloadGPR, arg1.jsValueRegs(), arg2.jsValueRegs());
- m_jit.exceptionCheck();
-
- booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
- } else {
- GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
- GPRReg resultPayloadGPR = resultPayload.gpr();
-
- arg1.use();
- arg2.use();
-
- if (!isKnownInteger(node->child1().node()))
- slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
- if (!isKnownInteger(node->child2().node()))
- slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
-
- m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR);
-
- if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
- addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
- slowPath, this, helperFunction, resultPayloadGPR, arg1TagGPR,
- arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR));
- }
-
- booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
- }
-}
-
void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
{
BasicBlock* taken = branchNode->branchData()->taken.block;
@@ -668,24 +483,6 @@
booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
}
-void SpeculativeJIT::compileMiscStrictEq(Node* node)
-{
- JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
- JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
- GPRTemporary result(this);
-
- if (node->child1().useKind() == MiscUse)
- speculateMisc(node->child1(), op1.jsValueRegs());
- if (node->child2().useKind() == MiscUse)
- speculateMisc(node->child2(), op2.jsValueRegs());
-
- m_jit.move(TrustedImm32(0), result.gpr());
- JITCompiler::Jump notEqual = m_jit.branch32(JITCompiler::NotEqual, op1.tagGPR(), op2.tagGPR());
- m_jit.compare32(JITCompiler::Equal, op1.payloadGPR(), op2.payloadGPR(), result.gpr());
- notEqual.link(&m_jit);
- booleanResult(result.gpr(), node);
-}
-
void SpeculativeJIT::compileCompareEqPtr(Node* node)
{
JSValueOperand operand(this, node->child1());
@@ -3580,10 +3377,7 @@
}
case CheckNotEmpty: {
- JSValueOperand operand(this, node->child1());
- GPRReg tagGPR = operand.tagGPR();
- speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::EmptyValueTag)));
- noResult(node);
+ compileCheckNotEmpty(node);
break;
}
@@ -3691,51 +3485,17 @@
}
case PutByIdFlush: {
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand value(this, node->child2());
- GPRTemporary scratch(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
- GPRReg scratchGPR = scratch.gpr();
- flushRegisters();
-
- cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect, MacroAssembler::Jump(), DontSpill);
-
- noResult(node);
+ compilePutByIdFlush(node);
break;
}
-
+
case PutById: {
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand value(this, node->child2());
- GPRTemporary scratch(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
- GPRReg scratchGPR = scratch.gpr();
-
- cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect);
-
- noResult(node);
+ compilePutById(node);
break;
}
case PutByIdDirect: {
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand value(this, node->child2());
- GPRTemporary scratch(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
- GPRReg scratchGPR = scratch.gpr();
-
- cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), Direct);
-
- noResult(node);
+ compilePutByIdDirect(node);
break;
}
@@ -3798,38 +3558,7 @@
}
case OverridesHasInstance: {
-
- Node* hasInstanceValueNode = node->child2().node();
- JSFunction* defaultHasInstanceFunction = jsCast<JSFunction*>(node->cellOperand()->value());
-
- MacroAssembler::JumpList notDefaultHasInstanceValue;
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand hasInstanceValue(this, node->child2());
- GPRTemporary result(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg resultGPR = result.gpr();
-
- // It would be great if constant folding handled automatically the case where we knew the hasInstance function
- // was a constant. Unfortunately, the folding rule for OverridesHasInstance is in the strength reduction phase
- // since it relies on OSR information. https://bugs.webkit.org/show_bug.cgi?id=154832
- if (!hasInstanceValueNode->isCellConstant() || defaultHasInstanceFunction != hasInstanceValueNode->asCell()) {
- JSValueRegs hasInstanceValueRegs = hasInstanceValue.jsValueRegs();
- notDefaultHasInstanceValue.append(m_jit.branchIfNotCell(hasInstanceValueRegs));
- notDefaultHasInstanceValue.append(m_jit.branchPtr(MacroAssembler::NotEqual, hasInstanceValueRegs.payloadGPR(), TrustedImmPtr(node->cellOperand())));
- }
-
- // Check that constructor 'ImplementsDefaultHasInstance'.
- m_jit.test8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance), resultGPR);
- MacroAssembler::Jump done = m_jit.jump();
-
- if (!notDefaultHasInstanceValue.empty()) {
- notDefaultHasInstanceValue.link(&m_jit);
- moveTrueTo(resultGPR);
- }
-
- done.link(&m_jit);
- booleanResult(resultGPR, node);
+ compileOverridesHasInstance(node);
break;
}
@@ -3921,22 +3650,7 @@
}
case IsObject: {
- JSValueOperand value(this, node->child1());
- GPRTemporary result(this, Reuse, value, TagWord);
-
- JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
-
- m_jit.compare8(JITCompiler::AboveOrEqual,
- JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()),
- TrustedImm32(ObjectType),
- result.gpr());
- JITCompiler::Jump done = m_jit.jump();
-
- isNotCell.link(&m_jit);
- m_jit.move(TrustedImm32(0), result.gpr());
-
- done.link(&m_jit);
- booleanResult(result.gpr(), node);
+ compileIsObject(node);
break;
}
@@ -4076,59 +3790,7 @@
break;
case LoadVarargs: {
- LoadVarargsData* data = node->loadVarargsData();
-
- GPRReg argumentsTagGPR;
- GPRReg argumentsPayloadGPR;
- JSValueRegs argumentsRegs;
- {
- JSValueOperand arguments(this, node->child1());
- argumentsRegs = arguments.jsValueRegs();
- flushRegisters();
- }
-
- callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, argumentsRegs, data->offset);
- m_jit.exceptionCheck();
-
- lock(GPRInfo::returnValueGPR);
- {
- JSValueOperand arguments(this, node->child1());
- argumentsTagGPR = arguments.tagGPR();
- argumentsPayloadGPR = arguments.payloadGPR();
- argumentsRegs = arguments.jsValueRegs();
- flushRegisters();
- }
- unlock(GPRInfo::returnValueGPR);
-
- // FIXME: There is a chance that we will call an effectful length property twice. This is safe
- // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
- // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
- // past the sizing.
- // https://bugs.webkit.org/show_bug.cgi?id=141448
-
- GPRReg argCountIncludingThisGPR =
- JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR);
-
- m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR);
-
- speculationCheck(
- VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
- MacroAssembler::Above,
- GPRInfo::returnValueGPR,
- argCountIncludingThisGPR));
-
- speculationCheck(
- VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
- MacroAssembler::Above,
- argCountIncludingThisGPR,
- TrustedImm32(data->limit)));
-
- m_jit.store32(argCountIncludingThisGPR, JITCompiler::payloadFor(data->machineCount));
-
- callOperation(operationLoadVarargs, data->machineStart.offset(), argumentsRegs, data->offset, GPRInfo::returnValueGPR, data->mandatoryMinimum);
- m_jit.exceptionCheck();
-
- noResult(node);
+ compileLoadVarargs(node);
break;
}
@@ -4340,152 +4002,11 @@
break;
}
case HasIndexedProperty: {
- SpeculateCellOperand base(this, node->child1());
- SpeculateInt32Operand index(this, node->child2());
- GPRTemporary resultPayload(this);
- GPRTemporary resultTag(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg indexGPR = index.gpr();
- GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg resultTagGPR = resultTag.gpr();
-
- MacroAssembler::JumpList slowCases;
- ArrayMode mode = node->arrayMode();
- switch (mode.type()) {
- case Array::Int32:
- case Array::Contiguous: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- GPRTemporary scratch(this);
-
- GPRReg storageGPR = storage.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())));
- m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR);
- slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag)));
- break;
- }
- case Array::Double: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- FPRTemporary scratch(this);
- FPRReg scratchFPR = scratch.fpr();
- GPRReg storageGPR = storage.gpr();
-
- slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())));
- m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR);
- slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, scratchFPR, scratchFPR));
- break;
- }
- case Array::ArrayStorage: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- GPRTemporary scratch(this);
-
- GPRReg storageGPR = storage.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));
- m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR);
- slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag)));
- break;
- }
- default: {
- slowCases.append(m_jit.jump());
- break;
- }
- }
-
- moveTrueTo(resultPayloadGPR);
- MacroAssembler::Jump done = m_jit.jump();
-
- addSlowPathGenerator(slowPathCall(slowCases, this, operationHasIndexedPropertyByInt, JSValueRegs(resultTagGPR, resultPayloadGPR), baseGPR, indexGPR, static_cast<int32_t>(node->internalMethodType())));
-
- done.link(&m_jit);
- booleanResult(resultPayloadGPR, node);
+ compileHasIndexedProperty(node);
break;
}
case GetDirectPname: {
- Edge& baseEdge = m_jit.graph().varArgChild(node, 0);
- Edge& propertyEdge = m_jit.graph().varArgChild(node, 1);
-
- SpeculateCellOperand base(this, baseEdge);
- SpeculateCellOperand property(this, propertyEdge);
- GPRReg baseGPR = base.gpr();
- GPRReg propertyGPR = property.gpr();
-
-#if CPU(X86)
- GPRTemporary scratch(this);
-
- GPRReg scratchGPR = scratch.gpr();
-
- // Not enough registers on X86 for this code, so always use the slow path.
- flushRegisters();
- JSValueRegsFlushedCallResult result(this);
- JSValueRegs resultRegs = result.regs();
- m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), scratchGPR);
- callOperation(operationGetByValCell, resultRegs, baseGPR, JSValueRegs(scratchGPR, propertyGPR));
- m_jit.exceptionCheck();
-#else
- GPRTemporary scratch(this);
- JSValueRegsTemporary result(this);
-
- GPRReg scratchGPR = scratch.gpr();
- JSValueRegs resultRegs = result.regs();
-
- Edge& indexEdge = m_jit.graph().varArgChild(node, 2);
- Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3);
-
- SpeculateInt32Operand index(this, indexEdge);
- SpeculateCellOperand enumerator(this, enumeratorEdge);
-
- GPRReg indexGPR = index.gpr();
- GPRReg enumeratorGPR = enumerator.gpr();
-
- MacroAssembler::JumpList slowPath;
-
- // Check the structure
- m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratchGPR);
- slowPath.append(
- m_jit.branch32(
- MacroAssembler::NotEqual,
- scratchGPR,
- MacroAssembler::Address(
- enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
- // Compute the offset
- // If index is less than the enumerator's cached inline storage, then it's an inline access
- MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual,
- indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
-
- m_jit.move(indexGPR, scratchGPR);
- m_jit.signExtend32ToPtr(scratchGPR, scratchGPR);
- m_jit.load32(MacroAssembler::BaseIndex(baseGPR, scratchGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultRegs.tagGPR());
- m_jit.load32(MacroAssembler::BaseIndex(baseGPR, scratchGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultRegs.payloadGPR());
-
- MacroAssembler::Jump done = m_jit.jump();
-
- // Otherwise it's out of line
- outOfLineAccess.link(&m_jit);
- m_jit.move(indexGPR, scratchGPR);
- m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratchGPR);
- m_jit.neg32(scratchGPR);
- m_jit.signExtend32ToPtr(scratchGPR, scratchGPR);
- // We use resultRegs.payloadGPR() as a temporary here. We have to make sure clobber it after getting the
- // value out of indexGPR and enumeratorGPR because resultRegs.payloadGPR() could reuse either of those registers.
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultRegs.payloadGPR());
- int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
- m_jit.load32(MacroAssembler::BaseIndex(resultRegs.payloadGPR(), scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultRegs.tagGPR());
- m_jit.load32(MacroAssembler::BaseIndex(resultRegs.payloadGPR(), scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultRegs.payloadGPR());
-
- done.link(&m_jit);
-
- addSlowPathGenerator(slowPathCall(slowPath, this, operationGetByValCell, resultRegs, baseGPR, JSValue::JSCellType, propertyGPR));
-#endif
-
- jsValueResult(resultRegs, node);
+ compileGetDirectPname(node);
break;
}
case GetPropertyEnumerator: {
@@ -4650,19 +4171,7 @@
break;
case ExtractCatchLocal: {
- GPRTemporary temp(this);
- GPRTemporary tag(this);
- GPRTemporary payload(this);
-
- GPRReg tempGPR = temp.gpr();
- GPRReg tagGPR = tag.gpr();
- GPRReg payloadGPR = payload.gpr();
-
- JSValue* ptr = &reinterpret_cast<JSValue*>(m_jit.jitCode()->common.catchOSREntryBuffer->dataBuffer())[node->catchOSREntryIndex()];
- m_jit.move(CCallHelpers::TrustedImmPtr(ptr), tempGPR);
- m_jit.load32(CCallHelpers::Address(tempGPR, TagOffset), tagGPR);
- m_jit.load32(CCallHelpers::Address(tempGPR, PayloadOffset), payloadGPR);
- jsValueResult(tagGPR, payloadGPR, node);
+ compileExtractCatchLocal(node);
break;
}
@@ -4794,59 +4303,6 @@
booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
}
-void SpeculativeJIT::emitInitializeButterfly(GPRReg storageGPR, GPRReg sizeGPR, JSValueRegs emptyValueRegs, GPRReg scratchGPR)
-{
- m_jit.move(sizeGPR, scratchGPR);
- MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratchGPR);
- MacroAssembler::Label loop = m_jit.label();
- m_jit.sub32(TrustedImm32(1), scratchGPR);
- m_jit.store32(emptyValueRegs.tagGPR(), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
- m_jit.store32(emptyValueRegs.payloadGPR(), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit);
- done.link(&m_jit);
-}
-
-void SpeculativeJIT::compileAllocateNewArrayWithSize(JSGlobalObject* globalObject, GPRReg resultGPR, GPRReg sizeGPR, IndexingType indexingType, bool shouldConvertLargeSizeToArrayStorage)
-{
- GPRTemporary storage(this);
- GPRTemporary scratch(this);
- GPRTemporary scratch2(this);
-
- GPRReg storageGPR = storage.gpr();
- GPRReg scratchGPR = scratch.gpr();
- GPRReg scratch2GPR = scratch2.gpr();
-
- m_jit.move(TrustedImmPtr(nullptr), storageGPR);
-
- MacroAssembler::JumpList slowCases;
- if (shouldConvertLargeSizeToArrayStorage)
- slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)));
-
- // We can use result as a scratch for this.
- emitAllocateButterfly(storageGPR, sizeGPR, scratchGPR, scratch2GPR, resultGPR, slowCases);
-
- JSValue hole;
- if (hasDouble(indexingType))
- hole = JSValue(JSValue::EncodeAsDouble, PNaN);
- else
- hole = JSValue();
- JSValueRegs emptyValueRegs(scratchGPR, scratch2GPR);
- m_jit.move(TrustedImm32(hole.tag()), emptyValueRegs.tagGPR());
- m_jit.move(TrustedImm32(hole.payload()), emptyValueRegs.payloadGPR());
- // We can use result as a scratch for this.
- emitInitializeButterfly(storageGPR, sizeGPR, emptyValueRegs, resultGPR);
-
- RegisteredStructure structure = m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType));
-
- emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
-
- addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>(
- slowCases, this, operationNewArrayWithSize, resultGPR,
- structure,
- shouldConvertLargeSizeToArrayStorage ? m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)) : structure,
- sizeGPR, storageGPR));
-}
-
#endif
} } // namespace JSC::DFG
Modified: trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp (230516 => 230517)
--- trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp 2018-04-11 07:44:58 UTC (rev 230517)
@@ -216,35 +216,6 @@
addSlowPathGenerator(WTFMove(slowPath));
}
-void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
-{
- CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
- RegisterSet usedRegisters = this->usedRegisters();
- if (spillMode == DontSpill) {
- // We've already flushed registers to the stack, we don't need to spill these.
- usedRegisters.set(baseGPR, false);
- usedRegisters.set(valueGPR, false);
- }
-
- JITPutByIdGenerator gen(
- m_jit.codeBlock(), codeOrigin, callSite, usedRegisters, JSValueRegs(baseGPR),
- JSValueRegs(valueGPR), scratchGPR, m_jit.ecmaModeFor(codeOrigin), putKind);
-
- gen.generateFastPath(m_jit);
-
- JITCompiler::JumpList slowCases;
- if (slowPathTarget.isSet())
- slowCases.append(slowPathTarget);
- slowCases.append(gen.slowPathJump());
-
- auto slowPath = slowPathCall(
- slowCases, this, gen.slowPathFunction(), PutPropertyPtrTag, NoResult, gen.stubInfo(), valueGPR, baseGPR,
- identifierUID(identifierNumber));
-
- m_jit.addPutById(gen, slowPath.get());
- addSlowPathGenerator(WTFMove(slowPath));
-}
-
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined(Edge operand)
{
ASSERT_WITH_MESSAGE(!masqueradesAsUndefinedWatchpointIsStillValid() || !isKnownCell(operand.node()), "The Compare should have been eliminated, it is known to be always false.");
@@ -357,152 +328,6 @@
}
}
-void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
-{
- BasicBlock* taken = branchNode->branchData()->taken.block;
- BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
-
- JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
-
- // The branch instruction will branch to the taken block.
- // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == nextBlock()) {
- cond = JITCompiler::invert(cond);
- callResultCondition = JITCompiler::Zero;
- BasicBlock* tmp = taken;
- taken = notTaken;
- notTaken = tmp;
- }
-
- JSValueOperand arg1(this, node->child1());
- JSValueOperand arg2(this, node->child2());
- GPRReg arg1GPR = arg1.gpr();
- GPRReg arg2GPR = arg2.gpr();
-
- JITCompiler::JumpList slowPath;
-
- if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- arg1.use();
- arg2.use();
-
- flushRegisters();
- callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
- m_jit.exceptionCheck();
-
- branchTest32(callResultCondition, resultGPR, taken);
- } else {
- GPRTemporary result(this, Reuse, arg2);
- GPRReg resultGPR = result.gpr();
-
- arg1.use();
- arg2.use();
-
- if (!isKnownInteger(node->child1().node()))
- slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
- if (!isKnownInteger(node->child2().node()))
- slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
-
- branch32(cond, arg1GPR, arg2GPR, taken);
-
- if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
- jump(notTaken, ForceJump);
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultGPR);
- callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
- silentFillAllRegisters();
- m_jit.exceptionCheck();
-
- branchTest32(callResultCondition, resultGPR, taken);
- }
- }
-
- jump(notTaken);
-
- m_indexInBlock = m_block->size() - 1;
- m_currentNode = branchNode;
-}
-
-template<typename JumpType>
-class CompareAndBoxBooleanSlowPathGenerator
- : public CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg> {
-public:
- CompareAndBoxBooleanSlowPathGenerator(
- JumpType from, SpeculativeJIT* jit,
- S_JITOperation_EJJ function, GPRReg result, GPRReg arg1, GPRReg arg2)
- : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>(
- from, jit, function, NeedToSpill, ExceptionCheckRequirement::CheckNeeded, result)
- , m_arg1(arg1)
- , m_arg2(arg2)
- {
- }
-
-protected:
- void generateInternal(SpeculativeJIT* jit) override
- {
- this->setUp(jit);
- this->recordCall(jit->callOperation(this->m_function, this->m_result, m_arg1, m_arg2));
- jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
- jit->m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), this->m_result);
- this->tearDown(jit);
- }
-
-private:
- GPRReg m_arg1;
- GPRReg m_arg2;
-};
-
-void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
-{
- ASSERT(node->isBinaryUseKind(UntypedUse));
- JSValueOperand arg1(this, node->child1());
- JSValueOperand arg2(this, node->child2());
- GPRReg arg1GPR = arg1.gpr();
- GPRReg arg2GPR = arg2.gpr();
-
- JITCompiler::JumpList slowPath;
-
- if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
- GPRFlushedCallResult result(this);
- GPRReg resultGPR = result.gpr();
-
- arg1.use();
- arg2.use();
-
- flushRegisters();
- callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
- m_jit.exceptionCheck();
-
- m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
- jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
- } else {
- GPRTemporary result(this, Reuse, arg2);
- GPRReg resultGPR = result.gpr();
-
- arg1.use();
- arg2.use();
-
- if (!isKnownInteger(node->child1().node()))
- slowPath.append(m_jit.branch64(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister));
- if (!isKnownInteger(node->child2().node()))
- slowPath.append(m_jit.branch64(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
-
- m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR);
- m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
-
- if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
- addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
- slowPath, this, helperFunction, resultGPR, arg1GPR, arg2GPR));
- }
-
- jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
- }
-}
-
void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
{
BasicBlock* taken = branchNode->branchData()->taken.block;
@@ -575,8 +400,8 @@
{
JSValueOperand arg1(this, node->child1());
JSValueOperand arg2(this, node->child2());
- GPRReg arg1GPR = arg1.gpr();
- GPRReg arg2GPR = arg2.gpr();
+ JSValueRegs arg1Regs = arg1.jsValueRegs();
+ JSValueRegs arg2Regs = arg2.jsValueRegs();
GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
@@ -588,9 +413,9 @@
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
// FIXME: this should flush registers instead of silent spill/fill.
- JITCompiler::Jump notEqualCase = m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR);
+ JITCompiler::Jump notEqualCase = m_jit.branch64(JITCompiler::NotEqual, arg1Regs.gpr(), arg2Regs.gpr());
- m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
+ m_jit.move(JITCompiler::TrustedImm64(!invert), resultGPR);
JITCompiler::Jump done = m_jit.jump();
@@ -597,62 +422,43 @@
notEqualCase.link(&m_jit);
silentSpillAllRegisters(resultGPR);
- callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
+ callOperation(operationCompareStrictEqCell, resultGPR, arg1Regs, arg2Regs);
silentFillAllRegisters();
m_jit.exceptionCheck();
- m_jit.and64(JITCompiler::TrustedImm32(1), resultGPR);
- m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
-
done.link(&m_jit);
- } else {
- m_jit.or64(arg1GPR, arg2GPR, resultGPR);
-
- JITCompiler::JumpList slowPathCases;
-
- JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
-
- JITCompiler::Jump leftOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg1GPR, GPRInfo::tagTypeNumberRegister);
- slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg1GPR, GPRInfo::tagTypeNumberRegister));
- leftOK.link(&m_jit);
- JITCompiler::Jump rightOK = m_jit.branch64(JITCompiler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister);
- slowPathCases.append(m_jit.branchTest64(JITCompiler::NonZero, arg2GPR, GPRInfo::tagTypeNumberRegister));
- rightOK.link(&m_jit);
-
- m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR);
- m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR);
-
- JITCompiler::Jump done = m_jit.jump();
-
- twoCellsCase.link(&m_jit);
- slowPathCases.append(m_jit.branch64(JITCompiler::NotEqual, arg1GPR, arg2GPR));
-
- m_jit.move(JITCompiler::TrustedImm64(JSValue::encode(jsBoolean(!invert))), resultGPR);
-
- addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<MacroAssembler::JumpList>>(
- slowPathCases, this, operationCompareStrictEq, resultGPR, arg1GPR,
- arg2GPR));
-
- done.link(&m_jit);
+ unblessedBooleanResult(resultGPR, m_currentNode, UseChildrenCalledExplicitly);
+ return;
}
-
- jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
-}
-void SpeculativeJIT::compileMiscStrictEq(Node* node)
-{
- JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
- JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
- GPRTemporary result(this);
-
- if (node->child1().useKind() == MiscUse)
- speculateMisc(node->child1(), op1.jsValueRegs());
- if (node->child2().useKind() == MiscUse)
- speculateMisc(node->child2(), op2.jsValueRegs());
-
- m_jit.compare64(JITCompiler::Equal, op1.gpr(), op2.gpr(), result.gpr());
- m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
- jsValueResult(result.gpr(), node, DataFormatJSBoolean);
+ m_jit.or64(arg1Regs.gpr(), arg2Regs.gpr(), resultGPR);
+
+ JITCompiler::JumpList slowPathCases;
+
+ JITCompiler::Jump twoCellsCase = m_jit.branchTest64(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister);
+
+ JITCompiler::Jump leftOK = m_jit.branchIfInt32(arg1Regs);
+ slowPathCases.append(m_jit.branchIfNumber(arg1Regs, InvalidGPRReg));
+ leftOK.link(&m_jit);
+ JITCompiler::Jump rightOK = m_jit.branchIfInt32(arg2Regs);
+ slowPathCases.append(m_jit.branchIfNumber(arg2Regs, InvalidGPRReg));
+ rightOK.link(&m_jit);
+
+ m_jit.compare64(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1Regs.gpr(), arg2Regs.gpr(), resultGPR);
+
+ JITCompiler::Jump done = m_jit.jump();
+
+ twoCellsCase.link(&m_jit);
+ slowPathCases.append(m_jit.branch64(JITCompiler::NotEqual, arg1Regs.gpr(), arg2Regs.gpr()));
+
+ m_jit.move(JITCompiler::TrustedImm64(!invert), resultGPR);
+
+ addSlowPathGenerator(std::make_unique<CompareSlowPathGenerator<MacroAssembler::JumpList>>(
+ slowPathCases, this, operationCompareStrictEq, resultGPR, arg1Regs, arg2Regs));
+
+ done.link(&m_jit);
+
+ unblessedBooleanResult(resultGPR, m_currentNode, UseChildrenCalledExplicitly);
}
void SpeculativeJIT::emitCall(Node* node)
@@ -3796,10 +3602,7 @@
}
case CheckNotEmpty: {
- JSValueOperand operand(this, node->child1());
- GPRReg gpr = operand.gpr();
- speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branchTest64(JITCompiler::Zero, gpr));
- noResult(node);
+ compileCheckNotEmpty(node);
break;
}
@@ -3921,33 +3724,12 @@
}
case PutByIdFlush: {
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand value(this, node->child2());
- GPRTemporary scratch(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg valueGPR = value.gpr();
- GPRReg scratchGPR = scratch.gpr();
- flushRegisters();
-
- cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), NotDirect, MacroAssembler::Jump(), DontSpill);
-
- noResult(node);
+ compilePutByIdFlush(node);
break;
}
case PutById: {
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand value(this, node->child2());
- GPRTemporary scratch(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg valueGPR = value.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), NotDirect);
-
- noResult(node);
+ compilePutById(node);
break;
}
@@ -3975,17 +3757,7 @@
}
case PutByIdDirect: {
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand value(this, node->child2());
- GPRTemporary scratch(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg valueGPR = value.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- cachedPutById(node->origin.semantic, baseGPR, valueGPR, scratchGPR, node->identifierNumber(), Direct);
-
- noResult(node);
+ compilePutByIdDirect(node);
break;
}
@@ -4063,38 +3835,7 @@
}
case OverridesHasInstance: {
-
- Node* hasInstanceValueNode = node->child2().node();
- JSFunction* defaultHasInstanceFunction = jsCast<JSFunction*>(node->cellOperand()->value());
-
- MacroAssembler::Jump notDefault;
- SpeculateCellOperand base(this, node->child1());
- JSValueOperand hasInstanceValue(this, node->child2());
- GPRTemporary result(this);
-
- GPRReg resultGPR = result.gpr();
- GPRReg baseGPR = base.gpr();
-
- // It would be great if constant folding handled automatically the case where we knew the hasInstance function
- // was a constant. Unfortunately, the folding rule for OverridesHasInstance is in the strength reduction phase
- // since it relies on OSR information. https://bugs.webkit.org/show_bug.cgi?id=154832
- if (!hasInstanceValueNode->isCellConstant() || defaultHasInstanceFunction != hasInstanceValueNode->asCell()) {
- GPRReg hasInstanceValueGPR = hasInstanceValue.gpr();
- notDefault = m_jit.branchPtr(MacroAssembler::NotEqual, hasInstanceValueGPR, TrustedImmPtr(node->cellOperand()));
- }
-
- // Check that base 'ImplementsDefaultHasInstance'.
- m_jit.test8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance), resultGPR);
- m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
- MacroAssembler::Jump done = m_jit.jump();
-
- if (notDefault.isSet()) {
- notDefault.link(&m_jit);
- moveTrueTo(resultGPR);
- }
-
- done.link(&m_jit);
- jsValueResult(resultGPR, node, DataFormatJSBoolean);
+ compileOverridesHasInstance(node);
break;
}
@@ -4108,7 +3849,7 @@
break;
}
- case IsEmpty: {
+ case IsEmpty: {
JSValueOperand value(this, node->child1());
GPRTemporary result(this, Reuse, value);
@@ -4529,23 +4270,7 @@
}
case IsObject: {
- JSValueOperand value(this, node->child1());
- GPRTemporary result(this, Reuse, value);
-
- JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
-
- m_jit.compare8(JITCompiler::AboveOrEqual,
- JITCompiler::Address(value.gpr(), JSCell::typeInfoTypeOffset()),
- TrustedImm32(ObjectType),
- result.gpr());
- m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
- JITCompiler::Jump done = m_jit.jump();
-
- isNotCell.link(&m_jit);
- m_jit.move(TrustedImm32(ValueFalse), result.gpr());
-
- done.link(&m_jit);
- jsValueResult(result.gpr(), node, DataFormatJSBoolean);
+ compileIsObject(node);
break;
}
@@ -4598,55 +4323,7 @@
break;
case LoadVarargs: {
- LoadVarargsData* data = node->loadVarargsData();
-
- GPRReg argumentsGPR;
- {
- JSValueOperand arguments(this, node->child1());
- argumentsGPR = arguments.gpr();
- flushRegisters();
- }
-
- callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, argumentsGPR, data->offset);
- m_jit.exceptionCheck();
-
- lock(GPRInfo::returnValueGPR);
- {
- JSValueOperand arguments(this, node->child1());
- argumentsGPR = arguments.gpr();
- flushRegisters();
- }
- unlock(GPRInfo::returnValueGPR);
-
- // FIXME: There is a chance that we will call an effectful length property twice. This is safe
- // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
- // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
- // past the sizing.
- // https://bugs.webkit.org/show_bug.cgi?id=141448
-
- GPRReg argCountIncludingThisGPR =
- JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR, argumentsGPR);
-
- m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR);
-
- speculationCheck(
- VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
- MacroAssembler::Above,
- GPRInfo::returnValueGPR,
- argCountIncludingThisGPR));
-
- speculationCheck(
- VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
- MacroAssembler::Above,
- argCountIncludingThisGPR,
- TrustedImm32(data->limit)));
-
- m_jit.store32(argCountIncludingThisGPR, JITCompiler::payloadFor(data->machineCount));
-
- callOperation(operationLoadVarargs, data->machineStart.offset(), argumentsGPR, data->offset, GPRInfo::returnValueGPR, data->mandatoryMinimum);
- m_jit.exceptionCheck();
-
- noResult(node);
+ compileLoadVarargs(node);
break;
}
@@ -4878,142 +4555,11 @@
break;
}
case HasIndexedProperty: {
- SpeculateCellOperand base(this, node->child1());
- SpeculateStrictInt32Operand index(this, node->child2());
- GPRTemporary result(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg indexGPR = index.gpr();
- GPRReg resultGPR = result.gpr();
-
- MacroAssembler::JumpList slowCases;
- ArrayMode mode = node->arrayMode();
- switch (mode.type()) {
- case Array::Int32:
- case Array::Contiguous: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- GPRTemporary scratch(this);
-
- GPRReg storageGPR = storage.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
- if (mode.isInBounds())
- speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
- else
- slowCases.append(outOfBounds);
-
- m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchGPR);
- slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, scratchGPR));
- moveTrueTo(resultGPR);
- break;
- }
- case Array::Double: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- FPRTemporary scratch(this);
- FPRReg scratchFPR = scratch.fpr();
- GPRReg storageGPR = storage.gpr();
-
- MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
- if (mode.isInBounds())
- speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
- else
- slowCases.append(outOfBounds);
-
- m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR);
- slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, scratchFPR, scratchFPR));
- moveTrueTo(resultGPR);
- break;
- }
- case Array::ArrayStorage: {
- ASSERT(!!node->child3());
- StorageOperand storage(this, node->child3());
- GPRTemporary scratch(this);
-
- GPRReg storageGPR = storage.gpr();
- GPRReg scratchGPR = scratch.gpr();
-
- MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
- if (mode.isInBounds())
- speculationCheck(OutOfBounds, JSValueRegs(), 0, outOfBounds);
- else
- slowCases.append(outOfBounds);
-
- m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), scratchGPR);
- slowCases.append(m_jit.branchTest64(MacroAssembler::Zero, scratchGPR));
- moveTrueTo(resultGPR);
- break;
- }
- default: {
- slowCases.append(m_jit.jump());
- break;
- }
- }
-
- addSlowPathGenerator(slowPathCall(slowCases, this, operationHasIndexedPropertyByInt, HasPropertyPtrTag, resultGPR, baseGPR, indexGPR, static_cast<int32_t>(node->internalMethodType())));
-
- jsValueResult(resultGPR, node, DataFormatJSBoolean);
+ compileHasIndexedProperty(node);
break;
}
case GetDirectPname: {
- Edge& baseEdge = m_jit.graph().varArgChild(node, 0);
- Edge& propertyEdge = m_jit.graph().varArgChild(node, 1);
- Edge& indexEdge = m_jit.graph().varArgChild(node, 2);
- Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3);
-
- SpeculateCellOperand base(this, baseEdge);
- SpeculateCellOperand property(this, propertyEdge);
- SpeculateStrictInt32Operand index(this, indexEdge);
- SpeculateCellOperand enumerator(this, enumeratorEdge);
- GPRTemporary result(this);
- GPRTemporary scratch1(this);
- GPRTemporary scratch2(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg propertyGPR = property.gpr();
- GPRReg indexGPR = index.gpr();
- GPRReg enumeratorGPR = enumerator.gpr();
- GPRReg resultGPR = result.gpr();
- GPRReg scratch1GPR = scratch1.gpr();
- GPRReg scratch2GPR = scratch2.gpr();
-
- MacroAssembler::JumpList slowPath;
-
- // Check the structure
- m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratch1GPR);
- slowPath.append(
- m_jit.branch32(
- MacroAssembler::NotEqual,
- scratch1GPR,
- MacroAssembler::Address(
- enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
- // Compute the offset
- // If index is less than the enumerator's cached inline storage, then it's an inline access
- MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual,
- indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
-
- m_jit.load64(MacroAssembler::BaseIndex(baseGPR, indexGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage()), resultGPR);
-
- MacroAssembler::Jump done = m_jit.jump();
-
- // Otherwise it's out of line
- outOfLineAccess.link(&m_jit);
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratch2GPR);
- m_jit.move(indexGPR, scratch1GPR);
- m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratch1GPR);
- m_jit.neg32(scratch1GPR);
- m_jit.signExtend32ToPtr(scratch1GPR, scratch1GPR);
- int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
- m_jit.load64(MacroAssembler::BaseIndex(scratch2GPR, scratch1GPR, MacroAssembler::TimesEight, offsetOfFirstProperty), resultGPR);
-
- done.link(&m_jit);
-
- addSlowPathGenerator(slowPathCall(slowPath, this, operationGetByVal, GetPropertyPtrTag, resultGPR, baseGPR, propertyGPR));
-
- jsValueResult(resultGPR, node);
+ compileGetDirectPname(node);
break;
}
case GetPropertyEnumerator: {
@@ -5134,12 +4680,7 @@
break;
case ExtractCatchLocal: {
- JSValue* ptr = &reinterpret_cast<JSValue*>(m_jit.jitCode()->common.catchOSREntryBuffer->dataBuffer())[node->catchOSREntryIndex()];
- GPRTemporary temp(this);
- GPRReg tempGPR = temp.gpr();
- m_jit.move(CCallHelpers::TrustedImmPtr(ptr), tempGPR);
- m_jit.load64(CCallHelpers::Address(tempGPR), tempGPR);
- jsValueResult(tempGPR, node);
+ compileExtractCatchLocal(node);
break;
}
@@ -5367,17 +4908,6 @@
doubleResult(result.fpr(), node);
}
-void SpeculativeJIT::emitInitializeButterfly(GPRReg storageGPR, GPRReg sizeGPR, JSValueRegs emptyValueRegs, GPRReg scratchGPR)
-{
- m_jit.zeroExtend32ToPtr(sizeGPR, scratchGPR);
- MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratchGPR);
- MacroAssembler::Label loop = m_jit.label();
- m_jit.sub32(TrustedImm32(1), scratchGPR);
- m_jit.store64(emptyValueRegs.gpr(), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight));
- m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit);
- done.link(&m_jit);
-}
-
void SpeculativeJIT::compileBigIntEquality(Node* node)
{
// FIXME: [ESNext][BigInt] Create specialized version of strict equals for BigIntUse
@@ -5415,44 +4945,6 @@
jsValueResult(resultGPR, m_currentNode, DataFormatJSBoolean, UseChildrenCalledExplicitly);
}
-void SpeculativeJIT::compileAllocateNewArrayWithSize(JSGlobalObject* globalObject, GPRReg resultGPR, GPRReg sizeGPR, IndexingType indexingType, bool shouldConvertLargeSizeToArrayStorage)
-{
- GPRTemporary storage(this);
- GPRTemporary scratch(this);
- GPRTemporary scratch2(this);
-
- GPRReg storageGPR = storage.gpr();
- GPRReg scratchGPR = scratch.gpr();
- GPRReg scratch2GPR = scratch2.gpr();
-
- m_jit.move(TrustedImmPtr(nullptr), storageGPR);
-
- MacroAssembler::JumpList slowCases;
- if (shouldConvertLargeSizeToArrayStorage)
- slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)));
-
- // We can use resultGPR as a scratch right now.
- emitAllocateButterfly(storageGPR, sizeGPR, resultGPR, scratchGPR, scratch2GPR, slowCases);
-
- if (hasDouble(indexingType))
- m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
- else
- m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
- emitInitializeButterfly(storageGPR, sizeGPR, JSValueRegs(scratchGPR), scratch2GPR);
-
- RegisteredStructure structure = m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType));
-
- emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
-
- m_jit.mutatorFence(*m_jit.vm());
-
- addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>(
- slowCases, this, operationNewArrayWithSize, resultGPR,
- structure,
- shouldConvertLargeSizeToArrayStorage ? m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)) : structure,
- sizeGPR, storageGPR));
-}
-
#endif
} } // namespace JSC::DFG
Modified: trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp (230516 => 230517)
--- trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp 2018-04-11 07:44:58 UTC (rev 230517)
@@ -10023,9 +10023,8 @@
m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
m_out.appendTo(slowCase, continuation);
- ValueFromBlock slowResult = m_out.anchor(m_out.equal(
- m_out.constInt64(JSValue::encode(jsBoolean(true))),
- vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
+ ValueFromBlock slowResult = m_out.anchor(
+ m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
@@ -10060,9 +10059,8 @@
m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
m_out.appendTo(slowCase, continuation);
- ValueFromBlock slowResult = m_out.anchor(m_out.equal(
- m_out.constInt64(JSValue::encode(jsBoolean(true))),
- vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
+ ValueFromBlock slowResult = m_out.anchor(
+ m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
@@ -10096,9 +10094,8 @@
m_out.branch(checkHoleResultValue, usually(continuation), rarely(slowCase));
m_out.appendTo(slowCase, continuation);
- ValueFromBlock slowResult = m_out.anchor(m_out.equal(
- m_out.constInt64(JSValue::encode(jsBoolean(true))),
- vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
+ ValueFromBlock slowResult = m_out.anchor(
+ m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
m_out.jump(continuation);
m_out.appendTo(continuation, lastNext);
@@ -10110,9 +10107,7 @@
LValue base = lowCell(m_node->child1());
LValue index = lowInt32(m_node->child2());
LValue internalMethodType = m_out.constInt32(static_cast<int32_t>(m_node->internalMethodType()));
- setBoolean(m_out.equal(
- m_out.constInt64(JSValue::encode(jsBoolean(true))),
- vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
+ setBoolean(m_out.notZero64(vmCall(Int64, m_out.operation(operationHasIndexedPropertyByInt), m_callFrame, base, index, internalMethodType)));
break;
}
}
Modified: trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h (230516 => 230517)
--- trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h 2018-04-11 07:44:58 UTC (rev 230517)
@@ -164,8 +164,8 @@
#if USE(JSVALUE64)
load64(address, regs.gpr());
#else
- load32(bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset), regs.payloadGPR());
- load32(bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset), regs.tagGPR());
+ move(TrustedImmPtr(address), regs.payloadGPR());
+ loadValue(Address(regs.payloadGPR()), regs);
#endif
}
@@ -1011,24 +1011,53 @@
return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset());
}
- static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg, GPRReg preserve5 = InvalidGPRReg)
+ static GPRReg selectScratchGPR(RegisterSet preserved)
{
- if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0 && preserve5 != GPRInfo::regT0)
- return GPRInfo::regT0;
+ GPRReg registers[] = {
+ GPRInfo::regT0,
+ GPRInfo::regT1,
+ GPRInfo::regT2,
+ GPRInfo::regT3,
+ GPRInfo::regT4,
+ GPRInfo::regT5,
+ };
- if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1 && preserve5 != GPRInfo::regT1)
- return GPRInfo::regT1;
+ for (GPRReg reg : registers) {
+ if (!preserved.contains(reg))
+ return reg;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+ }
- if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2 && preserve5 != GPRInfo::regT2)
- return GPRInfo::regT2;
+ template<typename... Regs>
+ static GPRReg selectScratchGPR(Regs... args)
+ {
+ RegisterSet set;
+ constructRegisterSet(set, args...);
+ return selectScratchGPR(set);
+ }
- if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3 && preserve5 != GPRInfo::regT3)
- return GPRInfo::regT3;
+ static void constructRegisterSet(RegisterSet&)
+ {
+ }
- if (preserve1 != GPRInfo::regT4 && preserve2 != GPRInfo::regT4 && preserve3 != GPRInfo::regT4 && preserve4 != GPRInfo::regT4 && preserve5 != GPRInfo::regT4)
- return GPRInfo::regT4;
+ template<typename... Regs>
+ static void constructRegisterSet(RegisterSet& set, JSValueRegs regs, Regs... args)
+ {
+ if (regs.tagGPR() != InvalidGPRReg)
+ set.set(regs.tagGPR());
+ if (regs.payloadGPR() != InvalidGPRReg)
+ set.set(regs.payloadGPR());
+ constructRegisterSet(set, args...);
+ }
- return GPRInfo::regT5;
+ template<typename... Regs>
+ static void constructRegisterSet(RegisterSet& set, GPRReg reg, Regs... args)
+ {
+ if (reg != InvalidGPRReg)
+ set.set(reg);
+ constructRegisterSet(set, args...);
}
// Add a debug call. This call has no effect on JIT code execution state.
Modified: trunk/Source/JavaScriptCore/jit/RegisterSet.h (230516 => 230517)
--- trunk/Source/JavaScriptCore/jit/RegisterSet.h 2018-04-11 05:49:31 UTC (rev 230516)
+++ trunk/Source/JavaScriptCore/jit/RegisterSet.h 2018-04-11 07:44:58 UTC (rev 230517)
@@ -207,6 +207,7 @@
private:
void setAny(Reg reg) { set(reg); }
+ void setAny(JSValueRegs regs) { set(regs); }
void setAny(const RegisterSet& set) { merge(set); }
void setMany() { }
template<typename RegType, typename... Regs>