Modified: trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp (277499 => 277500)
--- trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp 2021-05-14 18:26:20 UTC (rev 277499)
+++ trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp 2021-05-14 18:34:33 UTC (rev 277500)
@@ -32,6 +32,7 @@
#include "CodeBlock.h"
#include "DirectArguments.h"
#include "JITInlines.h"
+#include "JITThunks.h"
#include "JSLexicalEnvironment.h"
#include "LinkBuffer.h"
#include "PrivateFieldPutKind.h"
@@ -94,11 +95,76 @@
JITGetByValGenerator& gen = m_getByVals[m_getByValIndex];
++m_getByValIndex;
Label coldPathBegin = label();
- Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByValOptimize, dst, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), profile, regT0, regT1);
+
+#if !ENABLE(EXTRA_CTI_THUNKS)
+ Call call = callOperationWithProfile(metadata, operationGetByValOptimize, dst, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), profile, regT0, regT1);
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR4;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR3; // arg1 already used.
+ constexpr GPRReg profileGPR = argumentGPR2;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = regT1;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+ static_assert(propertyGPR == argumentGPR1);
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ move(TrustedImmPtr(profile), profileGPR);
+ emitNakedNearCall(vm.getCTIStub(slow_op_get_by_val_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationGetByValOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+
+ emitValueProfilingSite(metadata, returnValueGPR);
+ emitPutVirtualRegister(dst, returnValueGPR);
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
}
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_get_by_val_prepareCallGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+ jit.tagReturnAddress();
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR4;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg globalObjectGPR = argumentGPR5;
+ constexpr GPRReg stubInfoGPR = argumentGPR3;
+ constexpr GPRReg profileGPR = argumentGPR2;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = regT1;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+ static_assert(propertyGPR == argumentGPR1);
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), globalObjectGPR);
+ jit.loadPtr(Address(globalObjectGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+
+ jit.setupArguments<decltype(operationGetByValOptimize)>(globalObjectGPR, stubInfoGPR, profileGPR, baseGPR, propertyGPR);
+ jit.prepareCallOperation(vm);
+
+ // FIXME: Consider making this a tail call directly to the target operation instead
+ // of returning to the client to call it.
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_get_by_val_prepareCall");
+}
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
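For orientation, the EXTRA_CTI_THUNKS slow-path protocol used throughout this change works roughly as follows; this is a condensed paraphrase of the sequence emitted above, where prepareCallThunk, checkExceptionThunk and operationFoo are placeholders for the per-operation thunk, the shared exception-check thunk and the target operation, not real identifiers:

    // At each slow-path site the baseline JIT now emits only:
    move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);  // offset for the thunk to spill
    move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);       // plus any other operands, in fixed GPRs
    emitNakedNearCall(prepareCallThunk);                    // shared: spills the offset, loads the
                                                            // globalObject from codeBlock(), setupArguments
    Call call = appendCall(operationFoo);                   // the only per-site operation call
    emitNakedNearCall(checkExceptionThunk);                 // shared exception check

The bulky call-setup code thus lives once per operation signature in a thunk rather than once per bytecode, keeping the per-site footprint small.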
void JIT::emit_op_get_private_name(const Instruction* currentInstruction)
{
auto bytecode = currentInstruction->as<OpGetPrivateName>();
@@ -129,9 +195,8 @@
{
ASSERT(hasAnySlowCases(iter));
auto bytecode = currentInstruction->as<OpGetPrivateName>();
+ auto& metadata = bytecode.metadata(m_codeBlock);
VirtualRegister dst = bytecode.m_dst;
- GPRReg baseGPR = regT0;
- GPRReg propertyGPR = regT1;
linkAllSlowCases(iter);
@@ -138,10 +203,74 @@
JITGetByValGenerator& gen = m_getByVals[m_getByValIndex];
++m_getByValIndex;
Label coldPathBegin = label();
- Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetPrivateNameOptimize, dst, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), baseGPR, propertyGPR);
+
+#if !ENABLE(EXTRA_CTI_THUNKS)
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = regT1;
+ Call call = callOperationWithProfile(metadata, operationGetPrivateNameOptimize, dst, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), baseGPR, propertyGPR);
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR3;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR2; // arg1 already used.
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = regT1;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+ static_assert(propertyGPR == argumentGPR1);
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ emitNakedNearCall(vm.getCTIStub(slow_op_get_private_name_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationGetPrivateNameOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+
+ emitValueProfilingSite(metadata, returnValueGPR);
+ emitPutVirtualRegister(dst, returnValueGPR);
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_get_private_name_prepareCallGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+ jit.tagReturnAddress();
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR3;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg globalObjectGPR = argumentGPR5;
+ constexpr GPRReg stubInfoGPR = argumentGPR2;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = regT1;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+ static_assert(propertyGPR == argumentGPR1);
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), globalObjectGPR);
+ jit.loadPtr(Address(globalObjectGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+
+ jit.setupArguments<decltype(operationGetPrivateNameOptimize)>(globalObjectGPR, stubInfoGPR, baseGPR, propertyGPR);
+ jit.prepareCallOperation(vm);
+
+ // FIXME: Consider making this a tail call directly to the target operation instead
+ // of returning to the client to call it.
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_xxx_private_name_prepareCall");
+}
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
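Note that the thunk above is intentionally reusable: emitSlow_op_set_private_brand and emitSlow_op_check_private_brand below call through the same slow_op_get_private_name_prepareCallGenerator stub, guarded only by a static_assert that their operations take the same argument list. Schematically (operationFoo and operationBar are placeholders, not real operations):

    static_assert(std::is_same<FunctionTraits<decltype(operationFoo)>::ArgumentTypes,
                               FunctionTraits<decltype(operationBar)>::ArgumentTypes>::value);
    emitNakedNearCall(vm.getCTIStub(sharedPrepareCallGenerator).retaggedCode<NoPtrTag>());
    Call call = appendCall(operationBar); // only the target operation differs per site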
void JIT::emit_op_set_private_brand(const Instruction* currentInstruction)
{
auto bytecode = currentInstruction->as<OpSetPrivateBrand>();
@@ -170,15 +299,37 @@
void JIT::emitSlow_op_set_private_brand(const Instruction*, Vector<SlowCaseEntry>::iterator& iter)
{
- GPRReg baseGPR = regT0;
- GPRReg brandGPR = regT1;
-
linkAllSlowCases(iter);
JITPrivateBrandAccessGenerator& gen = m_privateBrandAccesses[m_privateBrandAccessIndex];
++m_privateBrandAccessIndex;
Label coldPathBegin = label();
+
+#if !ENABLE(EXTRA_CTI_THUNKS)
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg brandGPR = regT1;
Call call = callOperation(operationSetPrivateBrandOptimize, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), baseGPR, brandGPR);
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR3;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR2; // arg1 already used.
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = regT1;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+ static_assert(propertyGPR == argumentGPR1);
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ static_assert(std::is_same<FunctionTraits<decltype(operationSetPrivateBrandOptimize)>::ArgumentTypes, FunctionTraits<decltype(operationGetPrivateNameOptimize)>::ArgumentTypes>::value);
+ emitNakedNearCall(vm.getCTIStub(slow_op_get_private_name_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationSetPrivateBrandOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+#endif
gen.reportSlowPathCall(coldPathBegin, call);
}
@@ -203,15 +354,38 @@
void JIT::emitSlow_op_check_private_brand(const Instruction*, Vector<SlowCaseEntry>::iterator& iter)
{
- GPRReg baseGPR = regT0;
- GPRReg brandGPR = regT1;
-
linkAllSlowCases(iter);
JITPrivateBrandAccessGenerator& gen = m_privateBrandAccesses[m_privateBrandAccessIndex];
++m_privateBrandAccessIndex;
Label coldPathBegin = label();
+
+#if !ENABLE(EXTRA_CTI_THUNKS)
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg brandGPR = regT1;
Call call = callOperation(operationCheckPrivateBrandOptimize, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), baseGPR, brandGPR);
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR3;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR2; // arg1 already used.
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = regT1;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+ static_assert(propertyGPR == argumentGPR1);
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ static_assert(std::is_same<FunctionTraits<decltype(operationCheckPrivateBrandOptimize)>::ArgumentTypes, FunctionTraits<decltype(operationGetPrivateNameOptimize)>::ArgumentTypes>::value);
+ emitNakedNearCall(vm.getCTIStub(slow_op_get_private_name_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationCheckPrivateBrandOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
}
@@ -437,16 +611,78 @@
linkAllSlowCases(iter);
Label slowPath = label();
+#if !ENABLE(EXTRA_CTI_THUNKS)
emitGetVirtualRegister(base, regT0);
emitGetVirtualRegister(property, regT1);
emitGetVirtualRegister(value, regT2);
Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, TrustedImmPtr(m_codeBlock->globalObject()), regT0, regT1, regT2, byValInfo, TrustedImm32(ecmaMode.value()));
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR0;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg baseGPR = argumentGPR1;
+ constexpr GPRReg propertyGPR = argumentGPR2;
+ constexpr GPRReg valueGPR = argumentGPR3;
+ constexpr GPRReg byValInfoGPR = argumentGPR4;
+ constexpr GPRReg ecmaModeGPR = argumentGPR5;
+
+ emitGetVirtualRegister(base, baseGPR);
+ emitGetVirtualRegister(property, propertyGPR);
+ emitGetVirtualRegister(value, valueGPR);
+ move(TrustedImmPtr(byValInfo), byValInfoGPR);
+ move(TrustedImm32(ecmaMode.value()), ecmaModeGPR);
+ emitNakedNearCall(vm.getCTIStub(slow_op_put_by_val_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ auto* operation = isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize;
+ Call call = appendCall(operation);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
m_byValInstructionIndex++;
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_put_by_val_prepareCallGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+ jit.tagReturnAddress();
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR0;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg globalObjectGPR = argumentGPR0;
+ constexpr GPRReg baseGPR = argumentGPR1;
+ constexpr GPRReg propertyGPR = argumentGPR2;
+ constexpr GPRReg valueGPR = argumentGPR3;
+ constexpr GPRReg byValInfoGPR = argumentGPR4;
+ constexpr GPRReg ecmaModeGPR = argumentGPR5;
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), globalObjectGPR);
+ jit.loadPtr(Address(globalObjectGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+
+ jit.setupArguments<decltype(operationPutByValOptimize)>(globalObjectGPR, baseGPR, propertyGPR, valueGPR, byValInfoGPR, ecmaModeGPR);
+ jit.prepareCallOperation(vm);
+
+ // FIXME: Consider making this a tail call directly to the target operation instead
+ // of returning to the client to call it.
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_put_xxx_prepareCall");
+}
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
void JIT::emit_op_put_private_name(const Instruction* currentInstruction)
{
auto bytecode = currentInstruction->as<OpPutPrivateName>();
@@ -476,20 +712,81 @@
linkAllSlowCases(iter);
Label slowPath = label();
- GPRReg baseGPR = regT0;
- GPRReg propertyGPR = regT1;
- GPRReg valueGPR = regT2;
+#if !ENABLE(EXTRA_CTI_THUNKS)
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = regT1;
+ constexpr GPRReg valueGPR = regT2;
emitGetVirtualRegister(bytecode.m_base, baseGPR);
emitGetVirtualRegister(bytecode.m_property, propertyGPR);
emitGetVirtualRegister(bytecode.m_value, valueGPR);
Call call = callOperation(operationPutPrivateNameOptimize, TrustedImmPtr(m_codeBlock->globalObject()), baseGPR, propertyGPR, valueGPR, byValInfo, TrustedImm32(putKind.value()));
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR0;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg baseGPR = argumentGPR1;
+ constexpr GPRReg propertyGPR = argumentGPR2;
+ constexpr GPRReg valueGPR = argumentGPR3;
+ constexpr GPRReg byValInfoGPR = argumentGPR4;
+ constexpr GPRReg putKindGPR = argumentGPR5;
+
+ emitGetVirtualRegister(bytecode.m_base, baseGPR);
+ emitGetVirtualRegister(bytecode.m_property, propertyGPR);
+ emitGetVirtualRegister(bytecode.m_value, valueGPR);
+ move(TrustedImmPtr(byValInfo), byValInfoGPR);
+ move(TrustedImm32(putKind.value()), putKindGPR);
+ emitNakedNearCall(vm.getCTIStub(slow_op_put_private_name_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationPutPrivateNameOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
m_byValInstructionIndex++;
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_put_private_name_prepareCallGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+ jit.tagReturnAddress();
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR0;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg globalObjectGPR = argumentGPR0;
+ constexpr GPRReg baseGPR = argumentGPR1;
+ constexpr GPRReg propertyGPR = argumentGPR2;
+ constexpr GPRReg valueGPR = argumentGPR3;
+ constexpr GPRReg byValInfoGPR = argumentGPR4;
+ constexpr GPRReg putKindGPR = argumentGPR5;
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), globalObjectGPR);
+ jit.loadPtr(Address(globalObjectGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+
+ jit.setupArguments<decltype(operationPutPrivateNameOptimize)>(globalObjectGPR, baseGPR, propertyGPR, valueGPR, byValInfoGPR, putKindGPR);
+ jit.prepareCallOperation(vm);
+
+ // FIXME: Consider making this a tail call directly to the target operation instead
+ // of returning to the client to call it.
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_put_private_name_prepareCall");
+}
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
void JIT::emit_op_put_getter_by_id(const Instruction* currentInstruction)
{
auto bytecode = currentInstruction->as<OpPutGetterById>();
@@ -578,8 +875,33 @@
Label coldPathBegin = label();
+#if !ENABLE(EXTRA_CTI_THUNKS)
emitGetVirtualRegister(base, regT0);
Call call = callOperation(operationDeleteByIdOptimize, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits(), TrustedImm32(bytecode.m_ecmaMode.value()));
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR0;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR1;
+ constexpr GPRReg baseGPR = argumentGPR2;
+ constexpr GPRReg propertyGPR = argumentGPR3;
+ constexpr GPRReg ecmaModeGPR = argumentGPR4;
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ emitGetVirtualRegister(base, baseGPR);
+ move(TrustedImmPtr(CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits()), propertyGPR);
+ move(TrustedImm32(bytecode.m_ecmaMode.value()), ecmaModeGPR);
+ emitNakedNearCall(vm.getCTIStub(slow_op_del_by_id_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationDeleteByIdOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+ static_assert(returnValueGPR == regT0);
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
boxBoolean(regT0, JSValueRegs(regT0));
@@ -586,6 +908,41 @@
emitPutVirtualRegister(dst, JSValueRegs(regT0));
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_del_by_id_prepareCallGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+ jit.tagReturnAddress();
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR0;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg globalObjectGPR = argumentGPR0;
+ constexpr GPRReg stubInfoGPR = argumentGPR1;
+ constexpr GPRReg baseGPR = argumentGPR2;
+ constexpr GPRReg propertyGPR = argumentGPR3;
+ constexpr GPRReg ecmaModeGPR = argumentGPR4;
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), globalObjectGPR);
+ jit.loadPtr(Address(globalObjectGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+
+ jit.setupArguments<decltype(operationDeleteByIdOptimize)>(globalObjectGPR, stubInfoGPR, baseGPR, propertyGPR, ecmaModeGPR);
+ jit.prepareCallOperation(vm);
+
+ // FIXME: Consider making this a tail call directly to the target operation instead
+ // of returning to the client to call it.
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_del_by_id_prepareCall");
+}
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
void JIT::emit_op_del_by_val(const Instruction* currentInstruction)
{
auto bytecode = currentInstruction->as<OpDelByVal>();
@@ -627,9 +984,34 @@
Label coldPathBegin = label();
+#if !ENABLE(EXTRA_CTI_THUNKS)
emitGetVirtualRegister(base, regT0);
emitGetVirtualRegister(property, regT1);
Call call = callOperation(operationDeleteByValOptimize, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, regT1, TrustedImm32(bytecode.m_ecmaMode.value()));
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR0;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR1;
+ constexpr GPRReg baseGPR = argumentGPR2;
+ constexpr GPRReg propertyGPR = argumentGPR3;
+ constexpr GPRReg ecmaModeGPR = argumentGPR4;
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ emitGetVirtualRegister(base, baseGPR);
+ emitGetVirtualRegister(property, propertyGPR);
+ move(TrustedImm32(bytecode.m_ecmaMode.value()), ecmaModeGPR);
+ emitNakedNearCall(vm.getCTIStub(slow_op_del_by_val_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationDeleteByValOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+ static_assert(returnValueGPR == regT0);
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
boxBoolean(regT0, JSValueRegs(regT0));
@@ -636,6 +1018,41 @@
emitPutVirtualRegister(dst, JSValueRegs(regT0));
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_del_by_val_prepareCallGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+ jit.tagReturnAddress();
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR0;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg globalObjectGPR = argumentGPR0;
+ constexpr GPRReg stubInfoGPR = argumentGPR1;
+ constexpr GPRReg baseGPR = argumentGPR2;
+ constexpr GPRReg propertyGPR = argumentGPR3;
+ constexpr GPRReg ecmaModeGPR = argumentGPR4;
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), globalObjectGPR);
+ jit.loadPtr(Address(globalObjectGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+
+ jit.setupArguments<decltype(operationDeleteByValOptimize)>(globalObjectGPR, stubInfoGPR, baseGPR, propertyGPR, ecmaModeGPR);
+ jit.prepareCallOperation(vm);
+
+ // FIXME: Consider making this a tail call directly to the target operation instead
+ // of returning to the client to call it.
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_del_by_val_prepareCall");
+}
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
void JIT::emit_op_try_get_by_id(const Instruction* currentInstruction)
{
auto bytecode = currentInstruction->as<OpTryGetById>();
@@ -672,8 +1089,32 @@
Label coldPathBegin = label();
+#if !ENABLE(EXTRA_CTI_THUNKS)
Call call = callOperation(operationTryGetByIdOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits());
-
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ move(JIT::TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR1;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = argumentGPR3;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ move(TrustedImmPtr(CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits()), propertyGPR);
+ static_assert(std::is_same<decltype(operationTryGetByIdOptimize), decltype(operationGetByIdOptimize)>::value);
+ emitNakedNearCall(vm.getCTIStub(slow_op_get_by_id_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationTryGetByIdOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+
+ emitPutVirtualRegister(resultVReg, returnValueGPR);
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
}
@@ -706,6 +1147,7 @@
linkAllSlowCases(iter);
auto bytecode = currentInstruction->as<OpGetByIdDirect>();
+ auto& metadata = bytecode.metadata(m_codeBlock);
VirtualRegister resultVReg = bytecode.m_dst;
const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));
@@ -713,8 +1155,33 @@
Label coldPathBegin = label();
- Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdDirectOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits());
+#if !ENABLE(EXTRA_CTI_THUNKS)
+ Call call = callOperationWithProfile(metadata, operationGetByIdDirectOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits());
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR1;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = argumentGPR3;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ move(TrustedImmPtr(CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits()), propertyGPR);
+ static_assert(std::is_same<decltype(operationGetByIdDirectOptimize), decltype(operationGetByIdOptimize)>::value);
+ emitNakedNearCall(vm.getCTIStub(slow_op_get_by_id_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationGetByIdDirectOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+
+ emitValueProfilingSite(metadata, returnValueGPR);
+ emitPutVirtualRegister(resultVReg, returnValueGPR);
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
}
@@ -780,6 +1247,7 @@
linkAllSlowCases(iter);
auto bytecode = currentInstruction->as<OpGetById>();
+ auto& metadata = bytecode.metadata(m_codeBlock);
VirtualRegister resultVReg = bytecode.m_dst;
const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));
@@ -787,16 +1255,76 @@
Label coldPathBegin = label();
- Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits());
+#if !ENABLE(EXTRA_CTI_THUNKS)
+ Call call = callOperationWithProfile(metadata, operationGetByIdOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits());
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR1;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = argumentGPR3;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ move(TrustedImmPtr(CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits()), propertyGPR);
+ emitNakedNearCall(vm.getCTIStub(slow_op_get_by_id_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationGetByIdOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+
+ emitValueProfilingSite(metadata, returnValueGPR);
+ emitPutVirtualRegister(resultVReg, returnValueGPR);
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_get_by_id_prepareCallGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+ jit.tagReturnAddress();
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg globalObjectGPR = argumentGPR5;
+ constexpr GPRReg stubInfoGPR = argumentGPR1;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = argumentGPR3;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), globalObjectGPR);
+ jit.loadPtr(Address(globalObjectGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+
+ jit.setupArguments<decltype(operationGetByIdOptimize)>(globalObjectGPR, stubInfoGPR, baseGPR, propertyGPR);
+ jit.prepareCallOperation(vm);
+
+ // FIXME: Consider making this a tail call directly to the target operation instead
+ // of returning to the client to call it.
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_get_by_id_prepareCall");
+}
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
void JIT::emitSlow_op_get_by_id_with_this(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkAllSlowCases(iter);
auto bytecode = currentInstruction->as<OpGetByIdWithThis>();
+ auto& metadata = bytecode.metadata(m_codeBlock);
VirtualRegister resultVReg = bytecode.m_dst;
const Identifier* ident = &(m_codeBlock->identifier(bytecode.m_property));
@@ -804,11 +1332,74 @@
Label coldPathBegin = label();
- Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdWithThisOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, regT1, CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits());
+#if !ENABLE(EXTRA_CTI_THUNKS)
+ Call call = callOperationWithProfile(metadata, operationGetByIdWithThisOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, regT1, CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits());
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR3;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR2; // arg1 already in use.
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg thisGPR = regT1;
+ constexpr GPRReg propertyGPR = argumentGPR4;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+ static_assert(thisGPR == argumentGPR1);
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ move(TrustedImmPtr(CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits()), propertyGPR);
+ emitNakedNearCall(vm.getCTIStub(slow_op_get_by_id_with_this_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationGetByIdWithThisOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+
+ emitValueProfilingSite(metadata, returnValueGPR);
+ emitPutVirtualRegister(resultVReg, returnValueGPR);
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_get_by_id_with_this_prepareCallGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+ jit.tagReturnAddress();
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR3;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg globalObjectGPR = argumentGPR5;
+ constexpr GPRReg stubInfoGPR = argumentGPR2;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg thisGPR = regT1;
+ constexpr GPRReg propertyGPR = argumentGPR4;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+ static_assert(thisGPR == argumentGPR1);
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), globalObjectGPR);
+ jit.loadPtr(Address(globalObjectGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+
+ jit.setupArguments<decltype(operationGetByIdWithThisOptimize)>(globalObjectGPR, stubInfoGPR, baseGPR, thisGPR, propertyGPR);
+ jit.prepareCallOperation(vm);
+
+ // FIXME: Consider making this a tail call directly to the target operation instead
+ // of returning to the client to call it.
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_get_by_id_with_this_prepareCall");
+}
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
void JIT::emit_op_put_by_id(const Instruction* currentInstruction)
{
auto bytecode = currentInstruction->as<OpPutById>();
@@ -852,11 +1443,71 @@
JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];
+#if !ENABLE(EXTRA_CTI_THUNKS)
Call call = callOperation(gen.slowPathFunction(), TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT1, regT0, CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits());
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR3; // arg1 already in use.
+ constexpr GPRReg valueGPR = regT1;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = argumentGPR4;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+ static_assert(valueGPR == argumentGPR1);
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ move(TrustedImmPtr(CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits()), propertyGPR);
+ emitNakedNearCall(vm.getCTIStub(slow_op_put_by_id_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(gen.slowPathFunction());
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_put_by_id_prepareCallGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+ jit.tagReturnAddress();
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg globalObjectGPR = argumentGPR5;
+ constexpr GPRReg stubInfoGPR = argumentGPR3;
+ constexpr GPRReg valueGPR = regT1;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = argumentGPR4;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+ static_assert(valueGPR == argumentGPR1);
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), globalObjectGPR);
+ jit.loadPtr(Address(globalObjectGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+
+ jit.setupArguments<decltype(operationPutByIdStrictOptimize)>(globalObjectGPR, stubInfoGPR, valueGPR, baseGPR, propertyGPR);
+ jit.prepareCallOperation(vm);
+
+ // FIXME: Consider making this a tail call directly to the target operation instead
+ // of returning to the client to call it.
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_put_by_id_prepareCall");
+}
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
void JIT::emit_op_in_by_id(const Instruction* currentInstruction)
{
auto bytecode = currentInstruction->as<OpInById>();
@@ -890,8 +1541,34 @@
Label coldPathBegin = label();
+#if !ENABLE(EXTRA_CTI_THUNKS)
Call call = callOperation(operationInByIdOptimize, resultVReg, TrustedImmPtr(m_codeBlock->globalObject()), gen.stubInfo(), regT0, CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits());
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ constexpr GPRReg stubInfoGPR = argumentGPR1;
+ constexpr GPRReg baseGPR = regT0;
+ constexpr GPRReg propertyGPR = argumentGPR3;
+ static_assert(baseGPR == argumentGPR0 || !isARM64());
+
+ move(TrustedImmPtr(gen.stubInfo()), stubInfoGPR);
+ move(TrustedImmPtr(CacheableIdentifier::createFromIdentifierOwnedByCodeBlock(m_codeBlock, *ident).rawBits()), propertyGPR);
+ // slow_op_get_by_id_prepareCallGenerator will do exactly what we need.
+ // So, there's no point in creating a duplicate thunk just to give it a different name.
+ static_assert(std::is_same<decltype(operationInByIdOptimize), decltype(operationGetByIdOptimize)>::value);
+ emitNakedNearCall(vm.getCTIStub(slow_op_get_by_id_prepareCallGenerator).retaggedCode<NoPtrTag>());
+
+ Call call = appendCall(operationInByIdOptimize);
+ emitNakedNearCall(vm.getCTIStub(checkExceptionGenerator).retaggedCode<NoPtrTag>());
+
+ emitPutVirtualRegister(resultVReg, returnValueGPR);
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
gen.reportSlowPathCall(coldPathBegin, call);
}
@@ -1164,10 +1841,75 @@
linkAllSlowCases(iter);
auto bytecode = currentInstruction->as<OpGetFromScope>();
+ auto& metadata = bytecode.metadata(m_codeBlock);
VirtualRegister dst = bytecode.m_dst;
- callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetFromScope, dst, TrustedImmPtr(m_codeBlock->globalObject()), currentInstruction);
+
+#if !ENABLE(EXTRA_CTI_THUNKS)
+ callOperationWithProfile(metadata, operationGetFromScope, dst, TrustedImmPtr(m_codeBlock->globalObject()), currentInstruction);
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+ ASSERT(m_codeBlock->instructionAt(m_bytecodeIndex) == currentInstruction);
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ emitNakedNearCall(vm.getCTIStub(slow_op_get_from_scopeGenerator).retaggedCode<NoPtrTag>());
+
+ emitValueProfilingSite(metadata, returnValueGPR);
+ emitPutVirtualRegister(dst, returnValueGPR);
+#endif // ENABLE(EXTRA_CTI_THUNKS)
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_get_from_scopeGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+#if CPU(X86_64)
+ jit.push(X86Registers::ebp);
+#elif CPU(ARM64)
+ jit.tagReturnAddress();
+ jit.pushPair(framePointerRegister, linkRegister);
+#endif
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg codeBlockGPR = argumentGPR3;
+ constexpr GPRReg globalObjectGPR = argumentGPR0;
+ constexpr GPRReg instructionGPR = argumentGPR1;
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
+ jit.loadPtr(Address(codeBlockGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+ jit.loadPtr(Address(codeBlockGPR, CodeBlock::offsetOfInstructionsRawPointer()), instructionGPR);
+ jit.addPtr(bytecodeOffsetGPR, instructionGPR);
+
+ jit.setupArguments<decltype(operationGetFromScope)>(globalObjectGPR, instructionGPR);
+ jit.prepareCallOperation(vm);
+ CCallHelpers::Call operation = jit.call(OperationPtrTag);
+ CCallHelpers::Jump exceptionCheck = jit.emitExceptionCheck(vm);
+
+#if CPU(X86_64)
+ jit.pop(X86Registers::ebp);
+#elif CPU(ARM64)
+ jit.popPair(framePointerRegister, linkRegister);
+#endif
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ patchBuffer.link(operation, FunctionPtr<OperationPtrTag>(operationGetFromScope));
+ auto handler = vm.jitStubs->existingCTIStub(popThunkStackPreservesAndHandleExceptionGenerator, NoLockingNecessary);
+ patchBuffer.link(exceptionCheck, CodeLocationLabel(handler.retaggedCode<NoPtrTag>()));
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_get_from_scope");
+}
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+
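The two scope-access thunks (slow_op_get_from_scopeGenerator above and slow_op_put_to_scopeGenerator further down) take a different shape from the prepare-call thunks earlier in this patch: the operation call happens inside the thunk, so the thunk builds a frame (push/pop of the frame pointer, plus the link register on ARM64) and links its own exception check to popThunkStackPreservesAndHandleExceptionGenerator instead of returning to the emitting site for those steps. Condensed, the in-thunk sequence paraphrases to:

    pushPair(framePointerRegister, linkRegister);   // push(ebp) on X86_64
    store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
    // load the codeBlock, then its globalObject and instructions pointer; add the bytecode offset
    Call operation = call(OperationPtrTag);         // linked to operationGetFromScope / operationPutToScope
    Jump exceptionCheck = emitExceptionCheck(vm);   // linked to the pop-and-handle-exception thunk
    popPair(framePointerRegister, linkRegister);
    ret();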
void JIT::emitPutGlobalVariable(JSValue* operand, VirtualRegister value, WatchpointSet* set)
{
emitGetVirtualRegister(value, regT0);
@@ -1325,10 +2067,70 @@
if (resolveType == ModuleVar) {
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error);
slowPathCall.call();
- } else
+ } else {
+#if !ENABLE(EXTRA_CTI_THUNKS)
callOperation(operationPutToScope, TrustedImmPtr(m_codeBlock->globalObject()), currentInstruction);
+#else
+ VM& vm = this->vm();
+ uint32_t bytecodeOffset = m_bytecodeIndex.offset();
+ ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
+ ASSERT(m_codeBlock->instructionAt(m_bytecodeIndex) == currentInstruction);
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
+
+ emitNakedNearCall(vm.getCTIStub(slow_op_put_to_scopeGenerator).retaggedCode<NoPtrTag>());
+#endif
+ }
}
+#if ENABLE(EXTRA_CTI_THUNKS)
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::slow_op_put_to_scopeGenerator(VM& vm)
+{
+ // The thunk generated by this function can only work with the LLInt / Baseline JIT because
+ // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
+ // DFG/FTL may inline functions belonging to other globalObjects, which may not match
+ // CallFrame::codeBlock().
+ JIT jit(vm);
+
+#if CPU(X86_64)
+ jit.push(X86Registers::ebp);
+#elif CPU(ARM64)
+ jit.tagReturnAddress();
+ jit.pushPair(framePointerRegister, linkRegister);
+#endif
+
+ constexpr GPRReg bytecodeOffsetGPR = argumentGPR2;
+ jit.store32(bytecodeOffsetGPR, tagFor(CallFrameSlot::argumentCountIncludingThis));
+
+ constexpr GPRReg codeBlockGPR = argumentGPR3;
+ constexpr GPRReg globalObjectGPR = argumentGPR0;
+ constexpr GPRReg instructionGPR = argumentGPR1;
+
+ jit.loadPtr(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
+ jit.loadPtr(Address(codeBlockGPR, CodeBlock::offsetOfGlobalObject()), globalObjectGPR);
+ jit.loadPtr(Address(codeBlockGPR, CodeBlock::offsetOfInstructionsRawPointer()), instructionGPR);
+ jit.addPtr(bytecodeOffsetGPR, instructionGPR);
+
+ jit.prepareCallOperation(vm);
+ CCallHelpers::Call operation = jit.call(OperationPtrTag);
+ CCallHelpers::Jump exceptionCheck = jit.emitExceptionCheck(vm);
+
+#if CPU(X86_64)
+ jit.pop(X86Registers::ebp);
+#elif CPU(ARM64)
+ jit.popPair(framePointerRegister, linkRegister);
+#endif
+ jit.ret();
+
+ LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::Thunk);
+ patchBuffer.link(operation, FunctionPtr<OperationPtrTag>(operationPutToScope));
+ auto handler = vm.jitStubs->existingCTIStub(popThunkStackPreservesAndHandleExceptionGenerator, NoLockingNecessary);
+ patchBuffer.link(exceptionCheck, CodeLocationLabel(handler.retaggedCode<NoPtrTag>()));
+ return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: slow_op_put_to_scope");
+}
+#endif
+
void JIT::emit_op_get_from_arguments(const Instruction* currentInstruction)
{
auto bytecode = currentInstruction->as<OpGetFromArguments>();