Title: [232271] trunk/Source/_javascript_Core
Revision
232271
Author
utatane....@gmail.com
Date
2018-05-29 13:04:14 -0700 (Tue, 29 May 2018)

Log Message

[Baseline] Merge loading functionalities
https://bugs.webkit.org/show_bug.cgi?id=185907

Reviewed by Saam Barati.

This patch unifies the emitXXXLoad functions between the 32-bit and 64-bit baseline JIT.

* jit/JITInlines.h:
(JSC::JIT::emitDoubleGetByVal):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emitDoubleLoad):
(JSC::JIT::emitContiguousLoad):
(JSC::JIT::emitArrayStorageLoad):
(JSC::JIT::emitIntTypedArrayGetByVal):
(JSC::JIT::emitFloatTypedArrayGetByVal):
Define the register usage first, then share the same code between the 32-bit and 64-bit configurations.

* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emitSlow_op_put_by_val):
Now the C stack is always enabled on JIT platforms, and the number of temporary registers on x86 increases from 5 to 6.
We can remove this special handling.

(JSC::JIT::emitContiguousLoad): Deleted.
(JSC::JIT::emitDoubleLoad): Deleted.
(JSC::JIT::emitArrayStorageLoad): Deleted.

Modified Paths

Diff

Modified: trunk/Source/_javascript_Core/ChangeLog (232270 => 232271)


--- trunk/Source/_javascript_Core/ChangeLog	2018-05-29 19:27:13 UTC (rev 232270)
+++ trunk/Source/_javascript_Core/ChangeLog	2018-05-29 20:04:14 UTC (rev 232271)
@@ -1,3 +1,31 @@
+2018-05-29  Yusuke Suzuki  <utatane....@gmail.com>
+
+        [Baseline] Merge loading functionalities
+        https://bugs.webkit.org/show_bug.cgi?id=185907
+
+        Reviewed by Saam Barati.
+
+        This patch unifies emitXXXLoad functions in 32bit and 64bit.
+
+        * jit/JITInlines.h:
+        (JSC::JIT::emitDoubleGetByVal):
+        * jit/JITPropertyAccess.cpp:
+        (JSC::JIT::emitDoubleLoad):
+        (JSC::JIT::emitContiguousLoad):
+        (JSC::JIT::emitArrayStorageLoad):
+        (JSC::JIT::emitIntTypedArrayGetByVal):
+        (JSC::JIT::emitFloatTypedArrayGetByVal):
+        Define register usage first, and share the same code in 32bit and 64bit.
+
+        * jit/JITPropertyAccess32_64.cpp:
+        (JSC::JIT::emitSlow_op_put_by_val):
+        Now C-stack is always enabled in JIT platform and temporary registers increases from 5 to 6 in x86.
+        We can remove this special handling.
+
+        (JSC::JIT::emitContiguousLoad): Deleted.
+        (JSC::JIT::emitDoubleLoad): Deleted.
+        (JSC::JIT::emitArrayStorageLoad): Deleted.
+
 2018-05-29  Saam Barati  <sbar...@apple.com>
 
         JSC should put bmalloc's scavenger into mini mode

Modified: trunk/Source/_javascript_Core/jit/JITInlines.h (232270 => 232271)


--- trunk/Source/_javascript_Core/jit/JITInlines.h	2018-05-29 19:27:13 UTC (rev 232270)
+++ trunk/Source/_javascript_Core/jit/JITInlines.h	2018-05-29 20:04:14 UTC (rev 232271)
@@ -31,22 +31,17 @@
 
 namespace JSC {
 
-#if USE(JSVALUE64)
 inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
 {
-    JumpList slowCases = emitDoubleLoad(instruction, badType);
-    moveDoubleTo64(fpRegT0, regT0);
-    sub64(tagTypeNumberRegister, regT0);
-    return slowCases;
-}
+#if USE(JSVALUE64)
+    JSValueRegs result = JSValueRegs(regT0);
 #else
-inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
-{
+    JSValueRegs result = JSValueRegs(regT1, regT0);
+#endif
     JumpList slowCases = emitDoubleLoad(instruction, badType);
-    moveDoubleToInts(fpRegT0, regT0, regT1);
+    boxDouble(fpRegT0, result);
     return slowCases;
 }
-#endif // USE(JSVALUE64)
 
 ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType)
 {

Modified: trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp (232270 => 232271)


--- trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp	2018-05-29 19:27:13 UTC (rev 232270)
+++ trunk/Source/_javascript_Core/jit/JITPropertyAccess.cpp	2018-05-29 20:04:14 UTC (rev 232271)
@@ -125,48 +125,6 @@
     m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
 }
 
-JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
-{
-    JumpList slowCases;
-    
-    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
-    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
-    loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
-    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
-    
-    return slowCases;
-}
-
-JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
-{
-    JumpList slowCases;
-    
-    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
-    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
-    load64(BaseIndex(regT2, regT1, TimesEight), regT0);
-    slowCases.append(branchTest64(Zero, regT0));
-    
-    return slowCases;
-}
-
-JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
-{
-    JumpList slowCases;
-
-    add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
-    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
-
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
-    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
-
-    load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
-    slowCases.append(branchTest64(Zero, regT0));
-    
-    return slowCases;
-}
-
 JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
 {
     // base: regT0
@@ -1119,9 +1077,6 @@
     emitWriteBarrier(arguments, value, ShouldFilterValue);
 }
 
-#endif // USE(JSVALUE64)
-
-#if USE(JSVALUE64)
 void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
 {
     Jump valueNotCell;
@@ -1392,7 +1347,88 @@
     MacroAssembler::repatchCall(CodeLocationCall<NoPtrTag>(MacroAssemblerCodePtr<NoPtrTag>(returnAddress)), FunctionPtr<OperationPtrTag>(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric));
 }
 
+JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
+{
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID indexing = regT2;
+    RegisterID scratch = regT3;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID indexing = regT1;
+    RegisterID scratch = regT3;
+#endif
 
+    JumpList slowCases;
+
+    badType = patchableBranch32(NotEqual, indexing, TrustedImm32(DoubleShape));
+    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, Butterfly::offsetOfPublicLength())));
+    loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0);
+    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+
+    return slowCases;
+}
+
+JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+{
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID indexing = regT2;
+    JSValueRegs result = JSValueRegs(regT0);
+    RegisterID scratch = regT3;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID indexing = regT1;
+    JSValueRegs result = JSValueRegs(regT1, regT0);
+    RegisterID scratch = regT3;
+#endif
+
+    JumpList slowCases;
+
+    badType = patchableBranch32(NotEqual, indexing, TrustedImm32(expectedShape));
+    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, Butterfly::offsetOfPublicLength())));
+    loadValue(BaseIndex(scratch, property, TimesEight), result);
+    slowCases.append(branchIfEmpty(result));
+
+    return slowCases;
+}
+
+JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
+{
+#if USE(JSVALUE64)
+    RegisterID base = regT0;
+    RegisterID property = regT1;
+    RegisterID indexing = regT2;
+    JSValueRegs result = JSValueRegs(regT0);
+    RegisterID scratch = regT3;
+#else
+    RegisterID base = regT0;
+    RegisterID property = regT2;
+    RegisterID indexing = regT1;
+    JSValueRegs result = JSValueRegs(regT1, regT0);
+    RegisterID scratch = regT3;
+#endif
+
+    JumpList slowCases;
+
+    add32(TrustedImm32(-ArrayStorageShape), indexing, scratch);
+    badType = patchableBranch32(Above, scratch, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
+
+    loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+    slowCases.append(branch32(AboveOrEqual, property, Address(scratch, ArrayStorage::vectorLengthOffset())));
+
+    loadValue(BaseIndex(scratch, property, TimesEight, ArrayStorage::vectorOffset()), result);
+    slowCases.append(branchIfEmpty(result));
+
+    return slowCases;
+}
+
 JIT::JumpList JIT::emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType)
 {
     JumpList slowCases;
@@ -1484,17 +1520,17 @@
 #if USE(JSVALUE64)
     RegisterID base = regT0;
     RegisterID property = regT1;
-    RegisterID resultPayload = regT0;
+    JSValueRegs result = JSValueRegs(regT0);
     RegisterID scratch = regT3;
     RegisterID scratch2 = regT4;
 #else
     RegisterID base = regT0;
     RegisterID property = regT2;
-    RegisterID resultPayload = regT0;
-    RegisterID resultTag = regT1;
+    JSValueRegs result = JSValueRegs(regT1, regT0);
     RegisterID scratch = regT3;
     RegisterID scratch2 = regT4;
 #endif
+    RegisterID resultPayload = result.payloadGPR();
     
     JumpList slowCases;
     
@@ -1530,22 +1566,12 @@
         
         convertInt32ToDouble(resultPayload, fpRegT0);
         addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
-#if USE(JSVALUE64)
-        moveDoubleTo64(fpRegT0, resultPayload);
-        sub64(tagTypeNumberRegister, resultPayload);
-#else
-        moveDoubleToInts(fpRegT0, resultPayload, resultTag);
-#endif
-        
+        boxDouble(fpRegT0, result);
         done = jump();
         canBeInt.link(this);
     }
 
-#if USE(JSVALUE64)
-    or64(tagTypeNumberRegister, resultPayload);
-#else
-    move(TrustedImm32(JSValue::Int32Tag), resultTag);
-#endif
+    boxInt32(resultPayload, result);
     if (done.isSet())
         done.link(this);
     return slowCases;
@@ -1558,14 +1584,13 @@
 #if USE(JSVALUE64)
     RegisterID base = regT0;
     RegisterID property = regT1;
-    RegisterID resultPayload = regT0;
+    JSValueRegs result = JSValueRegs(regT0);
     RegisterID scratch = regT3;
     RegisterID scratch2 = regT4;
 #else
     RegisterID base = regT0;
     RegisterID property = regT2;
-    RegisterID resultPayload = regT0;
-    RegisterID resultTag = regT1;
+    JSValueRegs result = JSValueRegs(regT1, regT0);
     RegisterID scratch = regT3;
     RegisterID scratch2 = regT4;
 #endif
@@ -1596,12 +1621,7 @@
     loadDouble(TrustedImmPtr(&NaN), fpRegT0);
     notNaN.link(this);
     
-#if USE(JSVALUE64)
-    moveDoubleTo64(fpRegT0, resultPayload);
-    sub64(tagTypeNumberRegister, resultPayload);
-#else
-    moveDoubleToInts(fpRegT0, resultPayload, resultTag);
-#endif
+    boxDouble(fpRegT0, result);
     return slowCases;    
 }
 

Modified: trunk/Source/_javascript_Core/jit/JITPropertyAccess32_64.cpp (232270 => 232271)


--- trunk/Source/_javascript_Core/jit/JITPropertyAccess32_64.cpp	2018-05-29 19:27:13 UTC (rev 232270)
+++ trunk/Source/_javascript_Core/jit/JITPropertyAccess32_64.cpp	2018-05-29 20:04:14 UTC (rev 232271)
@@ -184,48 +184,6 @@
     m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
 }
 
-JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
-{
-    JumpList slowCases;
-    
-    badType = patchableBranch32(NotEqual, regT1, TrustedImm32(expectedShape));
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
-    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
-    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
-    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
-    slowCases.append(branchIfEmpty(regT1));
-    
-    return slowCases;
-}
-
-JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
-{
-    JumpList slowCases;
-    
-    badType = patchableBranch32(NotEqual, regT1, TrustedImm32(DoubleShape));
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
-    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
-    loadDouble(BaseIndex(regT3, regT2, TimesEight), fpRegT0);
-    slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
-    
-    return slowCases;
-}
-
-JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
-{
-    JumpList slowCases;
-    
-    add32(TrustedImm32(-ArrayStorageShape), regT1, regT3);
-    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
-    slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
-    load32(BaseIndex(regT3, regT2, TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
-    load32(BaseIndex(regT3, regT2, TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
-    slowCases.append(branchIfEmpty(regT1));
-    
-    return slowCases;
-}
-
 JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
 {
     int dst = currentInstruction[1].u.operand;
@@ -499,24 +457,6 @@
     
     bool isDirect = Interpreter::getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
 
-#if CPU(X86)
-    // FIXME: We only have 5 temp registers, but need 6 to make this call, therefore we materialize
-    // our own call. When we finish moving JSC to the C call stack, we'll get another register so
-    // we can use the normal case.
-    unsigned pokeOffset = 0;
-    poke(GPRInfo::callFrameRegister, pokeOffset++);
-    emitLoad(base, regT0, regT1);
-    poke(regT1, pokeOffset++);
-    poke(regT0, pokeOffset++);
-    emitLoad(property, regT0, regT1);
-    poke(regT1, pokeOffset++);
-    poke(regT0, pokeOffset++);
-    emitLoad(value, regT0, regT1);
-    poke(regT1, pokeOffset++);
-    poke(regT0, pokeOffset++);
-    poke(TrustedImmPtr(byValInfo), pokeOffset++);
-    Call call = appendCallWithExceptionCheck(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize);
-#else
     // The register selection below is chosen to reduce register swapping on ARM.
     // Swapping shouldn't happen on other platforms.
     emitLoad(base, regT2, regT1);
@@ -523,7 +463,6 @@
     emitLoad(property, regT3, regT0);
     emitLoad(value, regT5, regT4);
     Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, JSValueRegs(regT2, regT1), JSValueRegs(regT3, regT0), JSValueRegs(regT5, regT4), byValInfo);
-#endif
 
     m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
     m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
_______________________________________________
webkit-changes mailing list
webkit-changes@lists.webkit.org
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to