Title: [281062] trunk/Source/JavaScriptCore
Revision
281062
Author
yijia_hu...@apple.com
Date
2021-08-15 10:41:03 -0700 (Sun, 15 Aug 2021)

Log Message

[ARM64] Add Pre/Post-Indexed Address Mode to Air for ARM64 (Store Instruction)
https://bugs.webkit.org/show_bug.cgi?id=228538

Reviewed by Filip Pizlo.

The previous patch has introduced the pre/post-index address mode for Load instruction,
which benefits loop programs. Here, this patch adds the corresponding mode for Store
instruction. Store Register (immediate) stores a word or a doubleword from a register
to memory. The address that is used for the store is calculated from a base register
and an immediate offset.

######################################
## Pre-Index Address Mode For Store ##
######################################

STR Wt, [Xn, #imm]!

In B3 Reduction Strength, since we have this reduction rule:
    Turns this: Store(value, Add(address, offset1), offset = offset2)
    Into this: Store(value, address, offset = offset1 + offset2)

The equivalent pattern is:
    address = Add(base, offset)
    ...
    memory = Store(value, base, offset)

Here, we only consider the pattern:
    address = Add(base, offset)
    memory = Store(value, base, offset)

And, this patch directly treats it as the canonical form. Ideally, we should move
memory to just after the address like what we did for PreIndex Load in the previous
patch. But, we cannot provide a better way to do that since the value may be
used between address and memory. To move value upward, we must move all descendants
of the value along with it to prevent B3 IR index violations, which is risky and expensive.

Next, lower to Air:
    Move %base, %address
    Move %value, (%address, prefix(offset))

#######################################
## Post-Index Address Mode For Store ##
#######################################

STR Wt, [Xn], #imm

Then, the equivalent pattern is:
    memory = Store(value, base, 0)
    ...
    address = Add(base, offset)

First, we convert it to the canonical form:
    newOffset = Constant
    newAddress = Add(base, offset)
    memory = Store(value, base, 0) // move the offset and address to just before the memory
    ...
    offset = Identity(newOffset)
    address = Identity(newAddress)

Next, lower to Air:
    Move %base, %newAddress
    Move %value, (%newAddress, postfix(offset))

##############################################
## Test for Pre/Post-Increment Address Mode ##
##############################################

B3 IR:
------------------------------------------------------
Int64 b@0 = ArgumentReg(%x0)
Int64 b@1 = ArgumentReg(%x1)
Int64 b@2 = Const64(8)
Int64 b@3 = Add(b@0, $8(b@2))
Void  b@4 = Store(b@1, b@3, ControlDependent|Writes:Top)
Void  b@5 = Return(b@3, Terminal)
------------------------------------------------------

W/O Pre-Increment Address Mode:
------------------------------------------------------
Move  %x0, %x2,      b@0
Add64  $8, %x2, %x0, b@3
Move  %x1, 8(%x2),   b@4
Ret64 %x0,           b@5
------------------------------------------------------

W/ Pre-Increment Address Mode:
------------------------------------------------------
MoveWithIncrement64 %x1, (%x0,Pre($8)), b@4
Ret64               %x0,                b@5
------------------------------------------------------

B3 IR:
------------------------------------------------------
Int64 b@0 = ArgumentReg(%x0)
Int64 b@1 = ArgumentReg(%x1)
Void  b@2 = Store(b@1, b@0, ControlDependent|Writes:Top)
Int64 b@3 = Const64(8)
Int64 b@4 = Add(b@0, $8(b@3))
Void  b@5 = Return(b@4, Terminal)
------------------------------------------------------

W/O Post-Increment Address Mode:
------------------------------------------------------
Move  %x1, (%x0),      b@2
Add64  $8,   %x0, %x0, b@4
Ret64 %x0,             b@5
------------------------------------------------------

W/ Post-Increment Address Mode:
------------------------------------------------------
MoveWithIncrement64 %x1, (%x0,Post($8)), b@2
Ret64               %x0,                 b@5
------------------------------------------------------

* b3/B3CanonicalizePrePostIncrements.cpp:
(JSC::B3::canonicalizePrePostIncrements):
* b3/B3LowerToAir.cpp:
* b3/testb3.h:
* b3/testb3_3.cpp:
(testStorePreIndex32):
(testStorePreIndex64):
(testStorePostIndex32):
(testStorePostIndex64):
(addShrTests):

Modified Paths

Diff

Modified: trunk/Source/JavaScriptCore/ChangeLog (281061 => 281062)


--- trunk/Source/_javascript_Core/ChangeLog	2021-08-15 15:44:27 UTC (rev 281061)
+++ trunk/Source/_javascript_Core/ChangeLog	2021-08-15 17:41:03 UTC (rev 281062)
@@ -1,3 +1,131 @@
+2021-08-15  Yijia Huang  <yijia_hu...@apple.com>
+
+        [ARM64] Add Pre/Post-Indexed Address Mode to Air for ARM64 (Store Instruction)
+        https://bugs.webkit.org/show_bug.cgi?id=228538
+
+        Reviewed by Filip Pizlo.
+
+        The previous patch has introduced the pre/post-index address mode for Load instruction,
+        which benefits loop programs. Here, this patch adds the corresponding mode for Store
+        instruction. Store Register (immediate) stores a word or a doubleword from a register
+        to memory. The address that is used for the store is calculated from a base register
+        and an immediate offset.
+
+        ######################################
+        ## Pre-Index Address Mode For Store ##
+        ######################################
+
+        STR Wt, [Xn, #imm]!
+
+        In B3 Reduction Strength, since we have this reduction rule:
+            Turns this: Store(value, Add(address, offset1), offset = offset2)
+            Into this: Store(value, address, offset = offset1 + offset2)
+
+        The equivalent pattern is:
+            address = Add(base, offset)
+            ...
+            memory = Store(value, base, offset)
+
+        Here, we only consider the pattern:
+            address = Add(base, offset)
+            memory = Store(value, base, offset)
+
+        And, this patch directly treats it as the canonical form. Ideally, we should move 
+        memory to just after the address like what we did for PreIndex Load in the previous 
+        patch. But, we cannot provide a better way to do that since the value may be 
+        used between address and memory. To move value upward, we must move all descendants 
+        of the value along with it to prevent B3 IR index violations, which is risky and expensive.
+
+        Next, lower to Air:
+            Move %base, %address
+            Move %value, (%address, prefix(offset))
+
+        #######################################
+        ## Post-Index Address Mode For Store ##
+        #######################################
+
+        STR Wt, [Xn], #imm
+
+        Then, the equivalent pattern is:
+            memory = Store(value, base, 0)
+            ...
+            address = Add(base, offset)
+
+        First, we convert it to the canonical form:
+            newOffset = Constant
+            newAddress = Add(base, offset)
+            memory = Store(value, base, 0) // move the offset and address to just before the memory
+            ...
+            offset = Identity(newOffset)
+            address = Identity(newAddress)
+
+        Next, lower to Air:
+            Move %base, %newAddress
+            Move %value, (%newAddress, postfix(offset))
+
+        ##############################################
+        ## Test for Pre/Post-Increment Address Mode ##
+        ##############################################
+
+        B3 IR:
+        ------------------------------------------------------
+        Int64 b@0 = ArgumentReg(%x0)
+        Int64 b@1 = ArgumentReg(%x1)
+        Int64 b@2 = Const64(8)
+        Int64 b@3 = Add(b@0, $8(b@2))
+        Void  b@4 = Store(b@1, b@3, ControlDependent|Writes:Top)
+        Void  b@5 = Return(b@3, Terminal)
+        ------------------------------------------------------
+
+        W/O Pre-Increment Address Mode:
+        ------------------------------------------------------
+        Move  %x0, %x2,      b@0
+        Add64  $8, %x2, %x0, b@3
+        Move  %x1, 8(%x2),   b@4
+        Ret64 %x0,           b@5
+        ------------------------------------------------------
+
+        W/ Pre-Increment Address Mode:
+        ------------------------------------------------------
+        MoveWithIncrement64 %x1, (%x0,Pre($8)), b@4
+        Ret64               %x0,                b@5
+        ------------------------------------------------------
+
+
+        B3 IR:
+        ------------------------------------------------------
+        Int64 b@0 = ArgumentReg(%x0)
+        Int64 b@1 = ArgumentReg(%x1)
+        Void  b@2 = Store(b@1, b@0, ControlDependent|Writes:Top)
+        Int64 b@3 = Const64(8)
+        Int64 b@4 = Add(b@0, $8(b@3))
+        Void  b@5 = Return(b@4, Terminal)
+        ------------------------------------------------------
+
+        W/O Post-Increment Address Mode:
+        ------------------------------------------------------
+        Move  %x1, (%x0),      b@2
+        Add64  $8,   %x0, %x0, b@4
+        Ret64 %x0,             b@5
+        ------------------------------------------------------
+
+        W/ Post-Increment Address Mode:
+        ------------------------------------------------------
+        MoveWithIncrement64 %x1, (%x0,Post($8)), b@2
+        Ret64               %x0,                 b@5
+        ------------------------------------------------------
+
+        * b3/B3CanonicalizePrePostIncrements.cpp:
+        (JSC::B3::canonicalizePrePostIncrements):
+        * b3/B3LowerToAir.cpp:
+        * b3/testb3.h:
+        * b3/testb3_3.cpp:
+        (testStorePreIndex32):
+        (testStorePreIndex64):
+        (testStorePostIndex32):
+        (testStorePostIndex64):
+        (addShrTests):
+
 2021-08-13  Keith Miller  <keith_mil...@apple.com>
 
         EnumeratorNextUpdatePropertyName always needs to be able to handle IndexedMode

Modified: trunk/Source/JavaScriptCore/b3/B3CanonicalizePrePostIncrements.cpp (281061 => 281062)


--- trunk/Source/_javascript_Core/b3/B3CanonicalizePrePostIncrements.cpp	2021-08-15 15:44:27 UTC (rev 281061)
+++ trunk/Source/_javascript_Core/b3/B3CanonicalizePrePostIncrements.cpp	2021-08-15 17:41:03 UTC (rev 281062)
@@ -58,25 +58,27 @@
     BackwardsDominators& backwardsDominators = proc.backwardsDominators();
 
     IndexSet<Value*> ignoredValues;
-    HashMap<MemoryValue*, Value*> preIndexCandidates;
-    HashMap<MemoryValue*, Value*> postIndexCandidates;
-    HashMap<Value*, Vector<MemoryValue*>> baseToMemories;
+    HashMap<Value*, Vector<MemoryValue*>> baseToLoads;
+    HashMap<MemoryValue*, Value*> preIndexLoadCandidates;
+    HashMap<MemoryValue*, Value*> postIndexLoadCandidates;
     HashMap<ValueKey, Vector<Value*>> baseOffsetToAddresses;
 
-    // Pre-Index Pattern:
-    //     address = Add(base, offset)
-    //     ***
-    //     memory = Load(base, offset)
-    // Post-Index Pattern:
-    //     memory = Load(base, 0)
-    //     ***
-    //     address = Add(base, offset)
+    HashMap<Value*, Vector<MemoryValue*>> baseToStores;
+    HashMap<MemoryValue*, Value*> postIndexStoreCandidates;
+
     auto tryAddPrePostIndexCandidate = [&] (Value* value) {
         switch (value->opcode()) {
         case Load: {
-            MemoryValue* memory = value->as<MemoryValue>();
-
-            auto tryAddPreIndexCandidates = [&] () {
+            // Pre-Index Pattern:
+            //     address = Add(base, offset)
+            //     ...
+            //     memory = Load(base, offset)
+            // Post-Index Pattern:
+            //     memory = Load(base, 0)
+            //     ...
+            //     address = Add(base, offset)
+            auto tryAddpreIndexLoadCandidates = [&] () {
+                MemoryValue* memory = value->as<MemoryValue>();
                 if (memory->type() != Int32 && memory->type() != Int64)
                     return;
                 if (memory->offset()) {
@@ -86,20 +88,41 @@
                     if (!baseOffsetToAddresses.contains(baseOffsetkey))
                         return;
                     for (Value* address : baseOffsetToAddresses.get(baseOffsetkey))
-                        preIndexCandidates.add(memory, address);
+                        preIndexLoadCandidates.add(memory, address);
                 } else
-                    baseToMemories.add(memory->child(0), Vector<MemoryValue*>()).iterator->value.append(memory);
+                    baseToLoads.add(memory->child(0), Vector<MemoryValue*>()).iterator->value.append(memory);
             };
 
-            tryAddPreIndexCandidates();
+            tryAddpreIndexLoadCandidates();
             break;
         }
 
+        case Store: {
+            // Pre-Index Pattern:
+            //     address = Add(base, offset)
+            //     memory = Store(value, base, offset)
+            // Post-Index Pattern:
+            //     memory = Store(value, base, 0)
+            //     ...
+            //     address = Add(base, offset)
+            auto tryUpdateBaseToStores = [&] () {
+                MemoryValue* memory = value->as<MemoryValue>();
+                if (memory->child(0)->type() != Int32 && memory->child(0)->type() != Int64)
+                    return;
+                if (memory->child(0)->hasInt() || memory->offset())
+                    return;
+                baseToStores.add(memory->child(1), Vector<MemoryValue*>()).iterator->value.append(memory);
+            };
+
+            tryUpdateBaseToStores();
+            break;
+        }
+
         case Add: {
             Value* left = value->child(0);
             Value* right = value->child(1);
 
-            auto tryAddPostIndexCandidates = [&] () {
+            auto tryAddpostIndexCandidates = [&] () {
                 if (!right->hasIntPtr() || value->type() != Int64)
                     return;
                 intptr_t offset = right->asIntPtr();
@@ -109,13 +132,17 @@
                 // so far this Add value is a valid address candidate for both prefix and postfix pattern
                 ValueKey baseOffsetkey = ValueKey(left, static_cast<int64_t>(smallOffset));
                 baseOffsetToAddresses.add(baseOffsetkey, Vector<Value*>()).iterator->value.append(value);
-                if (!baseToMemories.contains(left))
-                    return;
-                for (MemoryValue* memory : baseToMemories.get(left))
-                    postIndexCandidates.add(memory, value);
+                if (baseToLoads.contains(left)) {
+                    for (MemoryValue* memory : baseToLoads.get(left))
+                        postIndexLoadCandidates.add(memory, value);
+                }
+                if (baseToStores.contains(left)) {
+                    for (MemoryValue* memory : baseToStores.get(left))
+                        postIndexStoreCandidates.add(memory, value);
+                }
             };
 
-            tryAddPostIndexCandidates();
+            tryAddpostIndexCandidates();
             break;
         }
 
@@ -134,6 +161,8 @@
             || (dominators.dominates(v2->owner, v1->owner) && backwardsDominators.dominates(v1->owner, v2->owner));
     };
 
+    // This search is expensive. However, due to the greedy pattern
+    // matching, no better method can be proposed at present.
     auto valueIndexInBasicBlock = [&] (Value* value) -> unsigned {
         unsigned index = 0;
         BasicBlock* block = value->owner;
@@ -144,7 +173,26 @@
         return index;
     };
 
-    auto detect = [&] (const HashMap<MemoryValue*, Value*>& candidates, bool isPreIndexCandidates) {
+    for (auto pair : preIndexLoadCandidates) {
+        MemoryValue* memory = pair.key;
+        Value* address = pair.value;
+        if (ignoredValues.contains(memory) || ignoredValues.contains(address) || !controlEquivalent(memory, address))
+            continue;
+        // address = Add(base, offset)       address = Add(base, offset)
+        // ...                          -->  newMemory = Load(base, offset)
+        // ...                               ...
+        // memory = Load(base, offset)       memory = Identity(newMemory)
+        unsigned insertionIndex = valueIndexInBasicBlock(address) + 1;
+        MemoryValue* newMemory = insertionSet.insert<MemoryValue>(insertionIndex, Load, memory->type(), address->origin(), memory->lastChild());
+        newMemory->setOffset(memory->offset());
+        memory->replaceWithIdentity(newMemory);
+        insertionSet.execute(address->owner);
+
+        ignoredValues.add(memory);
+        ignoredValues.add(address);
+    }
+
+    auto detectPostIndex = [&] (const HashMap<MemoryValue*, Value*>& candidates) {
         for (auto pair : candidates) {
             MemoryValue* memory = pair.key;
             Value* address = pair.value;
@@ -151,36 +199,30 @@
             if (ignoredValues.contains(memory) || ignoredValues.contains(address) || !controlEquivalent(memory, address))
                 continue;
 
-            if (isPreIndexCandidates) {
-                // address = Add(base, offset)       address = Add(base, offset)
-                // ***                          -->  newMemory = Load(base, offset)
-                // ***                               ***
-                // memory = Load(base, offset)       memory = Identity(newMemory)
-                unsigned insertionIndex = valueIndexInBasicBlock(address) + 1;
-                MemoryValue* newMemory = insertionSet.insert<MemoryValue>(insertionIndex, Load, memory->type(), address->origin(), memory->lastChild());
-                newMemory->setOffset(memory->offset());
-                memory->replaceWithIdentity(newMemory);
-                insertionSet.execute(address->owner);
-            } else {
-                // ***                               newOffset = Constant
-                // ***                               newAddress = Add(base, newOffset)
-                // memory = Load(base, 0)            memory = Load(base, 0)
-                // ***                          -->  ***
-                // address = Add(base, offset)       address = Identity(newAddress)
-                unsigned insertionIndex = valueIndexInBasicBlock(memory);
-                Value* newOffset = insertionSet.insert<Const64Value>(insertionIndex, memory->origin(), address->child(1)->asInt());
-                Value* newAddress = insertionSet.insert<Value>(insertionIndex, Add, memory->origin(), address->child(0), newOffset);
-                address->replaceWithIdentity(newAddress);
-                insertionSet.execute(memory->owner);
-            }
+            unsigned insertionIndex = valueIndexInBasicBlock(memory);
+            Value* newOffset = insertionSet.insert<Const64Value>(insertionIndex, memory->origin(), address->child(1)->asInt());
+            Value* newAddress = insertionSet.insert<Value>(insertionIndex, Add, memory->origin(), address->child(0), newOffset);
+            address->replaceWithIdentity(newAddress);
+            insertionSet.execute(memory->owner);
 
+            ignoredValues.add(memory);
             ignoredValues.add(address);
-            ignoredValues.add(memory);
         }
     };
 
-    detect(preIndexCandidates, true);
-    detect(postIndexCandidates, false);
+    // ...                                  newOffset = Constant
+    // ...                                  newAddress = Add(base, newOffset)
+    // memory = Load(base, 0)               memory = Load(base, 0)
+    // ...                            -->   ...
+    // address = Add(base, offset)          address = Identity(newAddress)
+    detectPostIndex(postIndexLoadCandidates);
+
+    // ...                                  newOffset = Constant
+    // ...                                  newAddress = Add(base, newOffset)
+    // memory = Store(value, base, 0)       memory = Store(value, base, 0)
+    // ...                            -->   ...
+    // address = Add(base, offset)          address = Identity(newAddress)
+    detectPostIndex(postIndexStoreCandidates);
     return true;
 }
 

Modified: trunk/Source/JavaScriptCore/b3/B3LowerToAir.cpp (281061 => 281062)


--- trunk/Source/_javascript_Core/b3/B3LowerToAir.cpp	2021-08-15 15:44:27 UTC (rev 281061)
+++ trunk/Source/_javascript_Core/b3/B3LowerToAir.cpp	2021-08-15 17:41:03 UTC (rev 281062)
@@ -3436,6 +3436,52 @@
         }
 
         case Store: {
+            // Pre-Index Canonical Form:
+            //     address = Add(base, Offset)              --->    Move %base %address
+            //     memory = Store(value, base, Offset)              MoveWithIncrement %value (%address, prefix(offset))
+            // Post-Index Canonical Form:
+            //     address = Add(base, Offset)              --->    Move %base %address
+            //     memory = Store(value, base, 0)                   MoveWithIncrement %value (%address, postfix(offset))
+            auto tryAppendIncrementAddress = [&] () -> bool {
+                MemoryValue* memory = m_value->as<MemoryValue>();
+                Value* value = memory->child(0);
+                Air::Opcode opcode = tryOpcodeForType(MoveWithIncrement32, MoveWithIncrement64, value->type());
+                if (!isValidForm(opcode, Arg::PreIndex, Arg::Tmp) || !m_index)
+                    return false;
+                Value* address = m_block->at(m_index - 1);
+                if (address->opcode() != Add || address->type() != Int64)
+                    return false;
+
+                Value* base1 = address->child(0);
+                Value* base2 = memory->child(1);
+                if (base1 != base2 || !address->child(1)->hasIntPtr())
+                    return false;
+                intptr_t offset = address->child(1)->asIntPtr();
+                Value::OffsetType smallOffset = static_cast<Value::OffsetType>(offset);
+                if (smallOffset != offset || !Arg::isValidIncrementIndexForm(smallOffset))
+                    return false;
+                if (m_locked.contains(address) || m_locked.contains(base1) || m_locked.contains(value))
+                    return false;
+
+                Arg incrementArg = Arg();
+                if (memory->offset()) {
+                    if (smallOffset == memory->offset())
+                        incrementArg = Arg::preIndex(tmp(address), smallOffset);
+                } else
+                    incrementArg = Arg::postIndex(tmp(address), smallOffset);
+
+                if (incrementArg) {
+                    append(relaxedMoveForType(address->type()), tmp(base1), tmp(address));
+                    append(opcode, tmp(value), incrementArg);
+                    m_locked.add(address);
+                    return true;
+                }
+                return false;
+            };
+
+            if (tryAppendIncrementAddress())
+                return;
+
             Value* valueToStore = m_value->child(0);
             if (canBeInternal(valueToStore)) {
                 bool matched = false;

Modified: trunk/Source/JavaScriptCore/b3/testb3.h (281061 => 281062)


--- trunk/Source/_javascript_Core/b3/testb3.h	2021-08-15 15:44:27 UTC (rev 281061)
+++ trunk/Source/_javascript_Core/b3/testb3.h	2021-08-15 17:41:03 UTC (rev 281062)
@@ -1168,4 +1168,9 @@
 void testLoadPostIndex32();
 void testLoadPostIndex64();
 
+void testStorePreIndex32();
+void testStorePreIndex64();
+void testStorePostIndex32();
+void testStorePostIndex64();
+
 #endif // ENABLE(B3_JIT)

Modified: trunk/Source/JavaScriptCore/b3/testb3_3.cpp (281061 => 281062)


--- trunk/Source/_javascript_Core/b3/testb3_3.cpp	2021-08-15 15:44:27 UTC (rev 281061)
+++ trunk/Source/_javascript_Core/b3/testb3_3.cpp	2021-08-15 17:41:03 UTC (rev 281062)
@@ -321,7 +321,114 @@
     CHECK_EQ(invoke<int64_t>(*code, bitwise_cast<intptr_t>(ptr)), test());
 }
 
+void testStorePreIndex32()
+{
+    if (Options::defaultB3OptLevel() < 2)
+        return;
 
+    int32_t nums[] = { 1, 2, 3 };
+    int32_t* ptr = &nums[1];
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* value = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    Value* offset = root->appendNew<Const64Value>(proc, Origin(), 4);
+    Value* preIncrement = root->appendNew<Value>(proc, Add, Origin(), address, offset);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), value, preIncrement);
+    root->appendNewControlValue(proc, Return, Origin(), preIncrement);
+
+    auto code = compileProc(proc);
+    if (isARM64())
+        checkUsesInstruction(*code, "#4]!");
+    intptr_t res = invoke<intptr_t>(*code, bitwise_cast<intptr_t>(ptr), 4);
+    ptr = bitwise_cast<int32_t*>(res);
+    CHECK_EQ(nums[2], *ptr);
+}
+
+void testStorePreIndex64()
+{
+    if (Options::defaultB3OptLevel() < 2)
+        return;
+
+    int64_t nums[] = { 1, 2, 3 };
+    int64_t* ptr = &nums[1];
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    Value* offset = root->appendNew<Const64Value>(proc, Origin(), 8);
+    Value* preIncrement = root->appendNew<Value>(proc, Add, Origin(), address, offset);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), value, preIncrement);
+    root->appendNewControlValue(proc, Return, Origin(), preIncrement);
+
+    auto code = compileProc(proc);
+    intptr_t res = invoke<intptr_t>(*code, bitwise_cast<intptr_t>(ptr), 4);
+    ptr = bitwise_cast<int64_t*>(res);
+    CHECK_EQ(nums[2], *ptr);
+}
+
+void testStorePostIndex32()
+{
+    if (Options::defaultB3OptLevel() < 2)
+        return;
+
+    int32_t nums[] = { 1, 2, 3 };
+    int32_t* ptr = &nums[1];
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* value = root->appendNew<Value>(
+        proc, Trunc, Origin(),
+        root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1));
+    root->appendNew<MemoryValue>(proc, Store, Origin(), value, address);
+    Value* offset = root->appendNew<Const64Value>(proc, Origin(), 4);
+    Value* preIncrement = root->appendNew<Value>(proc, Add, Origin(), address, offset);
+    root->appendNewControlValue(proc, Return, Origin(), preIncrement);
+
+    auto code = compileProc(proc);
+    if (isARM64())
+        checkUsesInstruction(*code, "], #4");
+    intptr_t res = invoke<intptr_t>(*code, bitwise_cast<intptr_t>(ptr), 4);
+    ptr = bitwise_cast<int32_t*>(res);
+    CHECK_EQ(nums[1], 4);
+    CHECK_EQ(nums[2], *ptr);
+}
+
+void testStorePostIndex64()
+{
+    if (Options::defaultB3OptLevel() < 2)
+        return;
+
+    int64_t nums[] = { 1, 2, 3 };
+    int64_t* ptr = &nums[1];
+
+    Procedure proc;
+    BasicBlock* root = proc.addBlock();
+
+    Value* address = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR0);
+    Value* value = root->appendNew<ArgumentRegValue>(proc, Origin(), GPRInfo::argumentGPR1);
+    root->appendNew<MemoryValue>(proc, Store, Origin(), value, address);
+    Value* offset = root->appendNew<Const64Value>(proc, Origin(), 8);
+    Value* preIncrement = root->appendNew<Value>(proc, Add, Origin(), address, offset);
+    root->appendNewControlValue(proc, Return, Origin(), preIncrement);
+
+    auto code = compileProc(proc);
+    if (isARM64())
+        checkUsesInstruction(*code, "], #8");
+    intptr_t res = invoke<intptr_t>(*code, bitwise_cast<intptr_t>(ptr), 4);
+    ptr = bitwise_cast<int64_t*>(res);
+    CHECK_EQ(nums[1], 4);
+    CHECK_EQ(nums[2], *ptr);
+}
+
 void testInsertSignedBitfieldInZero32()
 {
     if (JSC::Options::defaultB3OptLevel() < 2)
@@ -3994,6 +4101,11 @@
     RUN(testLoadPreIndex64());
     RUN(testLoadPostIndex32());
     RUN(testLoadPostIndex64());
+
+    RUN(testStorePreIndex32());
+    RUN(testStorePreIndex64());
+    RUN(testStorePostIndex32());
+    RUN(testStorePostIndex64());
 }
 
 #endif // ENABLE(B3_JIT)
_______________________________________________
webkit-changes mailing list
webkit-changes@lists.webkit.org
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to