Log Message
[JSC] ARM64: Support the immediate format used for bit operations in Air https://bugs.webkit.org/show_bug.cgi?id=154327
Patch by Benjamin Poulain <[email protected]> on 2016-02-17 Reviewed by Filip Pizlo. ARM64 supports a pretty rich form of immediates for bit operations. There are two formats used to encode repeating patterns and common input in a dense form. In this patch, I add 2 new types of Arg: BitImm32 and BitImm64. Those represent the valid immediate forms for bit operations. On x86, any 32-bit value is valid. On ARM64, all the encoding forms are tried and the immediate is used when possible. The arg type Imm64 is renamed to BigImm to better represent what it is: an immediate that does not fit into Imm. * assembler/ARM64Assembler.h: (JSC::LogicalImmediate::create32): Deleted. (JSC::LogicalImmediate::create64): Deleted. (JSC::LogicalImmediate::value): Deleted. (JSC::LogicalImmediate::isValid): Deleted. (JSC::LogicalImmediate::is64bit): Deleted. (JSC::LogicalImmediate::LogicalImmediate): Deleted. (JSC::LogicalImmediate::mask): Deleted. (JSC::LogicalImmediate::partialHSB): Deleted. (JSC::LogicalImmediate::highestSetBit): Deleted. (JSC::LogicalImmediate::findBitRange): Deleted. (JSC::LogicalImmediate::encodeLogicalImmediate): Deleted. 
* assembler/AssemblerCommon.h: (JSC::ARM64LogicalImmediate::create32): (JSC::ARM64LogicalImmediate::create64): (JSC::ARM64LogicalImmediate::value): (JSC::ARM64LogicalImmediate::isValid): (JSC::ARM64LogicalImmediate::is64bit): (JSC::ARM64LogicalImmediate::ARM64LogicalImmediate): (JSC::ARM64LogicalImmediate::mask): (JSC::ARM64LogicalImmediate::partialHSB): (JSC::ARM64LogicalImmediate::highestSetBit): (JSC::ARM64LogicalImmediate::findBitRange): (JSC::ARM64LogicalImmediate::encodeLogicalImmediate): * assembler/MacroAssemblerARM64.h: (JSC::MacroAssemblerARM64::and64): (JSC::MacroAssemblerARM64::or64): (JSC::MacroAssemblerARM64::xor64): * b3/B3LowerToAir.cpp: (JSC::B3::Air::LowerToAir::bitImm): (JSC::B3::Air::LowerToAir::bitImm64): (JSC::B3::Air::LowerToAir::appendBinOp): * b3/air/AirArg.cpp: (JSC::B3::Air::Arg::dump): (WTF::printInternal): * b3/air/AirArg.h: (JSC::B3::Air::Arg::bitImm): (JSC::B3::Air::Arg::bitImm64): (JSC::B3::Air::Arg::isBitImm): (JSC::B3::Air::Arg::isBitImm64): (JSC::B3::Air::Arg::isSomeImm): (JSC::B3::Air::Arg::value): (JSC::B3::Air::Arg::isGP): (JSC::B3::Air::Arg::isFP): (JSC::B3::Air::Arg::hasType): (JSC::B3::Air::Arg::isValidBitImmForm): (JSC::B3::Air::Arg::isValidBitImm64Form): (JSC::B3::Air::Arg::isValidForm): (JSC::B3::Air::Arg::asTrustedImm32): (JSC::B3::Air::Arg::asTrustedImm64): * b3/air/AirOpcode.opcodes: * b3/air/opcode_generator.rb:
Modified Paths
- trunk/Source/JavaScriptCore/ChangeLog
- trunk/Source/JavaScriptCore/assembler/ARM64Assembler.h
- trunk/Source/JavaScriptCore/assembler/AssemblerCommon.h
- trunk/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
- trunk/Source/JavaScriptCore/b3/B3LowerToAir.cpp
- trunk/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp
- trunk/Source/JavaScriptCore/b3/air/AirArg.cpp
- trunk/Source/JavaScriptCore/b3/air/AirArg.h
- trunk/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp
- trunk/Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp
- trunk/Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp
- trunk/Source/JavaScriptCore/b3/air/AirOpcode.opcodes
- trunk/Source/JavaScriptCore/b3/air/opcode_generator.rb
- trunk/Source/JavaScriptCore/b3/air/testair.cpp
Diff
Modified: trunk/Source/JavaScriptCore/ChangeLog (196735 => 196736)
--- trunk/Source/JavaScriptCore/ChangeLog 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/ChangeLog 2016-02-18 01:38:40 UTC (rev 196736)
@@ -1,3 +1,75 @@
+2016-02-17 Benjamin Poulain <[email protected]>
+
+ [JSC] ARM64: Support the immediate format used for bit operations in Air
+ https://bugs.webkit.org/show_bug.cgi?id=154327
+
+ Reviewed by Filip Pizlo.
+
+ ARM64 supports a pretty rich form of immediates for bit operation.
+ There are two formats used to encode repeating patterns and common
+ input in a dense form.
+
+ In this patch, I add 2 new type of Arg: BitImm32 and BitImm64.
+ Those represents the valid immediate forms for bit operation.
+ On x86, any 32bits value is valid. On ARM64, all the encoding
+ form are tried and the immediate is used when possible.
+
+ The arg type Imm64 is renamed to BigImm to better represent what
+ it is: an immediate that does not fit into Imm.
+
+ * assembler/ARM64Assembler.h:
+ (JSC::LogicalImmediate::create32): Deleted.
+ (JSC::LogicalImmediate::create64): Deleted.
+ (JSC::LogicalImmediate::value): Deleted.
+ (JSC::LogicalImmediate::isValid): Deleted.
+ (JSC::LogicalImmediate::is64bit): Deleted.
+ (JSC::LogicalImmediate::LogicalImmediate): Deleted.
+ (JSC::LogicalImmediate::mask): Deleted.
+ (JSC::LogicalImmediate::partialHSB): Deleted.
+ (JSC::LogicalImmediate::highestSetBit): Deleted.
+ (JSC::LogicalImmediate::findBitRange): Deleted.
+ (JSC::LogicalImmediate::encodeLogicalImmediate): Deleted.
+ * assembler/AssemblerCommon.h:
+ (JSC::ARM64LogicalImmediate::create32):
+ (JSC::ARM64LogicalImmediate::create64):
+ (JSC::ARM64LogicalImmediate::value):
+ (JSC::ARM64LogicalImmediate::isValid):
+ (JSC::ARM64LogicalImmediate::is64bit):
+ (JSC::ARM64LogicalImmediate::ARM64LogicalImmediate):
+ (JSC::ARM64LogicalImmediate::mask):
+ (JSC::ARM64LogicalImmediate::partialHSB):
+ (JSC::ARM64LogicalImmediate::highestSetBit):
+ (JSC::ARM64LogicalImmediate::findBitRange):
+ (JSC::ARM64LogicalImmediate::encodeLogicalImmediate):
+ * assembler/MacroAssemblerARM64.h:
+ (JSC::MacroAssemblerARM64::and64):
+ (JSC::MacroAssemblerARM64::or64):
+ (JSC::MacroAssemblerARM64::xor64):
+ * b3/B3LowerToAir.cpp:
+ (JSC::B3::Air::LowerToAir::bitImm):
+ (JSC::B3::Air::LowerToAir::bitImm64):
+ (JSC::B3::Air::LowerToAir::appendBinOp):
+ * b3/air/AirArg.cpp:
+ (JSC::B3::Air::Arg::dump):
+ (WTF::printInternal):
+ * b3/air/AirArg.h:
+ (JSC::B3::Air::Arg::bitImm):
+ (JSC::B3::Air::Arg::bitImm64):
+ (JSC::B3::Air::Arg::isBitImm):
+ (JSC::B3::Air::Arg::isBitImm64):
+ (JSC::B3::Air::Arg::isSomeImm):
+ (JSC::B3::Air::Arg::value):
+ (JSC::B3::Air::Arg::isGP):
+ (JSC::B3::Air::Arg::isFP):
+ (JSC::B3::Air::Arg::hasType):
+ (JSC::B3::Air::Arg::isValidBitImmForm):
+ (JSC::B3::Air::Arg::isValidBitImm64Form):
+ (JSC::B3::Air::Arg::isValidForm):
+ (JSC::B3::Air::Arg::asTrustedImm32):
+ (JSC::B3::Air::Arg::asTrustedImm64):
+ * b3/air/AirOpcode.opcodes:
+ * b3/air/opcode_generator.rb:
+
2016-02-17 Keith Miller <[email protected]>
Spread operator should be allowed when not the first argument of parameter list
Modified: trunk/Source/JavaScriptCore/assembler/ARM64Assembler.h (196735 => 196736)
--- trunk/Source/JavaScriptCore/assembler/ARM64Assembler.h 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/assembler/ARM64Assembler.h 2016-02-18 01:38:40 UTC (rev 196736)
@@ -147,228 +147,8 @@
int m_value;
};
-class LogicalImmediate {
-public:
- static LogicalImmediate create32(uint32_t value)
- {
- // Check for 0, -1 - these cannot be encoded.
- if (!value || !~value)
- return InvalidLogicalImmediate;
+typedef ARM64LogicalImmediate LogicalImmediate;
- // First look for a 32-bit pattern, then for repeating 16-bit
- // patterns, 8-bit, 4-bit, and finally 2-bit.
-
- unsigned hsb, lsb;
- bool inverted;
- if (findBitRange<32>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<32>(hsb, lsb, inverted);
-
- if ((value & 0xffff) != (value >> 16))
- return InvalidLogicalImmediate;
- value &= 0xffff;
-
- if (findBitRange<16>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<16>(hsb, lsb, inverted);
-
- if ((value & 0xff) != (value >> 8))
- return InvalidLogicalImmediate;
- value &= 0xff;
-
- if (findBitRange<8>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<8>(hsb, lsb, inverted);
-
- if ((value & 0xf) != (value >> 4))
- return InvalidLogicalImmediate;
- value &= 0xf;
-
- if (findBitRange<4>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<4>(hsb, lsb, inverted);
-
- if ((value & 0x3) != (value >> 2))
- return InvalidLogicalImmediate;
- value &= 0x3;
-
- if (findBitRange<2>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<2>(hsb, lsb, inverted);
-
- return InvalidLogicalImmediate;
- }
-
- static LogicalImmediate create64(uint64_t value)
- {
- // Check for 0, -1 - these cannot be encoded.
- if (!value || !~value)
- return InvalidLogicalImmediate;
-
- // Look for a contiguous bit range.
- unsigned hsb, lsb;
- bool inverted;
- if (findBitRange<64>(value, hsb, lsb, inverted))
- return encodeLogicalImmediate<64>(hsb, lsb, inverted);
-
- // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
- if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
- return create32(static_cast<uint32_t>(value));
- return InvalidLogicalImmediate;
- }
-
- int value() const
- {
- ASSERT(isValid());
- return m_value;
- }
-
- bool isValid() const
- {
- return m_value != InvalidLogicalImmediate;
- }
-
- bool is64bit() const
- {
- return m_value & (1 << 12);
- }
-
-private:
- LogicalImmediate(int value)
- : m_value(value)
- {
- }
-
- // Generate a mask with bits in the range hsb..0 set, for example:
- // hsb:63 = 0xffffffffffffffff
- // hsb:42 = 0x000007ffffffffff
- // hsb: 0 = 0x0000000000000001
- static uint64_t mask(unsigned hsb)
- {
- ASSERT(hsb < 64);
- return 0xffffffffffffffffull >> (63 - hsb);
- }
-
- template<unsigned N>
- static void partialHSB(uint64_t& value, unsigned&result)
- {
- if (value & (0xffffffffffffffffull << N)) {
- result += N;
- value >>= N;
- }
- }
-
- // Find the bit number of the highest bit set in a non-zero value, for example:
- // 0x8080808080808080 = hsb:63
- // 0x0000000000000001 = hsb: 0
- // 0x000007ffffe00000 = hsb:42
- static unsigned highestSetBit(uint64_t value)
- {
- ASSERT(value);
- unsigned hsb = 0;
- partialHSB<32>(value, hsb);
- partialHSB<16>(value, hsb);
- partialHSB<8>(value, hsb);
- partialHSB<4>(value, hsb);
- partialHSB<2>(value, hsb);
- partialHSB<1>(value, hsb);
- return hsb;
- }
-
- // This function takes a value and a bit width, where value obeys the following constraints:
- // * bits outside of the width of the value must be zero.
- // * bits within the width of value must neither be all clear or all set.
- // The input is inspected to detect values that consist of either two or three contiguous
- // ranges of bits. The output range hsb..lsb will describe the second range of the value.
- // if the range is set, inverted will be false, and if the range is clear, inverted will
- // be true. For example (with width 8):
- // 00001111 = hsb:3, lsb:0, inverted:false
- // 11110000 = hsb:3, lsb:0, inverted:true
- // 00111100 = hsb:5, lsb:2, inverted:false
- // 11000011 = hsb:5, lsb:2, inverted:true
- template<unsigned width>
- static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
- {
- ASSERT(value & mask(width - 1));
- ASSERT(value != mask(width - 1));
- ASSERT(!(value & ~mask(width - 1)));
-
- // Detect cases where the top bit is set; if so, flip all the bits & set invert.
- // This halves the number of patterns we need to look for.
- const uint64_t msb = 1ull << (width - 1);
- if ((inverted = (value & msb)))
- value ^= mask(width - 1);
-
- // Find the highest set bit in value, generate a corresponding mask & flip all
- // bits under it.
- hsb = highestSetBit(value);
- value ^= mask(hsb);
- if (!value) {
- // If this cleared the value, then the range hsb..0 was all set.
- lsb = 0;
- return true;
- }
-
- // Try making one more mask, and flipping the bits!
- lsb = highestSetBit(value);
- value ^= mask(lsb);
- if (!value) {
- // Success - but lsb actually points to the hsb of a third range - add one
- // to get to the lsb of the mid range.
- ++lsb;
- return true;
- }
-
- return false;
- }
-
- // Encodes the set of immN:immr:imms fields found in a logical immediate.
- template<unsigned width>
- static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
- {
- // Check width is a power of 2!
- ASSERT(!(width & (width -1)));
- ASSERT(width <= 64 && width >= 2);
- ASSERT(hsb >= lsb);
- ASSERT(hsb < width);
-
- int immN = 0;
- int imms = 0;
- int immr = 0;
-
- // For 64-bit values this is easy - just set immN to true, and imms just
- // contains the bit number of the highest set bit of the set range. For
- // values with narrower widths, these are encoded by a leading set of
- // one bits, followed by a zero bit, followed by the remaining set of bits
- // being the high bit of the range. For a 32-bit immediate there are no
- // leading one bits, just a zero followed by a five bit number. For a
- // 16-bit immediate there is one one bit, a zero bit, and then a four bit
- // bit-position, etc.
- if (width == 64)
- immN = 1;
- else
- imms = 63 & ~(width + width - 1);
-
- if (inverted) {
- // if width is 64 & hsb is 62, then we have a value something like:
- // 0x80000000ffffffff (in this case with lsb 32).
- // The ror should be by 1, imms (effectively set width minus 1) is
- // 32. Set width is full width minus cleared width.
- immr = (width - 1) - hsb;
- imms |= (width - ((hsb - lsb) + 1)) - 1;
- } else {
- // if width is 64 & hsb is 62, then we have a value something like:
- // 0x7fffffff00000000 (in this case with lsb 32).
- // The value is effectively rol'ed by lsb, which is equivalent to
- // a ror by width - lsb (or 0, in the case where lsb is 0). imms
- // is hsb - lsb.
- immr = (width - lsb) & (width - 1);
- imms |= hsb - lsb;
- }
-
- return immN << 12 | immr << 6 | imms;
- }
-
- static const int InvalidLogicalImmediate = -1;
-
- int m_value;
-};
-
inline uint16_t getHalfword(uint64_t value, int which)
{
return value >> (which << 4);
Modified: trunk/Source/JavaScriptCore/assembler/AssemblerCommon.h (196735 => 196736)
--- trunk/Source/JavaScriptCore/assembler/AssemblerCommon.h 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/assembler/AssemblerCommon.h 2016-02-18 01:38:40 UTC (rev 196736)
@@ -66,6 +66,229 @@
return isInt9(value);
}
+class ARM64LogicalImmediate {
+public:
+ static ARM64LogicalImmediate create32(uint32_t value)
+ {
+ // Check for 0, -1 - these cannot be encoded.
+ if (!value || !~value)
+ return InvalidLogicalImmediate;
+
+ // First look for a 32-bit pattern, then for repeating 16-bit
+ // patterns, 8-bit, 4-bit, and finally 2-bit.
+
+ unsigned hsb, lsb;
+ bool inverted;
+ if (findBitRange<32>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<32>(hsb, lsb, inverted);
+
+ if ((value & 0xffff) != (value >> 16))
+ return InvalidLogicalImmediate;
+ value &= 0xffff;
+
+ if (findBitRange<16>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<16>(hsb, lsb, inverted);
+
+ if ((value & 0xff) != (value >> 8))
+ return InvalidLogicalImmediate;
+ value &= 0xff;
+
+ if (findBitRange<8>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<8>(hsb, lsb, inverted);
+
+ if ((value & 0xf) != (value >> 4))
+ return InvalidLogicalImmediate;
+ value &= 0xf;
+
+ if (findBitRange<4>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<4>(hsb, lsb, inverted);
+
+ if ((value & 0x3) != (value >> 2))
+ return InvalidLogicalImmediate;
+ value &= 0x3;
+
+ if (findBitRange<2>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<2>(hsb, lsb, inverted);
+
+ return InvalidLogicalImmediate;
+ }
+
+ static ARM64LogicalImmediate create64(uint64_t value)
+ {
+ // Check for 0, -1 - these cannot be encoded.
+ if (!value || !~value)
+ return InvalidLogicalImmediate;
+
+ // Look for a contiguous bit range.
+ unsigned hsb, lsb;
+ bool inverted;
+ if (findBitRange<64>(value, hsb, lsb, inverted))
+ return encodeLogicalImmediate<64>(hsb, lsb, inverted);
+
+ // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern.
+ if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32))
+ return create32(static_cast<uint32_t>(value));
+ return InvalidLogicalImmediate;
+ }
+
+ int value() const
+ {
+ ASSERT(isValid());
+ return m_value;
+ }
+
+ bool isValid() const
+ {
+ return m_value != InvalidLogicalImmediate;
+ }
+
+ bool is64bit() const
+ {
+ return m_value & (1 << 12);
+ }
+
+private:
+ ARM64LogicalImmediate(int value)
+ : m_value(value)
+ {
+ }
+
+ // Generate a mask with bits in the range hsb..0 set, for example:
+ // hsb:63 = 0xffffffffffffffff
+ // hsb:42 = 0x000007ffffffffff
+ // hsb: 0 = 0x0000000000000001
+ static uint64_t mask(unsigned hsb)
+ {
+ ASSERT(hsb < 64);
+ return 0xffffffffffffffffull >> (63 - hsb);
+ }
+
+ template<unsigned N>
+ static void partialHSB(uint64_t& value, unsigned&result)
+ {
+ if (value & (0xffffffffffffffffull << N)) {
+ result += N;
+ value >>= N;
+ }
+ }
+
+ // Find the bit number of the highest bit set in a non-zero value, for example:
+ // 0x8080808080808080 = hsb:63
+ // 0x0000000000000001 = hsb: 0
+ // 0x000007ffffe00000 = hsb:42
+ static unsigned highestSetBit(uint64_t value)
+ {
+ ASSERT(value);
+ unsigned hsb = 0;
+ partialHSB<32>(value, hsb);
+ partialHSB<16>(value, hsb);
+ partialHSB<8>(value, hsb);
+ partialHSB<4>(value, hsb);
+ partialHSB<2>(value, hsb);
+ partialHSB<1>(value, hsb);
+ return hsb;
+ }
+
+ // This function takes a value and a bit width, where value obeys the following constraints:
+ // * bits outside of the width of the value must be zero.
+ // * bits within the width of value must neither be all clear or all set.
+ // The input is inspected to detect values that consist of either two or three contiguous
+ // ranges of bits. The output range hsb..lsb will describe the second range of the value.
+ // if the range is set, inverted will be false, and if the range is clear, inverted will
+ // be true. For example (with width 8):
+ // 00001111 = hsb:3, lsb:0, inverted:false
+ // 11110000 = hsb:3, lsb:0, inverted:true
+ // 00111100 = hsb:5, lsb:2, inverted:false
+ // 11000011 = hsb:5, lsb:2, inverted:true
+ template<unsigned width>
+ static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted)
+ {
+ ASSERT(value & mask(width - 1));
+ ASSERT(value != mask(width - 1));
+ ASSERT(!(value & ~mask(width - 1)));
+
+ // Detect cases where the top bit is set; if so, flip all the bits & set invert.
+ // This halves the number of patterns we need to look for.
+ const uint64_t msb = 1ull << (width - 1);
+ if ((inverted = (value & msb)))
+ value ^= mask(width - 1);
+
+ // Find the highest set bit in value, generate a corresponding mask & flip all
+ // bits under it.
+ hsb = highestSetBit(value);
+ value ^= mask(hsb);
+ if (!value) {
+ // If this cleared the value, then the range hsb..0 was all set.
+ lsb = 0;
+ return true;
+ }
+
+ // Try making one more mask, and flipping the bits!
+ lsb = highestSetBit(value);
+ value ^= mask(lsb);
+ if (!value) {
+ // Success - but lsb actually points to the hsb of a third range - add one
+ // to get to the lsb of the mid range.
+ ++lsb;
+ return true;
+ }
+
+ return false;
+ }
+
+ // Encodes the set of immN:immr:imms fields found in a logical immediate.
+ template<unsigned width>
+ static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted)
+ {
+ // Check width is a power of 2!
+ ASSERT(!(width & (width -1)));
+ ASSERT(width <= 64 && width >= 2);
+ ASSERT(hsb >= lsb);
+ ASSERT(hsb < width);
+
+ int immN = 0;
+ int imms = 0;
+ int immr = 0;
+
+ // For 64-bit values this is easy - just set immN to true, and imms just
+ // contains the bit number of the highest set bit of the set range. For
+ // values with narrower widths, these are encoded by a leading set of
+ // one bits, followed by a zero bit, followed by the remaining set of bits
+ // being the high bit of the range. For a 32-bit immediate there are no
+ // leading one bits, just a zero followed by a five bit number. For a
+ // 16-bit immediate there is one one bit, a zero bit, and then a four bit
+ // bit-position, etc.
+ if (width == 64)
+ immN = 1;
+ else
+ imms = 63 & ~(width + width - 1);
+
+ if (inverted) {
+ // if width is 64 & hsb is 62, then we have a value something like:
+ // 0x80000000ffffffff (in this case with lsb 32).
+ // The ror should be by 1, imms (effectively set width minus 1) is
+ // 32. Set width is full width minus cleared width.
+ immr = (width - 1) - hsb;
+ imms |= (width - ((hsb - lsb) + 1)) - 1;
+ } else {
+ // if width is 64 & hsb is 62, then we have a value something like:
+ // 0x7fffffff00000000 (in this case with lsb 32).
+ // The value is effectively rol'ed by lsb, which is equivalent to
+ // a ror by width - lsb (or 0, in the case where lsb is 0). imms
+ // is hsb - lsb.
+ immr = (width - lsb) & (width - 1);
+ imms |= hsb - lsb;
+ }
+
+ return immN << 12 | immr << 6 | imms;
+ }
+
+ static const int InvalidLogicalImmediate = -1;
+
+ int m_value;
+};
+
+
} // namespace JSC.
#endif // AssemblerCommon_h
Modified: trunk/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h (196735 => 196736)
--- trunk/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h 2016-02-18 01:38:40 UTC (rev 196736)
@@ -367,6 +367,19 @@
m_assembler.and_<64>(dest, src1, src2);
}
+ void and64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.and_<64>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.and_<64>(dest, src, dataTempRegister);
+ }
+
void and64(RegisterID src, RegisterID dest)
{
m_assembler.and_<64>(dest, dest, src);
@@ -578,7 +591,20 @@
signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
m_assembler.orr<64>(dest, src, dataTempRegister);
}
-
+
+ void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.orr<64>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.orr<64>(dest, src, dataTempRegister);
+ }
+
void or64(TrustedImm64 imm, RegisterID dest)
{
LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
@@ -831,6 +857,23 @@
xor64(imm, dest, dest);
}
+ void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvn<64>(dest, src);
+ else {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.eor<64>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.eor<64>(dest, src, dataTempRegister);
+ }
+ }
+
void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
if (imm.m_value == -1)
Modified: trunk/Source/JavaScriptCore/b3/B3LowerToAir.cpp (196735 => 196736)
--- trunk/Source/JavaScriptCore/b3/B3LowerToAir.cpp 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/b3/B3LowerToAir.cpp 2016-02-18 01:38:40 UTC (rev 196736)
@@ -498,6 +498,26 @@
return Arg();
}
+ Arg bitImm(Value* value)
+ {
+ if (value->hasInt()) {
+ int64_t intValue = value->asInt();
+ if (Arg::isValidBitImmForm(intValue))
+ return Arg::bitImm(intValue);
+ }
+ return Arg();
+ }
+
+ Arg bitImm64(Value* value)
+ {
+ if (value->hasInt()) {
+ int64_t intValue = value->asInt();
+ if (Arg::isValidBitImm64Form(intValue))
+ return Arg::bitImm64(intValue);
+ }
+ return Arg();
+ }
+
Arg immOrTmp(Value* value)
{
if (Arg result = imm(value))
@@ -647,6 +667,36 @@
}
}
+ if (isValidForm(opcode, Arg::BitImm, Arg::Tmp, Arg::Tmp)) {
+ if (commutativity == Commutative) {
+ if (Arg rightArg = bitImm(right)) {
+ append(opcode, rightArg, tmp(left), result);
+ return;
+ }
+ } else {
+ // A non-commutative operation could have an immediate in left.
+ if (Arg leftArg = bitImm(left)) {
+ append(opcode, leftArg, tmp(right), result);
+ return;
+ }
+ }
+ }
+
+ if (isValidForm(opcode, Arg::BitImm64, Arg::Tmp, Arg::Tmp)) {
+ if (commutativity == Commutative) {
+ if (Arg rightArg = bitImm64(right)) {
+ append(opcode, rightArg, tmp(left), result);
+ return;
+ }
+ } else {
+ // A non-commutative operation could have an immediate in left.
+ if (Arg leftArg = bitImm64(left)) {
+ append(opcode, leftArg, tmp(right), result);
+ return;
+ }
+ }
+ }
+
if (imm(right) && isValidForm(opcode, Arg::Tmp, Arg::Imm, Arg::Tmp)) {
append(opcode, tmp(left), imm(right), result);
return;
@@ -928,10 +978,10 @@
if (imm(value.value()))
arg = imm(value.value());
else if (value.value()->hasInt64())
- arg = Arg::imm64(value.value()->asInt64());
+ arg = Arg::bigImm(value.value()->asInt64());
else if (value.value()->hasDouble() && canBeInternal(value.value())) {
commitInternal(value.value());
- arg = Arg::imm64(bitwise_cast<int64_t>(value.value()->asDouble()));
+ arg = Arg::bigImm(bitwise_cast<int64_t>(value.value()->asDouble()));
} else
arg = tmp(value.value());
break;
@@ -1934,7 +1984,7 @@
if (imm(m_value))
append(Move, imm(m_value), tmp(m_value));
else
- append(Move, Arg::imm64(m_value->asInt()), tmp(m_value));
+ append(Move, Arg::bigImm(m_value->asInt()), tmp(m_value));
return;
}
Modified: trunk/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp (196735 => 196736)
--- trunk/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/b3/B3StackmapSpecial.cpp 2016-02-18 01:38:40 UTC (rev 196736)
@@ -207,7 +207,7 @@
switch (arg.kind()) {
case Arg::Tmp:
case Arg::Imm:
- case Arg::Imm64:
+ case Arg::BigImm:
break;
default:
if (!arg.isStackMemory())
@@ -255,7 +255,7 @@
return ValueRep::reg(arg.reg());
break;
case Arg::Imm:
- case Arg::Imm64:
+ case Arg::BigImm:
return ValueRep::constant(arg.value());
break;
case Arg::Addr:
Modified: trunk/Source/JavaScriptCore/b3/air/AirArg.cpp (196735 => 196736)
--- trunk/Source/JavaScriptCore/b3/air/AirArg.cpp 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/b3/air/AirArg.cpp 2016-02-18 01:38:40 UTC (rev 196736)
@@ -121,9 +121,15 @@
case Imm:
out.print("$", m_offset);
return;
- case Imm64:
+ case BigImm:
out.printf("$0x%llx", static_cast<long long unsigned>(m_offset));
return;
+ case BitImm:
+ out.print("$", m_offset);
+ return;
+ case BitImm64:
+ out.printf("$0x%llx", static_cast<long long unsigned>(m_offset));
+ return;
case Addr:
if (offset())
out.print(offset());
@@ -185,9 +191,15 @@
case Arg::Imm:
out.print("Imm");
return;
- case Arg::Imm64:
- out.print("Imm64");
+ case Arg::BigImm:
+ out.print("BigImm");
return;
+ case Arg::BitImm:
+ out.print("BitImm");
+ return;
+ case Arg::BitImm64:
+ out.print("BitImm64");
+ return;
case Arg::Addr:
out.print("Addr");
return;
Modified: trunk/Source/JavaScriptCore/b3/air/AirArg.h (196735 => 196736)
--- trunk/Source/JavaScriptCore/b3/air/AirArg.h 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/b3/air/AirArg.h 2016-02-18 01:38:40 UTC (rev 196736)
@@ -60,11 +60,14 @@
Tmp,
// This is an immediate that the instruction will materialize. Imm is the immediate that can be
- // inlined into most instructions, while Imm64 indicates a constant materialization and is
+ // inlined into most instructions, while BigImm indicates a constant materialization and is
// usually only usable with Move. Specials may also admit it, for example for stackmaps used for
// OSR exit and tail calls.
+ // BitImm is an immediate for Bitwise operation (And, Xor, etc).
Imm,
- Imm64,
+ BigImm,
+ BitImm,
+ BitImm64,
// These are the addresses. Instructions may load from (Use), store to (Def), or evaluate
// (UseAddr) addresses.
@@ -475,17 +478,33 @@
return result;
}
- static Arg imm64(int64_t value)
+ static Arg bigImm(int64_t value)
{
Arg result;
- result.m_kind = Imm64;
+ result.m_kind = BigImm;
result.m_offset = value;
return result;
}
+ static Arg bitImm(int64_t value)
+ {
+ Arg result;
+ result.m_kind = BitImm;
+ result.m_offset = value;
+ return result;
+ }
+
+ static Arg bitImm64(int64_t value)
+ {
+ Arg result;
+ result.m_kind = BitImm64;
+ result.m_offset = value;
+ return result;
+ }
+
static Arg immPtr(const void* address)
{
- return imm64(bitwise_cast<intptr_t>(address));
+ return bigImm(bitwise_cast<intptr_t>(address));
}
static Arg addr(Air::Tmp base, int32_t offset = 0)
@@ -652,14 +671,24 @@
return kind() == Imm;
}
- bool isImm64() const
+ bool isBigImm() const
{
- return kind() == Imm64;
+ return kind() == BigImm;
}
+ bool isBitImm() const
+ {
+ return kind() == BitImm;
+ }
+
+ bool isBitImm64() const
+ {
+ return kind() == BitImm64;
+ }
+
bool isSomeImm() const
{
- return isImm() || isImm64();
+ return isImm() || isBigImm() || isBitImm() || isBitImm64();
}
bool isAddr() const
@@ -747,7 +776,7 @@
int64_t value() const
{
- ASSERT(kind() == Imm || kind() == Imm64);
+ ASSERT(isSomeImm());
return m_offset;
}
@@ -767,7 +796,7 @@
void* pointerValue() const
{
- ASSERT(kind() == Imm64);
+ ASSERT(kind() == BigImm);
return bitwise_cast<void*>(static_cast<intptr_t>(m_offset));
}
@@ -837,7 +866,9 @@
{
switch (kind()) {
case Imm:
- case Imm64:
+ case BigImm:
+ case BitImm:
+ case BitImm64:
case Addr:
case Index:
case Stack:
@@ -861,6 +892,8 @@
{
switch (kind()) {
case Imm:
+ case BitImm:
+ case BitImm64:
case RelCond:
case ResCond:
case DoubleCond:
@@ -872,7 +905,7 @@
case Index:
case Stack:
case CallArg:
- case Imm64: // Yes, we allow Imm64 as a double immediate. We use this for implementing stackmaps.
+ case BigImm: // Yes, we allow BigImm as a double immediate. We use this for implementing stackmaps.
return true;
case Tmp:
return isFPTmp();
@@ -884,6 +917,8 @@
{
switch (kind()) {
case Imm:
+ case BitImm:
+ case BitImm64:
case Special:
case Tmp:
return true;
@@ -994,6 +1029,24 @@
return false;
}
+ static bool isValidBitImmForm(int64_t value)
+ {
+ if (isX86())
+ return B3::isRepresentableAs<int32_t>(value);
+ if (isARM64())
+ return ARM64LogicalImmediate::create32(value).isValid();
+ return false;
+ }
+
+ static bool isValidBitImm64Form(int64_t value)
+ {
+ if (isX86())
+ return B3::isRepresentableAs<int32_t>(value);
+ if (isARM64())
+ return ARM64LogicalImmediate::create64(value).isValid();
+ return false;
+ }
+
static bool isValidAddrForm(int32_t offset, Optional<Width> width = Nullopt)
{
if (isX86())
@@ -1042,8 +1095,12 @@
return true;
case Imm:
return isValidImmForm(value());
- case Imm64:
+ case BigImm:
return true;
+ case BitImm:
+ return isValidBitImmForm(value());
+ case BitImm64:
+ return isValidBitImm64Form(value());
case Addr:
case Stack:
case CallArg:
@@ -1119,14 +1176,14 @@
MacroAssembler::TrustedImm32 asTrustedImm32() const
{
- ASSERT(isImm());
+ ASSERT(isImm() || isBitImm());
return MacroAssembler::TrustedImm32(static_cast<int32_t>(m_offset));
}
#if USE(JSVALUE64)
MacroAssembler::TrustedImm64 asTrustedImm64() const
{
- ASSERT(isImm64());
+ ASSERT(isBigImm() || isBitImm64());
return MacroAssembler::TrustedImm64(value());
}
#endif
@@ -1134,7 +1191,7 @@
MacroAssembler::TrustedImmPtr asTrustedImmPtr() const
{
if (is64Bit())
- ASSERT(isImm64());
+ ASSERT(isBigImm());
else
ASSERT(isImm());
return MacroAssembler::TrustedImmPtr(pointerValue());
Modified: trunk/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp (196735 => 196736)
--- trunk/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/b3/air/AirCCallSpecial.cpp 2016-02-18 01:38:40 UTC (rev 196736)
@@ -76,7 +76,7 @@
if (is32Bit())
break;
return false;
- case Arg::Imm64:
+ case Arg::BigImm:
if (is64Bit())
break;
return false;
@@ -125,7 +125,7 @@
{
switch (inst.args[calleeArgOffset].kind()) {
case Arg::Imm:
- case Arg::Imm64:
+ case Arg::BigImm:
jit.move(inst.args[calleeArgOffset].asTrustedImmPtr(), scratchRegister);
jit.call(scratchRegister);
break;
Modified: trunk/Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp (196735 => 196736)
--- trunk/Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/b3/air/AirEmitShuffle.cpp 2016-02-18 01:38:40 UTC (rev 196736)
@@ -310,7 +310,7 @@
else {
ASSERT(pair.src().isSomeImm());
ASSERT(move == Move32);
- result.append(Inst(Move, origin, Arg::imm64(pair.src().value()), scratch));
+ result.append(Inst(Move, origin, Arg::bigImm(pair.src().value()), scratch));
}
result.append(Inst(moveForWidth(pair.width()), origin, scratch, pair.dst()));
returnScratch(scratchIndex, scratch);
Modified: trunk/Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp (196735 => 196736)
--- trunk/Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/b3/air/AirFixObviousSpills.cpp 2016-02-18 01:38:40 UTC (rev 196736)
@@ -237,7 +237,7 @@
if (Arg::isValidImmForm(alias->constant))
arg = Arg::imm(alias->constant);
else
- arg = Arg::imm64(alias->constant);
+ arg = Arg::bigImm(alias->constant);
didThings = true;
return;
}
Modified: trunk/Source/JavaScriptCore/b3/air/AirOpcode.opcodes (196735 => 196736)
--- trunk/Source/JavaScriptCore/b3/air/AirOpcode.opcodes 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/b3/air/AirOpcode.opcodes 2016-02-18 01:38:40 UTC (rev 196736)
@@ -39,7 +39,7 @@
# Argument kinds:
# Tmp => temporary or register
# Imm => 32-bit immediate int
-# Imm64 => TrustedImm64
+# BigImm => TrustedImm64
# Addr => address as temporary/register+offset
# Index => BaseIndex address
# Abs => AbsoluteAddress
@@ -270,6 +270,7 @@
And32 U:G:32, U:G:32, ZD:G:32
Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
x86: Tmp, Addr, Tmp
x86: Addr, Tmp, Tmp
@@ -282,6 +283,7 @@
64: And64 U:G:64, U:G:64, D:G:64
Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
x86_64: And64 U:G:64, UD:G:64
Tmp, Tmp
@@ -361,6 +363,7 @@
Or32 U:G:32, U:G:32, ZD:G:32
Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
x86: Tmp, Addr, Tmp
x86: Addr, Tmp, Tmp
@@ -373,6 +376,7 @@
64: Or64 U:G:64, U:G:64, D:G:64
Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
64: Or64 U:G:64, UD:G:64
Tmp, Tmp
@@ -380,6 +384,7 @@
Xor32 U:G:32, U:G:32, ZD:G:32
Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
x86: Tmp, Addr, Tmp
x86: Addr, Tmp, Tmp
@@ -392,6 +397,7 @@
64: Xor64 U:G:64, U:G:64, D:G:64
Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
64: Xor64 U:G:64, UD:G:64
Tmp, Tmp
@@ -464,7 +470,7 @@
Move U:G:Ptr, D:G:Ptr
Tmp, Tmp
Imm, Tmp as signExtend32ToPtr
- Imm64, Tmp
+ BigImm, Tmp
Addr, Tmp as loadPtr # This means that "Move Addr, Tmp" is code-generated as "load" not "move".
Index, Tmp as loadPtr
Tmp, Addr as storePtr
Modified: trunk/Source/JavaScriptCore/b3/air/opcode_generator.rb (196735 => 196736)
--- trunk/Source/JavaScriptCore/b3/air/opcode_generator.rb 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/b3/air/opcode_generator.rb 2016-02-18 01:38:40 UTC (rev 196736)
@@ -190,7 +190,7 @@
end
def isKind(token)
- token =~ /\A((Tmp)|(Imm)|(Imm64)|(Addr)|(Index)|(RelCond)|(ResCond)|(DoubleCond))\Z/
+ token =~ /\A((Tmp)|(Imm)|(BigImm)|(BitImm)|(BitImm64)|(Addr)|(Index)|(RelCond)|(ResCond)|(DoubleCond))\Z/
end
def isArch(token)
@@ -264,7 +264,7 @@
def consumeKind
result = token.string
- parseError("Expected kind (Imm, Imm64, Tmp, Addr, Index, RelCond, ResCond, or DoubleCond)") unless isKind(result)
+ parseError("Expected kind (Imm, BigImm, BitImm, BitImm64, Tmp, Addr, Index, RelCond, ResCond, or DoubleCond)") unless isKind(result)
advance
result
end
@@ -426,7 +426,7 @@
parseError("Form has wrong number of arguments for overload") unless kinds.length == signature.length
kinds.each_with_index {
| kind, index |
- if kind.name == "Imm" or kind.name == "Imm64"
+ if kind.name == "Imm" or kind.name == "BigImm" or kind.name == "BitImm" or kind.name == "BitImm64"
if signature[index].role != "U"
parseError("Form has an immediate for a non-use argument")
end
@@ -530,14 +530,14 @@
outp.puts "switch (#{columnGetter[columnIndex]}) {"
groups.each_pair {
| key, value |
- outp.puts "#if USE(JSVALUE64)" if key == "Imm64"
+ outp.puts "#if USE(JSVALUE64)" if key == "BigImm" or key == "BitImm64"
Kind.argKinds(key).each {
| argKind |
outp.puts "case Arg::#{argKind}:"
}
matchForms(outp, speed, value, columnIndex + 1, columnGetter, filter, callback)
outp.puts "break;"
- outp.puts "#endif // USE(JSVALUE64)" if key == "Imm64"
+ outp.puts "#endif // USE(JSVALUE64)" if key == "BigImm" or key == "BitImm64"
}
outp.puts "default:"
outp.puts "break;"
@@ -798,6 +798,12 @@
when "Imm"
outp.puts "if (!Arg::isValidImmForm(args[#{index}].value()))"
outp.puts "OPGEN_RETURN(false);"
+ when "BitImm"
+ outp.puts "if (!Arg::isValidBitImmForm(args[#{index}].value()))"
+ outp.puts "OPGEN_RETURN(false);"
+ when "BitImm64"
+ outp.puts "if (!Arg::isValidBitImm64Form(args[#{index}].value()))"
+ outp.puts "OPGEN_RETURN(false);"
when "Addr"
if arg.role == "UA"
outp.puts "if (args[#{index}].isStack() && args[#{index}].stackSlot()->isSpill())"
@@ -809,7 +815,7 @@
when "Index"
outp.puts "if (!Arg::isValidIndexForm(args[#{index}].scale(), args[#{index}].offset(), #{arg.widthCode}))"
outp.puts "OPGEN_RETURN(false);"
- when "Imm64"
+ when "BigImm"
when "RelCond"
when "ResCond"
when "DoubleCond"
@@ -1055,9 +1061,9 @@
else
outp.print "args[#{index}].fpr()"
end
- when "Imm"
+ when "Imm", "BitImm"
outp.print "args[#{index}].asTrustedImm32()"
- when "Imm64"
+ when "BigImm", "BitImm64"
outp.print "args[#{index}].asTrustedImm64()"
when "Addr"
outp.print "args[#{index}].asAddress()"
Modified: trunk/Source/JavaScriptCore/b3/air/testair.cpp (196735 => 196736)
--- trunk/Source/JavaScriptCore/b3/air/testair.cpp 2016-02-18 01:25:21 UTC (rev 196735)
+++ trunk/Source/JavaScriptCore/b3/air/testair.cpp 2016-02-18 01:38:40 UTC (rev 196736)
@@ -126,7 +126,7 @@
(*map)[value] = new T(value);
T* ptr = (*map)[value];
- block->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(ptr)), scratch);
+ block->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(ptr)), scratch);
block->append(move, nullptr, Arg::addr(scratch), tmp);
}
@@ -157,7 +157,7 @@
int32_t things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -192,7 +192,7 @@
int32_t things[5];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -238,7 +238,7 @@
int32_t things[8];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -290,7 +290,7 @@
int32_t things[8];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -334,7 +334,7 @@
int32_t things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -370,7 +370,7 @@
int32_t things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -414,7 +414,7 @@
Vector<int32_t> things(regs.size(), 666);
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
@@ -455,7 +455,7 @@
int32_t things[8];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -507,7 +507,7 @@
int32_t things[8];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -562,7 +562,7 @@
int32_t things[8];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -609,7 +609,7 @@
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -654,7 +654,7 @@
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -699,7 +699,7 @@
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -744,7 +744,7 @@
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -789,7 +789,7 @@
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -833,7 +833,7 @@
int32_t things[6];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -875,7 +875,7 @@
Vector<int32_t> things(regs.size(), 666);
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
@@ -912,7 +912,7 @@
Vector<int32_t> things(regs.size(), 666);
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), base);
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(Move32, nullptr, Arg::stack(slot, i * sizeof(int32_t)), Tmp(GPRInfo::regT0));
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, i * sizeof(int32_t)));
@@ -945,7 +945,7 @@
int64_t things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
@@ -981,7 +981,7 @@
int64_t things[5];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
@@ -1018,7 +1018,7 @@
int64_t things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
@@ -1054,7 +1054,7 @@
int64_t things[5];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int64_t)));
@@ -1095,7 +1095,7 @@
int32_t things[2];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
@@ -1141,7 +1141,7 @@
int32_t things[3];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT2), Arg::addr(base, 2 * sizeof(int32_t)));
@@ -1191,7 +1191,7 @@
shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width32));
Vector<int32_t> things(regs.size(), 666);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(
Move32, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int32_t)));
@@ -1241,7 +1241,7 @@
shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
Vector<int64_t> things(regs.size(), 666);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(
Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
@@ -1305,7 +1305,7 @@
}
Vector<int64_t> things(regs.size(), 666);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(
Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
@@ -1354,7 +1354,7 @@
int32_t things[2];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move32, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int32_t)));
root->append(Move32, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int32_t)));
root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
@@ -1399,7 +1399,7 @@
int64_t things[2];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
@@ -1444,7 +1444,7 @@
int64_t things[2];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(Move, nullptr, Tmp(GPRInfo::regT0), Arg::addr(base, 0 * sizeof(int64_t)));
root->append(Move, nullptr, Tmp(GPRInfo::regT1), Arg::addr(base, 1 * sizeof(int64_t)));
root->append(Move, nullptr, Arg::imm(0), Tmp(GPRInfo::returnValueGPR));
@@ -1494,7 +1494,7 @@
shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
Vector<int64_t> things(regs.size(), 666);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(
Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
@@ -1546,7 +1546,7 @@
shuffle.append(Tmp(regs[i - 1]), Tmp(regs[i]), Arg::widthArg(Arg::Width64));
Vector<int64_t> things(regs.size(), 666);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things[0])), Tmp(GPRInfo::regT0));
for (unsigned i = 0; i < regs.size(); ++i) {
root->append(
Move, nullptr, Tmp(regs[i]), Arg::addr(Tmp(GPRInfo::regT0), i * sizeof(int64_t)));
@@ -1581,7 +1581,7 @@
double things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT0), Arg::addr(base, 0 * sizeof(double)));
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT1), Arg::addr(base, 1 * sizeof(double)));
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT2), Arg::addr(base, 2 * sizeof(double)));
@@ -1615,7 +1615,7 @@
double things[4];
Tmp base = code.newTmp(Arg::GP);
- root->append(Move, nullptr, Arg::imm64(bitwise_cast<intptr_t>(&things)), base);
+ root->append(Move, nullptr, Arg::bigImm(bitwise_cast<intptr_t>(&things)), base);
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT0), Arg::addr(base, 0 * sizeof(double)));
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT1), Arg::addr(base, 1 * sizeof(double)));
root->append(MoveDouble, nullptr, Tmp(FPRInfo::fpRegT2), Arg::addr(base, 2 * sizeof(double)));
_______________________________________________ webkit-changes mailing list [email protected] https://lists.webkit.org/mailman/listinfo/webkit-changes
