Hi,
The ashl/lshr/ashr expanders call ix86_expand_binary_operator, and they
can also be invoked from some post-reload splitters; TARGET_APX_NDD is
required in these calls to avoid forcing an operand to memory at the
post-reload stage.
Bootstrapped/regtested on x86-64-pc-linux-gnu{-m32,}
Ok for master?
gcc/ChangeLog:
PR target/112943
* config/i386/i386.md (ashl<mode>3): Add TARGET_APX_NDD to
ix86_expand_binary_operator call.
(<insn><mode>3): Likewise for rshift.
(<insn>di3): Likewise for DImode rotate.
(<insn><mode>3): Likewise for SWI124 rotate.
gcc/testsuite/ChangeLog:
PR target/112943
* gcc.target/i386/pr112943.c: New test.
---
 gcc/config/i386/i386.md                  | 12 ++++----
 gcc/testsuite/gcc.target/i386/pr112943.c | 63 ++++++++++++++++++++++++
 2 files changed, 71 insertions(+), 4 deletions(-)
create mode 100644 gcc/testsuite/gcc.target/i386/pr112943.c
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index b4db50f61cd..f83064ec335 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -14308,7 +14308,8 @@ (define_expand "ashl<mode>3"
(ashift:SDWIM (match_operand:SDWIM 1 "")
(match_operand:QI 2 "nonmemory_operand")))]
""
-  "ix86_expand_binary_operator (ASHIFT, <MODE>mode, operands); DONE;")
+  "ix86_expand_binary_operator (ASHIFT, <MODE>mode, operands,
+				TARGET_APX_NDD); DONE;")
 (define_insn_and_split "*ashl<mode>3_doubleword_mask"
   [(set (match_operand:<DWI> 0 "register_operand")
@@ -15564,7 +15565,8 @@ (define_expand "<insn><mode>3"
(any_shiftrt:SDWIM (match_operand:SDWIM 1 "")
(match_operand:QI 2 "nonmemory_operand")))]
""
-  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands); DONE;")
+  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands,
+				TARGET_APX_NDD); DONE;")
;; Avoid useless masking of count operand.
 (define_insn_and_split "*<insn><mode>3_mask"
@@ -16791,7 +16793,8 @@ (define_expand "<insn>di3"
""
{
if (TARGET_64BIT)
-    ix86_expand_binary_operator (<CODE>, DImode, operands);
+    ix86_expand_binary_operator (<CODE>, DImode, operands,
+				 TARGET_APX_NDD);
else if (const_1_to_31_operand (operands[2], VOIDmode))
     emit_insn (gen_ix86_<insn>di3_doubleword
(operands[0], operands[1], operands[2]));
@@ -16811,7 +16814,8 @@ (define_expand "<insn><mode>3"
(any_rotate:SWIM124 (match_operand:SWIM124 1 "nonimmediate_operand")
(match_operand:QI 2 "nonmemory_operand")))]
""
-  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands); DONE;")
+  "ix86_expand_binary_operator (<CODE>, <MODE>mode, operands,
+				TARGET_APX_NDD); DONE;")
;; Avoid useless masking of count operand.
 (define_insn_and_split "*<insn><mode>3_mask"
diff --git a/gcc/testsuite/gcc.target/i386/pr112943.c
b/gcc/testsuite/gcc.target/i386/pr112943.c
new file mode 100644
index 00000000000..45da6cce5b7
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr112943.c
@@ -0,0 +1,63 @@
+/* PR target/112943 */
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -march=westmere -mapxf" } */
+
+typedef unsigned char __attribute__((__vector_size__(1))) v8u8;
+typedef char __attribute__((__vector_size__(2))) v16u8;
+typedef char __attribute__((__vector_size__(4))) v32u8;
+typedef char __attribute__((__vector_size__(8))) v64u8;
+typedef char __attribute__((__vector_size__(16))) v128u8;
+typedef _Float16 __attribute__((__vector_size__(2))) v16f16;
+typedef _Float16 __attribute__((__vector_size__(16))) v128f16;
+typedef _Float64x __attribute__((__vector_size__(16))) v128f128;
+typedef _Decimal64 d64;
+char foo0_u8_0;
+v8u8 foo0_v8u8_0;
+__attribute__((__vector_size__(sizeof(char)))) char foo0_v8s8_0;
+__attribute__((__vector_size__(sizeof(long)))) unsigned long v64u64_0;
+_Float16 foo0_f16_0;
+v128f16 foo0_v128f16_0;
+double foo0_f64_0;
+int foo0_f128_0, foo0_v32d32_0, foo0__0;
+d64 foo0_d64_0;
+v8u8 *foo0_ret;
+unsigned __int128 foo0_u128_3;
+v8u8 d;
+void foo0() {
+v64u64_0 -= foo0_u8_0;
+v8u8 v8u8_1 = foo0_v8u8_0 % d;
+v128f128 v128f128_1 = __builtin_convertvector(v64u64_0, v128f128);
+__int128 u128_2 = (9223372036854775808 << 4) * foo0_u8_0; /* { dg-warning
"integer constant is so large that it is unsigned" "so large" } */
+__int128 u128_r = u128_2 + foo0_u128_3 + foo0_f128_0 +
(__int128)foo0_d64_0;
+v16f16 v16f16_1 = __builtin_convertvector(foo0_v8s8_0, v16f16);
+v128f16 v128f16_1 = 0 > foo0_v128f16_0;
+v128u8 v128u8_r = (v128u8)v128f16_1 + (v128u8)v128f128_1;
+v64u8 v64u8_r = ((union {
+ v128u8 a;
+ v64u8 b;
+ })v128u8_r)
+.b +
+ (v64u8)v64u64_0;
+v32u8 v32u8_r = ((union {
+ v64u8 a;
+ v32u8 b;
+ })v64u8_r)
+.b +
+ (v32u8)foo0_v32d32_0;
+v16u8 v16u8_r = ((union {
+ v32u8 a;
+ v16u8 b;
+ })v32u8_r)
+