While working on PR112443, I noticed some missed optimizations: after we
fold _mm{,256}_blendv_epi8/pd/ps into gimple, the backend fails to combine
the result back to v{,p}blendv{b,ps,pd} since the pattern is too complicated,
so I think we should handle it at the gimple level.
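
A minimal reproducer (essentially the foo case from the new blendv-3.c
testcase below; the SSA names in the dumps may differ):

  #include <immintrin.h>

  __m256i
  foo (__m256i a, __m256i b, __m256i c)
  {
    return _mm256_blendv_epi8 (a, b, ~c < 0);
  }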

The gimple dump for it looks like:

  _1 = c_3(D) >= { 0, 0, 0, 0 };
  _2 = VEC_COND_EXPR <_1, { -1, -1, -1, -1 }, { 0, 0, 0, 0 }>;
  _7 = VIEW_CONVERT_EXPR<vector(32) char>(_2);
  _8 = VIEW_CONVERT_EXPR<vector(32) char>(b_6(D));
  _9 = VIEW_CONVERT_EXPR<vector(32) char>(a_5(D));
  _10 = _7 < { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  _11 = VEC_COND_EXPR <_10, _8, _9>;


It can be optimized to:

  _6 = VIEW_CONVERT_EXPR<vector(32) char>(b_4(D));
  _7 = VIEW_CONVERT_EXPR<vector(32) char>(a_3(D));
  _10 = VIEW_CONVERT_EXPR<vector(32) char>(c_1(D));
  _5 = _10 >= { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  _8 = VEC_COND_EXPR <_5, _6, _7>;
  _9 = VIEW_CONVERT_EXPR<__m256i>(_8);

Since _7 is either -1 or 0, _7 < 0 is equivalent to _1 = c_3(D) >= { 0, 0, 0, 0 }.
The patch adds a gimple pattern to handle that.
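
Purely as illustration (not part of the patch), a scalar sketch of that
invariant: every byte of a lane that is all-ones or all-zero has the same
sign as the lane itself, so a byte-wise < 0 test recovers the lane-wise
mask:

  #include <stdint.h>
  #include <string.h>
  #include <assert.h>

  int
  main (void)
  {
    /* A lane produced by (cmp ? -1 : 0) is all-ones or all-zero.  */
    int64_t lanes[2] = { -1, 0 };
    for (int l = 0; l < 2; l++)
      {
	int8_t bytes[8];
	/* Models the VIEW_CONVERT_EXPR to vector(32) char.  */
	memcpy (bytes, &lanes[l], sizeof bytes);
	for (int i = 0; i < 8; i++)
	  /* Byte-wise < 0 agrees with the lane-wise mask.  */
	  assert ((bytes[i] < 0) == (lanes[l] < 0));
      }
    return 0;
  }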

Bootstrapped and regtested on x86_64-pc-linux-gnu{-m32,}
Ok for trunk?

gcc/ChangeLog:

        * match.pd (VCE:(a cmp b ? -1 : 0) < 0) ? c : d ---> (VCE:a cmp
        VCE:b) ? c : d): New gimple simplification.

gcc/testsuite/ChangeLog:

        * gcc.target/i386/avx512vl-blendv-3.c: New test.
        * gcc.target/i386/blendv-3.c: New test.
---
 gcc/match.pd                                  | 17 +++++++
 .../gcc.target/i386/avx512vl-blendv-3.c       |  6 +++
 gcc/testsuite/gcc.target/i386/blendv-3.c      | 46 +++++++++++++++++++
 3 files changed, 69 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/i386/avx512vl-blendv-3.c
 create mode 100644 gcc/testsuite/gcc.target/i386/blendv-3.c

diff --git a/gcc/match.pd b/gcc/match.pd
index dbc811b2b38..e6f9c4fa1fd 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -5170,6 +5170,23 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
  (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
   (vec_cond (bit_and @0 (bit_not @3)) @2 @1)))
 
+(for cmp (simple_comparison)
+ (simplify
+  (vec_cond
+    (lt@4 (view_convert?@5 (vec_cond (cmp @0 @1)
+                                integer_all_onesp
+                                integer_zerop))
+         integer_zerop) @2 @3)
+  (if (VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0))
+       && VECTOR_INTEGER_TYPE_P (TREE_TYPE (@5))
+       && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (TREE_TYPE (@5))
+       && VECTOR_TYPE_P (type))
+   (with {
+          tree itype = TREE_TYPE (@5);
+          tree vbtype = TREE_TYPE (@4);}
+     (vec_cond (cmp:vbtype (view_convert:itype @0)
+                          (view_convert:itype @1)) @2 @3)))))
+
 /* c1 ? c2 ? a : b : b  -->  (c1 & c2) ? a : b  */
 (simplify
  (vec_cond @0 (vec_cond:s @1 @2 @3) @3)
diff --git a/gcc/testsuite/gcc.target/i386/avx512vl-blendv-3.c b/gcc/testsuite/gcc.target/i386/avx512vl-blendv-3.c
new file mode 100644
index 00000000000..2777e72ab5f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/avx512vl-blendv-3.c
@@ -0,0 +1,6 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx512vl -mavx512bw -O2" } */
+/* { dg-final { scan-assembler-times {vp?blendv(?:b|p[sd])[ \t]*} 6 } } */
+/* { dg-final { scan-assembler-not {vpcmp} } } */
+
+#include "blendv-3.c"
diff --git a/gcc/testsuite/gcc.target/i386/blendv-3.c b/gcc/testsuite/gcc.target/i386/blendv-3.c
new file mode 100644
index 00000000000..fa0fb067a73
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/blendv-3.c
@@ -0,0 +1,46 @@
+/* { dg-do compile } */
+/* { dg-options "-mavx2 -O2" } */
+/* { dg-final { scan-assembler-times {vp?blendv(?:b|p[sd])[ \t]*} 6 } } */
+/* { dg-final { scan-assembler-not {vpcmp} } } */
+
+#include <immintrin.h>
+
+__m256i
+foo (__m256i a, __m256i b, __m256i c)
+{
+  return _mm256_blendv_epi8 (a, b, ~c < 0);
+}
+
+__m256d
+foo1 (__m256d a, __m256d b, __m256i c)
+{
+  __m256i d = ~c < 0;
+  return _mm256_blendv_pd (a, b, (__m256d)d);
+}
+
+__m256
+foo2 (__m256 a, __m256 b, __m256i c)
+{
+  __m256i d = ~c < 0;
+  return _mm256_blendv_ps (a, b, (__m256)d);
+}
+
+__m128i
+foo4 (__m128i a, __m128i b, __m128i c)
+{
+  return _mm_blendv_epi8 (a, b, ~c < 0);
+}
+
+__m128d
+foo5 (__m128d a, __m128d b, __m128i c)
+{
+  __m128i d = ~c < 0;
+  return _mm_blendv_pd (a, b, (__m128d)d);
+}
+
+__m128
+foo6 (__m128 a, __m128 b, __m128i c)
+{
+  __m128i d = ~c < 0;
+  return _mm_blendv_ps (a, b, (__m128)d);
+}
-- 
2.31.1
