Hi,
If-conversion currently allows vector types in scalar conditional
reductions by accident (they can slip in via the GNU vector extension).
This patch disallows them.
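For illustration, the kind of input that runs into this looks roughly
like the sketch below (hand-written to show the shape only, not the
reduced testcase from the PR; all names are made up).  The accumulator
of the conditionally executed reduction is a GNU vector rather than a
scalar:

  /* Hand-written illustration only; names are made up.  */
  typedef unsigned long long v4udi
      __attribute__ ((vector_size (4 * sizeof (unsigned long long))));

  v4udi
  foo (int *c, v4udi step, int n)
  {
    v4udi acc = { 0, 0, 0, 0 };
    for (int i = 0; i < n; i++)
      if (c[i])
        acc |= step;  /* Conditional reduction, but acc is a GNU vector.  */
    return acc;
  }

With the patch the IFN_COND_* path in convert_scalar_cond_reduction is
only taken when the reduction result really has a scalar type.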
Bootstrapped and regtested on x86, power10, and aarch64.
Regtested on riscv64.
Regards
Robin
PR tree-optimization/123301
gcc/ChangeLog:
* tree-if-conv.cc (convert_scalar_cond_reduction):
Disallow vector types.
gcc/testsuite/ChangeLog:
* gcc.target/riscv/rvv/autovec/pr123301.c: New test.
---
.../gcc.target/riscv/rvv/autovec/pr123301.c | 45 +++++++++++++++++++
gcc/tree-if-conv.cc | 1 +
2 files changed, 46 insertions(+)
create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/autovec/pr123301.c
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr123301.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr123301.c
new file mode 100644
index 00000000000..db4b8a43ac1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr123301.c
@@ -0,0 +1,45 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -march=rv64gcv -mabi=lp64d" } */
+
+#define BS_VEC(type, num) type __attribute__((vector_size(num * sizeof(type))))
+#define BITCAST(T, F, arg)                                                    \
+  ((union {                                                                   \
+     F src;                                                                   \
+     T dst;                                                                   \
+   })arg)                                                                     \
+    .dst
+#include <riscv_bitmanip.h>
+BS_VEC(uint64_t, 2)
+backsmith_snippet_423(BS_VEC(int16_t, 2), BS_VEC(int32_t, 8), uint8_t)
+{}
+uint32_t backsmith_pure_1(BS_VEC(uint32_t, 2) BS_ARG_2, int8_t BS_ARG_3)
+{
+  BS_VEC(uint64_t, 4) BS_VAR_0;
+  int32_t BS_VAR_4;
+  uint64_t BS_TEMP_105 = 8;
+  for (uint64_t BS_INC_0 = 0; BS_INC_0 < BS_TEMP_105; BS_INC_0 += 1)
+    if (BS_ARG_2[1])
+      {
+        BS_VAR_4 = BS_INC_0;
+        BS_VEC(uint32_t, 2)
+        BS_TEMP_107 = __builtin_convertvector(
+            (BS_VEC(int32_t, 2)){ BS_VAR_4, BS_VAR_4 },
+            BS_VEC(uint32_t, 2));
+        BS_VEC(uint32_t, 2)
+        BS_TEMP_108 = __builtin_convertvector(
+            (BS_VEC(int8_t, 2)){ BS_ARG_3 }, BS_VEC(uint32_t, 2));
+        if (BITCAST(uint64_t, BS_VEC(uint32_t, 2),
+                    ((BS_VEC(uint32_t, 2)){
+                        BS_TEMP_107[0] ? BS_TEMP_108[0] : 0,
+                        BS_TEMP_107[1] ? BS_TEMP_108[1] : 0 }))
+            < backsmith_snippet_423(
+                  __builtin_convertvector((BS_VEC(uint64_t, 2)){},
+                                          BS_VEC(int16_t, 2)),
+                  (BS_VEC(int32_t, 8)){}, 0)[1])
+          BS_VAR_0 |= __builtin_convertvector(
+              (BS_VEC(int32_t, 4)){ BS_VAR_4 }, BS_VEC(uint64_t, 4));
+      }
+  if (BS_VAR_0[0])
+    for (;;)
+      ;
+}
diff --git a/gcc/tree-if-conv.cc b/gcc/tree-if-conv.cc
index c8f7b8453d8..51fbcc128c6 100644
--- a/gcc/tree-if-conv.cc
+++ b/gcc/tree-if-conv.cc
@@ -1993,6 +1993,7 @@ convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
   ifn = get_conditional_internal_fn (reduction_op);
   if (loop_versioned && ifn != IFN_LAST
       && vectorized_internal_fn_supported_p (ifn, TREE_TYPE (lhs))
+      && !VECTOR_TYPE_P (TREE_TYPE (lhs))
       && !swap)
     {
       gcall *cond_call = gimple_build_call_internal (ifn, 4,
--
2.52.0