The lack of SLP discovery for scatter stores was responsible for a bunch of SVE FAILs with --param vect-force-slp=1.
Bootstrap and regtest running on x86_64-unknown-linux-gnu.
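
For reference, a loop of roughly the following shape (made-up function,
purely for illustration) is what ends up as IFN_MASK_SCATTER_STORE resp.
IFN_MASK_LEN_SCATTER_STORE on SVE and is the kind of store this patch
lets SLP discovery handle:

void
scatter_example (double *__restrict dst, int *__restrict idx,
                 double *__restrict src, int *__restrict cond, int n)
{
  for (int i = 0; i < n; i++)
    if (cond[i])
      dst[idx[i]] = src[i];   /* conditional indexed store -> masked scatter  */
}
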
* tree-vect-slp.cc (arg1_arg3_map): New.
(arg1_arg3_arg4_map): Likewise.
(vect_get_operand_map): Handle IFN_SCATTER_STORE,
IFN_MASK_SCATTER_STORE and IFN_MASK_LEN_SCATTER_STORE.
(vect_build_slp_tree_1): Likewise.
* tree-vect-stmts.cc (vectorizable_store): For SLP masked
gather/scatter query the loop mask and loop length with the
proper number of vector copies.
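
The operand maps encode the number of SLP-relevant operands followed by
the call argument indices to pick; the following is a minimal standalone
sketch of that decoding (not the GCC code itself), assuming the usual
IFN_SCATTER_STORE (base, offset, scale, value) and
IFN_MASK_SCATTER_STORE (base, offset, scale, value, mask) argument order:

#include <cstdio>

static const int arg1_arg3_map[] = { 2, 1, 3 };         /* offset, stored value  */
static const int arg1_arg3_arg4_map[] = { 3, 1, 3, 4 }; /* offset, value, mask  */

static void
dump_map (const char *name, const int *map)
{
  int nops = map[0];                   /* first entry: operand count  */
  printf ("%s selects call args:", name);
  for (int i = 0; i < nops; i++)
    printf (" %d", map[1 + i]);        /* remaining entries: argument indices  */
  printf ("\n");
}

int
main ()
{
  dump_map ("IFN_SCATTER_STORE", arg1_arg3_map);
  dump_map ("IFN_MASK_SCATTER_STORE", arg1_arg3_arg4_map);
  return 0;
}
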
---
gcc/tree-vect-slp.cc | 17 ++++++++++++++++-
gcc/tree-vect-stmts.cc | 6 ++++--
2 files changed, 20 insertions(+), 3 deletions(-)
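
The tree-vect-stmts.cc hunks pass ncopies * vec_num instead of ncopies as
the number of vectors when fetching the loop mask resp. length; the toy
sketch below (pick_mask_slot is a made-up stand-in, not vect_get_loop_mask)
only illustrates the counting argument, assuming that with SLP the store
emits vec_num vector statements per unrolled copy and each needs its own
mask:

#include <cassert>

/* Made-up stand-in for the mask lookup: NVECTORS is the total number of
   vector statements the loop masks must cover, J the statement at hand.  */
static int
pick_mask_slot (int nvectors, int j)
{
  assert (j < nvectors);   /* an under-counted NVECTORS is caught here  */
  return j;
}

int
main ()
{
  const int ncopies = 1, vec_num = 4;  /* e.g. a single-lane SLP store  */
  for (int j = 0; j < ncopies * vec_num; j++)
    /* Passing just ncopies would under-count as soon as vec_num > 1;
       the patch passes ncopies * vec_num.  */
    pick_mask_slot (ncopies * vec_num, j);
  return 0;
}
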
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index 8e4ad05e2a4..eebac1955de 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -512,7 +512,9 @@ static const int no_arg_map[] = { 0 };
static const int arg0_map[] = { 1, 0 };
static const int arg1_map[] = { 1, 1 };
static const int arg2_map[] = { 1, 2 };
+static const int arg1_arg3_map[] = { 2, 1, 3 };
static const int arg1_arg4_map[] = { 2, 1, 4 };
+static const int arg1_arg3_arg4_map[] = { 3, 1, 3, 4 };
static const int arg3_arg2_map[] = { 2, 3, 2 };
static const int op1_op0_map[] = { 2, 1, 0 };
static const int off_map[] = { 1, -3 };
@@ -573,6 +575,13 @@ vect_get_operand_map (const gimple *stmt, bool gather_scatter_p = false,
case IFN_MASK_LEN_GATHER_LOAD:
return arg1_arg4_map;
+ case IFN_SCATTER_STORE:
+ return arg1_arg3_map;
+
+ case IFN_MASK_SCATTER_STORE:
+ case IFN_MASK_LEN_SCATTER_STORE:
+ return arg1_arg3_arg4_map;
+
case IFN_MASK_STORE:
return gather_scatter_p ? off_arg3_arg2_map : arg3_arg2_map;
@@ -1187,7 +1196,10 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
if (cfn == CFN_MASK_LOAD
|| cfn == CFN_GATHER_LOAD
|| cfn == CFN_MASK_GATHER_LOAD
- || cfn == CFN_MASK_LEN_GATHER_LOAD)
+ || cfn == CFN_MASK_LEN_GATHER_LOAD
+ || cfn == CFN_SCATTER_STORE
+ || cfn == CFN_MASK_SCATTER_STORE
+ || cfn == CFN_MASK_LEN_SCATTER_STORE)
ldst_p = true;
else if (cfn == CFN_MASK_STORE)
{
@@ -1473,6 +1485,9 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
&& rhs_code != CFN_GATHER_LOAD
&& rhs_code != CFN_MASK_GATHER_LOAD
&& rhs_code != CFN_MASK_LEN_GATHER_LOAD
+ && rhs_code != CFN_SCATTER_STORE
+ && rhs_code != CFN_MASK_SCATTER_STORE
+ && rhs_code != CFN_MASK_LEN_SCATTER_STORE
&& !STMT_VINFO_GATHER_SCATTER_P (stmt_info)
/* Not grouped loads are handled as externals for BB
vectorization. For loop vectorization we can handle
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 28bfd8f4e28..f77a223b0c4 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -9163,7 +9163,8 @@ vectorizable_store (vec_info *vinfo,
{
if (loop_masks)
final_mask = vect_get_loop_mask (loop_vinfo, gsi,
- loop_masks, ncopies,
+ loop_masks,
+ ncopies * vec_num,
vectype, j);
if (vec_mask)
final_mask = prepare_vec_mask (loop_vinfo, mask_vectype,
@@ -9189,7 +9190,8 @@ vectorizable_store (vec_info *vinfo,
{
if (loop_lens)
final_len = vect_get_loop_len (loop_vinfo, gsi,
- loop_lens, ncopies,
+ loop_lens,
+ ncopies * vec_num,
vectype, j, 1);
else
final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype));
--
2.43.0