The following changes the record_stmt_cost calls in
vectorizable_load/store to pass only the SLP node when costing
vector stmts. For now the costing will still see the stmt_vec_info,
determined from SLP_TREE_REPRESENTATIVE, so this merely cleans up
the API at the call sites.
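
For illustration, a typical call site changes shape as sketched below
(a minimal sketch using the usual locals from vectorizable_store; the
remaining SLP-node overload keeps recording the stmt_vec_info it gets
from SLP_TREE_REPRESENTATIVE):

      /* Before: the mixed stmt_vec_info/SLP node overload.  */
      inside_cost += record_stmt_cost (cost_vec, 1, vec_perm,
				       stmt_info, slp_node, 0, vect_body);

      /* After: only the SLP node is passed for vector stmt costing.  */
      inside_cost += record_stmt_cost (cost_vec, 1, vec_perm,
				       slp_node, 0, vect_body);

Scalar costs (scalar_store, scalar_load) keep passing the stmt_vec_info
as before.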
Bootstrap and regtest running on x86_64-unknown-linux-gnu.
* tree-vectorizer.h (record_stmt_cost): Remove mixed
stmt_vec_info/SLP node inline overload.
* tree-vect-stmts.cc (vectorizable_store): For costing
vector stmts only pass SLP node to record_stmt_cost.
(vectorizable_load): Likewise.
---
gcc/tree-vect-stmts.cc | 41 ++++++++++++++++-------------------------
gcc/tree-vectorizer.h | 13 -------------
2 files changed, 16 insertions(+), 38 deletions(-)
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 7075948a19a..b190473c258 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -8680,7 +8680,7 @@ vectorizable_store (vec_info *vinfo,
}
else if (vls_type != VLS_STORE_INVARIANT)
return;
- *prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, stmt_info,
+ *prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
slp_node, 0, vect_prologue);
};
@@ -8989,8 +8989,7 @@ vectorizable_store (vec_info *vinfo,
if (nstores > 1)
inside_cost
+= record_stmt_cost (cost_vec, n_adjacent_stores,
- vec_to_scalar, stmt_info, slp_node,
- 0, vect_body);
+ vec_to_scalar, slp_node, 0, vect_body);
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
@@ -9327,8 +9326,7 @@ vectorizable_store (vec_info *vinfo,
{
if (costing_p && vls_type == VLS_STORE_INVARIANT)
prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
- stmt_info, slp_node, 0,
- vect_prologue);
+ slp_node, 0, vect_prologue);
else if (!costing_p)
{
/* Since the store is not grouped, DR_GROUP_SIZE is 1, and
@@ -9578,11 +9576,11 @@ vectorizable_store (vec_info *vinfo,
consumed by the load). */
inside_cost
+= record_stmt_cost (cost_vec, cnunits, vec_to_scalar,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
/* N scalar stores plus extracting the elements. */
inside_cost
+= record_stmt_cost (cost_vec, cnunits, vec_to_scalar,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
inside_cost
+= record_stmt_cost (cost_vec, cnunits, scalar_store,
stmt_info, 0, vect_body);
@@ -9779,8 +9777,7 @@ vectorizable_store (vec_info *vinfo,
int group_size = DR_GROUP_SIZE (first_stmt_info);
int nstmts = ceil_log2 (group_size) * group_size;
inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: "
@@ -9809,8 +9806,7 @@ vectorizable_store (vec_info *vinfo,
{
if (costing_p)
inside_cost += record_stmt_cost (cost_vec, 1, vec_perm,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
else
{
tree perm_mask = perm_mask_for_reverse (vectype);
@@ -10029,7 +10025,7 @@ vectorizable_store (vec_info *vinfo,
/* Spill. */
prologue_cost
+= record_stmt_cost (cost_vec, ncopies, vector_store,
- stmt_info, slp_node, 0, vect_epilogue);
+ slp_node, 0, vect_epilogue);
/* Loads. */
prologue_cost
+= record_stmt_cost (cost_vec, ncopies * nregs, scalar_load,
@@ -10607,7 +10603,7 @@ vectorizable_load (vec_info *vinfo,
= hoist_p ? vect_prologue : vect_body;
unsigned int cost = record_stmt_cost (cost_vec, 1, scalar_load,
stmt_info, 0, cost_loc);
- cost += record_stmt_cost (cost_vec, 1, scalar_to_vec, stmt_info,
+ cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
slp_node, 0, cost_loc);
unsigned int prologue_cost = hoist_p ? cost : 0;
unsigned int inside_cost = hoist_p ? 0 : cost;
@@ -10911,8 +10907,7 @@ vectorizable_load (vec_info *vinfo,
{
if (costing_p)
inside_cost += record_stmt_cost (cost_vec, 1, vec_construct,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
else
{
tree vec_inv = build_constructor (lvectype, v);
@@ -10967,8 +10962,7 @@ vectorizable_load (vec_info *vinfo,
vect_transform_slp_perm_load (vinfo, slp_node, vNULL, NULL, vf,
true, &n_perms, &n_loads);
inside_cost += record_stmt_cost (cost_vec, n_perms, vec_perm,
- first_stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
}
else
vect_transform_slp_perm_load (vinfo, slp_node, dr_chain, gsi, vf,
@@ -11743,7 +11737,7 @@ vectorizable_load (vec_info *vinfo,
/* For emulated gathers N offset vector element
offset add is consumed by the load). */
inside_cost = record_stmt_cost (cost_vec, const_nunits,
- vec_to_scalar, stmt_info,
+ vec_to_scalar,
slp_node, 0, vect_body);
/* N scalar loads plus gathering them into a
vector. */
@@ -11752,7 +11746,7 @@ vectorizable_load (vec_info *vinfo,
stmt_info, 0, vect_body);
inside_cost
= record_stmt_cost (cost_vec, 1, vec_construct,
- stmt_info, slp_node, 0, vect_body);
+ slp_node, 0, vect_body);
continue;
}
unsigned HOST_WIDE_INT const_offset_nunits
@@ -12413,8 +12407,7 @@ vectorizable_load (vec_info *vinfo,
{
if (costing_p)
inside_cost = record_stmt_cost (cost_vec, 1, vec_perm,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
else
{
tree perm_mask = perm_mask_for_reverse (vectype);
@@ -12483,8 +12476,7 @@ vectorizable_load (vec_info *vinfo,
vect_transform_slp_perm_load (vinfo, slp_node, vNULL, nullptr, vf,
true, &n_perms, nullptr);
inside_cost = record_stmt_cost (cost_vec, n_perms, vec_perm,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
}
else
{
@@ -12511,8 +12503,7 @@ vectorizable_load (vec_info *vinfo,
int group_size = DR_GROUP_SIZE (first_stmt_info);
int nstmts = ceil_log2 (group_size) * group_size;
inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
- stmt_info, slp_node, 0,
- vect_body);
+ slp_node, 0, vect_body);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
index 118200ff4a8..7aa2b02b63c 100644
--- a/gcc/tree-vectorizer.h
+++ b/gcc/tree-vectorizer.h
@@ -2441,19 +2441,6 @@ record_stmt_cost (stmt_vector_for_cost *body_cost_vec,
int count,
STMT_VINFO_VECTYPE (stmt_info), misalign, where);
}
-/* Overload of record_stmt_cost with VECTYPE derived from STMT_INFO and
- SLP node specified. */
-
-inline unsigned
-record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
- enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
- slp_tree node,
- int misalign, enum vect_cost_model_location where)
-{
- return record_stmt_cost (body_cost_vec, count, kind, stmt_info, node,
- STMT_VINFO_VECTYPE (stmt_info), misalign, where);
-}
-
/* Overload of record_stmt_cost with VECTYPE derived from SLP node. */
inline unsigned
--
2.43.0