This removes the remaining non-SLP code paths from vectorizable_load, now that SLP is always used.

        * tree-vect-stmts.cc (vectorizable_load): Step 1 of removing
        non-SLP paths: hard-code slp to true and fold the conditions
        that tested it.
---
 gcc/tree-vect-stmts.cc | 62 +++++++++++++++++++++---------------------
 1 file changed, 31 insertions(+), 31 deletions(-)

diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index f699d808e68..92739903754 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -9850,7 +9850,7 @@ vectorizable_load (vec_info *vinfo,
   bool compute_in_loop = false;
   class loop *at_loop;
   int vec_num;
-  bool slp = (slp_node != NULL);
+  bool slp = true;
   bool slp_perm = false;
   bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
   poly_uint64 vf;
@@ -9909,7 +9909,7 @@ vectorizable_load (vec_info *vinfo,
        return false;
 
       mask_index = internal_fn_mask_index (ifn);
-      if (mask_index >= 0 && slp_node)
+      if (mask_index >= 0 && 1)
        mask_index = vect_slp_child_index_for_operand
                    (call, mask_index, STMT_VINFO_GATHER_SCATTER_P (stmt_info));
       if (mask_index >= 0
@@ -9918,7 +9918,7 @@ vectorizable_load (vec_info *vinfo,
        return false;
 
       els_index = internal_fn_else_index (ifn);
-      if (els_index >= 0 && slp_node)
+      if (els_index >= 0 && 1)
        els_index = vect_slp_child_index_for_operand
          (call, els_index, STMT_VINFO_GATHER_SCATTER_P (stmt_info));
       if (els_index >= 0
@@ -9942,7 +9942,7 @@ vectorizable_load (vec_info *vinfo,
   /* Multiple types in SLP are handled by creating the appropriate number of
      vectorized stmts for each SLP node.  Hence, NCOPIES is always 1 in
      case of SLP.  */
-  if (slp)
+  if (1)
     ncopies = 1;
   else
     ncopies = vect_get_num_copies (loop_vinfo, vectype);
@@ -9951,7 +9951,7 @@ vectorizable_load (vec_info *vinfo,
 
   /* FORNOW. This restriction should be relaxed.  */
   if (nested_in_vect_loop
-      && (ncopies > 1 || (slp && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)))
+      && (ncopies > 1 || (1 && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)))
     {
       if (dump_enabled_p ())
         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -9998,7 +9998,7 @@ vectorizable_load (vec_info *vinfo,
       group_size = DR_GROUP_SIZE (first_stmt_info);
 
       /* Refuse non-SLP vectorization of SLP-only groups.  */
-      if (!slp && STMT_VINFO_SLP_VECT_ONLY (first_stmt_info))
+      if (0 && STMT_VINFO_SLP_VECT_ONLY (first_stmt_info))
        {
          if (dump_enabled_p ())
            dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -10046,7 +10046,7 @@ vectorizable_load (vec_info *vinfo,
 
   /* ???  The following checks should really be part of
      get_group_load_store_type.  */
-  if (slp
+  if (1
       && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
       && !((memory_access_type == VMAT_ELEMENTWISE
            || memory_access_type == VMAT_GATHER_SCATTER)
@@ -10090,7 +10090,7 @@ vectorizable_load (vec_info *vinfo,
        }
     }
 
-  if (slp_node
+  if (1
       && slp_node->ldst_lanes
       && memory_access_type != VMAT_LOAD_STORE_LANES)
     {
@@ -10142,7 +10142,7 @@ vectorizable_load (vec_info *vinfo,
 
   if (costing_p) /* transformation not required.  */
     {
-      if (slp_node
+      if (1
          && mask
          && !vect_maybe_update_slp_op_vectype (slp_op,
                                                mask_vectype))
@@ -10153,7 +10153,7 @@ vectorizable_load (vec_info *vinfo,
          return false;
        }
 
-      if (!slp)
+      if (0)
        STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
       else
        SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type;
@@ -10210,7 +10210,7 @@ vectorizable_load (vec_info *vinfo,
   if (elsvals.length ())
     maskload_elsval = *elsvals.begin ();
 
-  if (!slp)
+  if (0)
     gcc_assert (memory_access_type
                == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
   else
@@ -10289,7 +10289,7 @@ vectorizable_load (vec_info *vinfo,
                                       vectype, &gsi2);
        }
       gimple *new_stmt = SSA_NAME_DEF_STMT (new_temp);
-      if (slp)
+      if (1)
        for (j = 0; j < (int) SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node); ++j)
          slp_node->push_vec_def (new_stmt);
       else
@@ -10616,11 +10616,11 @@ vectorizable_load (vec_info *vinfo,
     }
 
   if (memory_access_type == VMAT_GATHER_SCATTER
-      || (!slp && memory_access_type == VMAT_CONTIGUOUS))
+      || (0 && memory_access_type == VMAT_CONTIGUOUS))
     grouped_load = false;
 
   if (grouped_load
-      || (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()))
+      || (1 && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()))
     {
       if (grouped_load)
        {
@@ -10634,7 +10634,7 @@ vectorizable_load (vec_info *vinfo,
        }
       /* For SLP vectorization we directly vectorize a subchain
          without permutation.  */
-      if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
+      if (1 && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
        first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
       /* For BB vectorization always use the first stmt to base
         the data ref pointer on.  */
@@ -10652,7 +10652,7 @@ vectorizable_load (vec_info *vinfo,
             in multiple different permutations (having multiple
             slp nodes which refer to the same group) the CSE
             is even wrong code.  See PR56270.  */
-         && !slp)
+         && 0)
        {
          *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
          return true;
@@ -10661,7 +10661,7 @@ vectorizable_load (vec_info *vinfo,
       group_gap_adj = 0;
 
       /* VEC_NUM is the number of vect stmts to be created for this group.  */
-      if (slp)
+      if (1)
        {
          grouped_load = false;
          /* If an SLP permutation is from N elements to N elements,
@@ -10706,7 +10706,7 @@ vectorizable_load (vec_info *vinfo,
       group_size = vec_num = 1;
       group_gap_adj = 0;
       ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
-      if (slp)
+      if (1)
        vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
     }
 
@@ -10910,7 +10910,7 @@ vectorizable_load (vec_info *vinfo,
   auto_vec<tree> vec_masks;
   if (mask && !costing_p)
     {
-      if (slp_node)
+      if (1)
        vect_get_slp_defs (SLP_TREE_CHILDREN (slp_node)[mask_index],
                           &vec_masks);
       else
@@ -10929,7 +10929,7 @@ vectorizable_load (vec_info *vinfo,
       /* For costing some adjacent vector loads, we'd like to cost with
         the total number of them once instead of cost each one by one. */
       unsigned int n_adjacent_loads = 0;
-      if (slp_node)
+      if (1)
        ncopies = slp_node->vec_stmts_size / group_size;
       for (j = 0; j < ncopies; j++)
        {
@@ -11053,7 +11053,7 @@ vectorizable_load (vec_info *vinfo,
          gimple_call_set_nothrow (call, true);
          vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
 
-         if (!slp)
+         if (0)
            dr_chain.create (group_size);
          /* Extract each vector into an SSA_NAME.  */
          for (unsigned i = 0; i < group_size; i++)
@@ -11061,23 +11061,23 @@ vectorizable_load (vec_info *vinfo,
              new_temp = read_vector_array (vinfo, stmt_info, gsi, scalar_dest,
                                            vec_array, i, need_zeroing,
                                            final_mask);
-             if (slp)
+             if (1)
                slp_node->push_vec_def (new_temp);
              else
                dr_chain.quick_push (new_temp);
            }
 
-         if (!slp)
+         if (0)
            /* Record the mapping between SSA_NAMEs and statements.  */
            vect_record_grouped_load_vectors (vinfo, stmt_info, dr_chain);
 
          /* Record that VEC_ARRAY is now dead.  */
          vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
 
-         if (!slp)
+         if (0)
            dr_chain.release ();
 
-         if (!slp_node)
+         if (0)
            *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
        }
 
@@ -11453,15 +11453,15 @@ vectorizable_load (vec_info *vinfo,
                }
 
              /* Store vector loads in the corresponding SLP_NODE.  */
-             if (slp)
+             if (1)
                slp_node->push_vec_def (new_stmt);
            }
 
-         if (!slp && !costing_p)
+         if (0 && !costing_p)
            STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
        }
 
-      if (!slp && !costing_p)
+      if (0 && !costing_p)
        *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
 
       if (costing_p && dump_enabled_p ())
@@ -12052,7 +12052,7 @@ vectorizable_load (vec_info *vinfo,
            dr_chain.quick_push (new_temp);
 
          /* Store vector loads in the corresponding SLP_NODE.  */
-         if (!costing_p && slp && !slp_perm)
+         if (!costing_p && 1 && !slp_perm)
            slp_node->push_vec_def (new_stmt);
 
          /* With SLP permutation we load the gaps as well, without
@@ -12090,7 +12090,7 @@ vectorizable_load (vec_info *vinfo,
                                         stmt_info, bump);
        }
 
-      if (slp && !slp_perm)
+      if (1 && !slp_perm)
        continue;
 
       if (slp_perm)
@@ -12152,7 +12152,7 @@ vectorizable_load (vec_info *vinfo,
        }
       dr_chain.release ();
     }
-  if (!slp && !costing_p)
+  if (0 && !costing_p)
     *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
 
   if (costing_p)
-- 
2.43.0

Reply via email to