On Wed, 1 Jun 2022 at 14:12, Richard Sandiford
<richard.sandif...@arm.com> wrote:
>
> Prathamesh Kulkarni <prathamesh.kulka...@linaro.org> writes:
> > On Thu, 12 May 2022 at 16:15, Richard Sandiford
> > <richard.sandif...@arm.com> wrote:
> >>
> >> Prathamesh Kulkarni <prathamesh.kulka...@linaro.org> writes:
> >> > On Wed, 11 May 2022 at 12:44, Richard Sandiford
> >> > <richard.sandif...@arm.com> wrote:
> >> >>
> >> >> Prathamesh Kulkarni <prathamesh.kulka...@linaro.org> writes:
> >> >> > On Fri, 6 May 2022 at 16:00, Richard Sandiford
> >> >> > <richard.sandif...@arm.com> wrote:
> >> >> >>
> >> >> >> Prathamesh Kulkarni <prathamesh.kulka...@linaro.org> writes:
> >> >> >> > diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> >> >> >> > index c24c0548724..1ef4ea2087b 100644
> >> >> >> > --- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> >> >> >> > +++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> >> >> >> > @@ -44,6 +44,14 @@
> >> >> >> >  #include "aarch64-sve-builtins-shapes.h"
> >> >> >> >  #include "aarch64-sve-builtins-base.h"
> >> >> >> >  #include "aarch64-sve-builtins-functions.h"
> >> >> >> > +#include "aarch64-builtins.h"
> >> >> >> > +#include "gimple-ssa.h"
> >> >> >> > +#include "tree-phinodes.h"
> >> >> >> > +#include "tree-ssa-operands.h"
> >> >> >> > +#include "ssa-iterators.h"
> >> >> >> > +#include "stringpool.h"
> >> >> >> > +#include "value-range.h"
> >> >> >> > +#include "tree-ssanames.h"
> >> >> >>
> >> >> >> Minor, but: I think the preferred approach is to include "ssa.h"
> >> >> >> rather than include some of these headers directly.
> >> >> >>
> >> >> >> >
> >> >> >> >  using namespace aarch64_sve;
> >> >> >> >
> >> >> >> > @@ -1207,6 +1215,56 @@ public:
> >> >> >> >      insn_code icode = code_for_aarch64_sve_ld1rq (e.vector_mode (0));
> >> >> >> >      return e.use_contiguous_load_insn (icode);
> >> >> >> >    }
> >> >> >> > +
> >> >> >> > +  gimple *
> >> >> >> > +  fold (gimple_folder &f) const OVERRIDE
> >> >> >> > +  {
> >> >> >> > +    tree arg0 = gimple_call_arg (f.call, 0);
> >> >> >> > +    tree arg1 = gimple_call_arg (f.call, 1);
> >> >> >> > +
> >> >> >> > +    /* Transform:
> >> >> >> > +       lhs = svld1rq ({-1, -1, ... }, arg1)
> >> >> >> > +       into:
> >> >> >> > +       tmp = mem_ref<int32x4_t> [(int * {ref-all}) arg1]
> >> >> >> > +       lhs = vec_perm_expr<tmp, tmp, {0, 1, 2, 3, ...}>.
> >> >> >> > +       on little endian target.  */
> >> >> >> > +
> >> >> >> > +    if (!BYTES_BIG_ENDIAN
> >> >> >> > +     && integer_all_onesp (arg0))
> >> >> >> > +      {
> >> >> >> > +     tree lhs = gimple_call_lhs (f.call);
> >> >> >> > +     auto simd_type = aarch64_get_simd_info_for_type (Int32x4_t);
> >> >> >>
> >> >> >> Does this work for other element sizes?  I would have expected it
> >> >> >> to be the (128-bit) Advanced SIMD vector associated with the same
> >> >> >> element type as the SVE vector.
> >> >> >>
> >> >> >> The testcase should cover more than just int32x4_t -> svint32_t,
> >> >> >> just to be sure.
> >> >> > In the attached patch, it obtains the corresponding Advanced SIMD type with:
> >> >> >
> >> >> > tree eltype = TREE_TYPE (lhs_type);
> >> >> > unsigned nunits = 128 / TREE_INT_CST_LOW (TYPE_SIZE (eltype));
> >> >> > tree vectype = build_vector_type (eltype, nunits);
> >> >> >
> >> >> > While this seems to work with different element sizes, I am not sure
> >> >> > if it's the correct approach?
> >> >>
> >> >> Yeah, that looks correct.  Other SVE code uses aarch64_vq_mode
> >> >> to get the vector mode associated with a .Q “element”, so an
> >> >> alternative would be:
> >> >>
> >> >>     machine_mode vq_mode = aarch64_vq_mode (TYPE_MODE (eltype)).require 
> >> >> ();
> >> >>     tree vectype = build_vector_type_for_mode (eltype, vq_mode);
> >> >>
> >> >> which is more explicit about wanting an Advanced SIMD vector.
> >> >>
> >> >> >> > +
> >> >> >> > +     tree elt_ptr_type
> >> >> >> > +       = build_pointer_type_for_mode (simd_type.eltype, VOIDmode, true);
> >> >> >> > +     tree zero = build_zero_cst (elt_ptr_type);
> >> >> >> > +
> >> >> >> > +     /* Use element type alignment.  */
> >> >> >> > +     tree access_type
> >> >> >> > +       = build_aligned_type (simd_type.itype, TYPE_ALIGN (simd_type.eltype));
> >> >> >> > +
> >> >> >> > +     tree tmp = make_ssa_name_fn (cfun, access_type, 0);
> >> >> >> > +     gimple *mem_ref_stmt
> >> >> >> > +       = gimple_build_assign (tmp, fold_build2 (MEM_REF, access_type, arg1, zero));
> >> >> >>
> >> >> >> Long line.  Might be easier to format by assigning the fold_build2
> >> >> >> result to a temporary variable.
> >> >> >>
> >> >> >> > +     gsi_insert_before (f.gsi, mem_ref_stmt, GSI_SAME_STMT);
> >> >> >> > +
> >> >> >> > +     tree mem_ref_lhs = gimple_get_lhs (mem_ref_stmt);
> >> >> >> > +     tree vectype = TREE_TYPE (mem_ref_lhs);
> >> >> >> > +     tree lhs_type = TREE_TYPE (lhs);
> >> >> >>
> >> >> >> Is this necessary?  The code above supplied the types and I wouldn't
> >> >> >> have expected them to change during the build process.
> >> >> >>
> >> >> >> > +
> >> >> >> > +     int source_nelts = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
> >> >> >> > +     vec_perm_builder sel (TYPE_VECTOR_SUBPARTS (lhs_type), source_nelts, 1);
> >> >> >> > +     for (int i = 0; i < source_nelts; i++)
> >> >> >> > +       sel.quick_push (i);
> >> >> >> > +
> >> >> >> > +     vec_perm_indices indices (sel, 1, source_nelts);
> >> >> >> > +     gcc_checking_assert (can_vec_perm_const_p (TYPE_MODE (lhs_type), indices));
> >> >> >> > +     tree mask = vec_perm_indices_to_tree (lhs_type, indices);
> >> >> >> > +     return gimple_build_assign (lhs, VEC_PERM_EXPR, mem_ref_lhs, mem_ref_lhs, mask);
> >> >> >>
> >> >> >> Nit: long line.
> >> >> >>
> >> >> >> > +      }
> >> >> >> > +
> >> >> >> > +    return NULL;
> >> >> >> > +  }
> >> >> >> >  };
> >> >> >> >
> >> >> >> >  class svld1ro_impl : public load_replicate
> >> >> >> > diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
> >> >> >> > index f650abbc4ce..47810fec804 100644
> >> >> >> > --- a/gcc/config/aarch64/aarch64.cc
> >> >> >> > +++ b/gcc/config/aarch64/aarch64.cc
> >> >> >> > @@ -23969,6 +23969,35 @@ aarch64_evpc_sve_tbl (struct expand_vec_perm_d *d)
> >> >> >> >    return true;
> >> >> >> >  }
> >> >> >> >
> >> >> >> > +/* Try to implement D using SVE dup instruction.  */
> >> >> >> > +
> >> >> >> > +static bool
> >> >> >> > +aarch64_evpc_sve_dup (struct expand_vec_perm_d *d)
> >> >> >> > +{
> >> >> >> > +  if (BYTES_BIG_ENDIAN
> >> >> >> > +      || d->perm.length ().is_constant ()
> >> >> >> > +      || !d->one_vector_p
> >> >> >> > +      || d->target == NULL
> >> >> >> > +      || d->op0 == NULL
> >> >> >>
> >> >> >> These last two lines mean that we always return false for d->testing.
> >> >> >> The idea instead is that the return value should be the same for both
> >> >> >> d->testing and !d->testing.  The difference is that for !d->testing we
> >> >> >> also emit code to do the permute.
> >> >>
> >> >> It doesn't look like the new patch addresses this.  There should be
> >> >> no checks for/uses of “d->target” and “d->op0” until after:
> >> >>
> >> >>   if (d->testing_p)
> >> >>     return true;
> >> >>
> >> >> This...
> >> >>
> >> >> >> > +      || GET_MODE_NUNITS (GET_MODE (d->target)).is_constant ()
> >> >> >>
> >> >> >> Sorry, I've forgotten the context now, but: these positive tests
> >> >> >> for is_constant surprised me.  Do we really only want to do this
> >> >> >> for variable-length SVE code generation, rather than fixed-length?
> >> >> >>
> >> >> >> > +      || !GET_MODE_NUNITS (GET_MODE (d->op0)).is_constant ())
> >> >> >> > +    return false;
> >> >> >> > +
> >> >> >> > +  if (d->testing_p)
> >> >> >> > +    return true;
> >> >> >>
> >> >> >> This should happen after the later tests, once we're sure that the
> >> >> >> permute vector has the right form.  If the issue is that op0 isn't
> >> >> >> provided for testing then I think the hook needs to be passed the
> >> >> >> input mode alongside the result mode.
> >> >>
> >> >> ...was my guess about why the checks were there.
> >> > Ah right, sorry. IIUC, if d->testing is true, then d->op0 could be NULL?
> >> > In that case, how do we obtain the input mode?
> >>
> >> Well, like I say, I think we might need to extend the vec_perm_const
> >> hook interface so that it gets passed the input mode, now that that
> >> isn't necessarily the same as the output mode.
> >>
> >> It would be good to do that as a separate prepatch, since it would
> >> affect other targets too.  And for safety, that patch should make all
> >> existing implementations of the hook return false if the modes aren't
> >> equal, including for aarch64.  The current patch can then make the
> >> aarch64 hook treat the dup case as an exception.
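
For reference, my understanding of that prepatch is that every existing
implementation of the vec_perm_const hook simply bails out when the two
modes differ; a sketch of the aarch64 form of that check (the check the
last hunks of the attached patch remove again) would be:

  static bool
  aarch64_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
                                    rtx target, rtx op0, rtx op1,
                                    const vec_perm_indices &sel)
  {
    if (vmode != op_mode)
      return false;
    /* ...existing mode-specific handling...  */
  }
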
> > Hi Richard,
> > I have attached an updated patch, which tries to address the above suggestions.
> > I had a question about a couple of things:
> > (1) The patch resulted in an ICE for float operands, because we were
> > using lhs_type to build the mask, which is a float vector type.
> > So I adjusted the patch to make the mask a vector of integer_type_node with
> > length == length(lhs_type) if lhs has a float vector type.
> > Does that look OK?
>
> Let's use:
>
>   build_vector_type (ssizetype, lhs_len)
>
> unconditionally, even for integers.
OK thanks, done in attached patch.
>
> > (2) I moved the check for d->vmode != op_mode (and only checking for dup
> > in that case) inside vec_perm_const_1, since it does some initial
> > bookkeeping (like swapping operands) before calling the respective functions.
> > Does that look OK?
> >
> > Thanks,
> > Prathamesh
> >>
> >> Thanks,
> >> Richard
> >
> > diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> > index bee410929bd..48e849bec34 100644
> > --- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> > +++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> > @@ -44,6 +44,7 @@
> >  #include "aarch64-sve-builtins-shapes.h"
> >  #include "aarch64-sve-builtins-base.h"
> >  #include "aarch64-sve-builtins-functions.h"
> > +#include "ssa.h"
> >
> >  using namespace aarch64_sve;
> >
> > @@ -1207,6 +1208,66 @@ public:
> >      insn_code icode = code_for_aarch64_sve_ld1rq (e.vector_mode (0));
> >      return e.use_contiguous_load_insn (icode);
> >    }
> > +
> > +  gimple *
> > +  fold (gimple_folder &f) const override
> > +  {
> > +    tree arg0 = gimple_call_arg (f.call, 0);
> > +    tree arg1 = gimple_call_arg (f.call, 1);
> > +
> > +    /* Transform:
> > +       lhs = svld1rq ({-1, -1, ... }, arg1)
> > +       into:
> > +       tmp = mem_ref<vectype> [(int * {ref-all}) arg1]
> > +       lhs = vec_perm_expr<tmp, tmp, {0, 1, 2, 3, ...}>.
> > +       on little endian target.
> > +       vectype is the corresponding ADVSIMD type.  */
> > +
> > +    if (!BYTES_BIG_ENDIAN
> > +     && integer_all_onesp (arg0))
> > +      {
> > +     tree lhs = gimple_call_lhs (f.call);
> > +     tree lhs_type = TREE_TYPE (lhs);
> > +     poly_uint64 lhs_len = TYPE_VECTOR_SUBPARTS (lhs_type);
> > +     tree eltype = TREE_TYPE (lhs_type);
> > +
> > +     scalar_mode elmode = GET_MODE_INNER (TYPE_MODE (lhs_type));
> > +     machine_mode vq_mode = aarch64_vq_mode (elmode).require ();
> > +     tree vectype = build_vector_type_for_mode (eltype, vq_mode);
> > +
> > +     tree elt_ptr_type
> > +       = build_pointer_type_for_mode (eltype, VOIDmode, true);
> > +     tree zero = build_zero_cst (elt_ptr_type);
> > +
> > +     /* Use element type alignment.  */
> > +     tree access_type
> > +       = build_aligned_type (vectype, TYPE_ALIGN (eltype));
> > +
> > +     tree mem_ref_lhs = make_ssa_name_fn (cfun, access_type, 0);
> > +     tree mem_ref_op = fold_build2 (MEM_REF, access_type, arg1, zero);
> > +     gimple *mem_ref_stmt
> > +       = gimple_build_assign (mem_ref_lhs, mem_ref_op);
> > +     gsi_insert_before (f.gsi, mem_ref_stmt, GSI_SAME_STMT);
> > +
> > +     int source_nelts = TYPE_VECTOR_SUBPARTS (access_type).to_constant ();
> > +     vec_perm_builder sel (lhs_len, source_nelts, 1);
> > +     for (int i = 0; i < source_nelts; i++)
> > +       sel.quick_push (i);
> > +
> > +     vec_perm_indices indices (sel, 1, source_nelts);
> > +     gcc_checking_assert (can_vec_perm_const_p (TYPE_MODE (lhs_type),
> > +                                                TYPE_MODE (access_type),
> > +                                                indices));
> > +     tree mask_type = (FLOAT_TYPE_P (eltype))
> > +                      ? build_vector_type (integer_type_node, lhs_len)
> > +                      : lhs_type;
> > +     tree mask = vec_perm_indices_to_tree (mask_type, indices);
> > +     return gimple_build_assign (lhs, VEC_PERM_EXPR,
> > +                                 mem_ref_lhs, mem_ref_lhs, mask);
> > +      }
> > +
> > +    return NULL;
> > +  }
> >  };
> >
> >  class svld1ro_impl : public load_replicate
> > diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
> > index d4c575ce976..ae8e913d525 100644
> > --- a/gcc/config/aarch64/aarch64.cc
> > +++ b/gcc/config/aarch64/aarch64.cc
> > @@ -23401,7 +23401,8 @@ struct expand_vec_perm_d
> >    bool testing_p;
> >  };
> >
> > -static bool aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
> > +static bool aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d,
> > +                                          machine_mode op_mode);
> >
> >  /* Generate a variable permutation.  */
> >
> > @@ -23638,7 +23639,7 @@ aarch64_evpc_reencode (struct expand_vec_perm_d *d)
> >    newd.one_vector_p = d->one_vector_p;
> >
> >    newd.perm.new_vector (newpermconst, newd.one_vector_p ? 1 : 2, nelt / 2);
> > -  return aarch64_expand_vec_perm_const_1 (&newd);
> > +  return aarch64_expand_vec_perm_const_1 (&newd, newd.vmode);
> >  }
> >
> >  /* Recognize patterns suitable for the UZP instructions.  */
> > @@ -23945,6 +23946,32 @@ aarch64_evpc_sve_tbl (struct expand_vec_perm_d *d)
> >    return true;
> >  }
> >
> > +/* Try to implement D using SVE dup instruction.  */
> > +
> > +static bool
> > +aarch64_evpc_sve_dup (struct expand_vec_perm_d *d, machine_mode op_mode)
> > +{
> > +  if (BYTES_BIG_ENDIAN
> > +      || d->perm.length ().is_constant ()
>
> Sorry, I've forgotten: why do we need this is_constant check?
Oh, I guess I had put it there to check whether the target vector is of
variable length, sorry.
I assume we don't need this; removed in the attached patch.
>
> > +      || !d->one_vector_p
> > +      || aarch64_classify_vector_mode (op_mode) != VEC_ADVSIMD)
> > +    return false;
>
> We need to check that nelts_per_pattern is 1 as well.
OK thanks, done.
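As a concrete example (as I understand the encoding): for the svint32_t case
the selector is {0, 1, 2, 3, 0, 1, 2, 3, ...}, which the fold builds with
vec_perm_builder sel (lhs_len, 4, 1), i.e. npatterns == 4
(== GET_MODE_NUNITS (V4SImode)) and nelts_per_pattern == 1, so both checks
accept it; a selector whose patterns are not a plain repetition
(nelts_per_pattern > 1) is now rejected.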
>
> > +  int npatterns = d->perm.encoding ().npatterns ();
> > +  if (!known_eq (npatterns, GET_MODE_NUNITS (op_mode)))
> > +    return false;
> > +
> > +  for (int i = 0; i < npatterns; i++)
> > +    if (!known_eq (d->perm[i], i))
> > +      return false;
> > +
> > +  if (d->testing_p)
> > +    return true;
> > +
> > +  aarch64_expand_sve_dupq (d->target, GET_MODE (d->target), d->op0);
> > +  return true;
> > +}
> > +
> >  /* Try to implement D using SVE SEL instruction.  */
> >
> >  static bool
> > @@ -24066,7 +24093,8 @@ aarch64_evpc_ins (struct expand_vec_perm_d *d)
> >  }
> >
> >  static bool
> > -aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
> > +aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d,
> > +                              machine_mode op_mode)
>
> I think we should add op_mode to expand_vec_perm_d instead.
> Let's also add an op_vec_flags to cache the aarch64_classify_vector_mode
> result.
OK thanks, done.
>
> >  {
> >    /* The pattern matching functions above are written to look for a small
> >       number to begin the sequence (0, 1, N/2).  If we begin with an index
> > @@ -24084,6 +24112,12 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
> >         || d->vec_flags == VEC_SVE_PRED)
> >        && known_gt (nelt, 1))
> >      {
> > +      /* If operand and result modes differ, then only check
> > +      for dup case.  */
> > +      if (d->vmode != op_mode)
> > +     return (d->vec_flags == VEC_SVE_DATA)
> > +             ? aarch64_evpc_sve_dup (d, op_mode) : false;
> > +
>
> I think it'd be more future-proof to format this as:
>
>     if (d->vmode == d->op_mode)
>       {
>         …existing code…
>       }
>     else
>       {
>         if (aarch64_evpc_sve_dup (d))
>           return true;
>       }
>
> with the d->vec_flags == VEC_SVE_DATA check being in aarch64_evpc_sve_dup,
> alongside the op_mode check.  I think we'll be adding more checks here
> over time.
Um I was wondering if we should structure it as:
if (d->vmode == d->op_mode)
  {
     ...existing code...
  }
if (aarch64_evpc_sve_dup (d))
  return true;

So we would check for dup irrespective of whether d->vmode == d->op_mode?

Thanks,
Prathamesh
>
> >        if (aarch64_evpc_rev_local (d))
> >       return true;
> >        else if (aarch64_evpc_rev_global (d))
> > @@ -24105,7 +24139,12 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
> >        else if (aarch64_evpc_reencode (d))
> >       return true;
> >        if (d->vec_flags == VEC_SVE_DATA)
> > -     return aarch64_evpc_sve_tbl (d);
> > +     {
> > +       if (aarch64_evpc_sve_tbl (d))
> > +         return true;
> > +       else if (aarch64_evpc_sve_dup (d, op_mode))
> > +         return true;
> > +     }
> >        else if (d->vec_flags == VEC_ADVSIMD)
> >       return aarch64_evpc_tbl (d);
> >      }
>
> Is this part still needed, given the above?
>
> Thanks,
> Richard
>
> > @@ -24119,9 +24158,6 @@ aarch64_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
> >                                 rtx target, rtx op0, rtx op1,
> >                                 const vec_perm_indices &sel)
> >  {
> > -  if (vmode != op_mode)
> > -    return false;
> > -
> >    struct expand_vec_perm_d d;
> >
> >    /* Check whether the mask can be applied to a single vector.  */
> > @@ -24154,10 +24190,10 @@ aarch64_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
> >    d.testing_p = !target;
> >
> >    if (!d.testing_p)
> > -    return aarch64_expand_vec_perm_const_1 (&d);
> > +    return aarch64_expand_vec_perm_const_1 (&d, op_mode);
> >
> >    rtx_insn *last = get_last_insn ();
> > -  bool ret = aarch64_expand_vec_perm_const_1 (&d);
> > +  bool ret = aarch64_expand_vec_perm_const_1 (&d, op_mode);
> >    gcc_assert (last == get_last_insn ());
> >
> >    return ret;
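
For reference, a minimal example of the kind of call the fold is meant to
handle (my own sketch, not taken from the patch's testsuite changes):

  #include <arm_sve.h>

  /* With the fold, the ld1rq below should become an Advanced SIMD load of
     x[0..3] followed by a VEC_PERM_EXPR that repeats those four lanes
     across the whole SVE vector.  */
  svint32_t
  foo (int32_t *x)
  {
    return svld1rq_s32 (svptrue_b32 (), x);
  }
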
diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
index bee410929bd..1a804b1ab73 100644
--- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
+++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
@@ -44,6 +44,7 @@
 #include "aarch64-sve-builtins-shapes.h"
 #include "aarch64-sve-builtins-base.h"
 #include "aarch64-sve-builtins-functions.h"
+#include "ssa.h"
 
 using namespace aarch64_sve;
 
@@ -1207,6 +1208,64 @@ public:
     insn_code icode = code_for_aarch64_sve_ld1rq (e.vector_mode (0));
     return e.use_contiguous_load_insn (icode);
   }
+
+  gimple *
+  fold (gimple_folder &f) const override
+  {
+    tree arg0 = gimple_call_arg (f.call, 0);
+    tree arg1 = gimple_call_arg (f.call, 1);
+
+    /* Transform:
+       lhs = svld1rq ({-1, -1, ... }, arg1)
+       into:
+       tmp = mem_ref<vectype> [(int * {ref-all}) arg1]
+       lhs = vec_perm_expr<tmp, tmp, {0, 1, 2, 3, ...}>.
+       on little endian target.
+       vectype is the corresponding ADVSIMD type.  */
+
+    if (!BYTES_BIG_ENDIAN
+       && integer_all_onesp (arg0))
+      {
+       tree lhs = gimple_call_lhs (f.call);
+       tree lhs_type = TREE_TYPE (lhs);
+       poly_uint64 lhs_len = TYPE_VECTOR_SUBPARTS (lhs_type);
+       tree eltype = TREE_TYPE (lhs_type);
+
+       scalar_mode elmode = GET_MODE_INNER (TYPE_MODE (lhs_type));
+       machine_mode vq_mode = aarch64_vq_mode (elmode).require ();
+       tree vectype = build_vector_type_for_mode (eltype, vq_mode);
+
+       tree elt_ptr_type
+         = build_pointer_type_for_mode (eltype, VOIDmode, true);
+       tree zero = build_zero_cst (elt_ptr_type);
+
+       /* Use element type alignment.  */
+       tree access_type
+         = build_aligned_type (vectype, TYPE_ALIGN (eltype));
+
+       tree mem_ref_lhs = make_ssa_name_fn (cfun, access_type, 0);
+       tree mem_ref_op = fold_build2 (MEM_REF, access_type, arg1, zero);
+       gimple *mem_ref_stmt
+         = gimple_build_assign (mem_ref_lhs, mem_ref_op);
+       gsi_insert_before (f.gsi, mem_ref_stmt, GSI_SAME_STMT);
+
+       int source_nelts = TYPE_VECTOR_SUBPARTS (access_type).to_constant ();
+       vec_perm_builder sel (lhs_len, source_nelts, 1);
+       for (int i = 0; i < source_nelts; i++)
+         sel.quick_push (i);
+
+       vec_perm_indices indices (sel, 1, source_nelts);
+       gcc_checking_assert (can_vec_perm_const_p (TYPE_MODE (lhs_type),
+                                                  TYPE_MODE (access_type),
+                                                  indices));
+       tree mask_type = build_vector_type (ssizetype, lhs_len);
+       tree mask = vec_perm_indices_to_tree (mask_type, indices);
+       return gimple_build_assign (lhs, VEC_PERM_EXPR,
+                                   mem_ref_lhs, mem_ref_lhs, mask);
+      }
+
+    return NULL;
+  }
 };
 
 class svld1ro_impl : public load_replicate
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index d4c575ce976..bb24701b0d2 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -23395,8 +23395,10 @@ struct expand_vec_perm_d
 {
   rtx target, op0, op1;
   vec_perm_indices perm;
+  machine_mode op_mode;
   machine_mode vmode;
   unsigned int vec_flags;
+  unsigned int op_vec_flags;
   bool one_vector_p;
   bool testing_p;
 };
@@ -23945,6 +23947,32 @@ aarch64_evpc_sve_tbl (struct expand_vec_perm_d *d)
   return true;
 }
 
+/* Try to implement D using SVE dup instruction.  */
+
+static bool
+aarch64_evpc_sve_dup (struct expand_vec_perm_d *d)
+{
+  if (BYTES_BIG_ENDIAN
+      || !d->one_vector_p
+      || d->vec_flags != VEC_SVE_DATA
+      || d->op_vec_flags != VEC_ADVSIMD
+      || d->perm.encoding ().nelts_per_pattern () != 1
+      || !known_eq (d->perm.encoding ().npatterns (),
+                   GET_MODE_NUNITS (d->op_mode)))
+    return false;
+
+  int npatterns = d->perm.encoding ().npatterns ();
+  for (int i = 0; i < npatterns; i++)
+    if (!known_eq (d->perm[i], i))
+      return false;
+
+  if (d->testing_p)
+    return true;
+
+  aarch64_expand_sve_dupq (d->target, GET_MODE (d->target), d->op0);
+  return true;
+}
+
 /* Try to implement D using SVE SEL instruction.  */
 
 static bool
@@ -24084,30 +24112,39 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
        || d->vec_flags == VEC_SVE_PRED)
       && known_gt (nelt, 1))
     {
-      if (aarch64_evpc_rev_local (d))
-       return true;
-      else if (aarch64_evpc_rev_global (d))
-       return true;
-      else if (aarch64_evpc_ext (d))
-       return true;
-      else if (aarch64_evpc_dup (d))
-       return true;
-      else if (aarch64_evpc_zip (d))
-       return true;
-      else if (aarch64_evpc_uzp (d))
-       return true;
-      else if (aarch64_evpc_trn (d))
-       return true;
-      else if (aarch64_evpc_sel (d))
-       return true;
-      else if (aarch64_evpc_ins (d))
-       return true;
-      else if (aarch64_evpc_reencode (d))
+      /* If operand and result modes differ, then only check
+        for dup case.  */
+      if (d->vmode == d->op_mode)
+       {
+         if (aarch64_evpc_rev_local (d))
+           return true;
+         else if (aarch64_evpc_rev_global (d))
+           return true;
+         else if (aarch64_evpc_ext (d))
+           return true;
+         else if (aarch64_evpc_dup (d))
+           return true;
+         else if (aarch64_evpc_zip (d))
+           return true;
+         else if (aarch64_evpc_uzp (d))
+           return true;
+         else if (aarch64_evpc_trn (d))
+           return true;
+         else if (aarch64_evpc_sel (d))
+           return true;
+         else if (aarch64_evpc_ins (d))
+           return true;
+         else if (aarch64_evpc_reencode (d))
+           return true;
+
+         if (d->vec_flags == VEC_SVE_DATA)
+           return aarch64_evpc_sve_tbl (d);
+         else if (d->vec_flags == VEC_ADVSIMD)
+           return aarch64_evpc_tbl (d);
+       }
+
+      if (aarch64_evpc_sve_dup (d))
        return true;
-      if (d->vec_flags == VEC_SVE_DATA)
-       return aarch64_evpc_sve_tbl (d);
-      else if (d->vec_flags == VEC_ADVSIMD)
-       return aarch64_evpc_tbl (d);
     }
   return false;
 }
@@ -24119,9 +24156,6 @@ aarch64_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
                                  rtx target, rtx op0, rtx op1,
                                  const vec_perm_indices &sel)
 {
-  if (vmode != op_mode)
-    return false;
-
   struct expand_vec_perm_d d;
 
   /* Check whether the mask can be applied to a single vector.  */
@@ -24145,6 +24179,8 @@ aarch64_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
                     sel.nelts_per_input ());
   d.vmode = vmode;
   d.vec_flags = aarch64_classify_vector_mode (d.vmode);
+  d.op_mode = op_mode;
+  d.op_vec_flags = aarch64_classify_vector_mode (d.op_mode);
   d.target = target;
   d.op0 = op0 ? force_reg (vmode, op0) : NULL_RTX;
   if (op0 == op1)
