On Wed, 11 May 2022 at 12:44, Richard Sandiford
<[email protected]> wrote:
>
> Prathamesh Kulkarni <[email protected]> writes:
> > On Fri, 6 May 2022 at 16:00, Richard Sandiford
> > <[email protected]> wrote:
> >>
> >> Prathamesh Kulkarni <[email protected]> writes:
> >> > diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> >> > index c24c0548724..1ef4ea2087b 100644
> >> > --- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> >> > +++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> >> > @@ -44,6 +44,14 @@
> >> > #include "aarch64-sve-builtins-shapes.h"
> >> > #include "aarch64-sve-builtins-base.h"
> >> > #include "aarch64-sve-builtins-functions.h"
> >> > +#include "aarch64-builtins.h"
> >> > +#include "gimple-ssa.h"
> >> > +#include "tree-phinodes.h"
> >> > +#include "tree-ssa-operands.h"
> >> > +#include "ssa-iterators.h"
> >> > +#include "stringpool.h"
> >> > +#include "value-range.h"
> >> > +#include "tree-ssanames.h"
> >>
> >> Minor, but: I think the preferred approach is to include "ssa.h"
> >> rather than include some of these headers directly.
> >>
> >> >
> >> > using namespace aarch64_sve;
> >> >
> >> > @@ -1207,6 +1215,56 @@ public:
> >> > insn_code icode = code_for_aarch64_sve_ld1rq (e.vector_mode (0));
> >> > return e.use_contiguous_load_insn (icode);
> >> > }
> >> > +
> >> > + gimple *
> >> > + fold (gimple_folder &f) const OVERRIDE
> >> > + {
> >> > + tree arg0 = gimple_call_arg (f.call, 0);
> >> > + tree arg1 = gimple_call_arg (f.call, 1);
> >> > +
> >> > + /* Transform:
> >> > + lhs = svld1rq ({-1, -1, ... }, arg1)
> >> > + into:
> >> > + tmp = mem_ref<int32x4_t> [(int * {ref-all}) arg1]
> >> > + lhs = vec_perm_expr<tmp, tmp, {0, 1, 2, 3, ...}>.
> >> > + on little-endian targets. */
> >> > +
> >> > + if (!BYTES_BIG_ENDIAN
> >> > + && integer_all_onesp (arg0))
> >> > + {
> >> > + tree lhs = gimple_call_lhs (f.call);
> >> > + auto simd_type = aarch64_get_simd_info_for_type (Int32x4_t);
> >>
> >> Does this work for other element sizes? I would have expected it
> >> to be the (128-bit) Advanced SIMD vector associated with the same
> >> element type as the SVE vector.
> >>
> >> The testcase should cover more than just int32x4_t -> svint32_t,
> >> just to be sure.
> > In the attached patch, it obtains the corresponding Advanced SIMD type with:
> >
> > tree eltype = TREE_TYPE (lhs_type);
> > unsigned nunits = 128 / TREE_INT_CST_LOW (TYPE_SIZE (eltype));
> > tree vectype = build_vector_type (eltype, nunits);
> >
> > While this seems to work with different element sizes, I am not sure if
> > it's the correct approach?
>
> Yeah, that looks correct. Other SVE code uses aarch64_vq_mode
> to get the vector mode associated with a .Q “element”, so an
> alternative would be:
>
> machine_mode vq_mode = aarch64_vq_mode (TYPE_MODE (eltype)).require ();
> tree vectype = build_vector_type_for_mode (eltype, vq_mode);
>
> which is more explicit about wanting an Advanced SIMD vector.
>
> >> > +
> >> > + tree elt_ptr_type
> >> > + = build_pointer_type_for_mode (simd_type.eltype, VOIDmode, true);
> >> > + tree zero = build_zero_cst (elt_ptr_type);
> >> > +
> >> > + /* Use element type alignment. */
> >> > + tree access_type
> >> > + = build_aligned_type (simd_type.itype, TYPE_ALIGN (simd_type.eltype));
> >> > +
> >> > + tree tmp = make_ssa_name_fn (cfun, access_type, 0);
> >> > + gimple *mem_ref_stmt
> >> > + = gimple_build_assign (tmp, fold_build2 (MEM_REF, access_type, arg1, zero));
> >>
> >> Long line. Might be easier to format by assigning the fold_build2 result
> >> to a temporary variable.
> >>
> >> > + gsi_insert_before (f.gsi, mem_ref_stmt, GSI_SAME_STMT);
> >> > +
> >> > + tree mem_ref_lhs = gimple_get_lhs (mem_ref_stmt);
> >> > + tree vectype = TREE_TYPE (mem_ref_lhs);
> >> > + tree lhs_type = TREE_TYPE (lhs);
> >>
> >> Is this necessary? The code above supplied the types and I wouldn't
> >> have expected them to change during the build process.
> >>
> >> > +
> >> > + int source_nelts = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
> >> > + vec_perm_builder sel (TYPE_VECTOR_SUBPARTS (lhs_type), source_nelts, 1);
> >> > + for (int i = 0; i < source_nelts; i++)
> >> > + sel.quick_push (i);
> >> > +
> >> > + vec_perm_indices indices (sel, 1, source_nelts);
> >> > + gcc_checking_assert (can_vec_perm_const_p (TYPE_MODE (lhs_type), indices));
> >> > + tree mask = vec_perm_indices_to_tree (lhs_type, indices);
> >> > + return gimple_build_assign (lhs, VEC_PERM_EXPR, mem_ref_lhs, mem_ref_lhs, mask);
> >>
> >> Nit: long line.
> >>
> >> > + }
> >> > +
> >> > + return NULL;
> >> > + }
> >> > };
> >> >
> >> > class svld1ro_impl : public load_replicate
> >> > diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
> >> > index f650abbc4ce..47810fec804 100644
> >> > --- a/gcc/config/aarch64/aarch64.cc
> >> > +++ b/gcc/config/aarch64/aarch64.cc
> >> > @@ -23969,6 +23969,35 @@ aarch64_evpc_sve_tbl (struct expand_vec_perm_d *d)
> >> > return true;
> >> > }
> >> >
> >> > +/* Try to implement D using SVE dup instruction. */
> >> > +
> >> > +static bool
> >> > +aarch64_evpc_sve_dup (struct expand_vec_perm_d *d)
> >> > +{
> >> > + if (BYTES_BIG_ENDIAN
> >> > + || d->perm.length ().is_constant ()
> >> > + || !d->one_vector_p
> >> > + || d->target == NULL
> >> > + || d->op0 == NULL
> >>
> >> These last two lines mean that we always return false for d->testing.
> >> The idea instead is that the return value should be the same for both
> >> d->testing and !d->testing. The difference is that for !d->testing we
> >> also emit code to do the permute.
>
> It doesn't look like the new patch addresses this. There should be
> no checks for/uses of “d->target” and “d->op0” until after:
>
> if (d->testing_p)
> return true;
>
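> For example, a rough (untested) sketch of the structure I'd expect,
> assuming the permute-index checks can be phrased in terms of d->perm
> and the modes alone:
>
>   if (BYTES_BIG_ENDIAN || !d->one_vector_p)
>     return false;
>
>   /* ...validate d->perm here, without looking at d->target/d->op0...  */
>
>   if (d->testing_p)
>     return true;
>
>   /* Only the code-generation path may use d->target and d->op0.  */
>   aarch64_expand_sve_dupq (d->target, GET_MODE (d->target), d->op0);
>   return true;
>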
> This...
>
> >> > + || GET_MODE_NUNITS (GET_MODE (d->target)).is_constant ()
> >>
> >> Sorry, I've forgotten the context now, but: these positive tests
> >> for is_constant surprised me. Do we really only want to do this
> >> for variable-length SVE code generation, rather than fixed-length?
> >>
> >> > + || !GET_MODE_NUNITS (GET_MODE (d->op0)).is_constant ())
> >> > + return false;
> >> > +
> >> > + if (d->testing_p)
> >> > + return true;
> >>
> >> This should happen after the later tests, once we're sure that the
> >> permute vector has the right form. If the issue is that op0 isn't
> >> provided for testing then I think the hook needs to be passed the
> >> input mode alongside the result mode.
>
> ...was my guess about why the checks were there.
Ah right, sorry. IIUC, if d->testing_p is true, then d->op0 could be NULL?
In that case, how do we obtain the input mode?
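Would it work to extend the hook to pass the input mode alongside the
result mode, as you suggested above? Something like this (hypothetical
signature, untested):

  /* Pass OP_MODE (the mode of the input vectors) as well as MODE
     (the mode of the result), so that the target can validate the
     permute even when d->op0 is NULL because d->testing_p is true.  */
  bool vec_perm_const (machine_mode mode, machine_mode op_mode,
                       rtx target, rtx op0, rtx op1,
                       const vec_perm_indices &sel);

aarch64_evpc_sve_dup could then test op_mode instead of
GET_MODE (d->op0).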
Thanks,
Prathamesh
>
> >> It might then be better to test:
> >>
> >> aarch64_classify_vector_mode (...input_mode...) == VEC_ADVSIMD
> >>
> >> (despite what I said earlier, about testing is_constant, sorry).
> > Thanks for the suggestions; I tried to address them in the attached patch.
> > Does it look OK after bootstrap+test?
> >
> > The patch seems to generate the same code for different vector types.
> > For example:
> >
> > svint32_t foo (int32x4_t x)
> > {
> >   return svld1rq (svptrue_b8 (), &x[0]);
> > }
> >
> > svint16_t foo2 (int16x8_t x)
> > {
> >   return svld1rq_s16 (svptrue_b8 (), &x[0]);
> > }
> >
> > .optimized dump:
> > ;; Function foo (foo, funcdef_no=4350, decl_uid=29928, cgraph_uid=4351, symbol_order=4350)
> >
> > svint32_t foo (int32x4_t x)
> > {
> >   svint32_t _2;
> >
> >   <bb 2> [local count: 1073741824]:
> >   _2 = VEC_PERM_EXPR <x_3(D), x_3(D), { 0, 1, 2, 3, ... }>;
> >   return _2;
> >
> > }
> >
> > ;; Function foo2 (foo2, funcdef_no=4351, decl_uid=29931, cgraph_uid=4352, symbol_order=4351)
> >
> > svint16_t foo2 (int16x8_t x)
> > {
> >   svint16_t _2;
> >
> >   <bb 2> [local count: 1073741824]:
> >   _2 = VEC_PERM_EXPR <x_3(D), x_3(D), { 0, 1, 2, 3, 4, 5, 6, 7, ... }>;
> >   return _2;
> >
> > }
> >
> > resulting in code-gen:
> > foo:
> >         dup     z0.q, z0.q[0]
> >         ret
> >
> > foo2:
> >         dup     z0.q, z0.q[0]
> >         ret
> >
> > I suppose this is correct, since in both cases it's replicating the
> > entire 128-bit vector (irrespective of element sizes)?
>
> Yeah, the output code will be the same for all cases, since the permute
> always replicates the first 128 bits of the input regardless of the
> element size.
>
> > Thanks,
> > Prathamesh
> >>
> >> > +
> >> > + int npatterns = d->perm.encoding ().npatterns ();
> >> > + if (!known_eq (npatterns, GET_MODE_NUNITS (GET_MODE (d->op0))))
> >> > + return false;
> >> > +
> >> > + for (int i = 0; i < npatterns; i++)
> >> > + if (!known_eq (d->perm[i], i))
> >> > + return false;
> >> > +
> >> > + aarch64_expand_sve_dupq (d->target, GET_MODE (d->target), d->op0);
> >> > + return true;
> >> > +}
> >> > +
> >> > /* Try to implement D using SVE SEL instruction. */
> >> >
> >> > static bool
> >> > @@ -24129,7 +24158,12 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
> >> > else if (aarch64_evpc_reencode (d))
> >> > return true;
> >> > if (d->vec_flags == VEC_SVE_DATA)
> >> > - return aarch64_evpc_sve_tbl (d);
> >> > + {
> >> > + if (aarch64_evpc_sve_dup (d))
> >> > + return true;
> >> > + else if (aarch64_evpc_sve_tbl (d))
> >> > + return true;
> >> > + }
> >> > else if (d->vec_flags == VEC_ADVSIMD)
> >> > return aarch64_evpc_tbl (d);
> >> > }
> >> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr96463.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr96463.c
> >> > new file mode 100644
> >> > index 00000000000..35100a9e01c
> >> > --- /dev/null
> >> > +++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr96463.c
> >> > @@ -0,0 +1,17 @@
> >> > +/* { dg-do compile } */
> >> > +/* { dg-options "-O3" } */
> >> > +
> >> > +#include "arm_neon.h"
> >> > +#include "arm_sve.h"
> >> > +
> >> > +svint32_t f1 (int32x4_t x)
> >> > +{
> >> > + return svld1rq (svptrue_b8 (), &x[0]);
> >> > +}
> >> > +
> >> > +svint32_t f2 (int *x)
> >> > +{
> >> > + return svld1rq (svptrue_b8 (), x);
> >> > +}
> >> > +
> >> > +/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.q, z[0-9]+\.q\[0\]} 2 { target aarch64_little_endian } } } */
> >
> > diff --git a/gcc/config/aarch64/aarch64-sve-builtins-base.cc b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> > index c24c0548724..8a2e5b886e4 100644
> > --- a/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> > +++ b/gcc/config/aarch64/aarch64-sve-builtins-base.cc
> > @@ -44,6 +44,7 @@
> > #include "aarch64-sve-builtins-shapes.h"
> > #include "aarch64-sve-builtins-base.h"
> > #include "aarch64-sve-builtins-functions.h"
> > +#include "ssa.h"
> >
> > using namespace aarch64_sve;
> >
> > @@ -1207,6 +1208,59 @@ public:
> > insn_code icode = code_for_aarch64_sve_ld1rq (e.vector_mode (0));
> > return e.use_contiguous_load_insn (icode);
> > }
> > +
> > + gimple *
> > + fold (gimple_folder &f) const OVERRIDE
> > + {
> > + tree arg0 = gimple_call_arg (f.call, 0);
> > + tree arg1 = gimple_call_arg (f.call, 1);
> > +
> > + /* Transform:
> > + lhs = svld1rq ({-1, -1, ... }, arg1)
> > + into:
> > + tmp = mem_ref<vectype> [(int * {ref-all}) arg1]
> > + lhs = vec_perm_expr<tmp, tmp, {0, 1, 2, 3, ...}>.
> > + on little-endian targets, where vectype is the corresponding
> > + Advanced SIMD type. */
> > +
> > + if (!BYTES_BIG_ENDIAN
> > + && integer_all_onesp (arg0))
> > + {
> > + tree lhs = gimple_call_lhs (f.call);
> > + tree lhs_type = TREE_TYPE (lhs);
> > + tree eltype = TREE_TYPE (lhs_type);
> > + unsigned nunits = 128 / TREE_INT_CST_LOW (TYPE_SIZE (eltype));
> > + tree vectype = build_vector_type (eltype, nunits);
> > +
> > + tree elt_ptr_type
> > + = build_pointer_type_for_mode (eltype, VOIDmode, true);
> > + tree zero = build_zero_cst (elt_ptr_type);
> > +
> > + /* Use element type alignment. */
> > + tree access_type
> > + = build_aligned_type (vectype, TYPE_ALIGN (eltype));
> > +
> > + tree mem_ref_lhs = make_ssa_name_fn (cfun, access_type, 0);
> > + tree mem_ref_op = fold_build2 (MEM_REF, access_type, arg1, zero);
> > + gimple *mem_ref_stmt
> > + = gimple_build_assign (mem_ref_lhs, mem_ref_op);
> > + gsi_insert_before (f.gsi, mem_ref_stmt, GSI_SAME_STMT);
> > +
> > + int source_nelts = TYPE_VECTOR_SUBPARTS (access_type).to_constant ();
> > + vec_perm_builder sel (TYPE_VECTOR_SUBPARTS (lhs_type), source_nelts, 1);
> > + for (int i = 0; i < source_nelts; i++)
> > + sel.quick_push (i);
> > +
> > + vec_perm_indices indices (sel, 1, source_nelts);
> > + gcc_checking_assert (can_vec_perm_const_p (TYPE_MODE (lhs_type),
> > + indices));
> > + tree mask = vec_perm_indices_to_tree (lhs_type, indices);
> > + return gimple_build_assign (lhs, VEC_PERM_EXPR,
> > + mem_ref_lhs, mem_ref_lhs, mask);
> > + }
> > +
> > + return NULL;
> > + }
> > };
> >
> > class svld1ro_impl : public load_replicate
> > diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
> > index f650abbc4ce..072ec9bd153 100644
> > --- a/gcc/config/aarch64/aarch64.cc
> > +++ b/gcc/config/aarch64/aarch64.cc
> > @@ -23969,6 +23969,35 @@ aarch64_evpc_sve_tbl (struct expand_vec_perm_d *d)
> > return true;
> > }
> >
> > +/* Try to implement D using SVE dup instruction. */
> > +
> > +static bool
> > +aarch64_evpc_sve_dup (struct expand_vec_perm_d *d)
> > +{
> > + if (BYTES_BIG_ENDIAN
> > + || d->perm.length ().is_constant ()
> > + || !d->one_vector_p
> > + || d->target == NULL
> > + || d->op0 == NULL
> > + || (aarch64_classify_vector_mode (GET_MODE (d->target)) & VEC_ANY_SVE) == 0
>
> This check isn't necessary, since the caller has already checked that
> this is an SVE permute.
>
> > + || (aarch64_classify_vector_mode (GET_MODE (d->op0)) & VEC_ADVSIMD) == 0)
> > + return false;
> > +
> > + int npatterns = d->perm.encoding ().npatterns ();
> > + if (!known_eq (npatterns, GET_MODE_NUNITS (GET_MODE (d->op0))))
> > + return false;
> > +
> > + for (int i = 0; i < npatterns; i++)
> > + if (!known_eq (d->perm[i], i))
> > + return false;
> > +
> > + if (d->testing_p)
> > + return true;
> > +
> > + aarch64_expand_sve_dupq (d->target, GET_MODE (d->target), d->op0);
> > + return true;
> > +}
> > +
> > /* Try to implement D using SVE SEL instruction. */
> >
> > static bool
> > @@ -24129,7 +24158,12 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
> > else if (aarch64_evpc_reencode (d))
> > return true;
> > if (d->vec_flags == VEC_SVE_DATA)
> > - return aarch64_evpc_sve_tbl (d);
> > + {
> > + if (aarch64_evpc_sve_dup (d))
> > + return true;
> > + else if (aarch64_evpc_sve_tbl (d))
> > + return true;
> > + }
> > else if (d->vec_flags == VEC_ADVSIMD)
> > return aarch64_evpc_tbl (d);
> > }
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr96463-1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr96463-1.c
> > new file mode 100644
> > index 00000000000..5af3b6ed24c
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr96463-1.c
> > @@ -0,0 +1,23 @@
> > +/* { dg-do compile } */
> > +/* { dg-options "-O3" } */
> > +
> > +#include "arm_neon.h"
> > +#include "arm_sve.h"
> > +
> > +#define TEST(ret_type, param_type, suffix) \
> > +ret_type test_##suffix(param_type x) \
> > +{ \
> > + return svld1rq_##suffix (svptrue_b8 (), &x[0]); \
> > +}
> > +
> > +TEST(svint8_t, int8x16_t, s8)
> > +TEST(svint16_t, int16x8_t, s16)
> > +TEST(svint32_t, int32x4_t, s32)
> > +TEST(svint64_t, int64x2_t, s64)
> > +
> > +TEST(svuint8_t, uint8x16_t, u8)
> > +TEST(svuint16_t, uint16x8_t, u16)
> > +TEST(svuint32_t, uint32x4_t, u32)
> > +TEST(svuint64_t, uint64x2_t, u64)
> > +
> > +/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.q, z[0-9]+\.q\[0\]} 8 { target aarch64_little_endian } } } */
> > diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr96463-2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr96463-2.c
> > new file mode 100644
> > index 00000000000..17e78c57c1b
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general/pr96463-2.c
> > @@ -0,0 +1,23 @@
> > +/* { dg-do compile } */
> > +/* { dg-options "-O3" } */
> > +
> > +#include "arm_neon.h"
> > +#include "arm_sve.h"
> > +
> > +#define TEST(ret_type, param_type, suffix) \
> > +ret_type test_##suffix(param_type *x) \
> > +{ \
> > + return svld1rq_##suffix (svptrue_b8 (), &x[0]); \
> > +}
> > +
> > +TEST(svint8_t, int8_t, s8)
> > +TEST(svint16_t, int16_t, s16)
> > +TEST(svint32_t, int32_t, s32)
> > +TEST(svint64_t, int64_t, s64)
> > +
> > +TEST(svuint8_t, uint8_t, u8)
> > +TEST(svuint16_t, uint16_t, u16)
> > +TEST(svuint32_t, uint32_t, u32)
> > +TEST(svuint64_t, uint64_t, u64)
> > +
> > +/* { dg-final { scan-assembler-times {\tdup\tz[0-9]+\.q, z[0-9]+\.q\[0\]} 8 { target aarch64_little_endian } } } */
>
> It would be good to check the float modes too.
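> E.g. something like this in the first test (untested):
>
>   TEST(svfloat16_t, float16x8_t, f16)
>   TEST(svfloat32_t, float32x4_t, f32)
>   TEST(svfloat64_t, float64x2_t, f64)
>
> with the corresponding scalar element types in the second test, and the
> scan-assembler counts bumped to match.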
>
> Thanks,
> Richard