https://gcc.gnu.org/g:39263ed2d39ac1cebde59bc5e72ddcad5dc7a1ec

commit r15-906-g39263ed2d39ac1cebde59bc5e72ddcad5dc7a1ec
Author: Richard Sandiford <richard.sandif...@arm.com>
Date:   Wed May 29 16:43:33 2024 +0100

    aarch64: Split aarch64_combinev16qi before RA [PR115258]
    
    Two-vector TBL instructions are fed by an aarch64_combinev16qi, whose
    purpose is to put the two input data vectors into consecutive registers.
    This aarch64_combinev16qi was then split after reload into individual
    moves (from the first input to the first half of the output, and from
    the second input to the second half of the output).
    
    In the worst case, the RA might allocate things so that the destination
    of the aarch64_combinev16qi is the second input followed by the first
    input.  In that case, the split form of aarch64_combinev16qi uses three
    eors to swap the registers around.
    
    This PR is about a test where this worst case occurred.  And given the
    insn description, that allocation doesn't seem unreasonable.
    
    early-ra should (hopefully) mean that we're now better at allocating
    subregs of vector registers.  The upcoming RA subreg patches should
    improve things further.  The best fix for the PR therefore seems
    to be to split the combination before RA, so that the RA can see
    the underlying moves.
    
    Perhaps it even makes sense to do this at expand time, avoiding the need
    for aarch64_combinev16qi entirely.  That deserves more experimentation
    though.
    
    gcc/
            PR target/115258
            * config/aarch64/aarch64-simd.md (aarch64_combinev16qi): Allow
            the split before reload.
            * config/aarch64/aarch64.cc (aarch64_split_combinev16qi): Generalize
            into a form that handles pseudo registers.
    
    gcc/testsuite/
            PR target/115258
            * gcc.target/aarch64/pr115258.c: New test.

Diff:
---
 gcc/config/aarch64/aarch64-simd.md          |  2 +-
 gcc/config/aarch64/aarch64.cc               | 29 ++++++++++++++---------------
 gcc/testsuite/gcc.target/aarch64/pr115258.c | 19 +++++++++++++++++++
 3 files changed, 34 insertions(+), 16 deletions(-)

diff --git a/gcc/config/aarch64/aarch64-simd.md 
b/gcc/config/aarch64/aarch64-simd.md
index c311888e4bd..868f4486218 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -8474,7 +8474,7 @@
                        UNSPEC_CONCAT))]
   "TARGET_SIMD"
   "#"
-  "&& reload_completed"
+  "&& 1"
   [(const_int 0)]
 {
   aarch64_split_combinev16qi (operands);
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index ee12d8897a8..13191ec8e34 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -25333,27 +25333,26 @@ aarch64_output_sve_ptrues (rtx const_unspec)
 void
 aarch64_split_combinev16qi (rtx operands[3])
 {
-  unsigned int dest = REGNO (operands[0]);
-  unsigned int src1 = REGNO (operands[1]);
-  unsigned int src2 = REGNO (operands[2]);
   machine_mode halfmode = GET_MODE (operands[1]);
-  unsigned int halfregs = REG_NREGS (operands[1]);
-  rtx destlo, desthi;
 
   gcc_assert (halfmode == V16QImode);
 
-  if (src1 == dest && src2 == dest + halfregs)
+  rtx destlo = simplify_gen_subreg (halfmode, operands[0],
+                                   GET_MODE (operands[0]), 0);
+  rtx desthi = simplify_gen_subreg (halfmode, operands[0],
+                                   GET_MODE (operands[0]),
+                                   GET_MODE_SIZE (halfmode));
+
+  bool skiplo = rtx_equal_p (destlo, operands[1]);
+  bool skiphi = rtx_equal_p (desthi, operands[2]);
+
+  if (skiplo && skiphi)
     {
       /* No-op move.  Can't split to nothing; emit something.  */
       emit_note (NOTE_INSN_DELETED);
       return;
     }
 
-  /* Preserve register attributes for variable tracking.  */
-  destlo = gen_rtx_REG_offset (operands[0], halfmode, dest, 0);
-  desthi = gen_rtx_REG_offset (operands[0], halfmode, dest + halfregs,
-                              GET_MODE_SIZE (halfmode));
-
   /* Special case of reversed high/low parts.  */
   if (reg_overlap_mentioned_p (operands[2], destlo)
       && reg_overlap_mentioned_p (operands[1], desthi))
@@ -25366,16 +25365,16 @@ aarch64_split_combinev16qi (rtx operands[3])
     {
       /* Try to avoid unnecessary moves if part of the result
         is in the right place already.  */
-      if (src1 != dest)
+      if (!skiplo)
        emit_move_insn (destlo, operands[1]);
-      if (src2 != dest + halfregs)
+      if (!skiphi)
        emit_move_insn (desthi, operands[2]);
     }
   else
     {
-      if (src2 != dest + halfregs)
+      if (!skiphi)
        emit_move_insn (desthi, operands[2]);
-      if (src1 != dest)
+      if (!skiplo)
        emit_move_insn (destlo, operands[1]);
     }
 }
diff --git a/gcc/testsuite/gcc.target/aarch64/pr115258.c 
b/gcc/testsuite/gcc.target/aarch64/pr115258.c
new file mode 100644
index 00000000000..9a489d4604c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/pr115258.c
@@ -0,0 +1,19 @@
+/* { dg-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+/*
+** fun:
+**     (ldr|adrp)      [^\n]+
+**     (ldr|adrp)      [^\n]+
+**     (ldr|adrp)      [^\n]+
+**     (ldr|adrp)      [^\n]+
+**     tbl     v[0-9]+.16b, {v[0-9]+.16b - v[0-9]+.16b}, v[0-9]+.16b
+**     str     [^\n]+
+**     ret
+*/
+typedef int veci __attribute__ ((vector_size (4 * sizeof (int))));
+void fun (veci *a, veci *b, veci *c) {
+  *c = __builtin_shufflevector (*a, *b, 0, 5, 2, 7);
+}
+
+/* { dg-final { scan-assembler-not {\teor\t} } } */

Reply via email to