https://gcc.gnu.org/g:f407f0cf841fedde01f6485875ae14d5281baa63

commit r16-7461-gf407f0cf841fedde01f6485875ae14d5281baa63
Author: Chris Bazley <[email protected]>
Date:   Wed Feb 11 15:53:22 2026 +0000

    AArch64: Add tests to ensure rev is produced
    
    If the compiler mistakenly vectorizes byte order reversal
    then the resultant code is inevitably less efficient than a
    rev instruction.  This kind of error will become more likely if
    SVE predication is ever used to vectorize smaller groups
    than could be vectorized using ASIMD instructions.  Add tests to
    guard against future regressions.
    
    gcc/testsuite/ChangeLog:
    
            * gcc.target/aarch64/rev_32_1.c: New test.
            * gcc.target/aarch64/rev_32_2.c: New test.
        * gcc.target/aarch64/rev_32_3.c: New test.

Diff:
---
 gcc/testsuite/gcc.target/aarch64/rev_32_1.c | 25 +++++++++++++++++++++++++
 gcc/testsuite/gcc.target/aarch64/rev_32_2.c | 29 +++++++++++++++++++++++++++++
 gcc/testsuite/gcc.target/aarch64/rev_32_3.c | 26 ++++++++++++++++++++++++++
 3 files changed, 80 insertions(+)

diff --git a/gcc/testsuite/gcc.target/aarch64/rev_32_1.c b/gcc/testsuite/gcc.target/aarch64/rev_32_1.c
new file mode 100644
index 000000000000..31765f0d05ef
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/rev_32_1.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+/* If the compiler mistakenly vectorizes byte order reversal
+ * then the resultant code is inevitably less efficient than a
+ * rev instruction.  Guard against such regressions.
+ */
+typedef unsigned int __u32;
+typedef unsigned char __u8;
+
+/*
+** rev:
+**     rev     w1, w1
+**     str     w1, \[x0\]
+**     ret
+*/
+void
+rev (__u8 (*dst)[4], __u32 src)
+{
+  (*dst)[0] = src >> 24;
+  (*dst)[1] = src >> 16;
+  (*dst)[2] = src >> 8;
+  (*dst)[3] = src >> 0;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/rev_32_2.c b/gcc/testsuite/gcc.target/aarch64/rev_32_2.c
new file mode 100644
index 000000000000..08bfc2aac83d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/rev_32_2.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+/* If the compiler mistakenly vectorizes byte order reversal
+ * then the resultant code is inevitably less efficient than a
+ * rev instruction.  Guard against such regressions.
+ */
+typedef unsigned int __u32;
+typedef unsigned char __u8;
+
+/*
+** rev2:
+**     ldr     w0, \[x0\]
+**     rev     w0, w0
+**     ret
+*/
+__u32
+rev2 (const __u8 (*src)[4])
+{
+  __u32 dst = 0;
+
+  dst |= (__u32) (*src)[3] << 0;
+  dst |= (__u32) (*src)[2] << 8;
+  dst |= (__u32) (*src)[1] << 16;
+  dst |= (__u32) (*src)[0] << 24;
+
+  return dst;
+}
diff --git a/gcc/testsuite/gcc.target/aarch64/rev_32_3.c b/gcc/testsuite/gcc.target/aarch64/rev_32_3.c
new file mode 100644
index 000000000000..d80b1f02d8bb
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/rev_32_3.c
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" } } */
+
+/* If the compiler mistakenly vectorizes byte order reversal
+ * then the resultant code is inevitably less efficient than a
+ * rev instruction.  Guard against such regressions.
+ */
+typedef unsigned char __u8;
+
+/*
+** rev3:
+**     ldr     w1, \[x1\]
+**     rev     w1, w1
+**     str     w1, \[x0\]
+**     ret
+*/
+void
+rev3 (unsigned char (*__restrict dst)[4],
+      const unsigned char (*__restrict src)[4])
+{
+  (*dst)[0] = (*src)[3];
+  (*dst)[1] = (*src)[2];
+  (*dst)[2] = (*src)[1];
+  (*dst)[3] = (*src)[0];
+}

Reply via email to