"Andre Vieira (lists)" <andre.simoesdiasvie...@arm.com> writes:
> This patch fixes some testisms introduced by:
>
> commit 5aa3fec38cc6f52285168b161bab1a869d864b44
> Author: Andre Vieira <andre.simoesdiasvie...@arm.com>
> Date:   Wed Apr 10 16:29:46 2024 +0100
>
>      aarch64: Add support for _BitInt
>
> The testcases were relying on an unnecessary sign-extend that is no longer
> generated.
>
> The version I tested was slightly behind the top of trunk when the patch
> was committed, and by then the codegen had changed for the better.
>
> OK for trunk? (I am away tomorrow, so if you want this in before the 
> weekend feel free to commit it on my behalf, if approved ofc...)
>
>
> gcc/testsuite/ChangeLog:
>
>       * gcc.target/aarch64/bitfield-bitint-abi-align16.c (g1, g8, g16,
>       g1p, g8p, g16p): Remove unnecessary sbfx.
>       * gcc.target/aarch64/bitfield-bitint-abi-align8.c (g1, g8, g16,
>       g1p, g8p, g16p): Likewise.

LGTM, thanks.  Pushed to trunk.

Richard

>
>
> diff --git a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
> index 3f292a45f955d35b802a0bd789cd39d5fa7b5860..4a228b0a1ce696dc80e32305162d58f01d44051d 100644
> --- a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
> +++ b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align16.c
> @@ -55,9 +55,8 @@
>  ** g1:
>  **   mov     (x[0-9]+), x0
>  **   mov     w0, w1
> -**   sbfx    (x[0-9]+), \1, 0, 63
> -**   and     x4, \2, 9223372036854775807
> -**   and     x2, \2, 1
> +**   and     x4, \1, 9223372036854775807
> +**   and     x2, \1, 1
>  **   mov     x3, 0
>  **   b       f1
>  */
> @@ -66,9 +65,8 @@
>  ** g8:
>  **   mov     (x[0-9]+), x0
>  **   mov     w0, w1
> -**   sbfx    (x[0-9]+), \1, 0, 63
> -**   and     x4, \2, 9223372036854775807
> -**   and     x2, \2, 1
> +**   and     x4, \1, 9223372036854775807
> +**   and     x2, \1, 1
>  **   mov     x3, 0
>  **   b       f8
>  */
> @@ -76,9 +74,8 @@
>  ** g16:
>  **   mov     (x[0-9]+), x0
>  **   mov     w0, w1
> -**   sbfx    (x[0-9]+), \1, 0, 63
> -**   and     x4, \2, 9223372036854775807
> -**   and     x2, \2, 1
> +**   and     x4, \1, 9223372036854775807
> +**   and     x2, \1, 1
>  **   mov     x3, 0
>  **   b       f16
>  */
> @@ -107,9 +104,8 @@
>  /*
>  ** g1p:
>  **   mov     (w[0-9]+), w1
> -**   sbfx    (x[0-9]+), x0, 0, 63
> -**   and     x3, \2, 9223372036854775807
> -**   and     x1, \2, 1
> +**   and     x3, x0, 9223372036854775807
> +**   and     x1, x0, 1
>  **   mov     x2, 0
>  **   mov     w0, \1
>  **   b       f1p
> @@ -117,9 +113,8 @@
>  /*
>  ** g8p:
>  **   mov     (w[0-9]+), w1
> -**   sbfx    (x[0-9]+), x0, 0, 63
> -**   and     x3, \2, 9223372036854775807
> -**   and     x1, \2, 1
> +**   and     x3, x0, 9223372036854775807
> +**   and     x1, x0, 1
>  **   mov     x2, 0
>  **   mov     w0, \1
>  **   b       f8p
> @@ -128,9 +123,8 @@
>  ** g16p:
>  **   mov     (x[0-9]+), x0
>  **   mov     w0, w1
> -**   sbfx    (x[0-9]+), \1, 0, 63
> -**   and     x4, \2, 9223372036854775807
> -**   and     x2, \2, 1
> +**   and     x4, \1, 9223372036854775807
> +**   and     x2, \1, 1
>  **   mov     x3, 0
>  **   b       f16p
>  */
> diff --git a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
> index da3c23550bae6734f69e2baf0e8db741fb65cfda..e7f773640f04f56646e5e1a5fb91280ea7e4db98 100644
> --- a/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
> +++ b/gcc/testsuite/gcc.target/aarch64/bitfield-bitint-abi-align8.c
> @@ -54,9 +54,8 @@
>  /*
>  ** g1:
>  **   mov     (w[0-9]+), w1
> -**   sbfx    (x[0-9]+), x0, 0, 63
> -**   and     x3, \2, 9223372036854775807
> -**   and     x1, \2, 1
> +**   and     x3, x0, 9223372036854775807
> +**   and     x1, x0, 1
>  **   mov     x2, 0
>  **   mov     w0, \1
>  **   b       f1
> @@ -65,9 +64,8 @@
>  /*
>  ** g8:
>  **   mov     (w[0-9]+), w1
> -**   sbfx    (x[0-9]+), x0, 0, 63
> -**   and     x3, \2, 9223372036854775807
> -**   and     x1, \2, 1
> +**   and     x3, x0, 9223372036854775807
> +**   and     x1, x0, 1
>  **   mov     x2, 0
>  **   mov     w0, \1
>  **   b       f8
> @@ -76,9 +74,8 @@
>  ** g16:
>  **   mov     (x[0-9]+), x0
>  **   mov     w0, w1
> -**   sbfx    (x[0-9]+), \1, 0, 63
> -**   and     x4, \2, 9223372036854775807
> -**   and     x2, \2, 1
> +**   and     x4, \1, 9223372036854775807
> +**   and     x2, \1, 1
>  **   mov     x3, 0
>  **   b       f16
>  */
> @@ -107,9 +104,8 @@
>  /*
>  ** g1p:
>  **   mov     (w[0-9]+), w1
> -**   sbfx    (x[0-9]+), x0, 0, 63
> -**   and     x3, \2, 9223372036854775807
> -**   and     x1, \2, 1
> +**   and     x3, x0, 9223372036854775807
> +**   and     x1, x0, 1
>  **   mov     x2, 0
>  **   mov     w0, \1
>  **   b       f1p
> @@ -117,9 +113,8 @@
>  /*
>  ** g8p:
>  **   mov     (w[0-9]+), w1
> -**   sbfx    (x[0-9]+), x0, 0, 63
> -**   and     x3, \2, 9223372036854775807
> -**   and     x1, \2, 1
> +**   and     x3, x0, 9223372036854775807
> +**   and     x1, x0, 1
>  **   mov     x2, 0
>  **   mov     w0, \1
>  **   b       f8p
> @@ -128,9 +123,8 @@
>  ** g16p:
>  **   mov     (x[0-9]+), x0
>  **   mov     w0, w1
> -**   sbfx    (x[0-9]+), \1, 0, 63
> -**   and     x4, \2, 9223372036854775807
> -**   and     x2, \2, 1
> +**   and     x4, \1, 9223372036854775807
> +**   and     x2, \1, 1
>  **   mov     x3, 0
>  **   b       f16p
>  */
