https://gcc.gnu.org/bugzilla/show_bug.cgi?id=91103

--- Comment #6 from Hongtao.liu <crazylht at gmail dot com> ---
For elements located above the low 128 bits, it seems always better(?) to use
valign{d,q}.
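
The functions in the diff below presumably come from a testcase along these
lines (a minimal sketch using GNU C vector extensions; the exact source and
compile options in the PR may differ, something like -O2 -march=skylake-avx512
is assumed).  Each foo_<type>_<n> simply returns element <n> of its vector
argument:

typedef float     v8sf  __attribute__ ((vector_size (32)));
typedef int       v8si  __attribute__ ((vector_size (32)));
typedef float     v16sf __attribute__ ((vector_size (64)));
typedef int       v16si __attribute__ ((vector_size (64)));
typedef double    v4df  __attribute__ ((vector_size (32)));
typedef long long v4di  __attribute__ ((vector_size (32)));
typedef double    v8df  __attribute__ ((vector_size (64)));
typedef long long v8di  __attribute__ ((vector_size (64)));

float     foo_v8sf_4   (v8sf a)  { return a[4];  }
float     foo_v8sf_7   (v8sf a)  { return a[7];  }
int       foo_v8si_4   (v8si a)  { return a[4];  }
int       foo_v8si_7   (v8si a)  { return a[7];  }
float     foo_v16sf_8  (v16sf a) { return a[8];  }
float     foo_v16sf_15 (v16sf a) { return a[15]; }
int       foo_v16si_8  (v16si a) { return a[8];  }
int       foo_v16si_15 (v16si a) { return a[15]; }
double    foo_v4df_2   (v4df a)  { return a[2];  }
double    foo_v4df_3   (v4df a)  { return a[3];  }
long long foo_v4di_2   (v4di a)  { return a[2];  }
long long foo_v4di_3   (v4di a)  { return a[3];  }
double    foo_v8df_4   (v8df a)  { return a[4];  }
double    foo_v8df_7   (v8df a)  { return a[7];  }
long long foo_v8di_4   (v8di a)  { return a[4];  }
long long foo_v8di_7   (v8di a)  { return a[7];  }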

diff --git a/origin.s b/after.s
index 9a7dfee..9a23f7e 100644
--- a/origin.s
+++ b/after.s
@@ -6,7 +6,7 @@
 foo_v8sf_4:
 .LFB0:
        .cfi_startproc
-       vextractf128    $0x1, %ymm0, %xmm0
+       valignd $4, %ymm0, %ymm0, %ymm0
        ret
        .cfi_endproc
 .LFE0:
@@ -17,8 +17,7 @@ foo_v8sf_4:
 foo_v8sf_7:
 .LFB1:
        .cfi_startproc
-       vextractf128    $0x1, %ymm0, %xmm0
-       vshufps $255, %xmm0, %xmm0, %xmm0
+       valignd $7, %ymm0, %ymm0, %ymm0
        ret
        .cfi_endproc
 .LFE1:
@@ -29,8 +28,8 @@ foo_v8sf_7:
 foo_v8si_4:
 .LFB2:
        .cfi_startproc
-       vextracti128    $0x1, %ymm0, %xmm0
-       vmovd   %xmm0, %eax
+       valignd $4, %ymm0, %ymm0, %ymm1
+       vmovd   %xmm1, %eax
        ret
        .cfi_endproc
 .LFE2:
@@ -41,8 +40,8 @@ foo_v8si_4:
 foo_v8si_7:
 .LFB3:
        .cfi_startproc
-       vextracti128    $0x1, %ymm0, %xmm0
-       vpextrd $3, %xmm0, %eax
+       valignd $7, %ymm0, %ymm0, %ymm1
+       vmovd   %xmm1, %eax
        ret
        .cfi_endproc
 .LFE3:
@@ -53,7 +52,7 @@ foo_v8si_7:
 foo_v16sf_8:
 .LFB4:
        .cfi_startproc
-       vextractf32x8   $0x1, %zmm0, %ymm0
+       valignd $8, %zmm0, %zmm0, %zmm0
        ret
        .cfi_endproc
 .LFE4:
@@ -64,9 +63,7 @@ foo_v16sf_8:
 foo_v16sf_15:
 .LFB5:
        .cfi_startproc
-       vextractf32x8   $0x1, %zmm0, %ymm0
-       vextractf128    $0x1, %ymm0, %xmm0
-       vshufps $255, %xmm0, %xmm0, %xmm0
+       valignd $15, %zmm0, %zmm0, %zmm0
        ret
        .cfi_endproc
 .LFE5:
@@ -77,8 +74,8 @@ foo_v16sf_15:
 foo_v16si_8:
 .LFB6:
        .cfi_startproc
-       vextracti32x8   $0x1, %zmm0, %ymm0
-       vmovd   %xmm0, %eax
+       valignd $8, %zmm0, %zmm0, %zmm1
+       vmovd   %xmm1, %eax
        ret
        .cfi_endproc
 .LFE6:
@@ -89,9 +86,8 @@ foo_v16si_8:
 foo_v16si_15:
 .LFB7:
        .cfi_startproc
-       vextracti32x8   $0x1, %zmm0, %ymm0
-       vextracti128    $0x1, %ymm0, %xmm0
-       vpextrd $3, %xmm0, %eax
+       valignd $15, %zmm0, %zmm0, %zmm1
+       vmovd   %xmm1, %eax
        ret
        .cfi_endproc
 .LFE7:
@@ -102,7 +98,7 @@ foo_v16si_15:
 foo_v4df_2:
 .LFB8:
        .cfi_startproc
-       vextractf64x2   $0x1, %ymm0, %xmm0
+       valignq $2, %ymm0, %ymm0, %ymm0
        ret
        .cfi_endproc
 .LFE8:
@@ -113,8 +109,7 @@ foo_v4df_2:
 foo_v4df_3:
 .LFB9:
        .cfi_startproc
-       vextractf64x2   $0x1, %ymm0, %xmm0
-       vunpckhpd       %xmm0, %xmm0, %xmm0
+       valignq $3, %ymm0, %ymm0, %ymm0
        ret
        .cfi_endproc
 .LFE9:
@@ -125,8 +120,8 @@ foo_v4df_3:
 foo_v4di_2:
 .LFB10:
        .cfi_startproc
-       vextracti64x2   $0x1, %ymm0, %xmm0
-       vmovq   %xmm0, %rax
+       valignq $2, %ymm0, %ymm0, %ymm1
+       vmovq   %xmm1, %rax
        ret
        .cfi_endproc
 .LFE10:
@@ -137,8 +132,8 @@ foo_v4di_2:
 foo_v4di_3:
 .LFB11:
        .cfi_startproc
-       vextracti64x2   $0x1, %ymm0, %xmm0
-       vpextrq $1, %xmm0, %rax
+       valignq $3, %ymm0, %ymm0, %ymm1
+       vmovq   %xmm1, %rax
        ret
        .cfi_endproc
 .LFE11:
@@ -149,7 +144,7 @@ foo_v4di_3:
 foo_v8df_4:
 .LFB12:
        .cfi_startproc
-       vextractf64x4   $0x1, %zmm0, %ymm0
+       valignq $4, %zmm0, %zmm0, %zmm0
        ret
        .cfi_endproc
 .LFE12:
@@ -160,9 +155,7 @@ foo_v8df_4:
 foo_v8df_7:
 .LFB13:
        .cfi_startproc
-       vextractf64x4   $0x1, %zmm0, %ymm0
-       vextractf64x2   $0x1, %ymm0, %xmm0
-       vunpckhpd       %xmm0, %xmm0, %xmm0
+       valignq $7, %zmm0, %zmm0, %zmm0
        ret
        .cfi_endproc
 .LFE13:
@@ -173,8 +166,8 @@ foo_v8df_7:
 foo_v8di_4:
 .LFB14:
        .cfi_startproc
-       vextracti64x4   $0x1, %zmm0, %ymm0
-       vmovq   %xmm0, %rax
+       valignq $4, %zmm0, %zmm0, %zmm1
+       vmovq   %xmm1, %rax
        ret
        .cfi_endproc
 .LFE14:
@@ -185,12 +178,11 @@ foo_v8di_4:
 foo_v8di_7:
 .LFB15:
        .cfi_startproc
-       vextracti64x4   $0x1, %zmm0, %ymm0
-       vextracti64x2   $0x1, %ymm0, %xmm0
-       vpextrq $1, %xmm0, %rax
+       valignq $7, %zmm0, %zmm0, %zmm1
+       vmovq   %xmm1, %rax
        ret
        .cfi_endproc
 .LFE15:
        .size   foo_v8di_7, .-foo_v8di_7
-       .ident  "GCC: (GNU) 12.0.0 20210907 (experimental)"
+       .ident  "GCC: (GNU) 12.0.0 20210908 (experimental)"
        .section        .note.GNU-stack,"",@progbits
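
For illustration only (not from the PR), the new foo_v8si_7 sequence
corresponds roughly to the following AVX-512VL intrinsics; the helper name
is made up:

#include <immintrin.h>

/* Rotate element 7 of a 256-bit integer vector into lane 0 with valignd,
   then read it with vmovd, i.e. the same sequence as the new foo_v8si_7.  */
static inline int
extract_v8si_7 (__m256i a)
{
  __m256i rot = _mm256_alignr_epi32 (a, a, 7);             /* valignd $7, %ymm0, %ymm0, %ymm1 */
  return _mm_cvtsi128_si32 (_mm256_castsi256_si128 (rot)); /* vmovd %xmm1, %eax */
}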
