https://gcc.gnu.org/g:37c21d4c6ad0afe2aacdd6384b9efa96f5754169

commit r15-3224-g37c21d4c6ad0afe2aacdd6384b9efa96f5754169
Author: Christophe Lyon <christophe.l...@linaro.org>
Date:   Wed Aug 21 13:58:08 2024 +0000

    arm: Always use vmov.f64 instead of vmov.f32 with MVE
    
    With MVE, vmov.f64 is always supported (no need for +fp.dp extension).
    
    This patch updates two patterns:
    - in movdi_vfp, we incorrectly checked
      TARGET_VFP_SINGLE || TARGET_HAVE_MVE instead of
      TARGET_VFP_SINGLE && !TARGET_HAVE_MVE, and didn't take into account
      these two possibilities when computing the length attribute.
    
    - in thumb2_movdf_vfp, we checked only TARGET_VFP_SINGLE.
    
    No need to update movdf_vfp, since it is enabled only for TARGET_ARM
    (which is not the case when MVE is enabled).
    
    The patch also updates gcc.target/arm/armv8_1m-fp64-move-1.c, to
    accept only vmov.f64 instead of vmov.f32.
    
    Tested on arm-none-eabi with:
    qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto
    
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto/-march=armv8.1-m.main+mve
    
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto/-march=armv8.1-m.main+mve.fp
    
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto/-march=armv8.1-m.main+mve.fp+fp.dp
    
    2024-08-21  Christophe Lyon  <christophe.l...@linaro.org>
    
            gcc/
            * config/arm/vfp.md (movdi_vfp, thumb2_movdf_vfp): Handle MVE
            case.
    
            gcc/testsuite/
            * gcc.target/arm/armv8_1m-fp64-move-1.c: Update expected code.

Diff:
---
 gcc/config/arm/vfp.md                               | 8 ++++----
 gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c | 8 +-------
 2 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index 773f55664a95..3212d9c7aa17 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -367,7 +367,7 @@
     case 8:
       return \"vmov%?\\t%Q0, %R0, %P1\\t%@ int\";
     case 9:
-      if (TARGET_VFP_SINGLE || TARGET_HAVE_MVE)
+      if (TARGET_VFP_SINGLE && !TARGET_HAVE_MVE)
        return \"vmov%?.f32\\t%0, %1\\t%@ int\;vmov%?.f32\\t%p0, %p1\\t%@ int\";
       else
        return \"vmov%?.f64\\t%P0, %P1\\t%@ int\";
@@ -385,7 +385,7 @@
                               (symbol_ref "arm_count_output_move_double_insns (operands) * 4")
                               (eq_attr "alternative" "9")
                                (if_then_else
-                                 (match_test "TARGET_VFP_SINGLE")
+                                 (match_test "TARGET_VFP_SINGLE && !TARGET_HAVE_MVE")
                                  (const_int 8)
                                  (const_int 4))]
                               (const_int 4)))
@@ -744,7 +744,7 @@
       case 6: case 7: case 9:
        return output_move_double (operands, true, NULL);
       case 8:
-       if (TARGET_VFP_SINGLE)
+       if (TARGET_VFP_SINGLE && !TARGET_HAVE_MVE)
          return \"vmov%?.f32\\t%0, %1\;vmov%?.f32\\t%p0, %p1\";
        else
          return \"vmov%?.f64\\t%P0, %P1\";
@@ -758,7 +758,7 @@
    (set (attr "length") (cond [(eq_attr "alternative" "6,7,9") (const_int 8)
                               (eq_attr "alternative" "8")
                                (if_then_else
-                                (match_test "TARGET_VFP_SINGLE")
+                                (match_test "TARGET_VFP_SINGLE && !TARGET_HAVE_MVE")
                                 (const_int 8)
                                 (const_int 4))]
                              (const_int 4)))
diff --git a/gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c b/gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c
index 39e8c4063247..b7e82f60149c 100644
--- a/gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c
+++ b/gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c
@@ -33,13 +33,7 @@ w_r ()
 
 /*
 ** w_w:
-** (
-**     vmov.f32        s2, s0
-**     vmov.f32        s3, s1
-** |
-**     vmov.f32        s3, s1
-**     vmov.f32        s2, s0
-** )
+**     vmov.f64        d1, d0
 **     bx      lr
 */
 void

Reply via email to