With MVE, vmov.f64 is always supported (no need for +fp.dp extension).

This patch updates two patterns:
- in movdi_vfp, we incorrectly checked
  TARGET_VFP_SINGLE || TARGET_HAVE_MVE instead of
  TARGET_VFP_SINGLE && !TARGET_HAVE_MVE, and didn't take into account
  these two possibilities when computing the length attribute.

- in thumb2_movdf_vfp, we checked only TARGET_VFP_SINGLE.

No need to update movdf_vfp, since it is enabled only for TARGET_ARM
(which is not the case when MVE is enabled).

The patch also updates gcc.target/arm/armv8_1m-fp64-move-1.c, to
accept only a single vmov.f64 instead of a pair of vmov.f32.

Tested on arm-none-eabi with:
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto/-march=armv8.1-m.main+mve
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto/-march=armv8.1-m.main+mve.fp
qemu/-mthumb/-mtune=cortex-m55/-mfloat-abi=hard/-mfpu=auto/-march=armv8.1-m.main+mve.fp+fp.dp

2024-08-21  Christophe Lyon  <christophe.l...@linaro.org>

        gcc/
        * config/arm/vfp.md (movdi_vfp, thumb2_movdf_vfp): Handle MVE
        case.

        gcc/testsuite/
        * gcc.target/arm/armv8_1m-fp64-move-1.c: Update expected code.
---
 gcc/config/arm/vfp.md                               | 8 ++++----
 gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c | 8 +-------
 2 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index 773f55664a9..3212d9c7aa1 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -367,7 +367,7 @@
     case 8:
       return \"vmov%?\\t%Q0, %R0, %P1\\t%@ int\";
     case 9:
-      if (TARGET_VFP_SINGLE || TARGET_HAVE_MVE)
+      if (TARGET_VFP_SINGLE && !TARGET_HAVE_MVE)
        return \"vmov%?.f32\\t%0, %1\\t%@ int\;vmov%?.f32\\t%p0, %p1\\t%@ int\";
       else
        return \"vmov%?.f64\\t%P0, %P1\\t%@ int\";
@@ -385,7 +385,7 @@
                               (symbol_ref "arm_count_output_move_double_insns (operands) * 4")
                               (eq_attr "alternative" "9")
                                (if_then_else
-                                 (match_test "TARGET_VFP_SINGLE")
+                                 (match_test "TARGET_VFP_SINGLE && !TARGET_HAVE_MVE")
                                  (const_int 8)
                                  (const_int 4))]
                               (const_int 4)))
@@ -744,7 +744,7 @@
       case 6: case 7: case 9:
        return output_move_double (operands, true, NULL);
       case 8:
-       if (TARGET_VFP_SINGLE)
+       if (TARGET_VFP_SINGLE && !TARGET_HAVE_MVE)
          return \"vmov%?.f32\\t%0, %1\;vmov%?.f32\\t%p0, %p1\";
        else
          return \"vmov%?.f64\\t%P0, %P1\";
@@ -758,7 +758,7 @@
    (set (attr "length") (cond [(eq_attr "alternative" "6,7,9") (const_int 8)
                               (eq_attr "alternative" "8")
                                (if_then_else
-                                (match_test "TARGET_VFP_SINGLE")
+                                (match_test "TARGET_VFP_SINGLE && !TARGET_HAVE_MVE")
                                 (const_int 8)
                                 (const_int 4))]
                              (const_int 4)))
diff --git a/gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c b/gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c
index d236f0826c3..4a3cf0a5afb 100644
--- a/gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c
+++ b/gcc/testsuite/gcc.target/arm/armv8_1m-fp64-move-1.c
@@ -33,13 +33,7 @@ w_r ()
 
 /*
 ** w_w:
-** (
-**     vmov.f32        s2, s0
-**     vmov.f32        s3, s1
-** |
-**     vmov.f32        s3, s1
-**     vmov.f32        s2, s0
-** )
+**     vmov.f64        d1, d0
 **     bx      lr
 */
 void
-- 
2.34.1

Reply via email to