On 5/2/23 18:19, Kyrylo Tkachov wrote:


-----Original Message-----
From: Christophe Lyon <christophe.l...@arm.com>
Sent: Tuesday, April 18, 2023 2:46 PM
To: gcc-patches@gcc.gnu.org; Kyrylo Tkachov <kyrylo.tkac...@arm.com>;
Richard Earnshaw <richard.earns...@arm.com>; Richard Sandiford
<richard.sandif...@arm.com>
Cc: Christophe Lyon <christophe.l...@arm.com>
Subject: [PATCH 07/22] arm: [MVE intrinsics] factorize vaddq vsubq vmulq

In order to avoid using a huge switch when generating all the
intrinsics (e.g. mve_vaddq_n_sv4si, ...), we want to generate a single
function taking the builtin code as a parameter (e.g. mve_q_n (VADDQ_S,
...)).  This is achieved by using the new mve_insn iterator.

Having done that, it becomes easier to share similar patterns and to
avoid useless, error-prone code duplication.
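For reference, a minimal sketch (illustrative only, not part of this
patch) of how such a '@'-prefixed pattern can be driven from the
expansion code, assuming the code_for_* helper that GCC generates for
parameterized names takes the int-iterator value (the unspec code) and
the vector mode:

  /* Illustrative sketch only; pick_binary_insn is a hypothetical helper,
     not part of this patch.  code_for_mve_q_n is the function genemit
     creates for the '@'-prefixed name "@mve_<mve_insn>q_n_<supf><mode>":
     e.g. (VADDQ_N_S, V4SImode) would select CODE_FOR_mve_vaddq_n_sv4si.  */
  static insn_code
  pick_binary_insn (int unspec_code, machine_mode mode)
  {
    /* One parameterized lookup instead of a switch over every
       mve_vaddq_n_sv4si-style pattern name.  */
    return code_for_mve_q_n (unspec_code, mode);
  }

With that, adding a new binary intrinsic mostly means extending the
iterators rather than writing another pattern and another switch case.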

Nice!
Ok but...


2022-09-08  Christophe Lyon  <christophe.l...@arm.com>

gcc/ChangeLog:

        * config/arm/iterators.md (MVE_INT_BINARY_RTX, MVE_INT_M_BINARY)
        (MVE_INT_M_N_BINARY, MVE_INT_N_BINARY, MVE_FP_M_BINARY)
        (MVE_FP_M_N_BINARY, MVE_FP_N_BINARY, mve_addsubmul, mve_insn): New
        iterators.
        * config/arm/mve.md
        (mve_vsubq_n_f<mode>, mve_vaddq_n_f<mode>, mve_vmulq_n_f<mode>):
        Factorize into ...
        (@mve_<mve_insn>q_n_f<mode>): ... this.
        (mve_vaddq_n_<supf><mode>, mve_vmulq_n_<supf><mode>)
        (mve_vsubq_n_<supf><mode>): Factorize into ...
        (@mve_<mve_insn>q_n_<supf><mode>): ... this.
        (mve_vaddq<mode>, mve_vmulq<mode>, mve_vsubq<mode>): Factorize
        into ...
        (mve_<mve_addsubmul>q<mode>): ... this.
        (mve_vaddq_f<mode>, mve_vmulq_f<mode>, mve_vsubq_f<mode>):
        Factorize into ...
        (mve_<mve_addsubmul>q_f<mode>): ... this.
        (mve_vaddq_m_<supf><mode>, mve_vmulq_m_<supf><mode>)
        (mve_vsubq_m_<supf><mode>): Factorize into ...
        (@mve_<mve_insn>q_m_<supf><mode>): ... this.
        (mve_vaddq_m_n_<supf><mode>, mve_vmulq_m_n_<supf><mode>)
        (mve_vsubq_m_n_<supf><mode>): Factorize into ...
        (@mve_<mve_insn>q_m_n_<supf><mode>): ... this.
        (mve_vaddq_m_f<mode>, mve_vmulq_m_f<mode>, mve_vsubq_m_f<mode>):
        Factorize into ...
        (@mve_<mve_insn>q_m_f<mode>): ... this.
        (mve_vaddq_m_n_f<mode>, mve_vmulq_m_n_f<mode>)
        (mve_vsubq_m_n_f<mode>): Factorize into ...
        (@mve_<mve_insn>q_m_n_f<mode>): ... this.
---
  gcc/config/arm/iterators.md |  57 +++++++
  gcc/config/arm/mve.md       | 317 +++++-------------------------------
  2 files changed, 99 insertions(+), 275 deletions(-)

diff --git a/gcc/config/arm/iterators.md b/gcc/config/arm/iterators.md
index 39895ad62aa..d3bef594775 100644
--- a/gcc/config/arm/iterators.md
+++ b/gcc/config/arm/iterators.md
@@ -330,6 +330,63 @@ (define_code_iterator FCVT [unsigned_float float])
  ;; Saturating addition, subtraction
  (define_code_iterator SSPLUSMINUS [ss_plus ss_minus])

+;; MVE integer binary operations.
+(define_code_iterator MVE_INT_BINARY_RTX [plus minus mult])
+
+(define_int_iterator MVE_INT_M_BINARY   [
+                    VADDQ_M_S VADDQ_M_U
+                    VMULQ_M_S VMULQ_M_U
+                    VSUBQ_M_S VSUBQ_M_U
+                    ])
+
+(define_int_iterator MVE_INT_M_N_BINARY [
+                    VADDQ_M_N_S VADDQ_M_N_U
+                    VMULQ_M_N_S VMULQ_M_N_U
+                    VSUBQ_M_N_S VSUBQ_M_N_U
+                    ])
+
+(define_int_iterator MVE_INT_N_BINARY   [
+                    VADDQ_N_S VADDQ_N_U
+                    VMULQ_N_S VMULQ_N_U
+                    VSUBQ_N_S VSUBQ_N_U
+                    ])
+
+(define_int_iterator MVE_FP_M_BINARY   [
+                    VADDQ_M_F
+                    VMULQ_M_F
+                    VSUBQ_M_F
+                    ])
+
+(define_int_iterator MVE_FP_M_N_BINARY [
+                    VADDQ_M_N_F
+                    VMULQ_M_N_F
+                    VSUBQ_M_N_F
+                    ])
+
+(define_int_iterator MVE_FP_N_BINARY   [
+                    VADDQ_N_F
+                    VMULQ_N_F
+                    VSUBQ_N_F
+                    ])
+
+(define_code_attr mve_addsubmul [
+                (minus "vsub")
+                (mult "vmul")
+                (plus "vadd")
+                ])
+
+(define_int_attr mve_insn [
+                (VADDQ_M_N_S "vadd") (VADDQ_M_N_U "vadd") (VADDQ_M_N_F "vadd")
+                (VADDQ_M_S "vadd") (VADDQ_M_U "vadd") (VADDQ_M_F "vadd")
+                (VADDQ_N_S "vadd") (VADDQ_N_U "vadd") (VADDQ_N_F "vadd")
+                (VMULQ_M_N_S "vmul") (VMULQ_M_N_U "vmul") (VMULQ_M_N_F "vmul")
+                (VMULQ_M_S "vmul") (VMULQ_M_U "vmul") (VMULQ_M_F "vmul")
+                (VMULQ_N_S "vmul") (VMULQ_N_U "vmul") (VMULQ_N_F "vmul")
+                (VSUBQ_M_N_S "vsub") (VSUBQ_M_N_U "vsub") (VSUBQ_M_N_F "vsub")
+                (VSUBQ_M_S "vsub") (VSUBQ_M_U "vsub") (VSUBQ_M_F "vsub")
+                (VSUBQ_N_S "vsub") (VSUBQ_N_U "vsub") (VSUBQ_N_F "vsub")
+                ])
+
  ;; plus and minus are the only SHIFTABLE_OPS for which Thumb2 allows
  ;; a stack pointer operand.  The minus operation is a candidate for an rsub
  ;; and hence only plus is supported.
diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index ab688396f97..5167fbc6add 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -668,21 +668,6 @@ (define_insn "mve_vpnotv16bi"
    [(set_attr "type" "mve_move")
  ])

-;;
-;; [vsubq_n_f])
-;;
-(define_insn "mve_vsubq_n_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
-                      (match_operand:<V_elem> 2 "s_register_operand" "r")]
-        VSUBQ_N_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vsub.f<V_sz_elem>\t%q0, %q1, %2"
-  [(set_attr "type" "mve_move")
-])
-
  ;;
  ;; [vbrsrq_n_f])
  ;;
@@ -871,16 +856,18 @@ (define_insn "mve_vabdq_<supf><mode>"

  ;;
  ;; [vaddq_n_s, vaddq_n_u])
+;; [vsubq_n_s, vsubq_n_u])
+;; [vmulq_n_s, vmulq_n_u])
  ;;

... This trailing ')' is a pre-existing copy-pasto I think. Let's remove it.

Yes, as you can see (almost?) everywhere in mve.md, such patterns have this '])' ending.  (Indeed, I just moved those two lines from the previous patterns.)

Christophe



Thanks,
Kyrill

-(define_insn "mve_vaddq_n_<supf><mode>"
+(define_insn "@mve_<mve_insn>q_n_<supf><mode>"
    [
     (set (match_operand:MVE_2 0 "s_register_operand" "=w")
       (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
                       (match_operand:<V_elem> 2 "s_register_operand" "r")]
-        VADDQ_N))
+        MVE_INT_N_BINARY))
    ]
    "TARGET_HAVE_MVE"
-  "vadd.i%#<V_sz_elem>\t%q0, %q1, %2"
+  "<mve_insn>.i%#<V_sz_elem>\t%q0, %q1, %2"
    [(set_attr "type" "mve_move")
  ])

@@ -1362,26 +1349,13 @@ (define_insn "mve_vmulltq_int_<supf><mode>"
  ])

  ;;
-;; [vmulq_n_u, vmulq_n_s])
-;;
-(define_insn "mve_vmulq_n_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-       (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
-                      (match_operand:<V_elem> 2 "s_register_operand" "r")]
-        VMULQ_N))
-  ]
-  "TARGET_HAVE_MVE"
-  "vmul.i%#<V_sz_elem>\t%q0, %q1, %2"
-  [(set_attr "type" "mve_move")
-])
-
-;;
+;; [vaddq_s, vaddq_u])
  ;; [vmulq_u, vmulq_s])
+;; [vsubq_s, vsubq_u])
  ;;
  (define_insn "mve_vmulq_<supf><mode>"
    [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+    (set (match_operand:MVE_2 0 "s_register_operand" "=w")
       (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
                       (match_operand:MVE_2 2 "s_register_operand" "w")]
         VMULQ))
@@ -1391,14 +1365,14 @@ (define_insn "mve_vmulq_<supf><mode>"
    [(set_attr "type" "mve_move")
  ])

-(define_insn "mve_vmulq<mode>"
+(define_insn "mve_<mve_addsubmul>q<mode>"
    [
     (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-       (mult:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
-                   (match_operand:MVE_2 2 "s_register_operand" "w")))
+       (MVE_INT_BINARY_RTX:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
+                             (match_operand:MVE_2 2 "s_register_operand" "w")))
    ]
    "TARGET_HAVE_MVE"
-  "vmul.i%#<V_sz_elem>\t%q0, %q1, %q2"
+  "<mve_addsubmul>.i%#<V_sz_elem>\t%q0, %q1, %q2"
    [(set_attr "type" "mve_move")
  ])

@@ -1768,21 +1742,6 @@ (define_insn "mve_vshlq_r_<supf><mode>"
    [(set_attr "type" "mve_move")
  ])

-;;
-;; [vsubq_n_s, vsubq_n_u])
-;;
-(define_insn "mve_vsubq_n_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-       (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
-                      (match_operand:<V_elem> 2 "s_register_operand" "r")]
-        VSUBQ_N))
-  ]
-  "TARGET_HAVE_MVE"
-  "vsub.i%#<V_sz_elem>\t%q0, %q1, %2"
-  [(set_attr "type" "mve_move")
-])
-
  ;;
  ;; [vsubq_s, vsubq_u])
  ;;
@@ -1798,17 +1757,6 @@ (define_insn "mve_vsubq_<supf><mode>"
    [(set_attr "type" "mve_move")
  ])

-(define_insn "mve_vsubq<mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-       (minus:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
-                    (match_operand:MVE_2 2 "s_register_operand" "w")))
-  ]
-  "TARGET_HAVE_MVE"
-  "vsub.i%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
  ;;
  ;; [vabdq_f])
  ;;
@@ -1841,16 +1789,18 @@ (define_insn "mve_vaddlvaq_<supf>v4si"

  ;;
  ;; [vaddq_n_f])
+;; [vsubq_n_f])
+;; [vmulq_n_f])
  ;;
-(define_insn "mve_vaddq_n_f<mode>"
+(define_insn "@mve_<mve_insn>q_n_f<mode>"
    [
     (set (match_operand:MVE_0 0 "s_register_operand" "=w")
       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
                       (match_operand:<V_elem> 2 "s_register_operand" "r")]
-        VADDQ_N_F))
+        MVE_FP_N_BINARY))
    ]
    "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vadd.f%#<V_sz_elem>\t%q0, %q1, %2"
+  "<mve_insn>.f%#<V_sz_elem>\t%q0, %q1, %2"
    [(set_attr "type" "mve_move")
  ])

@@ -2224,31 +2174,18 @@ (define_insn "mve_vmovntq_<supf><mode>"
  ])

  ;;
+;; [vaddq_f])
  ;; [vmulq_f])
+;; [vsubq_f])
  ;;
-(define_insn "mve_vmulq_f<mode>"
+(define_insn "mve_<mve_addsubmul>q_f<mode>"
    [
     (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-       (mult:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
+       (MVE_INT_BINARY_RTX:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
                    (match_operand:MVE_0 2 "s_register_operand" "w")))
    ]
    "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vmul.f%#<V_sz_elem>      %q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
-;;
-;; [vmulq_n_f])
-;;
-(define_insn "mve_vmulq_n_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "w")
-                      (match_operand:<V_elem> 2 "s_register_operand" "r")]
-        VMULQ_N_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vmul.f%#<V_sz_elem>      %q0, %q1, %2"
+  "<mve_addsubmul>.f%#<V_sz_elem>\t%q0, %q1, %q2"
    [(set_attr "type" "mve_move")
  ])

@@ -2490,20 +2427,6 @@ (define_insn "mve_vshlltq_n_<supf><mode>"
    [(set_attr "type" "mve_move")
  ])

-;;
-;; [vsubq_f])
-;;
-(define_insn "mve_vsubq_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-       (minus:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
-                    (match_operand:MVE_0 2 "s_register_operand" "w")))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vsub.f%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
  ;;
  ;; [vmulltq_poly_p])
  ;;
@@ -5032,23 +4955,6 @@ (define_insn "mve_vsriq_m_n_<supf><mode>"
    [(set_attr "type" "mve_move")
     (set_attr "length" "8")])

-;;
-;; [vsubq_m_u, vsubq_m_s])
-;;
-(define_insn "mve_vsubq_m_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-       (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
-                      (match_operand:MVE_2 2 "s_register_operand" "w")
-                      (match_operand:MVE_2 3 "s_register_operand" "w")
-                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VSUBQ_M))
-  ]
-  "TARGET_HAVE_MVE"
-  "vpst\;vsubt.i%#<V_sz_elem>\t%q0, %q2, %q3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length" "8")])
-
  ;;
  ;; [vcvtq_m_n_to_f_u, vcvtq_m_n_to_f_s])
  ;;
@@ -5084,35 +4990,39 @@ (define_insn "mve_vabdq_m_<supf><mode>"

  ;;
  ;; [vaddq_m_n_s, vaddq_m_n_u])
+;; [vsubq_m_n_s, vsubq_m_n_u])
+;; [vmulq_m_n_s, vmulq_m_n_u])
  ;;
-(define_insn "mve_vaddq_m_n_<supf><mode>"
+(define_insn "@mve_<mve_insn>q_m_n_<supf><mode>"
    [
     (set (match_operand:MVE_2 0 "s_register_operand" "=w")
        (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
                       (match_operand:MVE_2 2 "s_register_operand" "w")
                       (match_operand:<V_elem> 3 "s_register_operand" "r")
                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VADDQ_M_N))
+        MVE_INT_M_N_BINARY))
    ]
    "TARGET_HAVE_MVE"
-  "vpst\;vaddt.i%#<V_sz_elem>       %q0, %q2, %3"
+  "vpst\;<mve_insn>t.i%#<V_sz_elem>   %q0, %q2, %3"
    [(set_attr "type" "mve_move")
     (set_attr "length""8")])

  ;;
  ;; [vaddq_m_u, vaddq_m_s])
+;; [vsubq_m_u, vsubq_m_s])
+;; [vmulq_m_u, vmulq_m_s])
  ;;
-(define_insn "mve_vaddq_m_<supf><mode>"
+(define_insn "@mve_<mve_insn>q_m_<supf><mode>"
    [
     (set (match_operand:MVE_2 0 "s_register_operand" "=w")
        (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
                       (match_operand:MVE_2 2 "s_register_operand" "w")
                       (match_operand:MVE_2 3 "s_register_operand" "w")
                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VADDQ_M))
+        MVE_INT_M_BINARY))
    ]
    "TARGET_HAVE_MVE"
-  "vpst\;vaddt.i%#<V_sz_elem>       %q0, %q2, %q3"
+  "vpst\;<mve_insn>t.i%#<V_sz_elem>   %q0, %q2, %q3"
    [(set_attr "type" "mve_move")
     (set_attr "length""8")])

@@ -5422,40 +5332,6 @@ (define_insn "mve_vmulltq_int_m_<supf><mode>"
    [(set_attr "type" "mve_move")
     (set_attr "length""8")])

-;;
-;; [vmulq_m_n_u, vmulq_m_n_s])
-;;
-(define_insn "mve_vmulq_m_n_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-       (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
-                      (match_operand:MVE_2 2 "s_register_operand" "w")
-                      (match_operand:<V_elem> 3 "s_register_operand" "r")
-                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VMULQ_M_N))
-  ]
-  "TARGET_HAVE_MVE"
-  "vpst\;vmult.i%#<V_sz_elem>       %q0, %q2, %3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
-;;
-;; [vmulq_m_s, vmulq_m_u])
-;;
-(define_insn "mve_vmulq_m_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-       (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
-                      (match_operand:MVE_2 2 "s_register_operand" "w")
-                      (match_operand:MVE_2 3 "s_register_operand" "w")
-                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VMULQ_M))
-  ]
-  "TARGET_HAVE_MVE"
-  "vpst\;vmult.i%#<V_sz_elem>       %q0, %q2, %q3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
  ;;
  ;; [vornq_m_u, vornq_m_s])
  ;;
@@ -5796,23 +5672,6 @@ (define_insn "mve_vsliq_m_n_<supf><mode>"
    [(set_attr "type" "mve_move")
     (set_attr "length""8")])

-;;
-;; [vsubq_m_n_s, vsubq_m_n_u])
-;;
-(define_insn "mve_vsubq_m_n_<supf><mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-       (unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
-                      (match_operand:MVE_2 2 "s_register_operand" "w")
-                      (match_operand:<V_elem> 3 "s_register_operand" "r")
-                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VSUBQ_M_N))
-  ]
-  "TARGET_HAVE_MVE"
-  "vpst\;vsubt.i%#<V_sz_elem>\t%q0, %q2, %3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
  ;;
  ;; [vhcaddq_rot270_m_s])
  ;;
@@ -6613,35 +6472,39 @@ (define_insn "mve_vabdq_m_f<mode>"

  ;;
  ;; [vaddq_m_f])
+;; [vsubq_m_f])
+;; [vmulq_m_f])
  ;;
-(define_insn "mve_vaddq_m_f<mode>"
+(define_insn "@mve_<mve_insn>q_m_f<mode>"
    [
     (set (match_operand:MVE_0 0 "s_register_operand" "=w")
        (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
                       (match_operand:MVE_0 2 "s_register_operand" "w")
                       (match_operand:MVE_0 3 "s_register_operand" "w")
                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VADDQ_M_F))
+        MVE_FP_M_BINARY))
    ]
    "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vaddt.f%#<V_sz_elem>       %q0, %q2, %q3"
+  "vpst\;<mve_insn>t.f%#<V_sz_elem>   %q0, %q2, %q3"
    [(set_attr "type" "mve_move")
     (set_attr "length""8")])

  ;;
  ;; [vaddq_m_n_f])
+;; [vsubq_m_n_f])
+;; [vmulq_m_n_f])
  ;;
-(define_insn "mve_vaddq_m_n_f<mode>"
+(define_insn "@mve_<mve_insn>q_m_n_f<mode>"
    [
     (set (match_operand:MVE_0 0 "s_register_operand" "=w")
        (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
                       (match_operand:MVE_0 2 "s_register_operand" "w")
                       (match_operand:<V_elem> 3 "s_register_operand" "r")
                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VADDQ_M_N_F))
+        MVE_FP_M_N_BINARY))
    ]
    "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vaddt.f%#<V_sz_elem>       %q0, %q2, %3"
+  "vpst\;<mve_insn>t.f%#<V_sz_elem>   %q0, %q2, %3"
    [(set_attr "type" "mve_move")
     (set_attr "length""8")])

@@ -6985,40 +6848,6 @@ (define_insn "mve_vminnmq_m_f<mode>"
    [(set_attr "type" "mve_move")
     (set_attr "length""8")])

-;;
-;; [vmulq_m_f])
-;;
-(define_insn "mve_vmulq_m_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
-                      (match_operand:MVE_0 2 "s_register_operand" "w")
-                      (match_operand:MVE_0 3 "s_register_operand" "w")
-                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VMULQ_M_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vmult.f%#<V_sz_elem>       %q0, %q2, %q3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
-;;
-;; [vmulq_m_n_f])
-;;
-(define_insn "mve_vmulq_m_n_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
-                      (match_operand:MVE_0 2 "s_register_operand" "w")
-                      (match_operand:<V_elem> 3 "s_register_operand" "r")
-                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VMULQ_M_N_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vmult.f%#<V_sz_elem>       %q0, %q2, %3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
  ;;
  ;; [vornq_m_f])
  ;;
@@ -7053,40 +6882,6 @@ (define_insn "mve_vorrq_m_f<mode>"
    [(set_attr "type" "mve_move")
     (set_attr "length""8")])

-;;
-;; [vsubq_m_f])
-;;
-(define_insn "mve_vsubq_m_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
-                      (match_operand:MVE_0 2 "s_register_operand" "w")
-                      (match_operand:MVE_0 3 "s_register_operand" "w")
-                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VSUBQ_M_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vsubt.f%#<V_sz_elem>\t%q0, %q2, %q3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
-;;
-;; [vsubq_m_n_f])
-;;
-(define_insn "mve_vsubq_m_n_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-       (unspec:MVE_0 [(match_operand:MVE_0 1 "s_register_operand" "0")
-                      (match_operand:MVE_0 2 "s_register_operand" "w")
-                      (match_operand:<V_elem> 3 "s_register_operand" "r")
-                      (match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
-        VSUBQ_M_N_F))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vpst\;vsubt.f%#<V_sz_elem>\t%q0, %q2, %3"
-  [(set_attr "type" "mve_move")
-   (set_attr "length""8")])
-
  ;;
  ;; [vstrbq_s vstrbq_u]
  ;;
@@ -8927,34 +8722,6 @@ (define_insn "mve_vstrwq_scatter_shifted_offset_<supf>v4si_insn"
    "vstrw.32\t%q2, [%0, %q1, uxtw #2]"
    [(set_attr "length" "4")])

-;;
-;; [vaddq_s, vaddq_u])
-;;
-(define_insn "mve_vaddq<mode>"
-  [
-   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
-       (plus:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
-                   (match_operand:MVE_2 2 "s_register_operand" "w")))
-  ]
-  "TARGET_HAVE_MVE"
-  "vadd.i%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
-;;
-;; [vaddq_f])
-;;
-(define_insn "mve_vaddq_f<mode>"
-  [
-   (set (match_operand:MVE_0 0 "s_register_operand" "=w")
-       (plus:MVE_0 (match_operand:MVE_0 1 "s_register_operand" "w")
-                   (match_operand:MVE_0 2 "s_register_operand" "w")))
-  ]
-  "TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT"
-  "vadd.f%#<V_sz_elem>\t%q0, %q1, %q2"
-  [(set_attr "type" "mve_move")
-])
-
  ;;
  ;; [vidupq_n_u])
  ;;
--
2.34.1
