When the scalar operand rs1 is the immediate 0, it does not satisfy the
register_operand predicate used by the vmadd/vmacc/vnmsub/vnmsac .vx
patterns, so the insn fails to match and the following ICE occurs:

error: unrecognizable insn:
(insn 8 5 12 2 (set (reg:RVVM1DI 134 [ <retval> ])
        (if_then_else:RVVM1DI (unspec:RVVMF64BI [
                    (const_vector:RVVMF64BI repeat [
                            (const_int 1 [0x1])
                       ])
                    (reg/v:DI 137 [ vl ])
                    (const_int 2 [0x2]) repeated x2
                    (const_int 0 [0])
                    (reg:SI 66 vl)
                    (reg:SI 67 vtype)
                ] UNSPEC_VPREDICATE)
            (plus:RVVM1DI (mult:RVVM1DI (vec_duplicate:RVVM1DI (const_int 0 [0]))
                    (reg/v:RVVM1DI 136 [ vs2 ]))
                (reg/v:RVVM1DI 135 [ vd ]))
            (reg/v:RVVM1DI 135 [ vd ])))
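
For reference, a minimal reproducer along the lines of the new tests
(bug-7.c/bug-8.c):

#include <riscv_vector.h>

vint64m1_t
f (vint64m1_t vd, vint64m1_t vs2, size_t vl)
{
  /* The scalar operand is the constant 0, which the .vx multiply-add
     patterns previously rejected (register_operand only).  */
  return __riscv_vmacc_vx_i64m1 (vd, 0, vs2, vl);
}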

gcc/ChangeLog:

        * config/riscv/vector.md (*pred_madd<mode>_scalar): Allow the
        scalar operand to be 0.
        (*pred_macc<mode>_scalar): Ditto.
        (*pred_madd<mode>_extended_scalar): Ditto.
        (*pred_macc<mode>_extended_scalar): Ditto.
        (*pred_nmsub<mode>_scalar): Ditto.
        (*pred_nmsac<mode>_scalar): Ditto.
        (*pred_nmsub<mode>_extended_scalar): Ditto.
        (*pred_nmsac<mode>_extended_scalar): Ditto.

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/rvv/base/bug-7.c: New test.
        * gcc.target/riscv/rvv/base/bug-8.c: New test.
---
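Note for reviewers: the constraint is relaxed from "r" to "rJ" ("J" is the
RISC-V integer-zero constraint) and the output templates use the "%z" operand
modifier, which prints the zero register for a const 0 operand.  Assuming
that behaviour, a zero scalar should be emitted directly, roughly like the
following (a sketch, not taken from an actual build):

	vmacc.vx	v8,zero,v9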
 gcc/config/riscv/vector.md                    | 80 +++++++++----------
 .../gcc.target/riscv/rvv/base/bug-7.c         | 26 ++++++
 .../gcc.target/riscv/rvv/base/bug-8.c         | 26 ++++++
 3 files changed, 92 insertions(+), 40 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c

diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index fb625f611d5..20420b74964 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -5331,16 +5331,16 @@ (define_insn "*pred_madd<mode>_scalar"
          (plus:V_VLSI
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" "  r,   r,  r,   r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ"))
              (match_operand:V_VLSI 3 "register_operand"      "  0,  vr,  0,  vr"))
            (match_operand:V_VLSI 4 "register_operand"        " vr,  vr, vr,  vr"))
          (match_dup 3)))]
   "TARGET_VECTOR"
   "@
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%2,%4%p1"
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%z2,%4%p1
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vmadd.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
@@ -5363,16 +5363,16 @@ (define_insn "*pred_macc<mode>_scalar"
          (plus:V_VLSI
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" "  r,   r,  r,   r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ"))
              (match_operand:V_VLSI 3 "register_operand"      " vr,  vr, vr,  vr"))
            (match_operand:V_VLSI 4 "register_operand"        "  0,  vr,  0,  vr"))
          (match_dup 4)))]
   "TARGET_VECTOR"
   "@
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
@@ -5431,16 +5431,16 @@ (define_insn "*pred_madd<mode>_extended_scalar"
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" "  r,   r,  r,   r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ")))
              (match_operand:V_VLSI_D 3 "register_operand"         "  0,  vr,  0,  vr"))
            (match_operand:V_VLSI_D 4 "register_operand"           " vr,  vr, vr,  vr"))
          (match_dup 3)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m2r.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1
-   vmadd.vx\t%0,%2,%4%p1
-   vmv%m2r.v\t%0,%2\;vmadd.vx\t%0,%2,%4%p1"
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m2r.v\t%0,%z2\;vmadd.vx\t%0,%z2,%4%p1
+   vmadd.vx\t%0,%z2,%4%p1
+   vmv%m2r.v\t%0,%z2\;vmadd.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
@@ -5464,16 +5464,16 @@ (define_insn "*pred_macc<mode>_extended_scalar"
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" "  r,   r,  r,   r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ")))
              (match_operand:V_VLSI_D 3 "register_operand"         " vr,  vr, vr,  vr"))
            (match_operand:V_VLSI_D 4 "register_operand"           "  0,  vr,  0,  vr"))
          (match_dup 4)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1
-   vmacc.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%2,%3%p1"
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1
+   vmacc.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vmacc.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
@@ -5630,15 +5630,15 @@ (define_insn "*pred_nmsub<mode>_scalar"
            (match_operand:V_VLSI 4 "register_operand"        " vr,  vr, vr,  vr")
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" "  r,   r,  r,   r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ"))
              (match_operand:V_VLSI 3 "register_operand"      "  0,  vr,  0,  vr")))
          (match_dup 3)))]
   "TARGET_VECTOR"
   "@
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
@@ -5662,15 +5662,15 @@ (define_insn "*pred_nmsac<mode>_scalar"
            (match_operand:V_VLSI 4 "register_operand"        "  0,  vr,  0,  vr")
            (mult:V_VLSI
              (vec_duplicate:V_VLSI
-               (match_operand:<VEL> 2 "register_operand" "  r,   r,  r,   r"))
+               (match_operand:<VEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ"))
              (match_operand:V_VLSI 3 "register_operand"      " vr,  vr, vr,  vr")))
          (match_dup 4)))]
   "TARGET_VECTOR"
   "@
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
@@ -5730,15 +5730,15 @@ (define_insn "*pred_nmsub<mode>_extended_scalar"
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" "  r,   r,  r,   r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ")))
              (match_operand:V_VLSI_D 3 "register_operand"         "  0,  vr,  0,  vr")))
          (match_dup 3)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1
-   vnmsub.vx\t%0,%2,%4%p1
-   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%2,%4%p1"
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1
+   vnmsub.vx\t%0,%z2,%4%p1
+   vmv%m3r.v\t%0,%3\;vnmsub.vx\t%0,%z2,%4%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "3")
@@ -5763,15 +5763,15 @@ (define_insn "*pred_nmsac<mode>_extended_scalar"
            (mult:V_VLSI_D
              (vec_duplicate:V_VLSI_D
                (sign_extend:<VEL>
-                 (match_operand:<VSUBEL> 2 "register_operand" "  r,   r,  r,   r")))
+                 (match_operand:<VSUBEL> 2 "reg_or_0_operand" " rJ,  rJ, rJ,  rJ")))
              (match_operand:V_VLSI_D 3 "register_operand"         " vr,  vr, vr,  vr")))
          (match_dup 4)))]
   "TARGET_VECTOR && !TARGET_64BIT"
   "@
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1
-   vnmsac.vx\t%0,%2,%3%p1
-   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%2,%3%p1"
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1
+   vnmsac.vx\t%0,%z2,%3%p1
+   vmv%m4r.v\t%0,%4\;vnmsac.vx\t%0,%z2,%3%p1"
   [(set_attr "type" "vimuladd")
    (set_attr "mode" "<MODE>")
    (set_attr "merge_op_idx" "4")
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c
new file mode 100644
index 00000000000..28766ce860f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-7.c
@@ -0,0 +1,26 @@
+/* Test that we do not ICE when compiling.  */
+/* { dg-do assemble } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O2"  { target { rv64 } } } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O2"  { target { rv32 } } } */
+
+#include <riscv_vector.h>
+
+vint64m1_t f1 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vmacc_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f2 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsac_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m8_t f3 (vint64m8_t vd, vint64m8_t vs2, size_t vl)
+{
+  return __riscv_vmadd_vx_i64m8 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f4 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsub_vx_i64m1 (vd, 0, vs2, vl);
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c
new file mode 100644
index 00000000000..975f75581a1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-8.c
@@ -0,0 +1,26 @@
+/* Test that we do not ICE when compiling.  */
+/* { dg-do assemble } */
+/* { dg-options "-march=rv64gcv -mabi=lp64d -O0"  { target { rv64 } } } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32d -O0"  { target { rv32 } } } */
+
+#include <riscv_vector.h>
+
+vint64m1_t f1 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vmacc_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f2 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsac_vx_i64m1 (vd, 0, vs2, vl);
+}
+
+vint64m8_t f3 (vint64m8_t vd, vint64m8_t vs2, size_t vl)
+{
+  return __riscv_vmadd_vx_i64m8 (vd, 0, vs2, vl);
+}
+
+vint64m1_t f4 (vint64m1_t vd, vint64m1_t vs2, size_t vl)
+{
+  return __riscv_vnmsub_vx_i64m1 (vd, 0, vs2, vl);
+}
-- 
2.17.1
