Committed with a git log tweak: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=c413abed869e7e34a86855a015413418f3c6b595
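
For anyone reading the archive without the testsuite files, here is a minimal, hypothetical sketch of the kind of masked ternary intrinsic (vmacc and friends) whose expansion through function_expander::use_ternop_insn the patch below fixes. It is not the actual bug-1.C / bug-2.c testcase added by the patch, and it assumes the __riscv_-prefixed API from <riscv_vector.h> on an rv64gcv toolchain; the point is simply that the accumulator vd is also what the expander now passes as the merge operand, rather than a separately constructed undef merge.

/* Hypothetical illustration only; not the bug-1.C/bug-2.c tests added by
   this patch.  Assumes the __riscv_-prefixed intrinsics from
   <riscv_vector.h>, built with e.g. -march=rv64gcv -mabi=lp64d -O2.  */
#include <riscv_vector.h>

/* Masked multiply-accumulate: vd[i] += vs1[i] * vs2[i] for active elements.
   The accumulator vd is also the operand the builtin expander uses as the
   merge value, which is the operand handling the patch corrects.  */
vint32m1_t
masked_macc (vbool32_t mask, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2,
             size_t vl)
{
  return __riscv_vmacc_vv_i32m1_m (mask, vd, vs1, vs2, vl);
}

The same shape applies to the widening variants (vwmacc*), whose use_widen_ternop_insn path and vector.md patterns are simplified in the same way below.
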
On Mon, Mar 13, 2023 at 3:52 PM <juzhe.zh...@rivai.ai> wrote: > > From: Ju-Zhe Zhong <juzhe.zh...@rivai.ai> > > Co-authored-by: kito-cheng <kito.ch...@sifive.com> > Co-authored-by: kito-cheng <kito.ch...@gmail.com> > > This patch fixed a bunch of bugs reported by kito.ch...@sifive.com. > > gcc/ChangeLog: > > * config/riscv/riscv-v.cc (legitimize_move): Handle undef value. > * config/riscv/riscv-vector-builtins.cc > (function_expander::use_ternop_insn): Fix bugs of ternary intrinsic. > (function_expander::use_widen_ternop_insn): Ditto. > * config/riscv/vector.md (@vundefined<mode>): New pattern. > (pred_mul_<optab><mode>_undef_merge): Fix bugs. > (*pred_mul_<optab><mode>_undef_merge_scalar): Ditto. > (*pred_mul_<optab><mode>_undef_merge_extended_scalar): Ditto. > (pred_neg_mul_<optab><mode>_undef_merge): Ditto. > (*pred_neg_mul_<optab><mode>_undef_merge_scalar): Ditto. > > gcc/testsuite/ChangeLog: > > * gcc.target/riscv/rvv/base/binop_vv_constraint-4.c: Adapt the test. > * gcc.target/riscv/rvv/base/binop_vv_constraint-6.c: Ditto. > * gcc.target/riscv/rvv/base/binop_vx_constraint-127.c: Ditto. > * g++.target/riscv/rvv/base/bug-1.C: New test. > * gcc.target/riscv/rvv/base/bug-2.c: New test. > > Signed-off-by: Ju-Zhe Zhong <juzhe.zh...@rivai.ai> > Co-authored-by: kito-cheng <kito.ch...@sifive.com> > Co-authored-by: kito-cheng <kito.ch...@gmail.com> > > --- > gcc/config/riscv/riscv-v.cc | 4 + > gcc/config/riscv/riscv-vector-builtins.cc | 17 +- > gcc/config/riscv/vector.md | 889 +++++++----------- > .../g++.target/riscv/rvv/base/bug-1.C | 40 + > .../riscv/rvv/base/binop_vv_constraint-4.c | 1 - > .../riscv/rvv/base/binop_vv_constraint-6.c | 2 +- > .../riscv/rvv/base/binop_vx_constraint-127.c | 2 +- > .../gcc.target/riscv/rvv/base/bug-2.c | 86 ++ > 8 files changed, 482 insertions(+), 559 deletions(-) > create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/bug-1.C > create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/base/bug-2.c > > diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc > index d65c65b26cd..9b83ef6ea5e 100644 > --- a/gcc/config/riscv/riscv-v.cc > +++ b/gcc/config/riscv/riscv-v.cc > @@ -283,6 +283,10 @@ legitimize_move (rtx dest, rtx src, machine_mode > mask_mode) > emit_move_insn (tmp, src); > src = tmp; > } > + > + if (satisfies_constraint_vu (src)) > + return false; > + > emit_vlmax_op (code_for_pred_mov (mode), dest, src, mask_mode); > return true; > } > diff --git a/gcc/config/riscv/riscv-vector-builtins.cc > b/gcc/config/riscv/riscv-vector-builtins.cc > index 75e65091db3..0df3cd15119 100644 > --- a/gcc/config/riscv/riscv-vector-builtins.cc > +++ b/gcc/config/riscv/riscv-vector-builtins.cc > @@ -3129,7 +3129,6 @@ function_expander::use_ternop_insn (bool vd_accum_p, > insn_code icode) > rtx vd = expand_normal (CALL_EXPR_ARG (exp, arg_offset++)); > rtx vs1 = expand_normal (CALL_EXPR_ARG (exp, arg_offset++)); > rtx vs2 = expand_normal (CALL_EXPR_ARG (exp, arg_offset++)); > - rtx merge = use_real_merge_p (pred) ? 
vd : RVV_VUNDEF (mode); > > if (VECTOR_MODE_P (GET_MODE (vs1))) > { > @@ -3139,7 +3138,7 @@ function_expander::use_ternop_insn (bool vd_accum_p, > insn_code icode) > add_input_operand (mode, vs2); > if (vd_accum_p) > add_input_operand (mode, vd); > - add_input_operand (mode, merge); > + add_input_operand (mode, vd); > } > else > { > @@ -3154,7 +3153,7 @@ function_expander::use_ternop_insn (bool vd_accum_p, > insn_code icode) > add_input_operand (mode, vd); > add_input_operand (mode, vs2); > } > - add_input_operand (mode, merge); > + add_input_operand (mode, vd); > } > > for (int argno = arg_offset; argno < call_expr_nargs (exp); argno++) > @@ -3171,8 +3170,6 @@ function_expander::use_ternop_insn (bool vd_accum_p, > insn_code icode) > rtx > function_expander::use_widen_ternop_insn (insn_code icode) > { > - machine_mode mode = TYPE_MODE (builtin_types[type.index].vector); > - > /* Record the offset to get the argument. */ > int arg_offset = 0; > > @@ -3181,16 +3178,8 @@ function_expander::use_widen_ternop_insn (insn_code > icode) > else > add_all_one_mask_operand (mask_mode ()); > > - rtx merge = RVV_VUNDEF (mode); > - if (use_real_merge_p (pred)) > - merge = expand_normal (CALL_EXPR_ARG (exp, arg_offset)); > - > for (int argno = arg_offset; argno < call_expr_nargs (exp); argno++) > - { > - if (argno == call_expr_nargs (exp) - 1) > - add_input_operand (mode, merge); > - add_input_operand (argno); > - } > + add_input_operand (argno); > > add_input_operand (Pmode, get_tail_policy_for_pred (pred)); > add_input_operand (Pmode, get_mask_policy_for_pred (pred)); > diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md > index 977d3f2042c..37a539b4852 100644 > --- a/gcc/config/riscv/vector.md > +++ b/gcc/config/riscv/vector.md > @@ -215,13 +215,13 @@ > > vfwcvtftoi,vfwcvtftof,vfncvtitof,vfncvtftoi,vfncvtftof,vfclass,\ > > vired,viwred,vfredu,vfredo,vfwredu,vfwredo,vimovxv,vfmovfv,\ > > vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\ > - vgather,vldff") > + vgather,vldff,viwmuladd,vfwmuladd") > (const_int 2) > > (eq_attr "type" "vimerge,vfmerge,vcompress") > (const_int 1) > > - (eq_attr "type" "vimuladd,viwmuladd,vfmuladd,vfwmuladd") > + (eq_attr "type" "vimuladd,vfmuladd") > (const_int 5)] > (const_int INVALID_ATTRIBUTE))) > > @@ -245,10 +245,10 @@ > > vsshift,vnclip,vfalu,vfmul,vfminmax,vfdiv,vfwalu,vfwmul,\ > > vfsgnj,vfmerge,vired,viwred,vfredu,vfredo,vfwredu,vfwredo,\ > > vslideup,vslidedown,vislide1up,vislide1down,vfslide1up,vfslide1down,\ > - vgather") > + vgather,viwmuladd,vfwmuladd") > (const_int 5) > > - (eq_attr "type" "vicmp,vimuladd,viwmuladd,vfcmp,vfmuladd,vfwmuladd") > + (eq_attr "type" "vicmp,vimuladd,vfcmp,vfmuladd") > (const_int 6) > > (eq_attr "type" "vmpop,vmffs,vmidx") > @@ -275,10 +275,10 @@ > vsshift,vnclip,vfalu,vfmul,vfminmax,vfdiv,\ > vfwalu,vfwmul,vfsgnj,vfmerge,vired,viwred,vfredu,\ > > vfredo,vfwredu,vfwredo,vslideup,vslidedown,vislide1up,\ > - vislide1down,vfslide1up,vfslide1down,vgather") > + > vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd") > (symbol_ref "riscv_vector::get_ta(operands[6])") > > - (eq_attr "type" "vimuladd,viwmuladd,vfmuladd,vfwmuladd") > + (eq_attr "type" "vimuladd,vfmuladd") > (symbol_ref "riscv_vector::get_ta(operands[7])") > > (eq_attr "type" "vmidx") > @@ -303,10 +303,11 @@ > viwalu,viwmul,vnshift,vaalu,vsmul,vsshift,\ > vnclip,vicmp,vfalu,vfmul,vfminmax,vfdiv,\ > vfwalu,vfwmul,vfsgnj,vfcmp,vslideup,vslidedown,\ > - > vislide1up,vislide1down,vfslide1up,vfslide1down,vgather") > + > 
vislide1up,vislide1down,vfslide1up,vfslide1down,vgather,\ > + viwmuladd,vfwmuladd") > (symbol_ref "riscv_vector::get_ma(operands[7])") > > - (eq_attr "type" "vimuladd,viwmuladd,vfmuladd,vfwmuladd") > + (eq_attr "type" "vimuladd,vfmuladd") > (symbol_ref "riscv_vector::get_ma(operands[8])") > > (eq_attr "type" "vmsfs,vmidx") > @@ -335,12 +336,12 @@ > viwalu,viwmul,vnshift,vimuladd,vaalu,vsmul,vsshift,\ > > vnclip,vicmp,vfalu,vfmul,vfminmax,vfdiv,vfwalu,vfwmul,\ > > vfsgnj,vfcmp,vfmuladd,vslideup,vslidedown,vislide1up,\ > - vislide1down,vfslide1up,vfslide1down,vgather") > + > vislide1down,vfslide1up,vfslide1down,vgather,viwmuladd,vfwmuladd") > (symbol_ref "INTVAL (operands[8])") > (eq_attr "type" "vstux,vstox") > (symbol_ref "INTVAL (operands[5])") > > - (eq_attr "type" "vimuladd,viwmuladd,vfwmuladd") > + (eq_attr "type" "vimuladd") > (symbol_ref "INTVAL (operands[9])") > > (eq_attr "type" "vmsfs,vmidx,vcompress") > @@ -360,6 +361,12 @@ > "TARGET_VECTOR" > "") > > +(define_insn "@vundefined<mode>" > + [(set (match_operand:VB 0 "register_operand" "=vr") > + (unspec:VB [(reg:SI X0_REGNUM)] UNSPEC_VUNDEF))] > + "TARGET_VECTOR" > + "") > + > (define_expand "@vreinterpret<mode>" > [(set (match_operand:V 0 "register_operand") > (match_operand 1 "vector_any_register_operand"))] > @@ -1008,20 +1015,19 @@ > (set_attr "vl_op_idx" "3")]) > > (define_insn "@pred_merge<mode>" > - [(set (match_operand:V 0 "register_operand" "=vd,vd,vd,vd") > + [(set (match_operand:V 0 "register_operand" "=vd,vd,vd,vd") > (if_then_else:V > - (match_operand:<VM> 4 "register_operand" " vm,vm,vm,vm") > - (if_then_else:V > - (unspec:<VM> > - [(match_dup 4) > - (match_operand 5 "vector_length_operand" " rK,rK,rK,rK") > - (match_operand 6 "const_int_operand" " i, i, i, i") > - (match_operand 7 "const_int_operand" " i, i, i, i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > - (match_operand:V 3 "vector_arith_operand" " vr,vr,vi,vi") > - (match_operand:V 2 "register_operand" " vr,vr,vr,vr")) > - (match_operand:V 1 "vector_merge_operand" " vu, 0,vu, 0")))] > + (unspec:<VM> > + [(match_operand 5 "vector_length_operand" " rK,rK,rK,rK") > + (match_operand 6 "const_int_operand" " i, i, i, i") > + (match_operand 7 "const_int_operand" " i, i, i, i") > + (reg:SI VL_REGNUM) > + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (vec_merge:V > + (match_operand:V 3 "vector_arith_operand" " vr,vr,vi,vi") > + (match_operand:V 2 "register_operand" " vr,vr,vr,vr") > + (match_operand:<VM> 4 "register_operand" " vm,vm,vm,vm")) > + (match_operand:V 1 "vector_merge_operand" " vu, 0,vu, 0")))] > "TARGET_VECTOR" > "vmerge.v%o3m\t%0,%2,%v3,%4" > [(set_attr "type" "vimerge") > @@ -1030,18 +1036,17 @@ > (define_insn "@pred_merge<mode>_scalar" > [(set (match_operand:VI_QHS 0 "register_operand" "=vd,vd") > (if_then_else:VI_QHS > - (match_operand:<VM> 4 "register_operand" " vm,vm") > - (if_then_else:VI_QHS > - (unspec:<VM> > - [(match_dup 4) > - (match_operand 5 "vector_length_operand" " rK,rK") > - (match_operand 6 "const_int_operand" " i, i") > - (match_operand 7 "const_int_operand" " i, i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (unspec:<VM> > + [(match_operand 5 "vector_length_operand" " rK,rK") > + (match_operand 6 "const_int_operand" " i, i") > + (match_operand 7 "const_int_operand" " i, i") > + (reg:SI VL_REGNUM) > + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (vec_merge:VI_QHS > (vec_duplicate:VI_QHS > (match_operand:<VEL> 3 "register_operand" " r, r")) > - (match_operand:VI_QHS 2 
"register_operand" " vr,vr")) > + (match_operand:VI_QHS 2 "register_operand" " vr,vr") > + (match_operand:<VM> 4 "register_operand" " vm,vm")) > (match_operand:VI_QHS 1 "vector_merge_operand" " vu, 0")))] > "TARGET_VECTOR" > "vmerge.vxm\t%0,%2,%3,%4" > @@ -1051,18 +1056,17 @@ > (define_expand "@pred_merge<mode>_scalar" > [(set (match_operand:VI_D 0 "register_operand") > (if_then_else:VI_D > - (match_operand:<VM> 4 "register_operand") > - (if_then_else:VI_D > - (unspec:<VM> > - [(match_dup 4) > - (match_operand 5 "vector_length_operand") > - (match_operand 6 "const_int_operand") > - (match_operand 7 "const_int_operand") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (unspec:<VM> > + [(match_operand 5 "vector_length_operand") > + (match_operand 6 "const_int_operand") > + (match_operand 7 "const_int_operand") > + (reg:SI VL_REGNUM) > + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (vec_merge:VI_D > (vec_duplicate:VI_D > (match_operand:<VEL> 3 "reg_or_int_operand")) > - (match_operand:VI_D 2 "register_operand")) > + (match_operand:VI_D 2 "register_operand") > + (match_operand:<VM> 4 "register_operand")) > (match_operand:VI_D 1 "vector_merge_operand")))] > "TARGET_VECTOR" > { > @@ -1084,18 +1088,17 @@ > (define_insn "*pred_merge<mode>_scalar" > [(set (match_operand:VI_D 0 "register_operand" "=vd,vd") > (if_then_else:VI_D > - (match_operand:<VM> 4 "register_operand" " vm,vm") > - (if_then_else:VI_D > - (unspec:<VM> > - [(match_dup 4) > - (match_operand 5 "vector_length_operand" " rK,rK") > - (match_operand 6 "const_int_operand" " i, i") > - (match_operand 7 "const_int_operand" " i, i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (unspec:<VM> > + [(match_operand 5 "vector_length_operand" " rK,rK") > + (match_operand 6 "const_int_operand" " i, i") > + (match_operand 7 "const_int_operand" " i, i") > + (reg:SI VL_REGNUM) > + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (vec_merge:VI_D > (vec_duplicate:VI_D > (match_operand:<VEL> 3 "register_operand" " r, r")) > - (match_operand:VI_D 2 "register_operand" " vr,vr")) > + (match_operand:VI_D 2 "register_operand" " vr,vr") > + (match_operand:<VM> 4 "register_operand" " vm,vm")) > (match_operand:VI_D 1 "vector_merge_operand" " vu, 0")))] > "TARGET_VECTOR" > "vmerge.vxm\t%0,%2,%3,%4" > @@ -1105,19 +1108,18 @@ > (define_insn "*pred_merge<mode>_extended_scalar" > [(set (match_operand:VI_D 0 "register_operand" "=vd,vd") > (if_then_else:VI_D > - (match_operand:<VM> 4 "register_operand" " vm,vm") > - (if_then_else:VI_D > - (unspec:<VM> > - [(match_dup 4) > - (match_operand 5 "vector_length_operand" " rK,rK") > - (match_operand 6 "const_int_operand" " i, i") > - (match_operand 7 "const_int_operand" " i, i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (unspec:<VM> > + [(match_operand 5 "vector_length_operand" " rK,rK") > + (match_operand 6 "const_int_operand" " i, i") > + (match_operand 7 "const_int_operand" " i, i") > + (reg:SI VL_REGNUM) > + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (vec_merge:VI_D > (vec_duplicate:VI_D > (sign_extend:<VEL> > (match_operand:<VSUBEL> 3 "register_operand" " r, r"))) > - (match_operand:VI_D 2 "register_operand" " vr,vr")) > + (match_operand:VI_D 2 "register_operand" " vr,vr") > + (match_operand:<VM> 4 "register_operand" " vm,vm")) > (match_operand:VI_D 1 "vector_merge_operand" " vu, 0")))] > "TARGET_VECTOR" > "vmerge.vxm\t%0,%2,%3,%4" > @@ -3561,19 +3563,19 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_cmp<mode>" > - [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr, > vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK") > - (match_operand 7 "const_int_operand" " i, i") > - (match_operand 8 "const_int_operand" " i, i") > + [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1,vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK, > rK, rK") > + (match_operand 7 "const_int_operand" " i, i, > i, i") > + (match_operand 8 "const_int_operand" " i, i, > i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "comparison_except_ltge_operator" > - [(match_operand:VI 4 "register_operand" " vr, vr") > - (match_operand:VI 5 "vector_arith_operand" " vr, vi")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu, > 0vu")))] > + [(match_operand:VI 4 "register_operand" " vr, vr, > vr, vr") > + (match_operand:VI 5 "vector_arith_operand" " vr, vr, > vi, vi")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, 0, > vu, 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.v%o5\t%0,%4,%v5%p1" > [(set_attr "type" "vicmp") > @@ -3581,19 +3583,19 @@ > > ;; We use early-clobber for source LMUL > dest LMUL. > (define_insn "*pred_cmp<mode>_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr, > &vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK") > - (match_operand 7 "const_int_operand" " i, i") > - (match_operand 8 "const_int_operand" " i, i") > + [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1,vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK, > rK, rK") > + (match_operand 7 "const_int_operand" " i, i, > i, i") > + (match_operand 8 "const_int_operand" " i, i, > i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "comparison_except_ltge_operator" > - [(match_operand:VI 4 "register_operand" " vr, vr") > - (match_operand:VI 5 "vector_arith_operand" " vr, vi")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu, > 0vu")))] > + [(match_operand:VI 4 "register_operand" " vr, vr, > vr, vr") > + (match_operand:VI 5 "vector_arith_operand" " vr, vr, > vi, vi")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, 0, > vu, 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.v%o5\t%0,%4,%v5%p1" > [(set_attr "type" "vicmp") > @@ -3618,19 +3620,19 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_ltge<mode>" > - [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr, > vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK") > - (match_operand 7 "const_int_operand" " i, i") > - (match_operand 8 "const_int_operand" " i, i") > + [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1,vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK, > rK, rK") > + (match_operand 7 "const_int_operand" " i, i, > i, i") > + (match_operand 8 "const_int_operand" " i, i, > i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "ltge_operator" > - [(match_operand:VI 4 "register_operand" " vr, vr") > - (match_operand:VI 5 "vector_neg_arith_operand" " vr, vj")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu, > 0vu")))] > + [(match_operand:VI 4 "register_operand" " vr, vr, > vr, vr") > + (match_operand:VI 5 "vector_neg_arith_operand" " vr, vr, > vj, vj")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, 0, > vu, 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.v%o5\t%0,%4,%v5%p1" > [(set_attr "type" "vicmp") > @@ -3638,19 +3640,19 @@ > > ;; We use early-clobber for source LMUL > dest LMUL. > (define_insn "*pred_ltge<mode>_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr, > &vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK") > - (match_operand 7 "const_int_operand" " i, i") > - (match_operand 8 "const_int_operand" " i, i") > + [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1,vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK, > rK, rK") > + (match_operand 7 "const_int_operand" " i, i, > i, i") > + (match_operand 8 "const_int_operand" " i, i, > i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "ltge_operator" > - [(match_operand:VI 4 "register_operand" " vr, vr") > - (match_operand:VI 5 "vector_neg_arith_operand" " vr, vj")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu, > 0vu")))] > + [(match_operand:VI 4 "register_operand" " vr, vr, > vr, vr") > + (match_operand:VI 5 "vector_neg_arith_operand" " vr, vr, > vj, vj")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, 0, > vu, 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.v%o5\t%0,%4,%v5%p1" > [(set_attr "type" "vicmp") > @@ -3676,20 +3678,20 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_cmp<mode>_scalar" > - [(set (match_operand:<VM> 0 "register_operand" "=vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "comparison_except_eqge_operator" > - [(match_operand:VI_QHS 4 "register_operand" " vr") > + [(match_operand:VI_QHS 4 "register_operand" " vr, vr") > (vec_duplicate:VI_QHS > - (match_operand:<VEL> 5 "register_operand" " r"))]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " r, > r"))]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > @@ -3697,20 +3699,20 @@ > > ;; We use early-clobber for source LMUL > dest LMUL. > (define_insn "*pred_cmp<mode>_scalar_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "comparison_except_eqge_operator" > - [(match_operand:VI_QHS 4 "register_operand" " vr") > + [(match_operand:VI_QHS 4 "register_operand" " vr, vr") > (vec_duplicate:VI_QHS > - (match_operand:<VEL> 5 "register_operand" " r"))]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " r, > r"))]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > @@ -3736,20 +3738,20 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_eqne<mode>_scalar" > - [(set (match_operand:<VM> 0 "register_operand" "=vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "equality_operator" > [(vec_duplicate:VI_QHS > - (match_operand:<VEL> 5 "register_operand" " r")) > - (match_operand:VI_QHS 4 "register_operand" " vr")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " r, r")) > + (match_operand:VI_QHS 4 "register_operand" " vr, vr")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > @@ -3757,20 +3759,20 @@ > > ;; We use early-clobber for source LMUL > dest LMUL. > (define_insn "*pred_eqne<mode>_scalar_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "equality_operator" > [(vec_duplicate:VI_QHS > - (match_operand:<VEL> 5 "register_operand" " r")) > - (match_operand:VI_QHS 4 "register_operand" " vr")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " r, r")) > + (match_operand:VI_QHS 4 "register_operand" " vr, vr")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > @@ -3853,20 +3855,20 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_cmp<mode>_scalar" > - [(set (match_operand:<VM> 0 "register_operand" "=vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "comparison_except_eqge_operator" > - [(match_operand:VI_D 4 "register_operand" " vr") > + [(match_operand:VI_D 4 "register_operand" " vr, vr") > (vec_duplicate:VI_D > - (match_operand:<VEL> 5 "register_operand" " r"))]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " r, > r"))]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > @@ -3874,20 +3876,20 @@ > > ;; We use early-clobber for source LMUL > dest LMUL. > (define_insn "*pred_cmp<mode>_scalar_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "comparison_except_eqge_operator" > - [(match_operand:VI_D 4 "register_operand" " vr") > + [(match_operand:VI_D 4 "register_operand" " vr, vr") > (vec_duplicate:VI_D > - (match_operand:<VEL> 5 "register_operand" " r"))]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " r, > r"))]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > @@ -3895,20 +3897,20 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_eqne<mode>_scalar" > - [(set (match_operand:<VM> 0 "register_operand" "=vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "equality_operator" > [(vec_duplicate:VI_D > - (match_operand:<VEL> 5 "register_operand" " r")) > - (match_operand:VI_D 4 "register_operand" " vr")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " r, r")) > + (match_operand:VI_D 4 "register_operand" " vr, vr")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > @@ -3916,20 +3918,20 @@ > > ;; We use early-clobber for source LMUL > dest LMUL. > (define_insn "*pred_eqne<mode>_scalar_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "equality_operator" > [(vec_duplicate:VI_D > - (match_operand:<VEL> 5 "register_operand" " r")) > - (match_operand:VI_D 4 "register_operand" " vr")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " r, r")) > + (match_operand:VI_D 4 "register_operand" " vr, vr")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > @@ -3937,42 +3939,42 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_cmp<mode>_extended_scalar" > - [(set (match_operand:<VM> 0 "register_operand" "=vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "comparison_except_eqge_operator" > - [(match_operand:VI_D 4 "register_operand" " vr") > + [(match_operand:VI_D 4 "register_operand" " vr, vr") > (vec_duplicate:VI_D > (sign_extend:<VEL> > - (match_operand:<VSUBEL> 5 "register_operand" " r")))]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VSUBEL> 5 "register_operand" " r, > r")))]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > (set_attr "mode" "<MODE>")]) > > (define_insn "*pred_cmp<mode>_extended_scalar_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "comparison_except_eqge_operator" > - [(match_operand:VI_D 4 "register_operand" " vr") > + [(match_operand:VI_D 4 "register_operand" " vr, vr") > (vec_duplicate:VI_D > (sign_extend:<VEL> > - (match_operand:<VSUBEL> 5 "register_operand" " r")))]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VSUBEL> 5 "register_operand" " r, > r")))]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > @@ -3980,42 +3982,42 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_eqne<mode>_extended_scalar" > - [(set (match_operand:<VM> 0 "register_operand" "=vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "equality_operator" > [(vec_duplicate:VI_D > (sign_extend:<VEL> > - (match_operand:<VSUBEL> 5 "register_operand" " r"))) > - (match_operand:VI_D 4 "register_operand" " vr")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VSUBEL> 5 "register_operand" " r, > r"))) > + (match_operand:VI_D 4 "register_operand" " vr, > vr")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > (set_attr "mode" "<MODE>")]) > > (define_insn "*pred_eqne<mode>_extended_scalar_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "equality_operator" > [(vec_duplicate:VI_D > (sign_extend:<VEL> > - (match_operand:<VSUBEL> 5 "register_operand" " r"))) > - (match_operand:VI_D 4 "register_operand" " vr")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VSUBEL> 5 "register_operand" " r, > r"))) > + (match_operand:VI_D 4 "register_operand" " vr, > vr")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vms%B3.vx\t%0,%4,%5%p1" > [(set_attr "type" "vicmp") > @@ -4162,7 +4164,7 @@ > (match_operand:VI 2 "register_operand") > (match_operand:VI 3 "register_operand")) > (match_operand:VI 4 "register_operand")) > - (match_operand:VI 5 "vector_merge_operand")))] > + (match_operand:VI 5 "register_operand")))] > "TARGET_VECTOR" > { > /* Swap the multiplication operands if the fallback value is the > @@ -4171,33 +4173,6 @@ > std::swap (operands[2], operands[3]); > }) > > -(define_insn "pred_mul_<optab><mode>_undef_merge" > - [(set (match_operand:VI 0 "register_operand" "=vd, vr, vd, vr, > ?&vr") > - (if_then_else:VI > - (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, > vm,Wc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK, rK, rK, > rK") > - (match_operand 7 "const_int_operand" " i, i, i, i, > i") > - (match_operand 8 "const_int_operand" " i, i, i, i, > i") > - (match_operand 9 "const_int_operand" " i, i, i, i, > i") > - (reg:SI VL_REGNUM) > - (reg:SI 
VTYPE_REGNUM)] UNSPEC_VPREDICATE) > - (plus_minus:VI > - (mult:VI > - (match_operand:VI 2 "register_operand" " %0, 0, vr, vr, > vr") > - (match_operand:VI 3 "register_operand" " vr, vr, vr, vr, > vr")) > - (match_operand:VI 4 "register_operand" " vr, vr, 0, 0, > vr")) > - (match_operand:VI 5 "vector_undef_operand" " vu, vu, vu, vu, > vu")))] > - "TARGET_VECTOR" > - "@ > - v<madd_nmsub>.vv\t%0,%3,%4%p1 > - v<madd_nmsub>.vv\t%0,%3,%4%p1 > - v<macc_nmsac>.vv\t%0,%2,%3%p1 > - v<macc_nmsac>.vv\t%0,%2,%3%p1 > - vmv.v.v\t%0,%4\;v<macc_nmsac>.vv\t%0,%2,%3%p1" > - [(set_attr "type" "vimuladd") > - (set_attr "mode" "<MODE>")]) > - > (define_insn "*pred_<madd_nmsub><mode>" > [(set (match_operand:VI 0 "register_operand" "=vd, vr, ?&vr") > (if_then_else:VI > @@ -4326,40 +4301,12 @@ > (match_operand:<VEL> 2 "reg_or_int_operand")) > (match_operand:VI_QHS 3 "register_operand")) > (match_operand:VI_QHS 4 "register_operand")) > - (match_operand:VI_QHS 5 "vector_merge_operand")))] > + (match_operand:VI_QHS 5 "register_operand")))] > "TARGET_VECTOR" > { > operands[2] = force_reg (<VEL>mode, operands[2]); > }) > > -(define_insn "*pred_mul_<optab><mode>_undef_merge_scalar" > - [(set (match_operand:VI 0 "register_operand" "=vd, vr, vd, vr, > ?&vr") > - (if_then_else:VI > - (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, > vm,Wc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK, rK, rK, > rK") > - (match_operand 7 "const_int_operand" " i, i, i, i, > i") > - (match_operand 8 "const_int_operand" " i, i, i, i, > i") > - (match_operand 9 "const_int_operand" " i, i, i, i, > i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > - (plus_minus:VI > - (mult:VI > - (vec_duplicate:VI > - (match_operand:<VEL> 2 "register_operand" " r, r, r, r, > r")) > - (match_operand:VI 3 "register_operand" " 0, 0, vr, vr, > vr")) > - (match_operand:VI 4 "register_operand" " vr, vr, 0, 0, > vr")) > - (match_operand:VI 5 "vector_undef_operand" " vu, vu, vu, vu, > vu")))] > - "TARGET_VECTOR" > - "@ > - v<madd_nmsub>.vx\t%0,%2,%4%p1 > - v<madd_nmsub>.vx\t%0,%2,%4%p1 > - v<macc_nmsac>.vx\t%0,%2,%3%p1 > - v<macc_nmsac>.vx\t%0,%2,%3%p1 > - vmv.v.v\t%0,%4\;v<macc_nmsac>.vx\t%0,%2,%3%p1" > - [(set_attr "type" "vimuladd") > - (set_attr "mode" "<MODE>")]) > - > (define_insn "*pred_<madd_nmsub><mode>_scalar" > [(set (match_operand:VI 0 "register_operand" "=vd, vr, ?&vr") > (if_then_else:VI > @@ -4486,7 +4433,7 @@ > (match_operand:<VEL> 2 "reg_or_int_operand")) > (match_operand:VI_D 3 "register_operand")) > (match_operand:VI_D 4 "register_operand")) > - (match_operand:VI_D 5 "vector_merge_operand")))] > + (match_operand:VI_D 5 "register_operand")))] > "TARGET_VECTOR" > { > if (riscv_vector::sew64_scalar_helper ( > @@ -4504,35 +4451,6 @@ > DONE; > }) > > -(define_insn "*pred_mul_<optab><mode>_undef_merge_extended_scalar" > - [(set (match_operand:VI_D 0 "register_operand" "=vd, vr, vd, > vr, ?&vr") > - (if_then_else:VI_D > - (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, > vm,Wc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK, rK, > rK, rK") > - (match_operand 7 "const_int_operand" " i, i, i, > i, i") > - (match_operand 8 "const_int_operand" " i, i, i, > i, i") > - (match_operand 9 "const_int_operand" " i, i, i, > i, i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > - (plus_minus:VI_D > - (mult:VI_D > - (vec_duplicate:VI_D > - (sign_extend:<VEL> > - (match_operand:<VSUBEL> 2 "register_operand" " r, r, r, > r, r"))) > - 
(match_operand:VI_D 3 "register_operand" " 0, 0, vr, > vr, vr")) > - (match_operand:VI_D 4 "register_operand" " vr, vr, 0, > 0, vr")) > - (match_operand:VI_D 5 "vector_undef_operand" " vu, vu, vu, > vu, vu")))] > - "TARGET_VECTOR" > - "@ > - v<madd_nmsub>.vx\t%0,%2,%4%p1 > - v<madd_nmsub>.vx\t%0,%2,%4%p1 > - v<macc_nmsac>.vx\t%0,%2,%3%p1 > - v<macc_nmsac>.vx\t%0,%2,%3%p1 > - vmv.v.v\t%0,%4\;v<macc_nmsac>.vx\t%0,%2,%3%p1" > - [(set_attr "type" "vimuladd") > - (set_attr "mode" "<MODE>")]) > - > (define_insn "*pred_<madd_nmsub><mode>_extended_scalar" > [(set (match_operand:VI_D 0 "register_operand" "=vd, vr, > ?&vr") > (if_then_else:VI_D > @@ -4653,123 +4571,123 @@ > ;; > ------------------------------------------------------------------------------- > > (define_insn "@pred_widen_mul_plus<su><mode>" > - [(set (match_operand:VWEXTI 0 "register_operand" "=&vr, > &vr") > + [(set (match_operand:VWEXTI 0 "register_operand" "=&vr") > (if_then_else:VWEXTI > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, > rK") > - (match_operand 7 "const_int_operand" " i, > i") > - (match_operand 8 "const_int_operand" " i, > i") > - (match_operand 9 "const_int_operand" " i, > i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > + (match_operand 5 "vector_length_operand" " rK") > + (match_operand 6 "const_int_operand" " i") > + (match_operand 7 "const_int_operand" " i") > + (match_operand 8 "const_int_operand" " i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (plus:VWEXTI > (mult:VWEXTI > (any_extend:VWEXTI > - (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, > vr")) > + (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr")) > (any_extend:VWEXTI > - (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, > vr"))) > - (match_operand:VWEXTI 2 "register_operand" " 0, > 0")) > - (match_operand:VWEXTI 5 "vector_merge_operand" " vu, > 0")))] > + (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr"))) > + (match_operand:VWEXTI 2 "register_operand" " 0")) > + (match_dup 2)))] > "TARGET_VECTOR" > "vwmacc<u>.vv\t%0,%3,%4%p1" > [(set_attr "type" "viwmuladd") > (set_attr "mode" "<V_DOUBLE_TRUNC>")]) > > (define_insn "@pred_widen_mul_plus<su><mode>_scalar" > - [(set (match_operand:VWEXTI 0 "register_operand" "=&vr, > &vr") > + [(set (match_operand:VWEXTI 0 "register_operand" "=&vr") > (if_then_else:VWEXTI > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, > rK") > - (match_operand 7 "const_int_operand" " i, > i") > - (match_operand 8 "const_int_operand" " i, > i") > - (match_operand 9 "const_int_operand" " i, > i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > + (match_operand 5 "vector_length_operand" " rK") > + (match_operand 6 "const_int_operand" " i") > + (match_operand 7 "const_int_operand" " i") > + (match_operand 8 "const_int_operand" " i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (plus:VWEXTI > (mult:VWEXTI > (any_extend:VWEXTI > (vec_duplicate:<V_DOUBLE_TRUNC> > - (match_operand:<VSUBEL> 3 "register_operand" " r, > r"))) > + (match_operand:<VSUBEL> 3 "register_operand" " > r"))) > (any_extend:VWEXTI > - (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, > vr"))) > - (match_operand:VWEXTI 2 "register_operand" " 0, > 0")) > - (match_operand:VWEXTI 5 "vector_merge_operand" " vu, > 0")))] > + (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr"))) > 
+ (match_operand:VWEXTI 2 "register_operand" " 0")) > + (match_dup 2)))] > "TARGET_VECTOR" > "vwmacc<u>.vx\t%0,%3,%4%p1" > [(set_attr "type" "viwmuladd") > (set_attr "mode" "<V_DOUBLE_TRUNC>")]) > > (define_insn "@pred_widen_mul_plussu<mode>" > - [(set (match_operand:VWEXTI 0 "register_operand" "=&vr, > &vr") > + [(set (match_operand:VWEXTI 0 "register_operand" "=&vr") > (if_then_else:VWEXTI > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, > rK") > - (match_operand 7 "const_int_operand" " i, > i") > - (match_operand 8 "const_int_operand" " i, > i") > - (match_operand 9 "const_int_operand" " i, > i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > + (match_operand 5 "vector_length_operand" " rK") > + (match_operand 6 "const_int_operand" " i") > + (match_operand 7 "const_int_operand" " i") > + (match_operand 8 "const_int_operand" " i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (plus:VWEXTI > (mult:VWEXTI > (sign_extend:VWEXTI > - (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, > vr")) > + (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr")) > (zero_extend:VWEXTI > - (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, > vr"))) > - (match_operand:VWEXTI 2 "register_operand" " 0, > 0")) > - (match_operand:VWEXTI 5 "vector_merge_operand" " vu, > 0")))] > + (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr"))) > + (match_operand:VWEXTI 2 "register_operand" " 0")) > + (match_dup 2)))] > "TARGET_VECTOR" > "vwmaccsu.vv\t%0,%3,%4%p1" > [(set_attr "type" "viwmuladd") > (set_attr "mode" "<V_DOUBLE_TRUNC>")]) > > (define_insn "@pred_widen_mul_plussu<mode>_scalar" > - [(set (match_operand:VWEXTI 0 "register_operand" "=&vr, > &vr") > + [(set (match_operand:VWEXTI 0 "register_operand" "=&vr") > (if_then_else:VWEXTI > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, > rK") > - (match_operand 7 "const_int_operand" " i, > i") > - (match_operand 8 "const_int_operand" " i, > i") > - (match_operand 9 "const_int_operand" " i, > i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > + (match_operand 5 "vector_length_operand" " rK") > + (match_operand 6 "const_int_operand" " i") > + (match_operand 7 "const_int_operand" " i") > + (match_operand 8 "const_int_operand" " i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (plus:VWEXTI > (mult:VWEXTI > (sign_extend:VWEXTI > (vec_duplicate:<V_DOUBLE_TRUNC> > - (match_operand:<VSUBEL> 3 "register_operand" " r, > r"))) > + (match_operand:<VSUBEL> 3 "register_operand" " > r"))) > (zero_extend:VWEXTI > - (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, > vr"))) > - (match_operand:VWEXTI 2 "register_operand" " 0, > 0")) > - (match_operand:VWEXTI 5 "vector_merge_operand" " vu, > 0")))] > + (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr"))) > + (match_operand:VWEXTI 2 "register_operand" " 0")) > + (match_dup 2)))] > "TARGET_VECTOR" > "vwmaccsu.vx\t%0,%3,%4%p1" > [(set_attr "type" "viwmuladd") > (set_attr "mode" "<V_DOUBLE_TRUNC>")]) > > (define_insn "@pred_widen_mul_plusus<mode>_scalar" > - [(set (match_operand:VWEXTI 0 "register_operand" "=&vr, > &vr") > + [(set (match_operand:VWEXTI 0 "register_operand" "=&vr") > (if_then_else:VWEXTI > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, > rK") > - (match_operand 7 
"const_int_operand" " i, > i") > - (match_operand 8 "const_int_operand" " i, > i") > - (match_operand 9 "const_int_operand" " i, > i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > + (match_operand 5 "vector_length_operand" " rK") > + (match_operand 6 "const_int_operand" " i") > + (match_operand 7 "const_int_operand" " i") > + (match_operand 8 "const_int_operand" " i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (plus:VWEXTI > (mult:VWEXTI > (zero_extend:VWEXTI > (vec_duplicate:<V_DOUBLE_TRUNC> > - (match_operand:<VSUBEL> 3 "register_operand" " r, > r"))) > + (match_operand:<VSUBEL> 3 "register_operand" " > r"))) > (sign_extend:VWEXTI > - (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, > vr"))) > - (match_operand:VWEXTI 2 "register_operand" " 0, > 0")) > - (match_operand:VWEXTI 5 "vector_merge_operand" " vu, > 0")))] > + (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr"))) > + (match_operand:VWEXTI 2 "register_operand" " 0")) > + (match_dup 2)))] > "TARGET_VECTOR" > "vwmaccus.vx\t%0,%3,%4%p1" > [(set_attr "type" "viwmuladd") > @@ -5119,7 +5037,7 @@ > (match_operand:VF 2 "register_operand") > (match_operand:VF 3 "register_operand")) > (match_operand:VF 4 "register_operand")) > - (match_operand:VF 5 "vector_merge_operand")))] > + (match_operand:VF 5 "register_operand")))] > "TARGET_VECTOR" > { > /* Swap the multiplication operands if the fallback value is the > @@ -5128,33 +5046,6 @@ > std::swap (operands[2], operands[3]); > }) > > -(define_insn "pred_mul_<optab><mode>_undef_merge" > - [(set (match_operand:VF 0 "register_operand" "=vd, vr, vd, vr, > ?&vr") > - (if_then_else:VF > - (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, > vm,Wc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK, rK, rK, > rK") > - (match_operand 7 "const_int_operand" " i, i, i, i, > i") > - (match_operand 8 "const_int_operand" " i, i, i, i, > i") > - (match_operand 9 "const_int_operand" " i, i, i, i, > i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > - (plus_minus:VF > - (mult:VF > - (match_operand:VF 2 "register_operand" " %0, 0, vr, vr, > vr") > - (match_operand:VF 3 "register_operand" " vr, vr, vr, vr, > vr")) > - (match_operand:VF 4 "register_operand" " vr, vr, 0, 0, > vr")) > - (match_operand:VF 5 "vector_undef_operand" " vu, vu, vu, vu, > vu")))] > - "TARGET_VECTOR" > - "@ > - vf<madd_nmsub>.vv\t%0,%3,%4%p1 > - vf<madd_nmsub>.vv\t%0,%3,%4%p1 > - vf<macc_nmsac>.vv\t%0,%2,%3%p1 > - vf<macc_nmsac>.vv\t%0,%2,%3%p1 > - vmv.v.v\t%0,%4\;vf<macc_nmsac>.vv\t%0,%2,%3%p1" > - [(set_attr "type" "vfmuladd") > - (set_attr "mode" "<MODE>")]) > - > (define_insn "*pred_<madd_nmsub><mode>" > [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr") > (if_then_else:VF > @@ -5267,38 +5158,10 @@ > (match_operand:<VEL> 2 "register_operand")) > (match_operand:VF 3 "register_operand")) > (match_operand:VF 4 "register_operand")) > - (match_operand:VF 5 "vector_merge_operand")))] > + (match_operand:VF 5 "register_operand")))] > "TARGET_VECTOR" > {}) > > -(define_insn "*pred_mul_<optab><mode>_undef_merge_scalar" > - [(set (match_operand:VF 0 "register_operand" "=vd, vr, vd, vr, > ?&vr") > - (if_then_else:VF > - (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, > vm,Wc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK, rK, rK, > rK") > - (match_operand 7 "const_int_operand" " i, i, i, i, > i") > - (match_operand 8 "const_int_operand" " i, i, i, i, > i") > - (match_operand 9 
"const_int_operand" " i, i, i, i, > i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > - (plus_minus:VF > - (mult:VF > - (vec_duplicate:VF > - (match_operand:<VEL> 2 "register_operand" " f, f, f, f, > f")) > - (match_operand:VF 3 "register_operand" " 0, 0, vr, vr, > vr")) > - (match_operand:VF 4 "register_operand" " vr, vr, 0, 0, > vr")) > - (match_operand:VF 5 "vector_undef_operand" " vu, vu, vu, vu, > vu")))] > - "TARGET_VECTOR" > - "@ > - vf<madd_nmsub>.vf\t%0,%2,%4%p1 > - vf<madd_nmsub>.vf\t%0,%2,%4%p1 > - vf<macc_nmsac>.vf\t%0,%2,%3%p1 > - vf<macc_nmsac>.vf\t%0,%2,%3%p1 > - vmv.v.v\t%0,%4\;vf<macc_nmsac>.vf\t%0,%2,%3%p1" > - [(set_attr "type" "vfmuladd") > - (set_attr "mode" "<MODE>")]) > - > (define_insn "*pred_<madd_nmsub><mode>_scalar" > [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr") > (if_then_else:VF > @@ -5413,7 +5276,7 @@ > (mult:VF > (match_operand:VF 2 "register_operand") > (match_operand:VF 3 "register_operand")))) > - (match_operand:VF 5 "vector_merge_operand")))] > + (match_operand:VF 5 "register_operand")))] > "TARGET_VECTOR" > { > /* Swap the multiplication operands if the fallback value is the > @@ -5422,34 +5285,6 @@ > std::swap (operands[2], operands[3]); > }) > > -(define_insn "pred_neg_mul_<optab><mode>_undef_merge" > - [(set (match_operand:VF 0 "register_operand" "=vd, vr, vd, vr, > ?&vr") > - (if_then_else:VF > - (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, > vm,Wc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK, rK, rK, > rK") > - (match_operand 7 "const_int_operand" " i, i, i, i, > i") > - (match_operand 8 "const_int_operand" " i, i, i, i, > i") > - (match_operand 9 "const_int_operand" " i, i, i, i, > i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > - (neg:VF > - (plus_minus:VF > - (match_operand:VF 4 "register_operand" " vr, vr, 0, 0, > vr") > - (mult:VF > - (match_operand:VF 2 "register_operand" " %0, 0, vr, vr, > vr") > - (match_operand:VF 3 "register_operand" " vr, vr, vr, vr, > vr")))) > - (match_operand:VF 5 "vector_undef_operand" " vu, vu, vu, vu, > vu")))] > - "TARGET_VECTOR" > - "@ > - vf<nmadd_msub>.vv\t%0,%3,%4%p1 > - vf<nmadd_msub>.vv\t%0,%3,%4%p1 > - vf<nmacc_msac>.vv\t%0,%2,%3%p1 > - vf<nmacc_msac>.vv\t%0,%2,%3%p1 > - vmv.v.v\t%0,%4\;vf<nmacc_msac>.vv\t%0,%2,%3%p1" > - [(set_attr "type" "vfmuladd") > - (set_attr "mode" "<MODE>")]) > - > (define_insn "*pred_<nmadd_msub><mode>" > [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr") > (if_then_else:VF > @@ -5566,39 +5401,10 @@ > (vec_duplicate:VF > (match_operand:<VEL> 2 "register_operand")) > (match_operand:VF 3 "register_operand")))) > - (match_operand:VF 5 "vector_merge_operand")))] > + (match_operand:VF 5 "register_operand")))] > "TARGET_VECTOR" > {}) > > -(define_insn "*pred_neg_mul_<optab><mode>_undef_merge_scalar" > - [(set (match_operand:VF 0 "register_operand" "=vd, vr, vd, vr, > ?&vr") > - (if_then_else:VF > - (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" " vm,Wc1, > vm,Wc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, rK, rK, rK, > rK") > - (match_operand 7 "const_int_operand" " i, i, i, i, > i") > - (match_operand 8 "const_int_operand" " i, i, i, i, > i") > - (match_operand 9 "const_int_operand" " i, i, i, i, > i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > - (neg:VF > - (plus_minus:VF > - (match_operand:VF 4 "register_operand" " vr, vr, 0, > 0, vr") > - (mult:VF > - (vec_duplicate:VF > - (match_operand:<VEL> 2 
"register_operand" " f, f, f, > f, f")) > - (match_operand:VF 3 "register_operand" " 0, 0, vr, > vr, vr")))) > - (match_operand:VF 5 "vector_undef_operand" " vu, vu, vu, vu, > vu")))] > - "TARGET_VECTOR" > - "@ > - vf<nmadd_msub>.vf\t%0,%2,%4%p1 > - vf<nmadd_msub>.vf\t%0,%2,%4%p1 > - vf<nmacc_msac>.vf\t%0,%2,%3%p1 > - vf<nmacc_msac>.vf\t%0,%2,%3%p1 > - vmv.v.v\t%0,%4\;vf<nmacc_msac>.vf\t%0,%2,%3%p1" > - [(set_attr "type" "vfmuladd") > - (set_attr "mode" "<MODE>")]) > - > (define_insn "*pred_<nmadd_msub><mode>_scalar" > [(set (match_operand:VF 0 "register_operand" "=vd, vr, ?&vr") > (if_then_else:VF > @@ -5876,100 +5682,100 @@ > ;; > ------------------------------------------------------------------------------- > > (define_insn "@pred_widen_mul_<optab><mode>" > - [(set (match_operand:VWEXTF 0 "register_operand" "=&vr, > &vr") > + [(set (match_operand:VWEXTF 0 "register_operand" "=&vr") > (if_then_else:VWEXTF > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, > rK") > - (match_operand 7 "const_int_operand" " i, > i") > - (match_operand 8 "const_int_operand" " i, > i") > - (match_operand 9 "const_int_operand" " i, > i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > + (match_operand 5 "vector_length_operand" " rK") > + (match_operand 6 "const_int_operand" " i") > + (match_operand 7 "const_int_operand" " i") > + (match_operand 8 "const_int_operand" " i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (plus_minus:VWEXTF > - (match_operand:VWEXTF 2 "register_operand" " 0, > 0") > + (match_operand:VWEXTF 2 "register_operand" " 0") > (mult:VWEXTF > (float_extend:VWEXTF > - (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr, > vr")) > + (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " vr")) > (float_extend:VWEXTF > - (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, > vr")))) > - (match_operand:VWEXTF 5 "vector_merge_operand" " vu, > 0")))] > + (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr")))) > + (match_dup 2)))] > "TARGET_VECTOR" > "vfw<macc_nmsac>.vv\t%0,%3,%4%p1" > [(set_attr "type" "vfwmuladd") > (set_attr "mode" "<V_DOUBLE_TRUNC>")]) > > (define_insn "@pred_widen_mul_<optab><mode>_scalar" > - [(set (match_operand:VWEXTF 0 "register_operand" "=&vr, > &vr") > + [(set (match_operand:VWEXTF 0 "register_operand" "=&vr") > (if_then_else:VWEXTF > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " rK, > rK") > - (match_operand 7 "const_int_operand" " i, > i") > - (match_operand 8 "const_int_operand" " i, > i") > - (match_operand 9 "const_int_operand" " i, > i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > + (match_operand 5 "vector_length_operand" " rK") > + (match_operand 6 "const_int_operand" " i") > + (match_operand 7 "const_int_operand" " i") > + (match_operand 8 "const_int_operand" " i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (plus_minus:VWEXTF > - (match_operand:VWEXTF 2 "register_operand" " 0, > 0") > + (match_operand:VWEXTF 2 "register_operand" " 0") > (mult:VWEXTF > (float_extend:VWEXTF > (vec_duplicate:<V_DOUBLE_TRUNC> > - (match_operand:<VSUBEL> 3 "register_operand" " f, > f"))) > + (match_operand:<VSUBEL> 3 "register_operand" " > f"))) > (float_extend:VWEXTF > - (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " vr, > vr")))) > - (match_operand:VWEXTF 5 "vector_merge_operand" " vu, > 0")))] > + 
(match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr")))) > + (match_dup 2)))] > "TARGET_VECTOR" > "vfw<macc_nmsac>.vf\t%0,%3,%4%p1" > [(set_attr "type" "vfwmuladd") > (set_attr "mode" "<V_DOUBLE_TRUNC>")]) > > (define_insn "@pred_widen_neg_mul_<optab><mode>" > - [(set (match_operand:VWEXTF 0 "register_operand" > "=&vr, &vr") > + [(set (match_operand:VWEXTF 0 "register_operand" > "=&vr") > (if_then_else:VWEXTF > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " > rK, rK") > - (match_operand 7 "const_int_operand" " > i, i") > - (match_operand 8 "const_int_operand" " > i, i") > - (match_operand 9 "const_int_operand" " > i, i") > + [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1") > + (match_operand 5 "vector_length_operand" " > rK") > + (match_operand 6 "const_int_operand" " > i") > + (match_operand 7 "const_int_operand" " > i") > + (match_operand 8 "const_int_operand" " > i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (neg:VWEXTF > (plus_minus:VWEXTF > - (match_operand:VWEXTF 2 "register_operand" " > 0, 0") > + (match_operand:VWEXTF 2 "register_operand" " > 0") > (mult:VWEXTF > (float_extend:VWEXTF > - (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " > vr, vr")) > + (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand" " > vr")) > (float_extend:VWEXTF > - (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr, vr"))))) > - (match_operand:VWEXTF 5 "vector_merge_operand" " > vu, 0")))] > + (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr"))))) > + (match_dup 2)))] > "TARGET_VECTOR" > "vfw<nmacc_msac>.vv\t%0,%3,%4%p1" > [(set_attr "type" "vfwmuladd") > (set_attr "mode" "<V_DOUBLE_TRUNC>")]) > > (define_insn "@pred_widen_neg_mul_<optab><mode>_scalar" > - [(set (match_operand:VWEXTF 0 "register_operand" > "=&vr, &vr") > + [(set (match_operand:VWEXTF 0 "register_operand" > "=&vr") > (if_then_else:VWEXTF > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1,vmWc1") > - (match_operand 6 "vector_length_operand" " > rK, rK") > - (match_operand 7 "const_int_operand" " > i, i") > - (match_operand 8 "const_int_operand" " > i, i") > - (match_operand 9 "const_int_operand" " > i, i") > + [(match_operand:<VM> 1 "vector_mask_operand" > "vmWc1") > + (match_operand 5 "vector_length_operand" " > rK") > + (match_operand 6 "const_int_operand" " > i") > + (match_operand 7 "const_int_operand" " > i") > + (match_operand 8 "const_int_operand" " > i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (neg:VWEXTF > (plus_minus:VWEXTF > - (match_operand:VWEXTF 2 "register_operand" " > 0, 0") > + (match_operand:VWEXTF 2 "register_operand" " > 0") > (mult:VWEXTF > (float_extend:VWEXTF > (vec_duplicate:<V_DOUBLE_TRUNC> > - (match_operand:<VSUBEL> 3 "register_operand" " > f, f"))) > + (match_operand:<VSUBEL> 3 "register_operand" " > f"))) > (float_extend:VWEXTF > - (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr, vr"))))) > - (match_operand:VWEXTF 5 "vector_merge_operand" " > vu, 0")))] > + (match_operand:<V_DOUBLE_TRUNC> 4 "register_operand" " > vr"))))) > + (match_dup 2)))] > "TARGET_VECTOR" > "vfw<nmacc_msac>.vf\t%0,%3,%4%p1" > [(set_attr "type" "vfwmuladd") > @@ -6001,19 +5807,19 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_cmp<mode>" > - [(set (match_operand:<VM> 0 "register_operand" "=vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "signed_order_operator" > - [(match_operand:VF 4 "register_operand" " vr") > - (match_operand:VF 5 "register_operand" " vr")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + [(match_operand:VF 4 "register_operand" " vr, vr") > + (match_operand:VF 5 "register_operand" " vr, vr")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vmf%B3.vv\t%0,%4,%5%p1" > [(set_attr "type" "vfcmp") > @@ -6021,19 +5827,19 @@ > > ;; We use early-clobber for source LMUL > dest LMUL. > (define_insn "*pred_cmp<mode>_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "signed_order_operator" > - [(match_operand:VF 4 "register_operand" " vr") > - (match_operand:VF 5 "register_operand" " vr")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + [(match_operand:VF 4 "register_operand" " vr, vr") > + (match_operand:VF 5 "register_operand" " vr, vr")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vmf%B3.vv\t%0,%4,%5%p1" > [(set_attr "type" "vfcmp") > @@ -6059,20 +5865,20 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_cmp<mode>_scalar" > - [(set (match_operand:<VM> 0 "register_operand" "=vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "signed_order_operator" > - [(match_operand:VF 4 "register_operand" " vr") > + [(match_operand:VF 4 "register_operand" " vr, vr") > (vec_duplicate:VF > - (match_operand:<VEL> 5 "register_operand" " f"))]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " f, > f"))]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vmf%B3.vf\t%0,%4,%5%p1" > [(set_attr "type" "vfcmp") > @@ -6080,20 +5886,20 @@ > > ;; We use early-clobber for source LMUL > dest LMUL. > (define_insn "*pred_cmp<mode>_scalar_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "signed_order_operator" > - [(match_operand:VF 4 "register_operand" " vr") > + [(match_operand:VF 4 "register_operand" " vr, vr") > (vec_duplicate:VF > - (match_operand:<VEL> 5 "register_operand" " f"))]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " f, > f"))]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vmf%B3.vf\t%0,%4,%5%p1" > [(set_attr "type" "vfcmp") > @@ -6119,20 +5925,20 @@ > > ;; We don't use early-clobber for LMUL <= 1 to get better codegen. 
> (define_insn "*pred_eqne<mode>_scalar" > - [(set (match_operand:<VM> 0 "register_operand" "=vr") > + [(set (match_operand:<VM> 0 "register_operand" "=vr, vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "equality_operator" > [(vec_duplicate:VF > - (match_operand:<VEL> 5 "register_operand" " f")) > - (match_operand:VF 4 "register_operand" " vr")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " f, f")) > + (match_operand:VF 4 "register_operand" " vr, vr")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_le (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vmf%B3.vf\t%0,%4,%5%p1" > [(set_attr "type" "vfcmp") > @@ -6140,20 +5946,20 @@ > > ;; We use early-clobber for source LMUL > dest LMUL. > (define_insn "*pred_eqne<mode>_scalar_narrow" > - [(set (match_operand:<VM> 0 "register_operand" "=&vr") > + [(set (match_operand:<VM> 0 "register_operand" "=&vr, &vr") > (if_then_else:<VM> > (unspec:<VM> > - [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1") > - (match_operand 6 "vector_length_operand" " rK") > - (match_operand 7 "const_int_operand" " i") > - (match_operand 8 "const_int_operand" " i") > + [(match_operand:<VM> 1 "vector_mask_operand" "vmWc1,vmWc1") > + (match_operand 6 "vector_length_operand" " rK, rK") > + (match_operand 7 "const_int_operand" " i, i") > + (match_operand 8 "const_int_operand" " i, i") > (reg:SI VL_REGNUM) > (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > (match_operator:<VM> 3 "equality_operator" > [(vec_duplicate:VF > - (match_operand:<VEL> 5 "register_operand" " f")) > - (match_operand:VF 4 "register_operand" " vr")]) > - (match_operand:<VM> 2 "vector_merge_operand" " 0vu")))] > + (match_operand:<VEL> 5 "register_operand" " f, f")) > + (match_operand:VF 4 "register_operand" " vr, vr")]) > + (match_operand:<VM> 2 "vector_merge_operand" " vu, > 0")))] > "TARGET_VECTOR && known_gt (GET_MODE_SIZE (<MODE>mode), > BYTES_PER_RISCV_VECTOR)" > "vmf%B3.vf\t%0,%4,%5%p1" > [(set_attr "type" "vfcmp") > @@ -6169,18 +5975,17 @@ > (define_insn "@pred_merge<mode>_scalar" > [(set (match_operand:VF 0 "register_operand" "=vd,vd") > (if_then_else:VF > - (match_operand:<VM> 4 "register_operand" " vm,vm") > - (if_then_else:VF > - (unspec:<VM> > - [(match_dup 4) > - (match_operand 5 "vector_length_operand" " rK,rK") > - (match_operand 6 "const_int_operand" " i, i") > - (match_operand 7 "const_int_operand" " i, i") > - (reg:SI VL_REGNUM) > - (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (unspec:<VM> > + [(match_operand 5 "vector_length_operand" " rK,rK") > + (match_operand 6 "const_int_operand" " i, i") > + (match_operand 7 "const_int_operand" " i, i") > + (reg:SI VL_REGNUM) > + (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE) > + (vec_merge:VF > (vec_duplicate:VF > (match_operand:<VEL> 3 "register_operand" " f, f")) > - (match_operand:VF 2 "register_operand" " vr,vr")) > + (match_operand:VF 2 "register_operand" " vr,vr") > + (match_operand:<VM> 4 "register_operand" " vm,vm")) > 
(match_operand:VF 1 "vector_merge_operand" " vu, 0")))] > "TARGET_VECTOR" > "vfmerge.vfm\t%0,%2,%3,%4" > diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/bug-1.C > b/gcc/testsuite/g++.target/riscv/rvv/base/bug-1.C > new file mode 100644 > index 00000000000..c1070f9eb16 > --- /dev/null > +++ b/gcc/testsuite/g++.target/riscv/rvv/base/bug-1.C > @@ -0,0 +1,40 @@ > +/* { dg-do compile } */ > +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3" } */ > + > +#include "riscv_vector.h" > +template < class T > > +bool __attribute__(()) check(T *, T *, size_t ); > +int main() { > + size_t var_44 = 132u; > + float var_43[41]; > + int16_t var_32[41]; > + int16_t var_31[41]; > + float var_30[41]; > + float var_29[41]; > + float var_28[41]; > + float var_27[41]; > + float var_26[41]; > + float var_25[41]; > + float var_23 = (2732844647u); > + int16_t var_22 = 23867; > + vint16m4_t var_14 = __riscv_vle16_v_i16m4(var_32, 41); > + vint16m4_t var_15 = __riscv_vle16_v_i16m4(var_31, 41); > + vfloat32m8_t var_16 = __riscv_vle32_v_f32m8(var_30, 33); > + vfloat32m8_t var_17 = __riscv_vle32_v_f32m8(var_29, 33); > + vfloat32m8_t var_18 = __riscv_vle32_v_f32m8(var_28, 33); > + vfloat32m8_t var_19 = __riscv_vle32_v_f32m8(var_27, 41); > + vfloat32m8_t var_20 = __riscv_vle32_v_f32m8(var_26, 41); > + vint16m4_t var_8 = __riscv_vmin_vv_i16m4(var_14, var_15, 41); > + vfloat32m8_t var_7 = __riscv_vfmsac_vv_f32m8(var_16, var_17, var_18, 33); > + vbool4_t var_6 = __riscv_vmsle_vx_i16m4_b4(var_8, var_22, 41); > + float var_5 = __riscv_vfmv_f_s_f32m8_f32(var_7); > + vfloat32m8_t var_4 = __riscv_vfnmsac_vf_f32m8_m(var_6, var_19, var_23, > var_20, 41); > + vfloat32m8_t var_0 = __riscv_vmerge_vvm_f32m8(var_4, var_4, var_6,41); > + vfloat32m8_t var_1 = __riscv_vfmsub_vf_f32m8(var_0, var_5, var_4, 33); > + __riscv_vse32_v_f32m8(var_25, var_1, 33); > + if (!check(var_25, var_43, var_44)) > + ; > + return 0; > +} > + > +/* { dg-final { scan-assembler-not {vmv} } } */ > diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-4.c > b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-4.c > index e16db932f15..1b0afed037a 100644 > --- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-4.c > +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-4.c > @@ -24,4 +24,3 @@ void f2 (void * in, void *out, int32_t x) > __riscv_vsm_v_b32 (out, m4, 4); > } > > -/* { dg-final { scan-assembler-not {vmv} } } */ > diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-6.c > b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-6.c > index ad5441f3404..384e2301a69 100644 > --- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-6.c > +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vv_constraint-6.c > @@ -24,4 +24,4 @@ void f2 (void * in, void *out, int32_t x) > __riscv_vsm_v_b32 (out, m4, 4); > } > > -/* { dg-final { scan-assembler-times {vmv} 2 } } */ > + > diff --git > a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-127.c > b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-127.c > index 3933c35f4ce..a353a7ab2d5 100644 > --- a/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-127.c > +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/binop_vx_constraint-127.c > @@ -24,4 +24,4 @@ void f2 (void * in, void *out, int32_t x) > __riscv_vsm_v_b32 (out, m4, 4); > } > > -/* { dg-final { scan-assembler-times {vmv} 2 } } */ > + > diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/bug-2.c > b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-2.c > 
new file mode 100644 > index 00000000000..b779516cd2f > --- /dev/null > +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/bug-2.c > @@ -0,0 +1,86 @@ > +/* { dg-do compile } */ > +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O2" } */ > + > +#include "riscv_vector.h" > + > +int f0() { > + float var_84[8]; > + float var_83[8]; > + float var_82[8]; > + uint32_t var_81[8]; > + float var_77[8]; > + uint32_t var_76[8]; > + uint32_t var_75[8]; > + uint32_t var_74[8]; > + uint32_t var_73[8]; > + uint64_t var_72[8]; > + double var_71[8]; > + double var_70[8]; > + double var_69[8]; > + double var_68[8]; > + double var_67[8]; > + double var_66[8]; > + int8_t var_65[8]; > + int8_t var_64[8]; > + float var_63[8]; > + double var_62[8]; > + uint8_t var_61[8]; > + uint8_t var_60[8]; > + double var_59[8]; > + double var_58[8]; > + double var_57[8]; > + double var_56[8]; > + float var_55[8]; > + float var_54[8]; > + size_t var_53 ; > + size_t var_52 ; > + size_t var_51 ; > + float var_50 = (3048723213u); > + uint32_t var_49 ; > + uint64_t var_48 ; > + vfloat32m4_t var_11 = __riscv_vle32_v_f32m4(var_84, 8); > + vfloat32m4_t var_12 = __riscv_vle32_v_f32m4(var_83, 8); > + vfloat32m4_t var_13 = __riscv_vle32_v_f32m4(var_82, 8); > + vuint32m4_t var_14 = __riscv_vle32_v_u32m4(var_81, 8); > + vfloat32m4_t var_19 = __riscv_vle32_v_f32m4(var_77, 8); > + vuint32m4_t var_20 = __riscv_vle32_v_u32m4(var_76, 8); > + vuint32m4_t var_21 = __riscv_vle32_v_u32m4(var_75, 8); > + vuint32m4_t var_23 = __riscv_vle32_v_u32m4(var_74, 18); > + vuint64m8_t var_25 = __riscv_vle64_v_u64m8(var_72, 18); > + vfloat64m8_t var_27 = __riscv_vle64_v_f64m8(var_71, 4); > + vfloat64m8_t var_28 = __riscv_vle64_v_f64m8(var_70, 4); > + vfloat64m8_t var_30 = __riscv_vle64_v_f64m8(var_68, 4); > + vfloat64m8_t var_31 = __riscv_vle64_v_f64m8(var_67, 4); > + vfloat64m8_t var_33 = __riscv_vle64_v_f64m8(var_66, 4); > + vint8m1_t var_34 = __riscv_vle8_v_i8m1(var_65, 4); > + vint8m1_t var_35 = __riscv_vle8_v_i8m1(var_64, 4); > + vfloat32m4_t var_37 = __riscv_vle32_v_f32m4(var_63, 8); > + vuint8m1_t var_39 = __riscv_vle8_v_u8m1(var_61, 8); > + vuint8m1_t var_40 = __riscv_vle8_v_u8m1(var_60, 8); > + vfloat64m8_t var_41 = __riscv_vle64_v_f64m8(var_59, 18); > + vfloat64m8_t var_42 = __riscv_vle64_v_f64m8(var_58, 18); > + vfloat64m8_t var_43 = __riscv_vle64_v_f64m8(var_57, 18); > + vfloat64m8_t var_45 = __riscv_vle64_v_f64m8(var_56, 18); > + vfloat32m4_t var_46 = __riscv_vle32_v_f32m4(var_55, 18); > + vfloat32m4_t var_47 = __riscv_vle32_v_f32m4(var_54, 18); > + vbool8_t var_10 = __riscv_vmsltu_vx_u32m4_b8(var_14, var_49, 8); > + vbool8_t var_18 = __riscv_vmsltu_vv_u32m4_b8(var_20, var_21, 8); > + vbool8_t var_22 = __riscv_vmsgeu_vx_u64m8_b8(var_25, var_48, 18); > + vbool8_t var_26 = __riscv_vmfne_vv_f64m8_b8(var_30, var_31, 4); > + vbool8_t var_32 = __riscv_vmsge_vv_i8m1_b8(var_34, var_35, 4); > + vbool8_t var_36 = __riscv_vmseq_vv_u8m1_b8(var_39, var_40, 8); > + vfloat64m8_t var_2 = __riscv_vslideup_vx_f64m8(var_41, var_42, var_52, 18); > + vbool8_t var_44 = __riscv_vmfne_vv_f32m4_b8(var_46, var_47, 18); > + vfloat32m4_t var_9 = __riscv_vfsgnj_vv_f32m4_mu(var_10, var_11, var_12, > var_13, 8); > + vfloat64m8_t var_0 = __riscv_vfmin_vv_f64m8_mu(var_44, var_2, var_2, > var_45, 18); > + vfloat32m4_t var_7 = __riscv_vfmax_vf_f32m4_mu(var_18, var_9, var_19, > var_50, 8); > + vfloat64m8_t var_1 = __riscv_vslidedown_vx_f64m8_tu(var_0, var_43, var_53, > 18); > + vfloat64m8_t var_4 = __riscv_vrgather_vx_f64m8_mu(var_32, var_0, var_33, > var_51, 4); > + vfloat64m8_t var_3 = 
__riscv_vfwnmsac_vv_f64m8_mu(var_36, var_1, var_7, > var_37, 4); > + vuint32m4_t var_6 = __riscv_vfncvt_xu_f_w_u32m4_mu(var_22, var_23, var_1, > 4); > + vfloat64m8_t var_5 = __riscv_vfsgnjx_vv_f64m8_mu(var_26, var_4, var_27, > var_28, 4); > + __riscv_vse64_v_f64m8(var_62, var_3, 4); > + __riscv_vse32_v_u32m4(var_73, var_6, 4); > + __riscv_vse64_v_f64m8(var_69, var_5, 4); > + return 0; > +} > -- > 2.36.3 >
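For readers skimming the diff rather than applying it: the user-visible effect of the use_ternop_insn and vector.md changes is that unmasked ternary intrinsics now pass the accumulator vd itself as the merge operand, so the separate *_undef_merge patterns (and the vmv.v.v they could emit to set up a merge value) are gone. The sketch below is not part of the patch; it is a hypothetical reduced example in the spirit of bug-1.C, assuming only riscv_vector.h and the standard __riscv_vfmacc_vv_f32m1 / __riscv_vle32 / __riscv_vse32 intrinsics, to show the kind of code the new scan-assembler-not {vmv} tests are guarding.

```c
/* Hypothetical reduced example (not from the patch), in the spirit of
   bug-1.C: with this fix, the fused multiply-add below should need no
   extra vmv.v.v to materialize a merge operand, since vd serves as both
   accumulator and merge value in the expanded RTL.  */
#include <stddef.h>
#include "riscv_vector.h"

void
fma_f32 (const float *a, const float *b, float *acc, size_t n)
{
  size_t vl = __riscv_vsetvl_e32m1 (n);
  vfloat32m1_t va = __riscv_vle32_v_f32m1 (a, vl);
  vfloat32m1_t vb = __riscv_vle32_v_f32m1 (b, vl);
  vfloat32m1_t vd = __riscv_vle32_v_f32m1 (acc, vl);
  /* vd = va * vb + vd, unmasked.  */
  vd = __riscv_vfmacc_vv_f32m1 (vd, va, vb, vl);
  __riscv_vse32_v_f32m1 (acc, vd, vl);
}
```

As I read the diff, that is also why the adapted binop_vv/vx constraint tests drop or relax their vmv scan lines: the merge operand is now tied to an existing register rather than to a synthesized undef value.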