On Mon, Dec 9, 2024 at 3:02 PM Richard Sandiford <richard.sandif...@arm.com> wrote: > > Prompted by Richard E's arm patch, this one removes the aarch64 > support for the vcond{,u} optabs. > > Tested on aarch64-linux-gnu. I'll push tomorrow if there are no > objections. There might still be some fallout from the transition > that we need to fix (haven't checked yet), but this code wouldn't > form part of the solution.
At least there is no testsuite fallout from the transition when I solved the last one with r15-2840-g7223c64745530d (I had run the full aarch64 testsuite with the vcond{,u} removed back then to double check). Thanks, Andrew Pinski > > Richard > > > gcc/ > * config/aarch64/aarch64-protos.h (aarch64_expand_sve_vcond): Delete. > * config/aarch64/aarch64-simd.md (<su><maxmin>v2di3): Expand into > separate vec_cmp and vcond_mask instructions, instead of using vcond. > (vcond<mode><mode>, vcond<v_cmp_mixed><mode>, vcondu<mode><mode>) > (vcondu<mode><v_cmp_mixed>): Delete. > * config/aarch64/aarch64-sve.md (vcond<SVE_ALL:mode><SVE_I:mode>) > (vcondu<SVE_ALL:mode><SVE_I:mode>, vcond<mode><v_fp_equiv>): Likewise. > * config/aarch64/aarch64.cc (aarch64_expand_sve_vcond): Likewise. > * config/aarch64/iterators.md (V_FP_EQUIV, v_fp_equiv, V_cmp_mixed) > (v_cmp_mixed): Likewise. > --- > gcc/config/aarch64/aarch64-protos.h | 1 - > gcc/config/aarch64/aarch64-simd.md | 126 +--------------------------- > gcc/config/aarch64/aarch64-sve.md | 58 ------------- > gcc/config/aarch64/aarch64.cc | 30 ------- > gcc/config/aarch64/iterators.md | 20 ----- > 5 files changed, 4 insertions(+), 231 deletions(-) > > diff --git a/gcc/config/aarch64/aarch64-protos.h > b/gcc/config/aarch64/aarch64-protos.h > index d058864e507..51951ae8002 100644 > --- a/gcc/config/aarch64/aarch64-protos.h > +++ b/gcc/config/aarch64/aarch64-protos.h > @@ -1094,7 +1094,6 @@ void aarch64_finish_ldpstp_peephole (rtx *, bool, > > void aarch64_expand_sve_vec_cmp_int (rtx, rtx_code, rtx, rtx); > bool aarch64_expand_sve_vec_cmp_float (rtx, rtx_code, rtx, rtx, bool); > -void aarch64_expand_sve_vcond (machine_mode, machine_mode, rtx *); > > bool aarch64_prepare_sve_int_fma (rtx *, rtx_code); > bool aarch64_prepare_sve_cond_int_fma (rtx *, rtx_code); > diff --git a/gcc/config/aarch64/aarch64-simd.md > b/gcc/config/aarch64/aarch64-simd.md > index fd6965d9e86..36b0e052e15 100644 > --- a/gcc/config/aarch64/aarch64-simd.md > +++ 
b/gcc/config/aarch64/aarch64-simd.md > @@ -1783,9 +1783,11 @@ (define_expand "<su><maxmin>v2di3" > gcc_unreachable (); > } > > + rtx mask = gen_reg_rtx (V2DImode); > cmp_fmt = gen_rtx_fmt_ee (cmp_operator, V2DImode, operands[1], > operands[2]); > - emit_insn (gen_vcondv2div2di (operands[0], operands[1], > - operands[2], cmp_fmt, operands[1], operands[2])); > + emit_insn (gen_vec_cmpv2div2di (mask, cmp_fmt, operands[1], operands[2])); > + emit_insn (gen_vcond_mask_v2div2di (operands[0], operands[1], > + operands[2], mask)); > DONE; > }) > > @@ -4199,126 +4201,6 @@ (define_expand "vec_cmpu<mode><mode>" > DONE; > }) > > -(define_expand "vcond<mode><mode>" > - [(set (match_operand:VALLDI 0 "register_operand") > - (if_then_else:VALLDI > - (match_operator 3 "comparison_operator" > - [(match_operand:VALLDI 4 "register_operand") > - (match_operand:VALLDI 5 "nonmemory_operand")]) > - (match_operand:VALLDI 1 "nonmemory_operand") > - (match_operand:VALLDI 2 "nonmemory_operand")))] > - "TARGET_SIMD" > -{ > - rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode); > - enum rtx_code code = GET_CODE (operands[3]); > - > - /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert > - it as well as switch operands 1/2 in order to avoid the additional > - NOT instruction. 
*/ > - if (code == NE) > - { > - operands[3] = gen_rtx_fmt_ee (EQ, GET_MODE (operands[3]), > - operands[4], operands[5]); > - std::swap (operands[1], operands[2]); > - } > - emit_insn (gen_vec_cmp<mode><v_int_equiv> (mask, operands[3], > - operands[4], operands[5])); > - emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1], > - operands[2], mask)); > - > - DONE; > -}) > - > -(define_expand "vcond<v_cmp_mixed><mode>" > - [(set (match_operand:<V_cmp_mixed> 0 "register_operand") > - (if_then_else:<V_cmp_mixed> > - (match_operator 3 "comparison_operator" > - [(match_operand:VDQF_COND 4 "register_operand") > - (match_operand:VDQF_COND 5 "nonmemory_operand")]) > - (match_operand:<V_cmp_mixed> 1 "nonmemory_operand") > - (match_operand:<V_cmp_mixed> 2 "nonmemory_operand")))] > - "TARGET_SIMD" > -{ > - rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode); > - enum rtx_code code = GET_CODE (operands[3]); > - > - /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert > - it as well as switch operands 1/2 in order to avoid the additional > - NOT instruction. 
*/ > - if (code == NE) > - { > - operands[3] = gen_rtx_fmt_ee (EQ, GET_MODE (operands[3]), > - operands[4], operands[5]); > - std::swap (operands[1], operands[2]); > - } > - emit_insn (gen_vec_cmp<mode><v_int_equiv> (mask, operands[3], > - operands[4], operands[5])); > - emit_insn (gen_vcond_mask_<v_cmp_mixed><v_int_equiv> ( > - operands[0], operands[1], > - operands[2], mask)); > - > - DONE; > -}) > - > -(define_expand "vcondu<mode><mode>" > - [(set (match_operand:VSDQ_I_DI 0 "register_operand") > - (if_then_else:VSDQ_I_DI > - (match_operator 3 "comparison_operator" > - [(match_operand:VSDQ_I_DI 4 "register_operand") > - (match_operand:VSDQ_I_DI 5 "nonmemory_operand")]) > - (match_operand:VSDQ_I_DI 1 "nonmemory_operand") > - (match_operand:VSDQ_I_DI 2 "nonmemory_operand")))] > - "TARGET_SIMD" > -{ > - rtx mask = gen_reg_rtx (<MODE>mode); > - enum rtx_code code = GET_CODE (operands[3]); > - > - /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert > - it as well as switch operands 1/2 in order to avoid the additional > - NOT instruction. 
*/ > - if (code == NE) > - { > - operands[3] = gen_rtx_fmt_ee (EQ, GET_MODE (operands[3]), > - operands[4], operands[5]); > - std::swap (operands[1], operands[2]); > - } > - emit_insn (gen_vec_cmp<mode><mode> (mask, operands[3], > - operands[4], operands[5])); > - emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1], > - operands[2], mask)); > - DONE; > -}) > - > -(define_expand "vcondu<mode><v_cmp_mixed>" > - [(set (match_operand:VDQF 0 "register_operand") > - (if_then_else:VDQF > - (match_operator 3 "comparison_operator" > - [(match_operand:<V_cmp_mixed> 4 "register_operand") > - (match_operand:<V_cmp_mixed> 5 "nonmemory_operand")]) > - (match_operand:VDQF 1 "nonmemory_operand") > - (match_operand:VDQF 2 "nonmemory_operand")))] > - "TARGET_SIMD" > -{ > - rtx mask = gen_reg_rtx (<V_INT_EQUIV>mode); > - enum rtx_code code = GET_CODE (operands[3]); > - > - /* NE is handled as !EQ in vec_cmp patterns, we can explicitly invert > - it as well as switch operands 1/2 in order to avoid the additional > - NOT instruction. */ > - if (code == NE) > - { > - operands[3] = gen_rtx_fmt_ee (EQ, GET_MODE (operands[3]), > - operands[4], operands[5]); > - std::swap (operands[1], operands[2]); > - } > - emit_insn (gen_vec_cmp<v_cmp_mixed><v_cmp_mixed> ( > - mask, operands[3], > - operands[4], operands[5])); > - emit_insn (gen_vcond_mask_<mode><v_int_equiv> (operands[0], operands[1], > - operands[2], mask)); > - DONE; > -}) > - > ;; Patterns for AArch64 SIMD Intrinsics. > > ;; Lane extraction with sign extension to general purpose register. 
> diff --git a/gcc/config/aarch64/aarch64-sve.md > b/gcc/config/aarch64/aarch64-sve.md > index 9afd11d3476..04326bca0e7 100644 > --- a/gcc/config/aarch64/aarch64-sve.md > +++ b/gcc/config/aarch64/aarch64-sve.md > @@ -124,7 +124,6 @@ > ;; > ;; == Comparisons and selects > ;; ---- [INT,FP] Select based on predicates > -;; ---- [INT,FP] Compare and select > ;; ---- [INT] Comparisons > ;; ---- [INT] While tests > ;; ---- [FP] Direct comparisons > @@ -8062,63 +8061,6 @@ (define_insn "@aarch64_sel_dup<mode>" > } > ) > > -;; ------------------------------------------------------------------------- > -;; ---- [INT,FP] Compare and select > -;; ------------------------------------------------------------------------- > -;; The patterns in this section are synthetic. > -;; ------------------------------------------------------------------------- > - > -;; Integer (signed) vcond. Don't enforce an immediate range here, since it > -;; depends on the comparison; leave it to aarch64_expand_sve_vcond instead. > -(define_expand "vcond<SVE_ALL:mode><SVE_I:mode>" > - [(set (match_operand:SVE_ALL 0 "register_operand") > - (if_then_else:SVE_ALL > - (match_operator 3 "comparison_operator" > - [(match_operand:SVE_I 4 "register_operand") > - (match_operand:SVE_I 5 "nonmemory_operand")]) > - (match_operand:SVE_ALL 1 "nonmemory_operand") > - (match_operand:SVE_ALL 2 "nonmemory_operand")))] > - "TARGET_SVE && <SVE_ALL:container_bits> == <SVE_I:container_bits>" > - { > - aarch64_expand_sve_vcond (<SVE_ALL:MODE>mode, <SVE_I:MODE>mode, > operands); > - DONE; > - } > -) > - > -;; Integer vcondu. Don't enforce an immediate range here, since it > -;; depends on the comparison; leave it to aarch64_expand_sve_vcond instead. 
> -(define_expand "vcondu<SVE_ALL:mode><SVE_I:mode>" > - [(set (match_operand:SVE_ALL 0 "register_operand") > - (if_then_else:SVE_ALL > - (match_operator 3 "comparison_operator" > - [(match_operand:SVE_I 4 "register_operand") > - (match_operand:SVE_I 5 "nonmemory_operand")]) > - (match_operand:SVE_ALL 1 "nonmemory_operand") > - (match_operand:SVE_ALL 2 "nonmemory_operand")))] > - "TARGET_SVE && <SVE_ALL:container_bits> == <SVE_I:container_bits>" > - { > - aarch64_expand_sve_vcond (<SVE_ALL:MODE>mode, <SVE_I:MODE>mode, > operands); > - DONE; > - } > -) > - > -;; Floating-point vcond. All comparisons except FCMUO allow a zero operand; > -;; aarch64_expand_sve_vcond handles the case of an FCMUO with zero. > -(define_expand "vcond<mode><v_fp_equiv>" > - [(set (match_operand:SVE_FULL_HSD 0 "register_operand") > - (if_then_else:SVE_FULL_HSD > - (match_operator 3 "comparison_operator" > - [(match_operand:<V_FP_EQUIV> 4 "register_operand") > - (match_operand:<V_FP_EQUIV> 5 "aarch64_simd_reg_or_zero")]) > - (match_operand:SVE_FULL_HSD 1 "nonmemory_operand") > - (match_operand:SVE_FULL_HSD 2 "nonmemory_operand")))] > - "TARGET_SVE" > - { > - aarch64_expand_sve_vcond (<MODE>mode, <V_FP_EQUIV>mode, operands); > - DONE; > - } > -) > - > ;; ------------------------------------------------------------------------- > ;; ---- [INT] Comparisons > ;; ------------------------------------------------------------------------- > diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc > index de6a4fba3b6..ac93d076bd1 100644 > --- a/gcc/config/aarch64/aarch64.cc > +++ b/gcc/config/aarch64/aarch64.cc > @@ -27026,36 +27026,6 @@ aarch64_expand_sve_vec_cmp_float (rtx target, > rtx_code code, > return false; > } > > -/* Expand an SVE vcond pattern with operands OPS. DATA_MODE is the mode > - of the data being selected and CMP_MODE is the mode of the values being > - compared. 
*/ > - > -void > -aarch64_expand_sve_vcond (machine_mode data_mode, machine_mode cmp_mode, > - rtx *ops) > -{ > - machine_mode pred_mode = aarch64_get_mask_mode (cmp_mode).require (); > - rtx pred = gen_reg_rtx (pred_mode); > - if (FLOAT_MODE_P (cmp_mode)) > - { > - if (aarch64_expand_sve_vec_cmp_float (pred, GET_CODE (ops[3]), > - ops[4], ops[5], true)) > - std::swap (ops[1], ops[2]); > - } > - else > - aarch64_expand_sve_vec_cmp_int (pred, GET_CODE (ops[3]), ops[4], ops[5]); > - > - if (!aarch64_sve_reg_or_dup_imm (ops[1], data_mode)) > - ops[1] = force_reg (data_mode, ops[1]); > - /* The "false" value can only be zero if the "true" value is a constant. > */ > - if (register_operand (ops[1], data_mode) > - || !aarch64_simd_reg_or_zero (ops[2], data_mode)) > - ops[2] = force_reg (data_mode, ops[2]); > - > - rtvec vec = gen_rtvec (3, pred, ops[1], ops[2]); > - emit_set_insn (ops[0], gen_rtx_UNSPEC (data_mode, vec, UNSPEC_SEL)); > -} > - > /* Return true if: > > (a) MODE1 and MODE2 use the same layout for bytes that are common > diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md > index c7f87c4f6c7..e2081fde046 100644 > --- a/gcc/config/aarch64/iterators.md > +++ b/gcc/config/aarch64/iterators.md > @@ -2001,16 +2001,6 @@ (define_mode_attr VQ_INT_EQUIV [(DF "V2DI") (SF > "V4SI") > (define_mode_attr vq_int_equiv [(DF "v2di") (SF "v4si") > ]) > > -;; Floating-point equivalent of selected modes. > -(define_mode_attr V_FP_EQUIV [(VNx8HI "VNx8HF") (VNx8HF "VNx8HF") > - (VNx8BF "VNx8HF") > - (VNx4SI "VNx4SF") (VNx4SF "VNx4SF") > - (VNx2DI "VNx2DF") (VNx2DF "VNx2DF")]) > -(define_mode_attr v_fp_equiv [(VNx8HI "vnx8hf") (VNx8HF "vnx8hf") > - (VNx8BF "vnx8hf") > - (VNx4SI "vnx4sf") (VNx4SF "vnx4sf") > - (VNx2DI "vnx2df") (VNx2DF "vnx2df")]) > - > ;; Maps full and partial vector modes of any element type to a full-vector > ;; integer mode with the same number of units. 
> (define_mode_attr V_INT_CONTAINER [(VNx16QI "VNx16QI") (VNx8QI "VNx8HI") > @@ -2040,16 +2030,6 @@ (define_mode_attr v_int_container [(VNx16QI "vnx16qi") > (VNx8QI "vnx8hi") > (VNx4SF "vnx4si") (VNx2SF "vnx2di") > (VNx2DF "vnx2di")]) > > -;; Mode for vector conditional operations where the comparison has > -;; different type from the lhs. > -(define_mode_attr V_cmp_mixed [(V2SI "V2SF") (V4SI "V4SF") > - (V2DI "V2DF") (V2SF "V2SI") > - (V4SF "V4SI") (V2DF "V2DI")]) > - > -(define_mode_attr v_cmp_mixed [(V2SI "v2sf") (V4SI "v4sf") > - (V2DI "v2df") (V2SF "v2si") > - (V4SF "v4si") (V2DF "v2di")]) > - > ;; Lower case element modes (as used in shift immediate patterns). > (define_mode_attr ve_mode [(V8QI "qi") (V16QI "qi") > (V4HI "hi") (V8HI "hi") > -- > 2.25.1 >