ping^2?

On Wed, 30 Dec 2020 at 11:34, Christophe Lyon
<christophe.l...@linaro.org> wrote:
>
> ping?
>
> On Thu, 17 Dec 2020 at 18:48, Christophe Lyon
> <christophe.l...@linaro.org> wrote:
> >
> > This patch enables MVE vshr instructions for auto-vectorization.  New
> > MVE patterns are introduced that take a vector of constants as second
> > operand, all constants being equal.
> >
> > The existing mve_vshrq_n_<supf><mode> is kept, as it takes a single
> > immediate as second operand, and is used by arm_mve.h.
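> >
> > For reference, that intrinsic form is what arm_mve.h users call
> > directly; a small sketch (assuming the usual ACLE spelling of the
> > intrinsic):
> >
> >   #include <arm_mve.h>
> >
> >   /* Goes through the existing mve_vshrq_n_s<mode> pattern, which
> >      takes a single immediate as second operand.  */
> >   int32x4_t shr_by_5 (int32x4_t x)
> >   {
> >     return vshrq_n_s32 (x, 5);
> >   }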
> >
> > The vashr<mode>3 and vlshr<mode>3 expanders are moved from neon.md to
> > vec-common.md, and updated to rely on the normal expansion scheme to
> > generate shifts by immediate.
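> >
> > Shifts by a vector of non-constant amounts keep the existing strategy
> > in the moved expanders: the shift amount is negated and a left shift
> > (vshl) is emitted instead, as the expander bodies below show.  A
> > sketch of a loop taking that path (function name is just an example):
> >
> >   #include <stdint.h>
> >
> >   /* The per-element shift amounts are in registers, so the expander
> >      negates them and emits vshl rather than vshr.  */
> >   void shift_s32_reg (int32_t *__restrict dest, int32_t *a, int32_t *b)
> >   {
> >     for (int i = 0; i < 4; i++)
> >       dest[i] = a[i] >> b[i];
> >   }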
> >
> > 2020-12-03  Christophe Lyon  <christophe.l...@linaro.org>
> >
> >         gcc/
> >         * config/arm/mve.md (mve_vshrq_n_s<mode>_imm): New entry.
> >         (mve_vshrq_n_u<mode>_imm): Likewise.
> >         * config/arm/neon.md (vashr<mode>3, vlshr<mode>3): Move to ...
> >         * config/arm/vec-common.md: ... here.
> >
> >         gcc/testsuite/
> >         * gcc.target/arm/simd/mve-vshr.c: Add tests for vshr.
> > ---
> >  gcc/config/arm/mve.md                        | 34 ++++++++++++++++
> >  gcc/config/arm/neon.md                       | 34 ----------------
> >  gcc/config/arm/vec-common.md                 | 38 +++++++++++++++++-
> >  gcc/testsuite/gcc.target/arm/simd/mve-vshr.c | 59 ++++++++++++++++++++++++++++
> >  4 files changed, 130 insertions(+), 35 deletions(-)
> >  create mode 100644 gcc/testsuite/gcc.target/arm/simd/mve-vshr.c
> >
> > diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
> > index 8bdb451..eea8b20 100644
> > --- a/gcc/config/arm/mve.md
> > +++ b/gcc/config/arm/mve.md
> > @@ -763,6 +763,7 @@ (define_insn "mve_vcreateq_<supf><mode>"
> >  ;;
> >  ;; [vshrq_n_s, vshrq_n_u])
> >  ;;
> > +;; Version that takes an immediate as operand 2.
> >  (define_insn "mve_vshrq_n_<supf><mode>"
> >    [
> >     (set (match_operand:MVE_2 0 "s_register_operand" "=w")
> > @@ -775,6 +776,39 @@ (define_insn "mve_vshrq_n_<supf><mode>"
> >    [(set_attr "type" "mve_move")
> >  ])
> >
> > +;; Versions that take constant vectors as operand 2 (with all elements
> > +;; equal).
> > +(define_insn "mve_vshrq_n_s<mode>_imm"
> > +  [
> > +   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
> > +       (ashiftrt:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
> > +                       (match_operand:MVE_2 2 "imm_for_neon_rshift_operand" "i")))
> > +  ]
> > +  "TARGET_HAVE_MVE"
> > +  {
> > +    return neon_output_shift_immediate ("vshr", 's', &operands[2],
> > +                                       <MODE>mode,
> > +                                       VALID_NEON_QREG_MODE (<MODE>mode),
> > +                                       true);
> > +  }
> > +  [(set_attr "type" "mve_move")
> > +])
> > +(define_insn "mve_vshrq_n_u<mode>_imm"
> > +  [
> > +   (set (match_operand:MVE_2 0 "s_register_operand" "=w")
> > +       (lshiftrt:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
> > +                       (match_operand:MVE_2 2 "imm_for_neon_rshift_operand" "i")))
> > +  ]
> > +  "TARGET_HAVE_MVE"
> > +  {
> > +    return neon_output_shift_immediate ("vshr", 'u', &operands[2],
> > +                                       <MODE>mode,
> > +                                       VALID_NEON_QREG_MODE (<MODE>mode),
> > +                                       true);
> > +  }
> > +  [(set_attr "type" "mve_move")
> > +])
> > +
> >  ;;
> >  ;; [vcvtq_n_from_f_s, vcvtq_n_from_f_u])
> >  ;;
> > diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
> > index ac9bf74..a0e8d7a 100644
> > --- a/gcc/config/arm/neon.md
> > +++ b/gcc/config/arm/neon.md
> > @@ -899,40 +899,6 @@ (define_insn "ashl<mode>3_unsigned"
> >    [(set_attr "type" "neon_shift_reg<q>")]
> >  )
> >
> > -(define_expand "vashr<mode>3"
> > -  [(set (match_operand:VDQIW 0 "s_register_operand")
> > -       (ashiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
> > -                       (match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
> > -  "TARGET_NEON"
> > -{
> > -  if (s_register_operand (operands[2], <MODE>mode))
> > -    {
> > -      rtx neg = gen_reg_rtx (<MODE>mode);
> > -      emit_insn (gen_neon_neg<mode>2 (neg, operands[2]));
> > -      emit_insn (gen_ashl<mode>3_signed (operands[0], operands[1], neg));
> > -    }
> > -  else
> > -    emit_insn (gen_vashr<mode>3_imm (operands[0], operands[1], operands[2]));
> > -  DONE;
> > -})
> > -
> > -(define_expand "vlshr<mode>3"
> > -  [(set (match_operand:VDQIW 0 "s_register_operand")
> > -       (lshiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
> > -                       (match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
> > -  "TARGET_NEON"
> > -{
> > -  if (s_register_operand (operands[2], <MODE>mode))
> > -    {
> > -      rtx neg = gen_reg_rtx (<MODE>mode);
> > -      emit_insn (gen_neon_neg<mode>2 (neg, operands[2]));
> > -      emit_insn (gen_ashl<mode>3_unsigned (operands[0], operands[1], neg));
> > -    }
> > -  else
> > -    emit_insn (gen_vlshr<mode>3_imm (operands[0], operands[1], operands[2]));
> > -  DONE;
> > -})
> > -
> >  ;; 64-bit shifts
> >
> >  ;; This pattern loads a 32-bit shift count into a 64-bit NEON register,
> > diff --git a/gcc/config/arm/vec-common.md b/gcc/config/arm/vec-common.md
> > index 3a282f0..e126557 100644
> > --- a/gcc/config/arm/vec-common.md
> > +++ b/gcc/config/arm/vec-common.md
> > @@ -258,4 +258,40 @@ (define_expand "vashl<mode>3"
> >  {
> >    emit_insn (gen_mve_vshlq_u<mode> (operands[0], operands[1], operands[2]));
> >    DONE;
> > -})
> > \ No newline at end of file
> > +})
> > +
> > +;; When operand 2 is an immediate, use the normal expansion to match
> > +;; gen_vashr<mode>3_imm for Neon and gen_mve_vshrq_n_s<mode>_imm for
> > +;; MVE.
> > +(define_expand "vashr<mode>3"
> > +  [(set (match_operand:VDQIW 0 "s_register_operand")
> > +       (ashiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
> > +                       (match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
> > +  "ARM_HAVE_<MODE>_ARITH"
> > +{
> > +  if (s_register_operand (operands[2], <MODE>mode))
> > +    {
> > +      rtx neg = gen_reg_rtx (<MODE>mode);
> > +      emit_insn (gen_neg<mode>2 (neg, operands[2]));
> > +      emit_insn (gen_mve_vshlq_s<mode> (operands[0], operands[1], neg));
> > +      DONE;
> > +    }
> > +})
> > +
> > +;; When operand 2 is an immediate, use the normal expansion to match
> > +;; gen_vlshr<mode>3_imm for Neon and gen_mve_vshrq_n_u<mode>_imm for
> > +;; MVE.
> > +(define_expand "vlshr<mode>3"
> > +  [(set (match_operand:VDQIW 0 "s_register_operand")
> > +       (lshiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
> > +                       (match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
> > +  "ARM_HAVE_<MODE>_ARITH"
> > +{
> > +  if (s_register_operand (operands[2], <MODE>mode))
> > +    {
> > +      rtx neg = gen_reg_rtx (<MODE>mode);
> > +      emit_insn (gen_neg<mode>2 (neg, operands[2]));
> > +      emit_insn (gen_mve_vshlq_u<mode> (operands[0], operands[1], neg));
> > +      DONE;
> > +    }
> > +})
> > diff --git a/gcc/testsuite/gcc.target/arm/simd/mve-vshr.c b/gcc/testsuite/gcc.target/arm/simd/mve-vshr.c
> > new file mode 100644
> > index 0000000..d4e658c
> > --- /dev/null
> > +++ b/gcc/testsuite/gcc.target/arm/simd/mve-vshr.c
> > @@ -0,0 +1,59 @@
> > +/* { dg-do assemble } */
> > +/* { dg-require-effective-target arm_v8_1m_mve_ok } */
> > +/* { dg-add-options arm_v8_1m_mve } */
> > +/* { dg-additional-options "-O3" } */
> > +
> > +#include <stdint.h>
> > +
> > +#define FUNC(SIGN, TYPE, BITS, NB, OP, NAME)                           \
> > +  void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a, TYPE##BITS##_t *b) { \
> > +    int i;                                                             \
> > +    for (i=0; i<NB; i++) {                                             \
> > +      dest[i] = a[i] OP b[i];                                          \
> > +    }                                                                  \
> > +}
> > +
> > +#define FUNC_IMM(SIGN, TYPE, BITS, NB, OP, NAME)                               \
> > +  void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a) { \
> > +    int i;                                                             \
> > +    for (i=0; i<NB; i++) {                                             \
> > +      dest[i] = a[i] OP 5;                                             \
> > +    }                                                                  \
> > +}
> > +
> > +/* 64-bit vectors.  */
> > +FUNC(s, int, 32, 2, >>, vshr)
> > +FUNC(u, uint, 32, 2, >>, vshr)
> > +FUNC(s, int, 16, 4, >>, vshr)
> > +FUNC(u, uint, 16, 4, >>, vshr)
> > +FUNC(s, int, 8, 8, >>, vshr)
> > +FUNC(u, uint, 8, 8, >>, vshr)
> > +
> > +/* 128-bit vectors.  */
> > +FUNC(s, int, 32, 4, >>, vshr)
> > +FUNC(u, uint, 32, 4, >>, vshr)
> > +FUNC(s, int, 16, 8, >>, vshr)
> > +FUNC(u, uint, 16, 8, >>, vshr)
> > +FUNC(s, int, 8, 16, >>, vshr)
> > +FUNC(u, uint, 8, 16, >>, vshr)
> > +
> > +/* 64-bit vectors.  */
> > +FUNC_IMM(s, int, 32, 2, >>, vshrimm)
> > +FUNC_IMM(u, uint, 32, 2, >>, vshrimm)
> > +FUNC_IMM(s, int, 16, 4, >>, vshrimm)
> > +FUNC_IMM(u, uint, 16, 4, >>, vshrimm)
> > +FUNC_IMM(s, int, 8, 8, >>, vshrimm)
> > +FUNC_IMM(u, uint, 8, 8, >>, vshrimm)
> > +
> > +/* 128-bit vectors.  */
> > +FUNC_IMM(s, int, 32, 4, >>, vshrimm)
> > +FUNC_IMM(u, uint, 32, 4, >>, vshrimm)
> > +FUNC_IMM(s, int, 16, 8, >>, vshrimm)
> > +FUNC_IMM(u, uint, 16, 8, >>, vshrimm)
> > +FUNC_IMM(s, int, 8, 16, >>, vshrimm)
> > +FUNC_IMM(u, uint, 8, 16, >>, vshrimm)
> > +
> > +/* MVE has only 128-bit vectors, so we can vectorize only half of the
> > +   functions above.  */
> > +/* { dg-final { scan-assembler-times {vshr.s[0-9]+\tq[0-9]+, q[0-9]+} 3 } } */
> > +/* { dg-final { scan-assembler-times {vshr.u[0-9]+\tq[0-9]+, q[0-9]+} 3 } } */
> > --
> > 2.7.4
> >
