Hi Tamar,

> -----Original Message-----
> From: Tamar Christina <tamar.christ...@arm.com>
> Sent: Wednesday, September 29, 2021 5:19 PM
> To: gcc-patches@gcc.gnu.org
> Cc: nd <n...@arm.com>; Richard Earnshaw <richard.earns...@arm.com>;
> Marcus Shawcroft <marcus.shawcr...@arm.com>; Kyrylo Tkachov
> <kyrylo.tkac...@arm.com>; Richard Sandiford
> <richard.sandif...@arm.com>
> Subject: [PATCH 1/7] AArch64 Add combine patterns for right shift and narrow
> 
> Hi All,
> 
> This adds simple patterns for combining right shifts and narrows into
> shifted narrows.
> 
> i.e.
> 
> typedef short int16_t;
> typedef unsigned short uint16_t;
> 
> void foo (uint16_t * restrict a, int16_t * restrict d, int n)
> {
>     for( int i = 0; i < n; i++ )
>       d[i] = (a[i] * a[i]) >> 10;
> }
> 
> now generates:
> 
> .L4:
>         ldr     q0, [x0, x3]
>         umull   v1.4s, v0.4h, v0.4h
>         umull2  v0.4s, v0.8h, v0.8h
>         shrn    v1.4h, v1.4s, 10
>         shrn2   v1.8h, v0.4s, 10
>         str     q1, [x1, x3]
>         add     x3, x3, 16
>         cmp     x4, x3
>         bne     .L4
> 
> instead of:
> 
> .L4:
>         ldr     q0, [x0, x3]
>         umull   v1.4s, v0.4h, v0.4h
>         umull2  v0.4s, v0.8h, v0.8h
>         sshr    v1.4s, v1.4s, 10
>         sshr    v0.4s, v0.4s, 10
>         xtn     v1.4h, v1.4s
>         xtn2    v1.8h, v0.4s
>         str     q1, [x1, x3]
>         add     x3, x3, 16
>         cmp     x4, x3
>         bne     .L4
> 
> Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.
> 
> Ok for master?
> 
> Thanks,
> Tamar
> 
> gcc/ChangeLog:
> 
> 	* config/aarch64/aarch64-simd.md (*aarch64_<srn_op>shrn<mode>_vect,
> 	*aarch64_<srn_op>shrn<mode>2_vect): New.
>       * config/aarch64/iterators.md (srn_op): New.
> 
> gcc/testsuite/ChangeLog:
> 
>       * gcc.target/aarch64/shrn-combine.c: New test.
> 
> --- inline copy of patch --
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 48eddf64e05afe3788abfa05141f6544a9323ea1..d7b6cae424622d259f97a3d5fa9093c0fb0bd5ce 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -1818,6 +1818,28 @@ (define_insn "aarch64_shrn<mode>_insn_be"
>    [(set_attr "type" "neon_shift_imm_narrow_q")]
>  )
> 
> +(define_insn "*aarch64_<srn_op>shrn<mode>_vect"
> +  [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
> +        (truncate:<VNARROWQ>
> +          (SHIFTRT:VQN (match_operand:VQN 1 "register_operand" "w")
> +            (match_operand:VQN 2 "aarch64_simd_shift_imm_vec_<vn_mode>"))))]
> +  "TARGET_SIMD"
> +  "shrn\\t%0.<Vntype>, %1.<Vtype>, %2"
> +  [(set_attr "type" "neon_shift_imm_narrow_q")]
> +)
> +
> +(define_insn "*aarch64_<srn_op>shrn<mode>2_vect"
> +  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
> +     (vec_concat:<VNARROWQ2>
> +       (match_operand:<VNARROWQ> 1 "register_operand" "0")
> +       (truncate:<VNARROWQ>
> +         (SHIFTRT:VQN (match_operand:VQN 2 "register_operand" "w")
> +           (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")))))]
> +  "TARGET_SIMD"
> +  "shrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
> +  [(set_attr "type" "neon_shift_imm_narrow_q")]
> +)

I think this needs to be guarded by !BYTES_BIG_ENDIAN, with a similar pattern
added for BYTES_BIG_ENDIAN that has the vec_concat operands swapped around.
This is similar to the aarch64_xtn2<mode>_insn_be pattern, for example; see
the sketch below.
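
To make the suggestion concrete, here's an untested sketch of the big-endian
companion, modelled on aarch64_xtn2<mode>_insn_be (the name
*aarch64_<srn_op>shrn<mode>2_vect_be is only a placeholder):

;; Big-endian variant: the vec_concat operands are swapped so that the
;; narrowed elements land in the half of the destination that SHRN2
;; writes on big-endian targets.
(define_insn "*aarch64_<srn_op>shrn<mode>2_vect_be"
  [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
	(vec_concat:<VNARROWQ2>
	  (truncate:<VNARROWQ>
	    (SHIFTRT:VQN (match_operand:VQN 2 "register_operand" "w")
	      (match_operand:VQN 3 "aarch64_simd_shift_imm_vec_<vn_mode>")))
	  (match_operand:<VNARROWQ> 1 "register_operand" "0")))]
  "TARGET_SIMD && BYTES_BIG_ENDIAN"
  "shrn2\\t%0.<V2ntype>, %2.<Vtype>, %3"
  [(set_attr "type" "neon_shift_imm_narrow_q")]
)

The existing *aarch64_<srn_op>shrn<mode>2_vect pattern would then gain
"&& !BYTES_BIG_ENDIAN" in its condition, the same way the xtn2 pair is split.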

Thanks,
Kyrill

> +
>  (define_expand "aarch64_shrn<mode>"
>    [(set (match_operand:<VNARROWQ> 0 "register_operand")
>       (truncate:<VNARROWQ>
> diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
> index caa42f8f169fbf2cf46a90cf73dee05619acc300..8dbeed3b0d4a44cdc17dd333ed397b39a33f386a 100644
> --- a/gcc/config/aarch64/iterators.md
> +++ b/gcc/config/aarch64/iterators.md
> @@ -2003,6 +2003,9 @@ (define_code_attr shift [(ashift "lsl") (ashiftrt "asr")
>  ;; Op prefix for shift right and accumulate.
>  (define_code_attr sra_op [(ashiftrt "s") (lshiftrt "u")])
> 
> +;; Op prefix for shift right and narrow.
> +(define_code_attr srn_op [(ashiftrt "r") (lshiftrt "")])
> +
>  ;; Map shift operators onto underlying bit-field instructions
>  (define_code_attr bfshift [(ashift "ubfiz") (ashiftrt "sbfx")
>                          (lshiftrt "ubfx") (rotatert "extr")])
> diff --git a/gcc/testsuite/gcc.target/aarch64/shrn-combine.c b/gcc/testsuite/gcc.target/aarch64/shrn-combine.c
> new file mode 100644
> index 0000000000000000000000000000000000000000..0187f49f4dcc76182c90366caaf00d294e835707
> --- /dev/null
> +++ b/gcc/testsuite/gcc.target/aarch64/shrn-combine.c
> @@ -0,0 +1,14 @@
> +/* { dg-do assemble } */
> +/* { dg-options "-O3 --save-temps --param=vect-epilogues-nomask=0" } */
> +
> +typedef short int16_t;
> +typedef unsigned short uint16_t;
> +
> +void foo (uint16_t * restrict a, int16_t * restrict d, int n)
> +{
> +    for( int i = 0; i < n; i++ )
> +      d[i] = (a[i] * a[i]) >> 10;
> +}
> +
> +/* { dg-final { scan-assembler-times {\tshrn\t} 1 } } */
> +/* { dg-final { scan-assembler-times {\tshrn2\t} 1 } } */
> 
> 