Jonathan Wright <jonathan.wri...@arm.com> writes:
> Updated the patch to be more consistent with the others in the series.
>
> Tested and bootstrapped on aarch64-none-linux-gnu - no issues.
>
> Ok for master?

OK, thanks.

Richard

>
> Thanks,
> Jonathan
> -------------------------------------------------------------------------------
> From: Gcc-patches <gcc-patches-boun...@gcc.gnu.org> on behalf of Jonathan
> Wright via Gcc-patches <gcc-patches@gcc.gnu.org>
> Sent: 28 April 2021 15:42
> To: gcc-patches@gcc.gnu.org <gcc-patches@gcc.gnu.org>
> Subject: [PATCH 13/20] aarch64: Use RTL builtins for FP ml[as][q]_laneq
> intrinsics
>  
> Hi,
>
> As subject, this patch rewrites the floating-point vml[as][q]_laneq Neon
> intrinsics to use RTL builtins rather than relying on the GCC vector
> extensions. Using RTL builtins gives us control over the emission of
> fmla/fmls instructions, which we do not want here.
>
> With this commit, the code generated by these intrinsics changes from
> a fused multiply-add/subtract instruction to an fmul followed by an
> fadd/fsub instruction. If the programmer really wants fmla/fmls
> instructions, they can use the vfm[as] intrinsics.
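>
> As an illustration, here is a minimal sketch of the behaviour described
> above (the wrapper function names are illustrative only, not part of
> the patch):
>
>   #include <arm_neon.h>
>
>   /* After this change, vmla_laneq_f32 is expected to lower to an fmul
>      followed by an fadd rather than a fused fmla.  */
>   float32x2_t
>   mla_lane3 (float32x2_t acc, float32x2_t x, float32x4_t y)
>   {
>     return vmla_laneq_f32 (acc, x, y, 3);
>   }
>
>   /* A fused operation can still be requested explicitly with the
>      vfm[as] intrinsics, which should continue to emit fmla.  */
>   float32x2_t
>   fma_lane3 (float32x2_t acc, float32x2_t x, float32x4_t y)
>   {
>     return vfma_laneq_f32 (acc, x, y, 3);
>   }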
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?
>
> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-02-17  Jonathan Wright  <jonathan.wri...@arm.com>
>
>         * config/aarch64/aarch64-simd-builtins.def: Add
>         float_ml[as][q]_laneq builtin generator macros.
>         * config/aarch64/aarch64-simd.md (mul_laneq<mode>3): Define.
>         (aarch64_float_mla_laneq<mode>): Define.
>         (aarch64_float_mls_laneq<mode>): Define.
>         * config/aarch64/arm_neon.h (vmla_laneq_f32): Use RTL builtin
>         instead of GCC vector extensions.
>         (vmlaq_laneq_f32): Likewise.
>         (vmls_laneq_f32): Likewise.
>         (vmlsq_laneq_f32): Likewise.
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index 8e4b4edc8a46ffba777a42058f06ce7204152824..1e81bb53287e9797f3539c2c64ed11c6c26d6e4e 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -674,6 +674,8 @@
>    BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
>    BUILTIN_VDQSF (QUADOP_LANE, float_mla_lane, 0, FP)
>    BUILTIN_VDQSF (QUADOP_LANE, float_mls_lane, 0, FP)
> +  BUILTIN_VDQSF (QUADOP_LANE, float_mla_laneq, 0, FP)
> +  BUILTIN_VDQSF (QUADOP_LANE, float_mls_laneq, 0, FP)
>  
>    /* Implemented by aarch64_simd_bsl<mode>.  */
>    BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
> diff --git a/gcc/config/aarch64/aarch64-simd.md 
> b/gcc/config/aarch64/aarch64-simd.md
> index 
> bdee49f74f4725409d33af733bb55be290b3f0e7..234762960bd6df057394f753072ef65a6628a43d
>  100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -734,6 +734,22 @@
>    [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
>  )
>  
> +(define_insn "mul_laneq<mode>3"
> +  [(set (match_operand:VDQSF 0 "register_operand" "=w")
> +     (mult:VDQSF
> +       (vec_duplicate:VDQSF
> +         (vec_select:<VEL>
> +           (match_operand:V4SF 2 "register_operand" "w")
> +           (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
> +       (match_operand:VDQSF 1 "register_operand" "w")))]
> +  "TARGET_SIMD"
> +  {
> +    operands[3] = aarch64_endian_lane_rtx (V4SFmode, INTVAL (operands[3]));
> +    return "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]";
> +  }
> +  [(set_attr "type" "neon_fp_mul_s_scalar<q>")]
> +)
> +
>  (define_insn "*aarch64_mul3_elt_<vswap_width_name><mode>"
>    [(set (match_operand:VMUL_CHANGE_NLANES 0 "register_operand" "=w")
>       (mult:VMUL_CHANGE_NLANES
> @@ -2742,6 +2758,46 @@
>    }
>  )
>  
> +(define_expand "aarch64_float_mla_laneq<mode>"
> +  [(set (match_operand:VDQSF 0 "register_operand")
> +     (plus:VDQSF
> +       (mult:VDQSF
> +         (vec_duplicate:VDQSF
> +           (vec_select:<VEL>
> +             (match_operand:V4SF 3 "register_operand")
> +             (parallel [(match_operand:SI 4 "immediate_operand")])))
> +         (match_operand:VDQSF 2 "register_operand"))
> +       (match_operand:VDQSF 1 "register_operand")))]
> +  "TARGET_SIMD"
> +  {
> +    rtx scratch = gen_reg_rtx (<MODE>mode);
> +    emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
> +                                  operands[3], operands[4]));
> +    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
> +    DONE;
> +  }
> +)
> +
> +(define_expand "aarch64_float_mls_laneq<mode>"
> +  [(set (match_operand:VDQSF 0 "register_operand")
> +     (minus:VDQSF
> +       (match_operand:VDQSF 1 "register_operand")
> +       (mult:VDQSF
> +         (vec_duplicate:VDQSF
> +           (vec_select:<VEL>
> +             (match_operand:V4SF 3 "register_operand")
> +             (parallel [(match_operand:SI 4 "immediate_operand")])))
> +         (match_operand:VDQSF 2 "register_operand"))))]
> +  "TARGET_SIMD"
> +  {
> +    rtx scratch = gen_reg_rtx (<MODE>mode);
> +    emit_insn (gen_mul_laneq<mode>3 (scratch, operands[2],
> +                                  operands[3], operands[4]));
> +    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
> +    DONE;
> +  }
> +)
> +
>  (define_insn "fma<mode>4"
>    [(set (match_operand:VHSDF 0 "register_operand" "=w")
>         (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index 5328d447a424fdf4ce1941abf3c1218d4fe8f42a..17e059efb80fa86a8a32127ace4fc7f43e2040a8 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -20420,7 +20420,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmla_laneq_f32 (float32x2_t __a, float32x2_t __b,
>               float32x4_t __c, const int __lane)
>  {
> -  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
> +  return __builtin_aarch64_float_mla_laneqv2sf (__a, __b, __c, __lane);
>  }
>  
>  __extension__ extern __inline int16x4_t
> @@ -20504,7 +20504,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmlaq_laneq_f32 (float32x4_t __a, float32x4_t __b,
>                float32x4_t __c, const int __lane)
>  {
> -  return (__a + (__b * __aarch64_vget_lane_any (__c, __lane)));
> +  return __builtin_aarch64_float_mla_laneqv4sf (__a, __b, __c, __lane);
>  }
>  
>  __extension__ extern __inline int16x8_t
> @@ -20618,7 +20618,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmls_laneq_f32 (float32x2_t __a, float32x2_t __b,
>              float32x4_t __c, const int __lane)
>  {
> -  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
> +  return __builtin_aarch64_float_mls_laneqv2sf (__a, __b, __c, __lane);
>  }
>  
>  __extension__ extern __inline int16x4_t
> @@ -20702,7 +20702,7 @@ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmlsq_laneq_f32 (float32x4_t __a, float32x4_t __b,
>               float32x4_t __c, const int __lane)
>  {
> -  return (__a - (__b * __aarch64_vget_lane_any (__c, __lane)));
> +  return __builtin_aarch64_float_mls_laneqv4sf (__a, __b, __c, __lane);
>  }
>  
>  __extension__ extern __inline int16x8_t
