Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the floating-point vml[as][q] Neon intrinsics
> to use RTL builtins rather than relying on the GCC vector extensions.
> Using RTL builtins allows control over the emission of fmla/fmls
> instructions (which we don't want here).
>
> With this commit, the code generated by these intrinsics changes from
> a fused multiply-add/subtract instruction to an fmul followed by an
> fadd/fsub instruction. If the programmer really wants fmla/fmls
> instructions, they can use the vfm[as] intrinsics.
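>
> For illustration, here is a minimal (hypothetical) example of the
> intended difference in code generation; the function names are made up
> and the exact instruction selection is of course up to the compiler:
>
>   #include <arm_neon.h>
>
>   /* Illustrative only: after this change, vmla_f32 is expected to
>      expand to a separate fmul followed by an fadd.  */
>   float32x2_t
>   mla_unfused (float32x2_t a, float32x2_t b, float32x2_t c)
>   {
>     return vmla_f32 (a, b, c);
>   }
>
>   /* vfma_f32 still maps to the fused fmla instruction.  */
>   float32x2_t
>   mla_fused (float32x2_t a, float32x2_t b, float32x2_t c)
>   {
>     return vfma_f32 (a, b, c);
>   }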
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?

OK, thanks.

Richard

> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-02-16  Jonathan Wright  <jonathan.wri...@arm.com>
>
>       * config/aarch64/aarch64-simd-builtins.def: Add float_ml[as]
>       builtin generator macros.
>       * config/aarch64/aarch64-simd.md (aarch64_float_mla<mode>):
>       Define.
>       (aarch64_float_mls<mode>): Define.
>       * config/aarch64/arm_neon.h (vmla_f32): Use RTL builtin
>       instead of relying on GCC vector extensions.
>       (vmla_f64): Likewise.
>       (vmlaq_f32): Likewise.
>       (vmlaq_f64): Likewise.
>       (vmls_f32): Likewise.
>       (vmls_f64): Likewise.
>       (vmlsq_f32): Likewise.
>       (vmlsq_f64): Likewise.
>       * config/aarch64/iterators.md: Define VDQF_DF mode iterator.
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index 547509474c23daf6882ed2f8407ddb5caf1d1b91..55a5682baeb13041053ef9e6eaa831182ea8b10c 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -664,6 +664,8 @@
>    BUILTIN_VHSDF (TERNOP, fnma, 4, FP)
>    VAR1 (TERNOP, fnma, 4, FP, hf)
>  
> +  BUILTIN_VDQF_DF (TERNOP, float_mla, 0, FP)
> +  BUILTIN_VDQF_DF (TERNOP, float_mls, 0, FP)
>    BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
>    BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
>  
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index d016970a2c278405b270a0ac745221e69f0f625e..95363d7b5ad11f775aa03f24bbcb0b66d20abb7c 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -2662,6 +2662,38 @@
>    [(set_attr "type" "neon_fp_abs_<stype><q>")]
>  )
>  
> +(define_expand "aarch64_float_mla<mode>"
> +  [(set (match_operand:VDQF_DF 0 "register_operand")
> +     (plus:VDQF_DF
> +       (mult:VDQF_DF
> +         (match_operand:VDQF_DF 2 "register_operand")
> +         (match_operand:VDQF_DF 3 "register_operand"))
> +       (match_operand:VDQF_DF 1 "register_operand")))]
> +  "TARGET_SIMD"
> +  {
> +    rtx scratch = gen_reg_rtx (<MODE>mode);
> +    emit_insn (gen_mul<mode>3 (scratch, operands[2], operands[3]));
> +    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
> +    DONE;
> +  }
> +)
> +
> +(define_expand "aarch64_float_mls<mode>"
> +  [(set (match_operand:VDQF_DF 0 "register_operand")
> +     (minus:VDQF_DF
> +       (match_operand:VDQF_DF 1 "register_operand")
> +       (mult:VDQF_DF
> +         (match_operand:VDQF_DF 2 "register_operand")
> +         (match_operand:VDQF_DF 3 "register_operand"))))]
> +  "TARGET_SIMD"
> +  {
> +    rtx scratch = gen_reg_rtx (<MODE>mode);
> +    emit_insn (gen_mul<mode>3 (scratch, operands[2], operands[3]));
> +    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
> +    DONE;
> +  }
> +)
> +
>  (define_expand "aarch64_float_mla_n<mode>"
>    [(set (match_operand:VDQSF 0 "register_operand")
>       (plus:VDQSF
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index c0399c4dc428fe63c07fce0d12bb1580ead1542f..d4ed47249e3e39f8c88274657c809293e20bec9d 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -20362,28 +20362,28 @@ __extension__ extern __inline float32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmla_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
>  {
> -  return __a + __b * __c;
> +  return __builtin_aarch64_float_mlav2sf (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline float64x1_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmla_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
>  {
> -  return __a + __b * __c;
> +  return (float64x1_t) {__builtin_aarch64_float_mladf (__a[0], __b[0], __c[0])};
>  }
>  
>  __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmlaq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
>  {
> -  return __a + __b * __c;
> +  return __builtin_aarch64_float_mlav4sf (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline float64x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmlaq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
>  {
> -  return __a + __b * __c;
> +  return __builtin_aarch64_float_mlav2df (__a, __b, __c);
>  }
>  
>  /* vmla_lane  */
> @@ -20560,28 +20560,28 @@ __extension__ extern __inline float32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmls_f32 (float32x2_t __a, float32x2_t __b, float32x2_t __c)
>  {
> -  return __a - __b * __c;
> +  return __builtin_aarch64_float_mlsv2sf (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline float64x1_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmls_f64 (float64x1_t __a, float64x1_t __b, float64x1_t __c)
>  {
> -  return __a - __b * __c;
> +  return (float64x1_t) {__builtin_aarch64_float_mlsdf (__a[0], __b[0], __c[0])};
>  }
>  
>  __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmlsq_f32 (float32x4_t __a, float32x4_t __b, float32x4_t __c)
>  {
> -  return __a - __b * __c;
> +  return __builtin_aarch64_float_mlsv4sf (__a, __b, __c);
>  }
>  
>  __extension__ extern __inline float64x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmlsq_f64 (float64x2_t __a, float64x2_t __b, float64x2_t __c)
>  {
> -  return __a - __b * __c;
> +  return __builtin_aarch64_float_mlsv2df (__a, __b, __c);
>  }
>  
>  /* vmls_lane  */
> diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
> index d0c0c24e420ae69cd58e56231a336b08ac1677f5..65e728acb3bc0cbcc8be29d330bc1ee66ef9a504 100644
> --- a/gcc/config/aarch64/iterators.md
> +++ b/gcc/config/aarch64/iterators.md
> @@ -152,6 +152,7 @@
>                            V2SF V4SF V2DF])
>  
>  ;; Advanced SIMD Float modes, and DF.
> +(define_mode_iterator VDQF_DF [V2SF V4SF V2DF DF])
>  (define_mode_iterator VHSDF_DF [(V4HF "TARGET_SIMD_F16INST")
>                               (V8HF "TARGET_SIMD_F16INST")
>                               V2SF V4SF V2DF DF])
