Patch updated as per your suggestion.

Tested and bootstrapped on aarch64-none-linux-gnu - no issues.
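
For reference, a minimal sketch of the kind of code affected (the function name and the fmul/fadd sequence in the comment are illustrative of the expected expansion through the new builtin, not verified output from the updated patch):

#include <arm_neon.h>

/* a + b * c in each lane.  Previously a single opaque asm block; with
   this patch it expands through __builtin_aarch64_float_mla_nv2sf into
   mul_n<mode>3 followed by add<mode>3, i.e. roughly
     fmul  vT.2s, vB.2s, vC.s[0]
     fadd  vA.2s, vA.2s, vT.2s
   with both instructions visible to the scheduler.  */
float32x2_t
scale_accumulate (float32x2_t a, float32x2_t b, float32_t c)
{
  return vmla_n_f32 (a, b, c);
}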

Ok for master?

Thanks,
Jonathan
________________________________
From: Richard Sandiford <richard.sandif...@arm.com>
Sent: 28 April 2021 16:11
To: Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org>
Cc: Jonathan Wright <jonathan.wri...@arm.com>
Subject: Re: [PATCH 10/20] aarch64: Use RTL builtins for FP ml[as]_n intrinsics

Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the floating-point vml[as][q]_n Neon
> intrinsics to use RTL builtins rather than inline assembly code, allowing
> for better scheduling and optimization.
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?
>
> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-01-18  Jonathan Wright  <jonathan.wri...@arm.com>
>
>        * config/aarch64/aarch64-simd-builtins.def: Add
>        float_ml[as]_n builtin generator macros.
>        * config/aarch64/aarch64-simd.md (mul_n<mode>3): Define.
>        (aarch64_float_mla_n<mode>): Define.
>        (aarch64_float_mls_n<mode>): Define.
>        * config/aarch64/arm_neon.h (vmla_n_f32): Use RTL builtin
>        instead of inline asm.
>        (vmlaq_n_f32): Likewise.
>        (vmls_n_f32): Likewise.
>        (vmlsq_n_f32): Likewise.
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
> index 0f44ed84ff9d08d808b1b2dfe528db5208b134f5..547509474c23daf6882ed2f8407ddb5caf1d1b91 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -664,6 +664,9 @@
>    BUILTIN_VHSDF (TERNOP, fnma, 4, FP)
>    VAR1 (TERNOP, fnma, 4, FP, hf)
>
> +  BUILTIN_VDQSF (TERNOP, float_mla_n, 0, FP)
> +  BUILTIN_VDQSF (TERNOP, float_mls_n, 0, FP)
> +
>    /* Implemented by aarch64_simd_bsl<mode>.  */
>    BUILTIN_VDQQH (BSL_P, simd_bsl, 0, NONE)
>    VAR2 (BSL_P, simd_bsl,0, NONE, di, v2di)
> diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
> index 5f701dd2775290156634ef8c6feccecd359e9ec9..d016970a2c278405b270a0ac745221e69f0f625e 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -2614,6 +2614,17 @@
>    [(set_attr "type" "neon_fp_mul_<stype><q>")]
>  )
>
> +(define_insn "mul_n<mode>3"
> + [(set (match_operand:VHSDF 0 "register_operand" "=w")
> +     (mult:VHSDF
> +       (vec_duplicate:VHSDF
> +         (match_operand:<VEL> 2 "register_operand" "w"))
> +       (match_operand:VHSDF 1 "register_operand" "w")))]
> + "TARGET_SIMD"
> + "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[0]"

This functionality should already be provided by:

(define_insn "*aarch64_mul3_elt_from_dup<mode>"
 [(set (match_operand:VMUL 0 "register_operand" "=w")
    (mult:VMUL
      (vec_duplicate:VMUL
            (match_operand:<VEL> 1 "register_operand" "<h_con>"))
      (match_operand:VMUL 2 "register_operand" "w")))]
  "TARGET_SIMD"
  "<f>mul\t%0.<Vtype>, %2.<Vtype>, %1.<Vetype>[0]";
  [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)

so I think we should instead rename that to mul_n<mode>3 and reorder
its operands.
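
i.e. something like this (untested; constraints and the type attribute carried over from the existing pattern):

(define_insn "mul_n<mode>3"
 [(set (match_operand:VMUL 0 "register_operand" "=w")
       (mult:VMUL
         (vec_duplicate:VMUL
           (match_operand:<VEL> 2 "register_operand" "<h_con>"))
         (match_operand:VMUL 1 "register_operand" "w")))]
  "TARGET_SIMD"
  "<f>mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[0]"
  [(set_attr "type" "neon<fp>_mul_<stype>_scalar<q>")]
)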

Thanks,
Richard

> +  [(set_attr "type" "neon_fp_mul_<stype><q>")]
> +)
> +
>  (define_expand "div<mode>3"
>   [(set (match_operand:VHSDF 0 "register_operand")
>         (div:VHSDF (match_operand:VHSDF 1 "register_operand")
> @@ -2651,6 +2662,40 @@
>    [(set_attr "type" "neon_fp_abs_<stype><q>")]
>  )
>
> +(define_expand "aarch64_float_mla_n<mode>"
> +  [(set (match_operand:VDQSF 0 "register_operand")
> +     (plus:VDQSF
> +       (mult:VDQSF
> +         (vec_duplicate:VDQSF
> +           (match_operand:<VEL> 3 "register_operand"))
> +         (match_operand:VDQSF 2 "register_operand"))
> +       (match_operand:VDQSF 1 "register_operand")))]
> +  "TARGET_SIMD"
> +  {
> +    rtx scratch = gen_reg_rtx (<MODE>mode);
> +    emit_insn (gen_mul_n<mode>3 (scratch, operands[2], operands[3]));
> +    emit_insn (gen_add<mode>3 (operands[0], operands[1], scratch));
> +    DONE;
> +  }
> +)
> +
> +(define_expand "aarch64_float_mls_n<mode>"
> +  [(set (match_operand:VDQSF 0 "register_operand")
> +     (minus:VDQSF
> +       (match_operand:VDQSF 1 "register_operand")
> +       (mult:VDQSF
> +         (vec_duplicate:VDQSF
> +           (match_operand:<VEL> 3 "register_operand"))
> +         (match_operand:VDQSF 2 "register_operand"))))]
> +  "TARGET_SIMD"
> +  {
> +    rtx scratch = gen_reg_rtx (<MODE>mode);
> +    emit_insn (gen_mul_n<mode>3 (scratch, operands[2], operands[3]));
> +    emit_insn (gen_sub<mode>3 (operands[0], operands[1], scratch));
> +    DONE;
> +  }
> +)
> +
>  (define_insn "fma<mode>4"
>    [(set (match_operand:VHSDF 0 "register_operand" "=w")
>         (fma:VHSDF (match_operand:VHSDF 1 "register_operand" "w")
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index 1c48c166b5b9aaf052761f95121c26845221dae9..c0399c4dc428fe63c07fce0d12bb1580ead1542f 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -7050,13 +7050,7 @@ __extension__ extern __inline float32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmla_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
>  {
> -  float32x2_t __result;
> -  float32x2_t __t1;
> -  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fadd %0.2s, %0.2s, %1.2s"
> -           : "=w"(__result), "=w"(__t1)
> -           : "0"(__a), "w"(__b), "w"(__c)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_float_mla_nv2sf (__a, __b, __c);
>  }
>
>  __extension__ extern __inline int16x4_t
> @@ -7403,13 +7397,7 @@ __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmlaq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
>  {
> -  float32x4_t __result;
> -  float32x4_t __t1;
> -  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fadd %0.4s, %0.4s, %1.4s"
> -           : "=w"(__result), "=w"(__t1)
> -           : "0"(__a), "w"(__b), "w"(__c)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_float_mla_nv4sf (__a, __b, __c);
>  }
>
>  __extension__ extern __inline int16x8_t
> @@ -7496,13 +7484,7 @@ __extension__ extern __inline float32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmls_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
>  {
> -  float32x2_t __result;
> -  float32x2_t __t1;
> -  __asm__ ("fmul %1.2s, %3.2s, %4.s[0]; fsub %0.2s, %0.2s, %1.2s"
> -           : "=w"(__result), "=w"(__t1)
> -           : "0"(__a), "w"(__b), "w"(__c)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_float_mls_nv2sf (__a, __b, __c);
>  }
>
>  __extension__ extern __inline int16x4_t
> @@ -7853,13 +7835,7 @@ __extension__ extern __inline float32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vmlsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
>  {
> -  float32x4_t __result;
> -  float32x4_t __t1;
> -  __asm__ ("fmul %1.4s, %3.4s, %4.s[0]; fsub %0.4s, %0.4s, %1.4s"
> -           : "=w"(__result), "=w"(__t1)
> -           : "0"(__a), "w"(__b), "w"(__c)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_float_mls_nv4sf (__a, __b, __c);
>  }
>
>  __extension__ extern __inline int16x8_t

Attachment: rb14042.patch
