Hi,

As the subject line says, this patch rewrites the [su]mlal Neon intrinsics
to use RTL builtins rather than inline assembly code, allowing for better
scheduling and optimization. The existing *aarch64_<su>mlal<mode> insn
pattern is renamed without the leading '*' so that a gen_* expander is
generated for the new builtins to map onto.
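For illustration (this example is mine, not part of the patch), consider a
simple use of one of these intrinsics:

  #include <arm_neon.h>

  /* vmlal_s8 widens each lane of b and c to 16 bits, multiplies them,
     and accumulates into acc.  This should still compile to a single
     "smlal v0.8h, v1.8b, v2.8b", but with the builtin the compiler can
     now schedule and combine the instruction with surrounding code
     instead of treating it as an opaque asm block.  */
  int16x8_t
  widening_mla (int16x8_t acc, int8x8_t b, int8x8_t c)
  {
    return vmlal_s8 (acc, b, c);
  }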
Regression tested and bootstrapped on aarch64-none-linux-gnu - no issues.

Ok for master?

Thanks,
Jonathan

---

gcc/ChangeLog:

2021-01-26  Jonathan Wright  <jonathan.wri...@arm.com>

        * config/aarch64/aarch64-simd-builtins.def: Add [su]mlal
        builtin generator macros.
        * config/aarch64/aarch64-simd.md (*aarch64_<su>mlal<mode>):
        Rename to...
        (aarch64_<su>mlal<mode>): This.
        * config/aarch64/arm_neon.h (vmlal_s8): Use RTL builtin
        instead of inline asm.
        (vmlal_s16): Likewise.
        (vmlal_s32): Likewise.
        (vmlal_u8): Likewise.
        (vmlal_u16): Likewise.
        (vmlal_u32): Likewise.
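For reference, the TERNOP/TERNOPU generator macros added below give the new
builtins effective prototypes along these lines (a sketch of mine; the
actual declarations are synthesized by the builtin machinery, with the
_uuuu suffix on the unsigned variants coming from the unsigned qualifiers):

  int16x8_t __builtin_aarch64_smlalv8qi (int16x8_t, int8x8_t, int8x8_t);
  uint16x8_t __builtin_aarch64_umlalv8qi_uuuu (uint16x8_t, uint8x8_t,
                                               uint8x8_t);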
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 32aee6024a89e6ca1f423717463fe67d011afd8b..a71ae4d724136c8b626d397bf6187e8b595a2b8a 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -192,6 +192,10 @@
   BUILTIN_VD_BHSI (TERNOP, smlsl, 0, NONE)
   BUILTIN_VD_BHSI (TERNOPU, umlsl, 0, NONE)
 
+  /* Implemented by aarch64_<su>mlal<mode>.  */
+  BUILTIN_VD_BHSI (TERNOP, smlal, 0, NONE)
+  BUILTIN_VD_BHSI (TERNOPU, umlal, 0, NONE)
+
   /* Implemented by aarch64_<su>mlsl_hi<mode>.  */
   BUILTIN_VQW (TERNOP, smlsl_hi, 0, NONE)
   BUILTIN_VQW (TERNOPU, umlsl_hi, 0, NONE)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 544bac7dc9b62a9d5387465ec26d0e3204be6601..db56b61baf2093c88d8757b25580b3032f00a355 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1825,17 +1825,17 @@
 }
 )
 
-(define_insn "*aarch64_<su>mlal<mode>"
+(define_insn "aarch64_<su>mlal<mode>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
         (plus:<VWIDE>
           (mult:<VWIDE>
               (ANY_EXTEND:<VWIDE>
-                (match_operand:VD_BHSI 1 "register_operand" "w"))
+                (match_operand:VD_BHSI 2 "register_operand" "w"))
               (ANY_EXTEND:<VWIDE>
-                (match_operand:VD_BHSI 2 "register_operand" "w")))
-          (match_operand:<VWIDE> 3 "register_operand" "0")))]
+                (match_operand:VD_BHSI 3 "register_operand" "w")))
+          (match_operand:<VWIDE> 1 "register_operand" "0")))]
   "TARGET_SIMD"
-  "<su>mlal\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
+  "<su>mlal\t%0.<Vwtype>, %2.<Vtype>, %3.<Vtype>"
   [(set_attr "type" "neon_mla_<Vetype>_long")]
 )
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index d1ab3b7d54cd5b965f91e685139677864fcfe3e1..674ccc63b69ca1945dc684d2b06c1e31f52bfdb3 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -7656,72 +7656,42 @@
 __extension__ extern __inline int16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_s8 (int16x8_t __a, int8x8_t __b, int8x8_t __c)
 {
-  int16x8_t __result;
-  __asm__ ("smlal %0.8h,%2.8b,%3.8b"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlalv8qi (__a, __b, __c);
 }
 
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c)
 {
-  int32x4_t __result;
-  __asm__ ("smlal %0.4s,%2.4h,%3.4h"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlalv4hi (__a, __b, __c);
 }
 
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c)
 {
-  int64x2_t __result;
-  __asm__ ("smlal %0.2d,%2.2s,%3.2s"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smlalv2si (__a, __b, __c);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_u8 (uint16x8_t __a, uint8x8_t __b, uint8x8_t __c)
 {
-  uint16x8_t __result;
-  __asm__ ("umlal %0.8h,%2.8b,%3.8b"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlalv8qi_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_u16 (uint32x4_t __a, uint16x4_t __b, uint16x4_t __c)
 {
-  uint32x4_t __result;
-  __asm__ ("umlal %0.4s,%2.4h,%3.4h"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlalv4hi_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmlal_u32 (uint64x2_t __a, uint32x2_t __b, uint32x2_t __c)
 {
-  uint64x2_t __result;
-  __asm__ ("umlal %0.2d,%2.2s,%3.2s"
-           : "=w"(__result)
-           : "0"(__a), "w"(__b), "w"(__c)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umlalv2si_uuuu (__a, __b, __c);
 }
 
 __extension__ extern __inline float32x4_t
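Note the operand renumbering in the aarch64-simd.md pattern above: the
accumulator moves from operand 3 to operand 1 so that the pattern's operand
order matches the intrinsics' argument order (accumulator first). As a
quick sanity check (again my example, not part of the patch), the unsigned
variants go through the same path:

  #include <arm_neon.h>

  /* Expected to compile to a single "umlal v0.4s, v1.4h, v2.4h".  */
  uint32x4_t
  widening_mla_u16 (uint32x4_t acc, uint16x4_t b, uint16x4_t c)
  {
    return vmlal_u16 (acc, b, c);
  }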