Hi,

As per the subject, this patch rewrites the vmull_n Neon intrinsics to use
RTL builtins rather than inline assembly code, allowing for better
scheduling and optimization.
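For context, vmull_n_s16 and its s32/u16/u32 siblings perform a widening
multiply of every vector lane by a scalar. A minimal sketch of the
reference semantics in plain C (illustrative only, not part of the patch;
the _ref function name is made up for this example):

#include <stdint.h>

/* Reference semantics of vmull_n_s16: widen each 16-bit lane to
   32 bits, then multiply by the (widened) scalar.  */
static void
vmull_n_s16_ref (const int16_t a[4], int16_t b, int32_t out[4])
{
  for (int i = 0; i < 4; i++)
    out[i] = (int32_t) a[i] * (int32_t) b;
}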
Regression tested and bootstrapped on aarch64-none-linux-gnu - no issues.

Ok for master?

Thanks,
Jonathan

gcc/ChangeLog:

2021-01-19  Jonathan Wright  <jonathan.wri...@arm.com>

	* config/aarch64/aarch64-simd-builtins.def: Add [su]mull_n
	builtin generator macros.
	* config/aarch64/aarch64-simd.md (aarch64_<su>mull_n<mode>):
	Define.
	* config/aarch64/arm_neon.h (vmull_n_s16): Use RTL builtin
	instead of inline asm.
	(vmull_n_s32): Likewise.
	(vmull_n_u16): Likewise.
	(vmull_n_u32): Likewise.
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 4913231ea55260fea1c7511a28a436e1e1e2ab20..198aa7e85423c8f5fd7abbdbaae6ce1fc6d9c37f 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -255,6 +255,9 @@
   BUILTIN_VQW (BINOP, vec_widen_smult_hi_, 10, NONE)
   BUILTIN_VQW (BINOPU, vec_widen_umult_hi_, 10, NONE)
 
+  BUILTIN_VD_HSI (BINOP, smull_n, 0, NONE)
+  BUILTIN_VD_HSI (BINOPU, umull_n, 0, NONE)
+
   BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_lane_, 0, ALL)
   BUILTIN_VD_HSI (QUADOP_LANE, vec_smlal_lane_, 0, ALL)
   BUILTIN_VD_HSI (TERNOP_LANE, vec_smult_laneq_, 0, ALL)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index adeec028d49f06156a5e84ce4dd83dbd6f151474..912b94bcfd731fdab9a813bf1a089d025fbd4a89 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -2061,6 +2061,19 @@
   [(set_attr "type" "neon_mul_<Vetype>_scalar_long")]
 )
 
+(define_insn "aarch64_<su>mull_n<mode>"
+  [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
+        (mult:<VWIDE>
+          (ANY_EXTEND:<VWIDE>
+            (vec_duplicate:<VCOND>
+              (match_operand:<VEL> 2 "register_operand" "<h_con>")))
+          (ANY_EXTEND:<VWIDE>
+            (match_operand:VD_HSI 1 "register_operand" "w"))))]
+  "TARGET_SIMD"
+  "<su>mull\t%0.<Vwtype>, %1.<Vtype>, %2.<Vetype>[0]"
+  [(set_attr "type" "neon_mul_<Vetype>_scalar_long")]
+)
+
 ;; vmlal_lane_s16 intrinsics
 (define_insn "aarch64_vec_<su>mlal_lane<Qlane>"
   [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 2a71ca9aa3c8c4095e99aa08c48e583f037a41ed..57959b6b0e22d44048e735e92ed7f578ec4153ea 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8799,48 +8799,28 @@
 __extension__ extern __inline int32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmull_n_s16 (int16x4_t __a, int16_t __b)
 {
-  int32x4_t __result;
-  __asm__ ("smull %0.4s,%1.4h,%2.h[0]"
-           : "=w"(__result)
-           : "w"(__a), "x"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smull_nv4hi (__a, __b);
 }
 
 __extension__ extern __inline int64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmull_n_s32 (int32x2_t __a, int32_t __b)
 {
-  int64x2_t __result;
-  __asm__ ("smull %0.2d,%1.2s,%2.s[0]"
-           : "=w"(__result)
-           : "w"(__a), "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_smull_nv2si (__a, __b);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmull_n_u16 (uint16x4_t __a, uint16_t __b)
 {
-  uint32x4_t __result;
-  __asm__ ("umull %0.4s,%1.4h,%2.h[0]"
-           : "=w"(__result)
-           : "w"(__a), "x"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umull_nv4hi_uuu (__a, __b);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vmull_n_u32 (uint32x2_t __a, uint32_t __b)
 {
-  uint64x2_t __result;
-  __asm__ ("umull %0.2d,%1.2s,%2.s[0]"
-           : "=w"(__result)
-           : "w"(__a), "w"(__b)
-           : /* No clobbers */);
-  return __result;
+  return __builtin_aarch64_umull_nv2si_uuu (__a, __b);
 }
 
 __extension__ extern __inline poly16x8_t
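For completeness, a hypothetical sanity check (not included in the patch):
compiling the snippet below at -O2 should still emit a single
smull v0.4s, v0.4h, v1.h[0], with the multiply now visible to the RTL
optimizers and scheduler instead of hidden in an opaque asm block.

#include <arm_neon.h>

int32x4_t
smull_by_scalar (int16x4_t a, int16_t b)
{
  /* Expands to __builtin_aarch64_smull_nv4hi after this patch.  */
  return vmull_n_s16 (a, b);
}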