Jonathan Wright via Gcc-patches <gcc-patches@gcc.gnu.org> writes:
> Hi,
>
> As subject, this patch rewrites the vq[r]dmulh[q]_n Neon intrinsics to use
> RTL builtins rather than inline assembly code, allowing for better scheduling
> and optimization.
>
> Regression tested and bootstrapped on aarch64-none-linux-gnu - no
> issues.
>
> Ok for master?

OK, thanks.

Richard

> Thanks,
> Jonathan
>
> ---
>
> gcc/ChangeLog:
>
> 2021-02-08  Jonathan Wright  <jonathan.wri...@arm.com>
>
>         * config/aarch64/aarch64-simd-builtins.def: Add sq[r]dmulh_n
>         builtin generator macros.
>         * config/aarch64/aarch64-simd.md (aarch64_sq<r>dmulh_n<mode>):
>         Define.
>         * config/aarch64/arm_neon.h (vqdmulh_n_s16): Use RTL builtin
>         instead of inline asm.
>         (vqdmulh_n_s32): Likewise.
>         (vqdmulhq_n_s16): Likewise.
>         (vqdmulhq_n_s32): Likewise.
>         (vqrdmulh_n_s16): Likewise.
>         (vqrdmulh_n_s32): Likewise.
>         (vqrdmulhq_n_s16): Likewise.
>         (vqrdmulhq_n_s32): Likewise.
>
> diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def
> b/gcc/config/aarch64/aarch64-simd-builtins.def
> index
> d8a62c9304b1c90e7b5216d9fb91a12ffce5916c..c29c492913c06fed078f24efb144022a7d6adbbb
> 100644
> --- a/gcc/config/aarch64/aarch64-simd-builtins.def
> +++ b/gcc/config/aarch64/aarch64-simd-builtins.def
> @@ -350,6 +350,9 @@
>    /* Implemented by aarch64_sq<r>dmulh<mode>.  */
>    BUILTIN_VSDQ_HSI (BINOP, sqdmulh, 0, NONE)
>    BUILTIN_VSDQ_HSI (BINOP, sqrdmulh, 0, NONE)
> +  /* Implemented by aarch64_sq<r>dmulh_n<mode>.  */
> +  BUILTIN_VDQHS (BINOP, sqdmulh_n, 0, NONE)
> +  BUILTIN_VDQHS (BINOP, sqrdmulh_n, 0, NONE)
>    /* Implemented by aarch64_sq<r>dmulh_lane<q><mode>.  */
>    BUILTIN_VSDQ_HSI (TERNOP_LANE, sqdmulh_lane, 0, NONE)
>    BUILTIN_VSDQ_HSI (TERNOP_LANE, sqdmulh_laneq, 0, NONE)
> diff --git a/gcc/config/aarch64/aarch64-simd.md
> b/gcc/config/aarch64/aarch64-simd.md
> index
> 468f22ee475b30c18649e31e5c53f4efcd74384d..84db72478eb661ae4712e920bd4377c7c2af038b
> 100644
> --- a/gcc/config/aarch64/aarch64-simd.md
> +++ b/gcc/config/aarch64/aarch64-simd.md
> @@ -4692,6 +4692,18 @@
>    [(set_attr "type" "neon_sat_mul_<Vetype><q>")]
>  )
>
> +(define_insn "aarch64_sq<r>dmulh_n<mode>"
> +  [(set (match_operand:VDQHS 0 "register_operand" "=w")
> +        (unspec:VDQHS
> +          [(match_operand:VDQHS 1 "register_operand" "w")
> +           (vec_duplicate:VDQHS
> +             (match_operand:<VEL> 2 "register_operand" "<h_con>"))]
> +         VQDMULH))]
> +  "TARGET_SIMD"
> +  "sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[0]"
> +  [(set_attr "type" "neon_sat_mul_<Vetype>_scalar<q>")]
> +)
> +
>  ;; sq<r>dmulh_lane
>
>  (define_insn "aarch64_sq<r>dmulh_lane<mode>"
> diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
> index
> 31622eb68c85856a324d28d7c53c4846b2cec4c0..48cce8fed8a02f4fa791fb958e772eeacecd1de1
> 100644
> --- a/gcc/config/aarch64/arm_neon.h
> +++ b/gcc/config/aarch64/arm_neon.h
> @@ -8761,48 +8761,28 @@ __extension__ extern __inline int16x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqdmulh_n_s16 (int16x4_t __a, int16_t __b)
>  {
> -  int16x4_t __result;
> -  __asm__ ("sqdmulh %0.4h,%1.4h,%2.h[0]"
> -           : "=w"(__result)
> -           : "w"(__a), "x"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_sqdmulh_nv4hi (__a, __b);
>  }
>
>  __extension__ extern __inline int32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqdmulh_n_s32 (int32x2_t __a, int32_t __b)
>  {
> -  int32x2_t __result;
> -  __asm__ ("sqdmulh %0.2s,%1.2s,%2.s[0]"
> -           : "=w"(__result)
> -           : "w"(__a), "w"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_sqdmulh_nv2si (__a, __b);
>  }
>
>  __extension__ extern __inline int16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqdmulhq_n_s16 (int16x8_t __a, int16_t __b)
>  {
> -  int16x8_t __result;
> -  __asm__ ("sqdmulh %0.8h,%1.8h,%2.h[0]"
> -           : "=w"(__result)
> -           : "w"(__a), "x"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_sqdmulh_nv8hi (__a, __b);
>  }
>
>  __extension__ extern __inline int32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqdmulhq_n_s32 (int32x4_t __a, int32_t __b)
>  {
> -  int32x4_t __result;
> -  __asm__ ("sqdmulh %0.4s,%1.4s,%2.s[0]"
> -           : "=w"(__result)
> -           : "w"(__a), "w"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_sqdmulh_nv4si (__a, __b);
>  }
>
>  __extension__ extern __inline int8x16_t
> @@ -8872,48 +8852,28 @@ __extension__ extern __inline int16x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqrdmulh_n_s16 (int16x4_t __a, int16_t __b)
>  {
> -  int16x4_t __result;
> -  __asm__ ("sqrdmulh %0.4h,%1.4h,%2.h[0]"
> -           : "=w"(__result)
> -           : "w"(__a), "x"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_sqrdmulh_nv4hi (__a, __b);
>  }
>
>  __extension__ extern __inline int32x2_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqrdmulh_n_s32 (int32x2_t __a, int32_t __b)
>  {
> -  int32x2_t __result;
> -  __asm__ ("sqrdmulh %0.2s,%1.2s,%2.s[0]"
> -           : "=w"(__result)
> -           : "w"(__a), "w"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_sqrdmulh_nv2si (__a, __b);
>  }
>
>  __extension__ extern __inline int16x8_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqrdmulhq_n_s16 (int16x8_t __a, int16_t __b)
>  {
> -  int16x8_t __result;
> -  __asm__ ("sqrdmulh %0.8h,%1.8h,%2.h[0]"
> -           : "=w"(__result)
> -           : "w"(__a), "x"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_sqrdmulh_nv8hi (__a, __b);
>  }
>
>  __extension__ extern __inline int32x4_t
>  __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
>  vqrdmulhq_n_s32 (int32x4_t __a, int32_t __b)
>  {
> -  int32x4_t __result;
> -  __asm__ ("sqrdmulh %0.4s,%1.4s,%2.s[0]"
> -           : "=w"(__result)
> -           : "w"(__a), "w"(__b)
> -           : /* No clobbers */);
> -  return __result;
> +  return __builtin_aarch64_sqrdmulh_nv4si (__a, __b);
>  }
>
>  __extension__ extern __inline int8x16_t
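
For context, a minimal sketch of how one of the affected intrinsics is
typically used (illustrative only; this snippet is not taken from the
patch, and the helper name scale_q15 is hypothetical):

#include <arm_neon.h>

/* Scale each Q15 lane of `samples' by the Q15 value `gain' using the
   saturating doubling multiply-high operation.  With the patch applied,
   vqdmulh_n_s16 expands to the new sqdmulh_n RTL builtin rather than a
   black-box asm block, so the compiler can schedule and optimize around
   the resulting sqdmulh instruction.  */
int16x4_t
scale_q15 (int16x4_t samples, int16_t gain)
{
  return vqdmulh_n_s16 (samples, gain);
}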