These intrinsics were implemented by inline assembly using the "faddp"
instruction.
There was a pattern "aarch64_addpv4sf" which supported the V4SF mode only.
We can extend this pattern to support the VDQF mode and then reimplement
these intrinsics through builtins.
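For reference, the pairwise-add semantics these intrinsics expose: the
result takes adjacent pair sums from the concatenation of the two inputs.
A minimal usage sketch (illustrative only, values made up; not part of the
patch):

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  float32x2_t a = {1.0f, 2.0f};
  float32x2_t b = {3.0f, 4.0f};
  /* faddp adds adjacent pairs: r[0] = a[0] + a[1], r[1] = b[0] + b[1].  */
  float32x2_t r = vpadd_f32 (a, b);
  /* Prints "3.000000 7.000000".  */
  printf ("%f %f\n", vget_lane_f32 (r, 0), vget_lane_f32 (r, 1));
  return 0;
}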
gcc/
2016-05-23  Jiong Wang  <jiong.w...@arm.com>
* config/aarch64/aarch64-builtins.def (faddp): New builtins for
modes in VDQF.
* config/aarch64/aarch64-simd.md (aarch64_faddp<mode>): New.
(aarch64_addpv4sf): Delete.
(reduc_plus_scal_v4sf): Use "gen_aarch64_faddpv4sf" instead of
"gen_aarch64_addpv4sf".
* config/aarch64/iterators.md (UNSPEC_FADDP): New.
* config/aarch64/arm_neon.h (vpadd_f32): Remove inline assembly.
Use builtin.
(vpaddq_f32): Likewise.
(vpaddq_f64): Likewise.
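The reduc_plus_scal_v4sf change keeps the same two-step reduction, just
through the new pattern's three-operand form.  In intrinsics terms the
reduction is roughly (an illustrative sketch only, not part of the patch):

#include <arm_neon.h>

/* Sum all four lanes of a float32x4_t with two pairwise additions,
   mirroring what reduc_plus_scal_v4sf now expands to.  */
static inline float
sum_v4sf (float32x4_t x)
{
  float32x4_t t = vpaddq_f32 (x, x);  /* {x0+x1, x2+x3, x0+x1, x2+x3}  */
  t = vpaddq_f32 (t, t);              /* lane 0 holds x0+x1+x2+x3  */
  return vgetq_lane_f32 (t, 0);
}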
From d97a40ac2e69403b64bcf53596581b49b86ef40c Mon Sep 17 00:00:00 2001
From: "Jiong.Wang" <jiong.w...@arm.com>
Date: Mon, 23 May 2016 12:13:13 +0100
Subject: [PATCH 6/6] [AArch64] Reimplement vpadd_f32, vpaddq_f32 and vpaddq_f64 through builtins
---
gcc/config/aarch64/aarch64-builtins.def | 3 ++
gcc/config/aarch64/aarch64-simd.md | 23 ++++++++-------
gcc/config/aarch64/arm_neon.h | 51 ++++++++++++---------------------
gcc/config/aarch64/iterators.md | 1 +
4 files changed, 34 insertions(+), 44 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-builtins.def b/gcc/config/aarch64/aarch64-builtins.def
index 40baebe..37d8183 100644
--- a/gcc/config/aarch64/aarch64-builtins.def
+++ b/gcc/config/aarch64/aarch64-builtins.def
@@ -468,3 +468,6 @@
   /* Implemented by fabd<mode>_3. */
   BUILTIN_VALLF (BINOP, fabd, 3)
+
+  /* Implemented by aarch64_faddp<mode>. */
+  BUILTIN_VDQF (BINOP, faddp, 0)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index 71dd74a..9b9f8df 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -1992,6 +1992,16 @@
 }
 )
 
+(define_insn "aarch64_faddp<mode>"
+  [(set (match_operand:VDQF 0 "register_operand" "=w")
+        (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")
+                      (match_operand:VDQF 2 "register_operand" "w")]
+         UNSPEC_FADDP))]
+  "TARGET_SIMD"
+  "faddp\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
+  [(set_attr "type" "neon_fp_reduc_add_<Vetype><q>")]
+)
+
 (define_insn "aarch64_reduc_plus_internal<mode>"
   [(set (match_operand:VDQV 0 "register_operand" "=w")
         (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
@@ -2019,15 +2029,6 @@
   [(set_attr "type" "neon_fp_reduc_add_<Vetype><q>")]
 )
 
-(define_insn "aarch64_addpv4sf"
-  [(set (match_operand:V4SF 0 "register_operand" "=w")
-        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
-         UNSPEC_FADDV))]
-  "TARGET_SIMD"
-  "faddp\\t%0.4s, %1.4s, %1.4s"
-  [(set_attr "type" "neon_fp_reduc_add_s_q")]
-)
-
 (define_expand "reduc_plus_scal_v4sf"
   [(set (match_operand:SF 0 "register_operand")
         (unspec:V4SF [(match_operand:V4SF 1 "register_operand")]
@@ -2036,8 +2037,8 @@
 {
   rtx elt = GEN_INT (ENDIAN_LANE_N (V4SFmode, 0));
   rtx scratch = gen_reg_rtx (V4SFmode);
-  emit_insn (gen_aarch64_addpv4sf (scratch, operands[1]));
-  emit_insn (gen_aarch64_addpv4sf (scratch, scratch));
+  emit_insn (gen_aarch64_faddpv4sf (scratch, operands[1], operands[1]));
+  emit_insn (gen_aarch64_faddpv4sf (scratch, scratch, scratch));
   emit_insn (gen_aarch64_get_lanev4sf (operands[0], scratch, elt));
   DONE;
 })
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index ae4c429..a37ceeb 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -8225,17 +8225,6 @@ vpadalq_u32 (uint64x2_t a, uint32x4_t b)
   return result;
 }
 
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vpadd_f32 (float32x2_t a, float32x2_t b)
-{
-  float32x2_t result;
-  __asm__ ("faddp %0.2s,%1.2s,%2.2s"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
 vpaddl_s8 (int8x8_t a)
 {
@@ -8368,28 +8357,6 @@ vpaddlq_u32 (uint32x4_t a)
   return result;
 }
 
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vpaddq_f32 (float32x4_t a, float32x4_t b)
-{
-  float32x4_t result;
-  __asm__ ("faddp %0.4s,%1.4s,%2.4s"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vpaddq_f64 (float64x2_t a, float64x2_t b)
-{
-  float64x2_t result;
-  __asm__ ("faddp %0.2d,%1.2d,%2.2d"
-           : "=w"(result)
-           : "w"(a), "w"(b)
-           : /* No clobbers */);
-  return result;
-}
-
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vpaddq_s8 (int8x16_t a, int8x16_t b)
 {
@@ -18629,6 +18596,24 @@ vnegq_s64 (int64x2_t __a)
 
 /* vpadd */
 
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vpadd_f32 (float32x2_t __a, float32x2_t __b)
+{
+  return __builtin_aarch64_faddpv2sf (__a, __b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vpaddq_f32 (float32x4_t __a, float32x4_t __b)
+{
+  return __builtin_aarch64_faddpv4sf (__a, __b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vpaddq_f64 (float64x2_t __a, float64x2_t __b)
+{
+  return __builtin_aarch64_faddpv2df (__a, __b);
+}
+
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
 vpadd_s8 (int8x8_t __a, int8x8_t __b)
 {
diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
index 2264459..7323091 100644
--- a/gcc/config/aarch64/iterators.md
+++ b/gcc/config/aarch64/iterators.md
@@ -219,6 +219,7 @@
     UNSPEC_FMIN     ; Used in aarch64-simd.md.
     UNSPEC_FMINNMV  ; Used in aarch64-simd.md.
     UNSPEC_FMINV    ; Used in aarch64-simd.md.
+    UNSPEC_FADDP    ; Used in aarch64-simd.md.
     UNSPEC_FADDV    ; Used in aarch64-simd.md.
     UNSPEC_ADDV     ; Used in aarch64-simd.md.
     UNSPEC_SCVTF    ; Used in aarch64-simd.md.
--
1.9.1