These intrinsics were implemented before the "fabd<mode>_3" pattern was
introduced.  Meanwhile, the patterns "fabd<mode>_3" and "*fabd_scalar<mode>3"
can be merged into a single "fabd<mode>3" pattern using the VALLF mode
iterator.  This patch migrates the intrinsics to builtins backed by that
pattern.
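For reference, a minimal usage sketch (not part of the patch; the wrapper
names abs_diff_q/abs_diff_d are only for illustration).  With this change the
intrinsics expand through the new builtins rather than inline asm, so at -O2
each call should still emit a single fabd instruction:

  #include <arm_neon.h>

  float32x4_t
  abs_diff_q (float32x4_t a, float32x4_t b)
  {
    /* Now expands via __builtin_aarch64_fabdv4sf (vector FABD).  */
    return vabdq_f32 (a, b);
  }

  float64_t
  abs_diff_d (float64_t a, float64_t b)
  {
    /* Now expands via __builtin_aarch64_fabddf (scalar FABD).  */
    return vabdd_f64 (a, b);
  }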
gcc/
2016-05-23  Jiong Wang  <jiong.w...@arm.com>

	* config/aarch64/aarch64-builtins.def (fabd): New builtins for
	modes VALLF.
	* config/aarch64/aarch64-simd.md (fabd<mode>_3): Extend modes from
	VDQF to VALLF.  Rename to "fabd<mode>3".
	(*fabd_scalar<mode>3): Delete.
	* config/aarch64/arm_neon.h (vabds_f32): Remove inline assembly.
	Use builtin.
	(vabdd_f64): Likewise.
	(vabd_f32): Likewise.
	(vabdq_f32): Likewise.
	(vabdq_f64): Likewise.
From 9bafb58055d4e379df7b626acd6aa80bdb0d4b22 Mon Sep 17 00:00:00 2001
From: "Jiong.Wang" <jiong.w...@arm.com>
Date: Mon, 23 May 2016 12:12:53 +0100
Subject: [PATCH 5/6] 5
---
 gcc/config/aarch64/aarch64-builtins.def |  3 ++
 gcc/config/aarch64/aarch64-simd.md      | 23 +++------
 gcc/config/aarch64/arm_neon.h           | 87 ++++++++++++---------------------
 3 files changed, 42 insertions(+), 71 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-builtins.def b/gcc/config/aarch64/aarch64-builtins.def
index 1955d17..40baebe 100644
--- a/gcc/config/aarch64/aarch64-builtins.def
+++ b/gcc/config/aarch64/aarch64-builtins.def
@@ -465,3 +465,6 @@
/* Implemented by aarch64_rsqrts<mode>. */
BUILTIN_VALLF (BINOP, rsqrts, 0)
+
+ /* Implemented by fabd<mode>3. */
+ BUILTIN_VALLF (BINOP, fabd, 3)
diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index cca6c1b..71dd74a 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -474,23 +474,14 @@
[(set_attr "type" "neon_arith_acc<q>")]
)
-(define_insn "fabd<mode>_3"
- [(set (match_operand:VDQF 0 "register_operand" "=w")
- (abs:VDQF (minus:VDQF
- (match_operand:VDQF 1 "register_operand" "w")
- (match_operand:VDQF 2 "register_operand" "w"))))]
- "TARGET_SIMD"
- "fabd\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
- [(set_attr "type" "neon_fp_abd_<Vetype><q>")]
-)
-
-(define_insn "*fabd_scalar<mode>3"
- [(set (match_operand:GPF 0 "register_operand" "=w")
- (abs:GPF (minus:GPF
- (match_operand:GPF 1 "register_operand" "w")
- (match_operand:GPF 2 "register_operand" "w"))))]
+(define_insn "fabd<mode>3"
+ [(set (match_operand:VALLF 0 "register_operand" "=w")
+ (abs:VALLF
+ (minus:VALLF
+ (match_operand:VALLF 1 "register_operand" "w")
+ (match_operand:VALLF 2 "register_operand" "w"))))]
"TARGET_SIMD"
- "fabd\t%<s>0, %<s>1, %<s>2"
+ "fabd\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
[(set_attr "type" "neon_fp_abd_<Vetype><q>")]
)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 9bbe815..ca29074 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -5440,17 +5440,6 @@ vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
return result;
}
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vabd_f32 (float32x2_t a, float32x2_t b)
-{
- float32x2_t result;
- __asm__ ("fabd %0.2s, %1.2s, %2.2s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vabd_s8 (int8x8_t a, int8x8_t b)
{
@@ -5517,17 +5506,6 @@ vabd_u32 (uint32x2_t a, uint32x2_t b)
return result;
}
-__extension__ static __inline float64_t __attribute__ ((__always_inline__))
-vabdd_f64 (float64_t a, float64_t b)
-{
- float64_t result;
- __asm__ ("fabd %d0, %d1, %d2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vabdl_high_s8 (int8x16_t a, int8x16_t b)
{
@@ -5660,28 +5638,6 @@ vabdl_u32 (uint32x2_t a, uint32x2_t b)
return result;
}
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vabdq_f32 (float32x4_t a, float32x4_t b)
-{
- float32x4_t result;
- __asm__ ("fabd %0.4s, %1.4s, %2.4s"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vabdq_f64 (float64x2_t a, float64x2_t b)
-{
- float64x2_t result;
- __asm__ ("fabd %0.2d, %1.2d, %2.2d"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vabdq_s8 (int8x16_t a, int8x16_t b)
{
@@ -5748,17 +5704,6 @@ vabdq_u32 (uint32x4_t a, uint32x4_t b)
return result;
}
-__extension__ static __inline float32_t __attribute__ ((__always_inline__))
-vabds_f32 (float32_t a, float32_t b)
-{
- float32_t result;
- __asm__ ("fabd %s0, %s1, %s2"
- : "=w"(result)
- : "w"(a), "w"(b)
- : /* No clobbers */);
- return result;
-}
-
__extension__ static __inline int16_t __attribute__ ((__always_inline__))
vaddlv_s8 (int8x8_t a)
{
@@ -10246,6 +10191,38 @@ vtbx2_p8 (poly8x8_t r, poly8x8x2_t tab, uint8x8_t idx)
/* Start of optimal implementations in approved order. */
+/* vabd. */
+
+__extension__ static __inline float32_t __attribute__ ((__always_inline__))
+vabds_f32 (float32_t a, float32_t b)
+{
+ return __builtin_aarch64_fabdsf (a, b);
+}
+
+__extension__ static __inline float64_t __attribute__ ((__always_inline__))
+vabdd_f64 (float64_t a, float64_t b)
+{
+ return __builtin_aarch64_fabddf (a, b);
+}
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vabd_f32 (float32x2_t a, float32x2_t b)
+{
+ return __builtin_aarch64_fabdv2sf (a, b);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vabdq_f32 (float32x4_t a, float32x4_t b)
+{
+ return __builtin_aarch64_fabdv4sf (a, b);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vabdq_f64 (float64x2_t a, float64x2_t b)
+{
+ return __builtin_aarch64_fabdv2df (a, b);
+}
+
/* vabs */
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
--
1.9.1