This patch adds a set of new float16x4_t intrinsics, implemented with GCC vector
extensions to maximise mid-end optimisation (the same approach as on AArch64).
Note that, unlike AArch64, no attempt is made to support big-endian.
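For example (an illustrative sketch, not part of the patch; "roundtrip" is a
hypothetical function), each new vreinterpret is a plain cast between GCC
vector types, so a round-trip is transparent to the mid-end and should fold
to a simple register move:

#include <arm_neon.h>

uint16x4_t
roundtrip (uint16x4_t x)
{
  float16x4_t f = vreinterpret_f16_u16 (x);
  return vreinterpret_u16_f16 (f);
}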
gcc/ChangeLog:
* config/arm/arm_neon.h (float16_t): New typedef.
(vget_lane_f16, vset_lane_f16, vcreate_f16, vdup_n_f16,
vdup_lane_f16, vld1_lane_f16, vld1_dup_f16, vreinterpret_p8_f16,
vreinterpret_p16_f16, vreinterpret_f16_p8, vreinterpret_f16_p16,
vreinterpret_f16_f32, vreinterpret_f16_p64, vreinterpret_f16_s64,
vreinterpret_f16_u64, vreinterpret_f16_s8, vreinterpret_f16_s16,
vreinterpret_f16_s32, vreinterpret_f16_u8, vreinterpret_f16_u16,
vreinterpret_f16_u32, vreinterpret_f32_f16, vreinterpret_p64_f16,
vreinterpret_s64_f16, vreinterpret_u64_f16, vreinterpret_s8_f16,
vreinterpret_s16_f16, vreinterpret_s32_f16, vreinterpret_u8_f16,
vreinterpret_u16_f16, vreinterpret_u32_f16): New.
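As the comment in the patch body explains, the f16 lane accessors must be
macros, since __fp16 values cannot cross a function boundary here. A minimal
usage sketch (assuming a configuration where __fp16 is available, e.g.
-mfpu=neon-fp16; "load_lane0" is a hypothetical helper, equivalent to
vld1_lane_f16 (p, v, 0)):

#include <arm_neon.h>

float16x4_t
load_lane0 (const float16_t *p, float16x4_t v)
{
  /* The lane index must be a constant in [0, 3].  */
  return vset_lane_f16 (*p, v, 0);
}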
diff --git a/gcc/config/arm/arm_neon.h b/gcc/config/arm/arm_neon.h
index e58b772ee29f910a344d2d3a5be5a7818a79af64..231d1392b93fe78a37f58595f775b0cc87fb709f 100644
--- a/gcc/config/arm/arm_neon.h
+++ b/gcc/config/arm/arm_neon.h
@@ -41,6 +41,7 @@ typedef __simd64_int8_t int8x8_t;
typedef __simd64_int16_t int16x4_t;
typedef __simd64_int32_t int32x2_t;
typedef __builtin_neon_di int64x1_t;
+typedef __builtin_neon_hf float16_t;
typedef __simd64_float16_t float16x4_t;
typedef __simd64_float32_t float32x2_t;
typedef __simd64_poly8_t poly8x8_t;
@@ -5182,6 +5183,20 @@ vget_lane_s32 (int32x2_t __a, const int __b)
return (int32_t)__builtin_neon_vget_lanev2si (__a, __b);
}
+/* Functions cannot accept or return __fp16 types. Even if the function
+ were marked always-inline so there were no call sites, the declaration
+ would nonetheless raise an error. Hence, we must use a macro instead. */
+
+#define vget_lane_f16(__v, __i) \
+ __extension__ \
+ ({ \
+ float16x4_t __vec = (__v); \
+ int __idx = (__i); \
+ __builtin_arm_lane_check (4, __idx); \
+ float16_t __res = __vec[__idx]; \
+ __res; \
+ })
+
__extension__ static __inline float32_t __attribute__ ((__always_inline__))
vget_lane_f32 (float32x2_t __a, const int __b)
{
@@ -5314,6 +5329,17 @@ vset_lane_s32 (int32_t __a, int32x2_t __b, const int __c)
return (int32x2_t)__builtin_neon_vset_lanev2si ((__builtin_neon_si) __a, __b, __c);
}
+#define vset_lane_f16(__e, __v, __i) \
+ __extension__ \
+ ({ \
+ float16_t __elem = (__e); \
+ float16x4_t __vec = (__v); \
+ int __idx = (__i); \
+ __builtin_arm_lane_check (4, __idx); \
+ __vec[__idx] = __elem; \
+ __vec; \
+ })
+
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vset_lane_f32 (float32_t __a, float32x2_t __b, const int __c)
{
@@ -5460,6 +5486,12 @@ vcreate_s64 (uint64_t __a)
return (int64x1_t)__builtin_neon_vcreatedi ((__builtin_neon_di) __a);
}
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vcreate_f16 (uint64_t __a)
+{
+ return (float16x4_t) __a;
+}
+
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vcreate_f32 (uint64_t __a)
{
@@ -5520,6 +5552,13 @@ vdup_n_s32 (int32_t __a)
return (int32x2_t)__builtin_neon_vdup_nv2si ((__builtin_neon_si) __a);
}
+#define vdup_n_f16(__e1) \
+ __extension__ \
+ ({ \
+ float16_t __e = (__e1); \
+ (float16x4_t) {__e, __e, __e, __e}; \
+ })
+
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vdup_n_f32 (float32_t __a)
{
@@ -5800,6 +5839,12 @@ vdup_lane_s32 (int32x2_t __a, const int __b)
return (int32x2_t)__builtin_neon_vdup_lanev2si (__a, __b);
}
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vdup_lane_f16 (float16x4_t __a, const int __b)
+{
+ return vdup_n_f16 (vget_lane_f16 (__a, __b));
+}
+
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vdup_lane_f32 (float32x2_t __a, const int __b)
{
@@ -8777,6 +8822,12 @@ vld1_lane_s32 (const int32_t * __a, int32x2_t __b, const int __c)
return (int32x2_t)__builtin_neon_vld1_lanev2si ((const __builtin_neon_si *) __a, __b, __c);
}
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vld1_lane_f16 (const float16_t * __a, float16x4_t __b, const int __c)
+{
+ return vset_lane_f16 (*__a, __b, __c);
+}
+
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vld1_lane_f32 (const float32_t * __a, float32x2_t __b, const int __c)
{
@@ -8925,6 +8976,12 @@ vld1_dup_s32 (const int32_t * __a)
return (int32x2_t)__builtin_neon_vld1_dupv2si ((const __builtin_neon_si *) __a);
}
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vld1_dup_f16 (const float16_t * __a)
+{
+ return vdup_n_f16 (*__a);
+}
+
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vld1_dup_f32 (const float32_t * __a)
{
@@ -11809,6 +11866,12 @@ vreinterpret_p8_p16 (poly16x4_t __a)
}
__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vreinterpret_p8_f16 (float16x4_t __a)
+{
+ return (poly8x8_t) __a;
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
vreinterpret_p8_f32 (float32x2_t __a)
{
return (poly8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
@@ -11877,6 +11940,12 @@ vreinterpret_p16_p8 (poly8x8_t __a)
}
__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vreinterpret_p16_f16 (float16x4_t __a)
+{
+ return (poly16x4_t) __a;
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
vreinterpret_p16_f32 (float32x2_t __a)
{
return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
@@ -11938,6 +12007,80 @@ vreinterpret_p16_u32 (uint32x2_t __a)
return (poly16x4_t)__builtin_neon_vreinterpretv4hiv2si ((int32x2_t) __a);
}
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_p8 (poly8x8_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_p16 (poly16x4_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_f32 (float32x2_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_p64 (poly64x1_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+#endif
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_s64 (int64x1_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_u64 (uint64x1_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_s8 (int8x8_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_s16 (int16x4_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_s32 (int32x2_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_u8 (uint8x8_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_u16 (uint16x4_t __a)
+{
+ return (float16x4_t) __a;
+}
+
+__extension__ static __inline float16x4_t __attribute__ ((__always_inline__))
+vreinterpret_f16_u32 (uint32x2_t __a)
+{
+ return (float16x4_t) __a;
+}
+
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vreinterpret_f32_p8 (poly8x8_t __a)
{
@@ -11950,6 +12093,12 @@ vreinterpret_f32_p16 (poly16x4_t __a)
return (float32x2_t)__builtin_neon_vreinterpretv2sfv4hi ((int16x4_t) __a);
}
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vreinterpret_f32_f16 (float16x4_t __a)
+{
+ return (float32x2_t) __a;
+}
+
#ifdef __ARM_FEATURE_CRYPTO
__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
vreinterpret_f32_p64 (poly64x1_t __a)
@@ -12024,6 +12173,14 @@ vreinterpret_p64_p16 (poly16x4_t __a)
#endif
#ifdef __ARM_FEATURE_CRYPTO
__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
+vreinterpret_p64_f16 (float16x4_t __a)
+{
+ return (poly64x1_t) __a;
+}
+
+#endif
+#ifdef __ARM_FEATURE_CRYPTO
+__extension__ static __inline poly64x1_t __attribute__ ((__always_inline__))
vreinterpret_p64_f32 (float32x2_t __a)
{
return (poly64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
@@ -12107,6 +12264,12 @@ vreinterpret_s64_p16 (poly16x4_t __a)
}
__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vreinterpret_s64_f16 (float16x4_t __a)
+{
+ return (int64x1_t) __a;
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
vreinterpret_s64_f32 (float32x2_t __a)
{
return (int64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
@@ -12175,6 +12338,12 @@ vreinterpret_u64_p16 (poly16x4_t __a)
}
__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vreinterpret_u64_f16 (float16x4_t __a)
+{
+ return (uint64x1_t) __a;
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
vreinterpret_u64_f32 (float32x2_t __a)
{
return (uint64x1_t)__builtin_neon_vreinterpretdiv2sf (__a);
@@ -12243,6 +12412,12 @@ vreinterpret_s8_p16 (poly16x4_t __a)
}
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vreinterpret_s8_f16 (float16x4_t __a)
+{
+ return (int8x8_t) __a;
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vreinterpret_s8_f32 (float32x2_t __a)
{
return (int8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
@@ -12311,6 +12486,12 @@ vreinterpret_s16_p16 (poly16x4_t __a)
}
__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vreinterpret_s16_f16 (float16x4_t __a)
+{
+ return (int16x4_t) __a;
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
vreinterpret_s16_f32 (float32x2_t __a)
{
return (int16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
@@ -12379,6 +12560,12 @@ vreinterpret_s32_p16 (poly16x4_t __a)
}
__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vreinterpret_s32_f16 (float16x4_t __a)
+{
+ return (int32x2_t) __a;
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
vreinterpret_s32_f32 (float32x2_t __a)
{
return (int32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);
@@ -12447,6 +12634,12 @@ vreinterpret_u8_p16 (poly16x4_t __a)
}
__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vreinterpret_u8_f16 (float16x4_t __a)
+{
+ return (uint8x8_t) __a;
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
vreinterpret_u8_f32 (float32x2_t __a)
{
return (uint8x8_t)__builtin_neon_vreinterpretv8qiv2sf (__a);
@@ -12515,6 +12708,12 @@ vreinterpret_u16_p16 (poly16x4_t __a)
}
__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vreinterpret_u16_f16 (float16x4_t __a)
+{
+ return (uint16x4_t) __a;
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
vreinterpret_u16_f32 (float32x2_t __a)
{
return (uint16x4_t)__builtin_neon_vreinterpretv4hiv2sf (__a);
@@ -12583,6 +12782,12 @@ vreinterpret_u32_p16 (poly16x4_t __a)
}
__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vreinterpret_u32_f16 (float16x4_t __a)
+{
+ return (uint32x2_t) __a;
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
vreinterpret_u32_f32 (float32x2_t __a)
{
return (uint32x2_t)__builtin_neon_vreinterpretv2siv2sf (__a);