Hi,

This patch adds three intrinsics that are required by the ACLE specification: vfms_n_f32, vfmsq_n_f32 and vfmsq_n_f64. A new testcase covering vfms_n_f32 and vfmsq_n_f32 is also added.

Tested on both aarch64-linux-gnu and aarch64_be-linux-gnu. OK?
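For reference, each of the new _n intrinsics duplicates the scalar operand across all lanes and performs a fused multiply-subtract, i.e. lane-wise a[i] - b[i] * c with a single rounding. A minimal scalar sketch of the intended semantics (plain C, not part of the patch; fmaf is used here only as a fused multiply-add reference):

#include <math.h>

/* Scalar reference for vfms_n_f32: per lane, a[i] + (-b[i]) * c with a
   single rounding, mirroring the fmav2sf expansion in the patch below.  */
static void
vfms_n_f32_ref (float *res, const float *a, const float *b, float c, int lanes)
{
  for (int i = 0; i < lanes; i++)
    res[i] = fmaf (-b[i], c, a[i]);
}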
Index: gcc/ChangeLog
===================================================================
--- gcc/ChangeLog	(revision 218582)
+++ gcc/ChangeLog	(working copy)
@@ -1,3 +1,8 @@
+2014-12-11  Felix Yang  <felix.y...@huawei.com>
+
+	* config/aarch64/arm_neon.h (vfms_n_f32, vfmsq_n_f32, vfmsq_n_f64): New
+	intrinsics.
+
 2014-12-10  Felix Yang  <felix.y...@huawei.com>
 
 	* config/aarch64/aarch64-protos.h (aarch64_function_profiler): Remove
Index: gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfms_n.c
===================================================================
--- gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfms_n.c	(revision 0)
+++ gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vfms_n.c	(revision 0)
@@ -0,0 +1,67 @@
+#include <arm_neon.h>
+#include "arm-neon-ref.h"
+#include "compute-ref-data.h"
+
+#ifdef __aarch64__
+/* Expected results.  */
+VECT_VAR_DECL(expected,hfloat,32,2) [] = { 0x4438ca3d, 0x44390a3d };
+VECT_VAR_DECL(expected,hfloat,32,4) [] = { 0x44869eb8, 0x4486beb8, 0x4486deb8, 0x4486feb8 };
+
+#define VECT_VAR_ASSIGN(S,Q,T1,W) S##Q##_##T1##W
+#define ASSIGN(S, Q, T, W, V) T##W##_t S##Q##_##T##W = V
+#define TEST_MSG "VFMS_N/VFMSQ_N"
+
+void exec_vfms_n (void)
+{
+  /* Basic test: v4=vfms_n(v1,v2), then store the result.  */
+#define TEST_VFMS(Q, T1, T2, W, N)					\
+  VECT_VAR(vector_res, T1, W, N) =					\
+    vfms##Q##_n_##T2##W(VECT_VAR(vector1, T1, W, N),			\
+			VECT_VAR(vector2, T1, W, N),			\
+			VECT_VAR_ASSIGN(scalar, Q, T1, W));		\
+  vst1##Q##_##T2##W(VECT_VAR(result, T1, W, N), VECT_VAR(vector_res, T1, W, N))
+
+#define CHECK_VFMS_RESULTS(test_name,comment)				\
+  {									\
+    CHECK_FP(test_name, float, 32, 2, PRIx32, expected, comment);	\
+    CHECK_FP(test_name, float, 32, 4, PRIx32, expected, comment);	\
+  }
+
+#define DECL_VABD_VAR(VAR)			\
+  DECL_VARIABLE(VAR, float, 32, 2);		\
+  DECL_VARIABLE(VAR, float, 32, 4);		\
+
+  DECL_VABD_VAR(vector1);
+  DECL_VABD_VAR(vector2);
+  DECL_VABD_VAR(vector3);
+  DECL_VABD_VAR(vector_res);
+
+  clean_results ();
+
+  /* Initialize input "vector1" from "buffer".  */
+  VLOAD(vector1, buffer, , float, f, 32, 2);
+  VLOAD(vector1, buffer, q, float, f, 32, 4);
+
+  /* Choose init value arbitrarily.  */
+  VDUP(vector2, , float, f, 32, 2, -9.3f);
+  VDUP(vector2, q, float, f, 32, 4, -29.7f);
+
+  /* Choose init value arbitrarily.  */
+  ASSIGN(scalar, , float, 32, 81.2f);
+  ASSIGN(scalar, q, float, 32, 36.8f);
+
+  /* Execute the tests.  */
+  TEST_VFMS(, float, f, 32, 2);
+  TEST_VFMS(q, float, f, 32, 4);
+
+  CHECK_VFMS_RESULTS (TEST_MSG, "");
+}
+#endif
+
+int main (void)
+{
+#ifdef __aarch64__
+  exec_vfms_n ();
+#endif
+  return 0;
+}
Index: gcc/testsuite/ChangeLog
===================================================================
--- gcc/testsuite/ChangeLog	(revision 218582)
+++ gcc/testsuite/ChangeLog	(working copy)
@@ -1,3 +1,7 @@
+2014-12-08  Felix Yang  <felix.y...@huawei.com>
+
+	* gcc.target/aarch64/advsimd-intrinsics/vfms_n.c: New test.
+
 2014-12-10  Martin Liska  <mli...@suse.cz>
 
 	* gcc.dg/ipa/pr63909.c: New test.
Index: gcc/config/aarch64/arm_neon.h
===================================================================
--- gcc/config/aarch64/arm_neon.h	(revision 218582)
+++ gcc/config/aarch64/arm_neon.h	(working copy)
@@ -15254,7 +15254,24 @@ vfmsq_f64 (float64x2_t __a, float64x2_t __b, float
   return __builtin_aarch64_fmav2df (-__b, __c, __a);
 }
 
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vfms_n_f32 (float32x2_t __a, float32x2_t __b, float32_t __c)
+{
+  return __builtin_aarch64_fmav2sf (-__b, vdup_n_f32 (__c), __a);
+}
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vfmsq_n_f32 (float32x4_t __a, float32x4_t __b, float32_t __c)
+{
+  return __builtin_aarch64_fmav4sf (-__b, vdupq_n_f32 (__c), __a);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vfmsq_n_f64 (float64x2_t __a, float64x2_t __b, float64_t __c)
+{
+  return __builtin_aarch64_fmav2df (-__b, vdupq_n_f64 (__c), __a);
+}
+
 /* vfms_lane  */
 
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
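As a quick illustration (not part of the patch), a caller would use the new intrinsic like this; the wrapper name is arbitrary and the snippet assumes an AArch64 target with the patched <arm_neon.h>:

#include <arm_neon.h>

/* res[i] = acc[i] - x[i] * scale for both lanes, with the multiply and
   subtract fused into a single operation.  */
float32x2_t
scale_and_subtract (float32x2_t acc, float32x2_t x, float32_t scale)
{
  return vfms_n_f32 (acc, x, scale);
}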
Attachment: add-vfms_n-v1.diff