Hi,
This patch fixes vdiv[q]_f[32,64] neon intrinsics testcase.
Testcase ran on both little and big endian targets with no problems.
OK?
Thanks,
Alex
gcc/testsuite/
2013-11-19  Alex Velenko  <alex.vele...@arm.com>
* gcc.target/aarch64/vdiv_f.c (test_vdiv_f32): Replace vector
indexing with builtins.
(test_vdiv_f64): Likewise.
(test_vdivq_f32): Likewise.
(test_vdivq_f64): Likewise.
diff --git a/gcc/testsuite/gcc.target/aarch64/vdiv_f.c b/gcc/testsuite/gcc.target/aarch64/vdiv_f.c
index cc3a9570c0fac0dcbf38f38314a416cca5e58c6e..98aae58acb9df3da0148fc1835a51c63a7c47d5d 100644
--- a/gcc/testsuite/gcc.target/aarch64/vdiv_f.c
+++ b/gcc/testsuite/gcc.target/aarch64/vdiv_f.c
@@ -90,6 +90,8 @@
#define REG_INFEX(reg_len) REG_INFEX##reg_len
#define POSTFIX(reg_len, data_len) \
CONCAT1 (REG_INFEX (reg_len), f##data_len)
+#define LANE_POSTFIX(reg_len, data_len) \
+ CONCAT1 (REG_INFEX (reg_len), lane_f##data_len)
#define DATA_TYPE_32 float
#define DATA_TYPE_64 double
@@ -99,10 +101,9 @@
#define EPSILON_64 __DBL_EPSILON__
#define EPSILON(data_len) EPSILON_##data_len
-#define INDEX64_32 [i]
-#define INDEX64_64
-#define INDEX128_32 [i]
-#define INDEX128_64 [i]
+#define GET_ELEMENT(reg_len, data_len) \
+ CONCAT1 (vget, LANE_POSTFIX (reg_len, data_len))
+
#define INDEX(reg_len, data_len) \
CONCAT1 (INDEX, reg_len##_##data_len)
@@ -122,7 +123,7 @@
#define INHIB_OPTIMIZATION asm volatile ("" : : : "memory")
-#define RUN_TEST(a, b, c, testseta, testsetb, answset, count, \
+#define RUN_TEST(a, b, c, a1, c1, testseta, testsetb, answset, count, \
reg_len, data_len, n) \
{ \
int i; \
@@ -135,8 +136,10 @@
for (i = 0; i < n; i++) \
{ \
INHIB_OPTIMIZATION; \
- if (!FP_equals ((a) INDEX (reg_len, data_len), \
- (c) INDEX (reg_len, data_len), \
+ (a1) = GET_ELEMENT (reg_len, data_len) ((a), (i)); \
+ (c1) = GET_ELEMENT (reg_len, data_len) ((c), (i)); \
+ if (!FP_equals ((a1), \
+ (c1), \
EPSILON (data_len))) \
return 1; \
} \
@@ -152,9 +155,8 @@ int
test_vdiv_f32 ()
{
int count;
- float32x2_t a;
- float32x2_t b;
- float32x2_t c;
+ float32x2_t a, b, c;
+ float32_t a1, c1;
float32_t testseta[10][2] = {
{ TESTA0, TESTA1 }, { TESTA2, TESTA3 },
@@ -182,7 +184,8 @@ test_vdiv_f32 ()
for (count = 0; count < 10; count++)
{
- RUN_TEST (a, b, c, testseta, testsetb, answset, count, 64, 32, 2);
+ RUN_TEST (a, b, c, a1, c1, testseta, testsetb, \
+ answset, count, 64, 32, 2);
}
return 0;
@@ -202,10 +205,8 @@ int
test_vdiv_f64 ()
{
int count;
- float64x1_t a;
- float64x1_t b;
- float64x1_t c;
-
+ float64x1_t a, b, c;
+ float64_t a1, c1;
float64_t testseta[20][1] = {
{ TESTA0 }, { TESTA1 }, { TESTA2 }, { TESTA3 },
{ TESTA4 }, { TESTA5 }, { TESTA6 }, { TESTA7 },
@@ -232,7 +233,8 @@ test_vdiv_f64 ()
for (count = 0; count < 20; count++)
{
- RUN_TEST (a, b, c, testseta, testsetb, answset, count, 64, 64, 1);
+ RUN_TEST (a, b, c, a1, c1, testseta, testsetb, \
+ answset, count, 64, 64, 1);
}
return 0;
}
@@ -253,9 +255,8 @@ int
test_vdivq_f32 ()
{
int count;
- float32x4_t a;
- float32x4_t b;
- float32x4_t c;
+ float32x4_t a, b, c;
+ float32_t a1, c1;
float32_t testseta[5][4] = {
{ TESTA0, TESTA1, TESTA2, TESTA3 },
@@ -283,7 +284,8 @@ test_vdivq_f32 ()
for (count = 0; count < 5; count++)
{
- RUN_TEST (a, b, c, testseta, testsetb, answset, count, 128, 32, 4);
+ RUN_TEST (a, b, c, a1, c1, testseta, testsetb, \
+ answset, count, 128, 32, 4);
}
return 0;
}
@@ -302,9 +304,8 @@ int
test_vdivq_f64 ()
{
int count;
- float64x2_t a;
- float64x2_t b;
- float64x2_t c;
+ float64x2_t a, b, c;
+ float64_t a1, c1;
float64_t testseta[10][2] = {
{ TESTA0, TESTA1 }, { TESTA2, TESTA3 },
@@ -332,7 +333,8 @@ test_vdivq_f64 ()
for (count = 0; count < 10; count++)
{
- RUN_TEST (a, b, c, testseta, testsetb, answset, count, 128, 64, 2);
+ RUN_TEST (a, b, c, a1, c1, testseta, testsetb, \
+ answset, count, 128, 64, 2);
}
return 0;