The following patch improves the handling of vector permutations by re-encoding the permutation with a wider vector element size when possible, which allows simpler instructions to be used in such cases.
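The core check is simple: a selector over NELT elements can be rewritten over NELT/2 double-width elements when its indices come in (even, even+1) pairs, each pair mapping to the index even/2. The snippet below is only a standalone sketch of that rewrite for illustration; it is not part of the patch, and the helper name try_halve_perm is made up. The concrete before/after example follows.

/* Standalone illustration (hypothetical helper, not part of the patch):
   check whether a permutation selector can be halved to double-width
   elements and compute the new indices, mirroring the pairing rule
   described above ((even, even+1) -> even / 2).  */
#include <stdbool.h>
#include <stdio.h>

static bool
try_halve_perm (const unsigned *perm, unsigned nelt, unsigned *new_perm)
{
  for (unsigned i = 0; i < nelt; i += 2)
    {
      unsigned elt0 = perm[i];
      unsigned elt1 = perm[i + 1];
      /* Require an even index immediately followed by its odd successor.  */
      if ((elt0 & 1) != 0 || elt0 + 1 != elt1)
        return false;
      new_perm[i / 2] = elt0 / 2;
    }
  return true;
}

int
main (void)
{
  unsigned perm[] = { 0, 1, 4, 5 };   /* V4SF selector from the example below.  */
  unsigned halved[2];
  if (try_halve_perm (perm, 4, halved))
    printf ("{%u, %u}\n", halved[0], halved[1]);   /* prints {0, 2}, i.e. a V2DI zip1 */
  return 0;
}

With the {2, 3, 6, 7} selector from vzip_4.c the same check yields {1, 3}, which maps to zip2.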
example:

#define vector __attribute__((vector_size(16)))
vector float f(vector float a, vector float b)
{
  return __builtin_shuffle (a, b, (vector int){0, 1, 4, 5});
}

was compiled into:

...
	adrp	x0, .LC0
	ldr	q2, [x0, #:lo12:.LC0]
	tbl	v0.16b, {v0.16b - v1.16b}, v2.16b
...

and after the patch:

...
	zip1	v0.2d, v0.2d, v1.2d
...

Bootstrapped and tested on aarch64-linux-gnu with no regressions.

This patch was initially introduced by Andrew Pinski <apin...@marvell.com>, with me getting involved later. (I have no write access to the repo.)

gcc/ChangeLog:

2020-06-11  Andrew Pinski  <apin...@marvell.com>

	PR gcc/82199

	* gcc/config/aarch64/aarch64.c (aarch64_evpc_reencode): New function.

gcc/testsuite/ChangeLog:

2020-06-11  Andrew Pinski  <apin...@marvell.com>

	PR gcc/82199

	* gcc.target/aarch64/vdup_n_3.c: New test.
	* gcc.target/aarch64/vzip_1.c: New test.
	* gcc.target/aarch64/vzip_2.c: New test.
	* gcc.target/aarch64/vzip_3.c: New test.
	* gcc.target/aarch64/vzip_4.c: New test.

Co-Authored-By: Dmitrij Pochepko <dmitrij.poche...@bell-sw.com>

Thanks,
Dmitrij
>From 3c9f3fe834811386223755fc58e2ab4a612eefcf Mon Sep 17 00:00:00 2001
From: Dmitrij Pochepko <dmitrij.poche...@bell-sw.com>
Date: Thu, 11 Jun 2020 14:13:35 +0300
Subject: [PATCH] __builtin_shuffle sometimes should produce zip1 rather than
 TBL (PR82199)

The following patch enables vector permutations optimization by using
another vector element size when applicable.  It allows usage of simpler
instructions in applicable cases.

example:

vector float f(vector float a, vector float b)
{
  return __builtin_shuffle (a, b, (vector int){0, 1, 4,5});
}

was compiled into:
...
	adrp	x0, .LC0
	ldr	q2, [x0, #:lo12:.LC0]
	tbl	v0.16b, {v0.16b - v1.16b}, v2.16b
...

and after patch:
...
	zip1	v0.2d, v0.2d, v1.2d
...

bootstrapped and tested on aarch64-linux-gnu with no regressions

gcc/ChangeLog:

2020-06-11  Andrew Pinski  <apin...@marvell.com>

	PR gcc/82199

	* gcc/config/aarch64/aarch64.c (aarch64_evpc_reencode): New function

gcc/testsuite/ChangeLog:

2020-06-11  Andrew Pinski  <apin...@marvell.com>

	PR gcc/82199

	* gcc.target/aarch64/vdup_n_3.c: New test
	* gcc.target/aarch64/vzip_1.c: New test
	* gcc.target/aarch64/vzip_2.c: New test
	* gcc.target/aarch64/vzip_3.c: New test
	* gcc.target/aarch64/vzip_4.c: New test

Co-Authored-By: Dmitrij Pochepko <dmitrij.poche...@bell-sw.com>
---
 gcc/config/aarch64/aarch64.c                | 81 +++++++++++++++++++++++++++++
 gcc/testsuite/gcc.target/aarch64/vdup_n_3.c | 16 ++++++
 gcc/testsuite/gcc.target/aarch64/vzip_1.c   | 11 ++++
 gcc/testsuite/gcc.target/aarch64/vzip_2.c   | 12 +++++
 gcc/testsuite/gcc.target/aarch64/vzip_3.c   | 12 +++++
 gcc/testsuite/gcc.target/aarch64/vzip_4.c   | 12 +++++
 6 files changed, 144 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/aarch64/vdup_n_3.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/vzip_1.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/vzip_2.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/vzip_3.c
 create mode 100644 gcc/testsuite/gcc.target/aarch64/vzip_4.c

diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 973c65a..ab7b39e 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -19889,6 +19889,8 @@ struct expand_vec_perm_d
   bool testing_p;
 };
 
+static bool aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
+
 /* Generate a variable permutation.  */
 
 static void
@@ -20074,6 +20076,83 @@ aarch64_evpc_trn (struct expand_vec_perm_d *d)
   return true;
 }
 
+/* Try to re-encode the PERM constant so it use the bigger size up.
+   This rewrites constants such as {0, 1, 4, 5}/V4SF to {0, 2}/V2DI.
+   We retry with this new constant with the full suite of patterns.  */
+static bool
+aarch64_evpc_reencode (struct expand_vec_perm_d *d)
+{
+  expand_vec_perm_d newd;
+  unsigned HOST_WIDE_INT nelt;
+
+  if (d->vec_flags != VEC_ADVSIMD)
+    return false;
+
+  unsigned int encoded_nelts = d->perm.encoding ().encoded_nelts ();
+  for (unsigned int i = 0; i < encoded_nelts; ++i)
+    if (!d->perm[i].is_constant ())
+      return false;
+
+  /* to_constant is safe since this routine is specific to Advanced SIMD
+     vectors.  */
+  nelt = d->perm.length ().to_constant ();
+
+  /* Get the new mode.  Always twice the size of the inner
+     and half the elements.  */
+  machine_mode new_mode;
+  switch (d->vmode)
+    {
+    /* 128bit vectors.  */
+    case E_V4SFmode:
+    case E_V4SImode:
+      new_mode = V2DImode;
+      break;
+    case E_V8BFmode:
+    case E_V8HFmode:
+    case E_V8HImode:
+      new_mode = V4SImode;
+      break;
+    case E_V16QImode:
+      new_mode = V8HImode;
+      break;
+    /* 64bit vectors.  */
+    case E_V4BFmode:
+    case E_V4HFmode:
+    case E_V4HImode:
+      new_mode = V2SImode;
+      break;
+    case E_V8QImode:
+      new_mode = V4HImode;
+      break;
+    default:
+      return false;
+    }
+
+  newd.vmode = new_mode;
+  newd.vec_flags = VEC_ADVSIMD;
+  newd.target = d->target ? gen_lowpart (new_mode, d->target) : NULL;
+  newd.op0 = d->op0 ? gen_lowpart (new_mode, d->op0) : NULL;
+  newd.op1 = d->op1 ? gen_lowpart (new_mode, d->op1) : NULL;
+  newd.testing_p = d->testing_p;
+  newd.one_vector_p = d->one_vector_p;
+  vec_perm_builder newpermconst;
+  newpermconst.new_vector (nelt / 2, nelt / 2, 1);
+
+  /* Convert the perm constant if we can.  Require even, odd as the pairs.  */
+  for (unsigned int i = 0; i < nelt; i += 2)
+    {
+      unsigned int elt0 = d->perm[i].to_constant ();
+      unsigned int elt1 = d->perm[i+1].to_constant ();
+      if ((elt0 & 1) != 0 || elt0 + 1 != elt1)
+	return false;
+      newpermconst.quick_push (elt0 / 2);
+    }
+  newpermconst.finalize ();
+
+  newd.perm.new_vector (newpermconst, newd.one_vector_p ? 1 : 2, nelt / 2);
+  return aarch64_expand_vec_perm_const_1 (&newd);
+}
+
 /* Recognize patterns suitable for the UZP instructions.  */
 static bool
 aarch64_evpc_uzp (struct expand_vec_perm_d *d)
@@ -20471,6 +20550,8 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
 	return true;
       else if (aarch64_evpc_sel (d))
 	return true;
+      else if (aarch64_evpc_reencode (d))
+	return true;
       if (d->vec_flags == VEC_SVE_DATA)
 	return aarch64_evpc_sve_tbl (d);
       else if (d->vec_flags == VEC_ADVSIMD)
diff --git a/gcc/testsuite/gcc.target/aarch64/vdup_n_3.c b/gcc/testsuite/gcc.target/aarch64/vdup_n_3.c
new file mode 100644
index 0000000..289604d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vdup_n_3.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define vector __attribute__((vector_size(4*sizeof(float))))
+
+/* These are both dups.  */
+vector float f(vector float a, vector float b)
+{
+  return __builtin_shuffle (a, a, (vector int){0, 1, 0, 1});
+}
+vector float f1(vector float a, vector float b)
+{
+  return __builtin_shuffle (a, a, (vector int){2, 3, 2, 3});
+}
+
+/* { dg-final { scan-assembler-times "\[ \t\]*dup\[ \t\]+v\[0-9\]+\.2d" 2 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vzip_1.c b/gcc/testsuite/gcc.target/aarch64/vzip_1.c
new file mode 100644
index 0000000..65a9d97
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vzip_1.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define vector __attribute__((vector_size(2*sizeof(float))))
+
+vector float f(vector float a, vector float b)
+{
+  return __builtin_shuffle (a, b, (vector int){0, 2});
+}
+
+/* { dg-final { scan-assembler-times "\[ \t\]*zip1\[ \t\]+v\[0-9\]+\.2s" 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vzip_2.c b/gcc/testsuite/gcc.target/aarch64/vzip_2.c
new file mode 100644
index 0000000..a60b90f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vzip_2.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define vector __attribute__((vector_size(4*sizeof(float))))
+
+vector float f(vector float a, vector float b)
+{
+  /* This is the same as zip1 v.2d as {0, 1, 4, 5} can be converted to {0, 2}.  */
+  return __builtin_shuffle (a, b, (vector int){0, 1, 4, 5});
+}
+
+/* { dg-final { scan-assembler-times "\[ \t\]*zip1\[ \t\]+v\[0-9\]+\.2d" 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vzip_3.c b/gcc/testsuite/gcc.target/aarch64/vzip_3.c
new file mode 100644
index 0000000..0446d1f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vzip_3.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define vector __attribute__((vector_size(4*sizeof(float))))
+
+vector float f(vector float a, vector float b)
+{
+  /* This is the same as zip1 v.2d as {4, 5, 0, 1} can be converted to {2, 0}.  */
+  return __builtin_shuffle (a, b, (vector int){4, 5, 0, 1});
+}
+
+/* { dg-final { scan-assembler-times "\[ \t\]*zip1\[ \t\]+v\[0-9\]+\.2d" 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/vzip_4.c b/gcc/testsuite/gcc.target/aarch64/vzip_4.c
new file mode 100644
index 0000000..b21d8cf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vzip_4.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#define vector __attribute__((vector_size(4*sizeof(float))))
+
+vector float f(vector float a, vector float b)
+{
+  /* This is the same as zip2 v.2d as {2, 3, 6, 7} can be converted to {1, 3}.  */
+  return __builtin_shuffle (a, b, (vector int){2, 3, 6, 7});
+}
+
+/* { dg-final { scan-assembler-times "\[ \t\]*zip2\[ \t\]+v\[0-9\]+\.2d" 1 } } */
-- 
2.7.4