While working on zvl1024b bug fixes, I noticed a special VLA SLP case that can be better optimized:

  v = vec_perm (op1, op2, { nunits - 1, nunits, nunits + 1, ... })

Before this patch, we used the generic approach (vrgather):

  vid
  vadd.vx
  vrgather
  vmsgeu
  vrgather

With this patch, we use vec_extract + slide1up instead:

  scalar = vec_extract (last element of op1)
  v = slide1up (op2, scalar)
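For reference (not part of the patch), here is a rough hand-written sketch of
what the new lowering amounts to, expressed with RVV intrinsics for a 32-bit
unsigned element type.  The intrinsic names assume the current __riscv_-prefixed
intrinsic API, and the function name is made up for illustration only:

#include <riscv_vector.h>

/* Illustration: v = vec_perm (op1, op2, { vl - 1, vl, vl + 1, ... })
   done as vec_extract + slide1up instead of the vrgather sequence.  */
vuint32m1_t
perm_extract_slide1up (vuint32m1_t op1, vuint32m1_t op2, size_t vl)
{
  /* Extract the last element of op1: slide it down to position 0,
     then move element 0 into a scalar register.  */
  uint32_t last
    = __riscv_vmv_x_s_u32m1_u32 (__riscv_vslidedown_vx_u32m1 (op1, vl - 1, vl));
  /* Shift op2 up by one element and insert the scalar at element 0.  */
  return __riscv_vslide1up_vx_u32m1 (op2, last, vl);
}

The patch emits the equivalent of those two steps directly from the vec_perm
expander, so the vid/vadd/vrgather/vmsgeu sequence disappears.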
Tested on zvl128b/zvl256b/zvl512b/zvl1024b of both RV32 and RV64 with no
regressions.  Ok for trunk?

        PR target/112599

gcc/ChangeLog:

        * config/riscv/riscv-v.cc (shuffle_extract_and_slide1up_patterns):
        New function.
        (expand_vec_perm_const_1): Add new optimization.

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/rvv/autovec/pr112599-2.c: New test.

---
 gcc/config/riscv/riscv-v.cc                   | 38 ++++++++++++++
 .../gcc.target/riscv/rvv/autovec/pr112599-2.c | 51 +++++++++++++++++++
 2 files changed, 89 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112599-2.c

diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 72b96d8339d..18619a11592 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -3232,6 +3232,42 @@ shuffle_bswap_pattern (struct expand_vec_perm_d *d)
   return true;
 }
 
+/* Recognize the pattern that can be shuffled by vec_extract and slide1up
+   approach.  */
+
+static bool
+shuffle_extract_and_slide1up_patterns (struct expand_vec_perm_d *d)
+{
+  poly_int64 nunits = GET_MODE_NUNITS (d->vmode);
+
+  /* Recognize { nunits - 1, nunits, nunits + 1, ... }.  */
+  if (!d->perm.series_p (0, 2, nunits - 1, 2)
+      || !d->perm.series_p (1, 2, nunits, 2))
+    return false;
+
+  /* Disable when nunits < 4 since the later generic approach
+     is more profitable on indices = { nunits - 1, nunits }.  */
+  if (!known_gt (nunits, 2))
+    return false;
+
+  /* Success! */
+  if (d->testing_p)
+    return true;
+
+  /* Extract the last element of the first vector.  */
+  scalar_mode smode = GET_MODE_INNER (d->vmode);
+  rtx tmp = gen_reg_rtx (smode);
+  emit_vec_extract (tmp, d->op0, nunits - 1);
+
+  /* Insert the scalar into element 0.  */
+  unsigned int unspec
+    = FLOAT_MODE_P (d->vmode) ? UNSPEC_VFSLIDE1UP : UNSPEC_VSLIDE1UP;
+  insn_code icode = code_for_pred_slide (unspec, d->vmode);
+  rtx ops[] = {d->target, d->op1, tmp};
+  emit_vlmax_insn (icode, BINARY_OP, ops);
+  return true;
+}
+
 /* Recognize the pattern that can be shuffled by generic approach.  */
 
 static bool
@@ -3310,6 +3346,8 @@ expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
         return true;
       if (shuffle_bswap_pattern (d))
         return true;
+      if (shuffle_extract_and_slide1up_patterns (d))
+        return true;
       if (shuffle_generic_patterns (d))
         return true;
       return false;
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112599-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112599-2.c
new file mode 100644
index 00000000000..fd87565b054
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/pr112599-2.c
@@ -0,0 +1,51 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv_zvl1024b -mabi=lp64d -O3" } */
+
+struct s { struct s *n; } *p;
+struct s ss;
+#define MAX 10
+struct s sss[MAX];
+int count = 0;
+
+int look( struct s *p, struct s **pp )
+{
+  for ( ; p; p = p->n )
+    ;
+  *pp = p;
+  count++;
+  return( 1 );
+}
+
+void sub( struct s *p, struct s **pp )
+{
+  for ( ; look( p, pp ); ) {
+    if ( p )
+      p = p->n;
+    else
+      break;
+  }
+}
+
+int
+foo(void)
+{
+  struct s *pp;
+  struct s *next;
+  int i;
+
+  p = &ss;
+  next = p;
+  for ( i = 0; i < MAX; i++ ) {
+    next->n = &sss[i];
+    next = next->n;
+  }
+  next->n = 0;
+
+  sub( p, &pp );
+  if (count != MAX+2)
+    __builtin_abort ();
+  return 0;
+}
+
+/* { dg-final { scan-assembler-not {vrgather} } } */
+/* { dg-final { scan-assembler-times {vslide1up\.vx} 1 } } */
-- 
2.36.3