From: Robin Dapp <rd...@ventanamicro.com>

This patch adds efficient handling of interleaving patterns like
[0 4 1 5] to vec_perm_const.  Such a permute is lowered to a slide of
one operand (a slideup for the low variant, a slidedown for the high
variant) followed by a gather.
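
As a sketch (illustration only, not the literal emitted insns), the low
variant with vlen == 4, op0 = {a0 a1 a2 a3} and op1 = {b0 b1 b2 b3}
becomes

    vslideup tmp, op1, 2            ; tmp = {a0 a1 b0 b1}
    vrgather dst, tmp, {0 2 1 3}    ; dst = {a0 b0 a1 b1}

i.e. the permutation [0 4 1 5] applied to {op0, op1}.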

gcc/ChangeLog:

        * config/riscv/riscv-v.cc (shuffle_interleave_patterns): New
        function.
        (expand_vec_perm_const_1): Use new function.

gcc/testsuite/ChangeLog:

        * gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-interleave-run.c:
        New test.
        * gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-interleave.c: New test.
---
 gcc/config/riscv/riscv-v.cc                   |  80 ++++++++++++
 .../vls-vlmax/shuffle-interleave-run.c        | 122 ++++++++++++++++++
 .../autovec/vls-vlmax/shuffle-interleave.c    |  69 ++++++++++
 3 files changed, 271 insertions(+)
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-interleave-run.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-interleave.c

diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index deb2bdb4247..3f8fd3257c4 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -3492,6 +3492,84 @@ shuffle_slide_patterns (struct expand_vec_perm_d *d)
   return true;
 }
 
+/* Recognize interleaving patterns like [0 4 1 5] that can be lowered
+   to a slide of one operand followed by a gather.  */
+
+static bool
+shuffle_interleave_patterns (struct expand_vec_perm_d *d)
+{
+  machine_mode vmode = d->vmode;
+  machine_mode sel_mode = related_int_vector_mode (vmode).require ();
+  poly_int64 vec_len = d->perm.length ();
+  int n_patterns = d->perm.encoding ().npatterns ();
+
+  if (!vec_len.is_constant ())
+    return false;
+
+  if (n_patterns != 2)
+    return false;
+
+  unsigned vlen = vec_len.to_constant ();
+
+  if (vlen < 4 || vlen > 64)
+    return false;
+
+  if (d->one_vector_p)
+    return false;
+
+  bool low = true;
+  if (d->perm.series_p (0, 2, 0, 1)
+      && d->perm.series_p (1, 2, vlen, 1))
+    low = true;
+  else if (d->perm.series_p (0, 2, vlen / 2, 1)
+          && d->perm.series_p (1, 2, vlen + vlen / 2, 1))
+    low = false;
+  else
+    return false;
+
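+  /* Build the gather selector {0, vlen/2, 1, vlen/2 + 1, ...} that
+     interleaves the lower and upper halves of the slid vector.  */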
+  vec_perm_builder sel (vlen, 2, 1);
+  sel.safe_grow (vlen);
+  int cnt = 0;
+  for (unsigned i = 0; i < vlen; i += 2)
+    {
+      sel[i] = cnt;
+      sel[i + 1] = cnt + vlen / 2;
+      cnt++;
+    }
+
+  vec_perm_indices indices (sel, 2, vlen);
+
+  if (vlen != indices.length ().to_constant ())
+    return false;
+
+  /* Success!  */
+  if (d->testing_p)
+    return true;
+
+  int slide_cnt = vlen / 2;
+  rtx tmp = gen_reg_rtx (vmode);
+
+  if (low)
+    {
+      /* No need for a vector length because we slide up until the
+        end of OP1 anyway.  */
+      rtx ops[] = {tmp, d->op0, d->op1, gen_int_mode (slide_cnt, Pmode)};
+      insn_code icode = code_for_pred_slide (UNSPEC_VSLIDEUP, vmode);
+      emit_vlmax_insn (icode, SLIDEUP_OP_MERGE, ops);
+    }
+  else
+    {
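+      /* Slide OP0 down by vlen/2, keeping OP1's upper half in the
+	 tail (tail undisturbed, vl = vlen/2).  */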
+      rtx ops[] = {tmp, d->op1, d->op0, gen_int_mode (slide_cnt, Pmode)};
+      insn_code icode = code_for_pred_slide (UNSPEC_VSLIDEDOWN, vmode);
+      emit_nonvlmax_insn (icode, BINARY_OP_TUMA, ops,
+                         gen_int_mode (slide_cnt, Pmode));
+    }
+
+  rtx sel_rtx = vec_perm_indices_to_rtx (sel_mode, indices);
+  emit_vlmax_gather_insn (gen_lowpart (vmode, d->target), tmp, sel_rtx);
+
+  return true;
+}
+
 /* Recognize decompress patterns:
 
    1. VEC_PERM_EXPR op0 and op1
@@ -3808,6 +3886,8 @@ expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
            return true;
          if (shuffle_slide_patterns (d))
            return true;
+         if (shuffle_interleave_patterns (d))
+           return true;
          if (shuffle_compress_patterns (d))
            return true;
          if (shuffle_decompress_patterns (d))
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-interleave-run.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-interleave-run.c
new file mode 100644
index 00000000000..57748d95362
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-interleave-run.c
@@ -0,0 +1,122 @@
+/* { dg-do run } */
+/* { dg-require-effective-target riscv_v_ok } */
+/* { dg-add-options riscv_v } */
+/* { dg-additional-options "-O3 -mrvv-max-lmul=m8 -std=gnu99" } */
+
+#include "shuffle-interleave.c"
+
+#define SERIES_2(x, y) (x), (x + 1)
+#define SERIES_4(x, y) SERIES_2 (x, y), SERIES_2 (x + 2, y)
+#define SERIES_8(x, y) SERIES_4 (x, y), SERIES_4 (x + 4, y)
+#define SERIES_16(x, y) SERIES_8 (x, y), SERIES_8 (x + 8, y)
+#define SERIES_32(x, y) SERIES_16 (x, y), SERIES_16 (x + 16, y)
+#define SERIES_64(x, y) SERIES_32 (x, y), SERIES_32 (x + 32, y)
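+
+/* SERIES_N (X, Y) expands to the N sequential values X, X + 1, ...,
+   X + N - 1, used to fill the input vectors.  */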
+
+#define comp(a, b, n)                                                         \
+  for (unsigned i = 0; i < n; ++i)                                            \
+    if ((a)[i] != (b)[i])                                                     \
+      __builtin_abort ();
+
+#define CHECK1(TYPE, NUNITS)                                                  \
+  __attribute__ ((noipa)) void check1_##TYPE ()                               \
+  {                                                                           \
+    TYPE v0 = (TYPE){SERIES_##NUNITS (0, NUNITS)};                            \
+    TYPE v1 = (TYPE){SERIES_##NUNITS (NUNITS, NUNITS)};                       \
+    TYPE ref = (TYPE){MASKL_##NUNITS (0, NUNITS)};                            \
+    TYPE res;                                                                 \
+    permute1_##TYPE (v0, v1, &res);                                           \
+    comp (res, ref, NUNITS);                                                  \
+  }
+
+#define CHECK2(TYPE, NUNITS)                                                  \
+  __attribute__ ((noipa)) void check2_##TYPE ()                               \
+  {                                                                           \
+    TYPE v0 = (TYPE){SERIES_##NUNITS (0, NUNITS)};                            \
+    TYPE v1 = (TYPE){SERIES_##NUNITS (NUNITS, NUNITS)};                       \
+    TYPE ref = (TYPE){MASKH_##NUNITS (0, NUNITS)};                            \
+    TYPE res;                                                                 \
+    permute2_##TYPE (v0, v1, &res);                                           \
+    comp (res, ref, NUNITS);                                                  \
+  }
+
+#define CHECK_ALL(T)                                                          \
+  T (vnx4qi, 4)                                                               \
+  T (vnx8qi, 8)                                                               \
+  T (vnx16qi, 16)                                                             \
+  T (vnx32qi, 32)                                                             \
+  T (vnx64qi, 64)                                                             \
+  T (vnx4hi, 4)                                                               \
+  T (vnx8hi, 8)                                                               \
+  T (vnx16hi, 16)                                                             \
+  T (vnx32hi, 32)                                                             \
+  T (vnx64hi, 64)                                                             \
+  T (vnx4si, 4)                                                               \
+  T (vnx8si, 8)                                                               \
+  T (vnx16si, 16)                                                             \
+  T (vnx32si, 32)                                                             \
+  T (vnx4di, 4)                                                               \
+  T (vnx8di, 8)                                                               \
+  T (vnx16di, 16)                                                             \
+  T (vnx4sf, 4)                                                               \
+  T (vnx8sf, 8)                                                               \
+  T (vnx16sf, 16)                                                             \
+  T (vnx32sf, 32)                                                             \
+  T (vnx4df, 4)                                                               \
+  T (vnx8df, 8)                                                               \
+  T (vnx16df, 16)
+
+CHECK_ALL (CHECK1)
+CHECK_ALL (CHECK2)
+
+int
+main ()
+{
+  check1_vnx4qi ();
+  check1_vnx8qi ();
+  check1_vnx16qi ();
+  check1_vnx32qi ();
+  check1_vnx64qi ();
+  check1_vnx4hi ();
+  check1_vnx8hi ();
+  check1_vnx16hi ();
+  check1_vnx32hi ();
+  check1_vnx64hi ();
+  check1_vnx4si ();
+  check1_vnx8si ();
+  check1_vnx16si ();
+  check1_vnx32si ();
+  check1_vnx4di ();
+  check1_vnx8di ();
+  check1_vnx16di ();
+  check1_vnx4sf ();
+  check1_vnx8sf ();
+  check1_vnx16sf ();
+  check1_vnx32sf ();
+  check1_vnx4df ();
+  check1_vnx8df ();
+  check1_vnx16df ();
+  check2_vnx4qi ();
+  check2_vnx8qi ();
+  check2_vnx16qi ();
+  check2_vnx32qi ();
+  check2_vnx64qi ();
+  check2_vnx4hi ();
+  check2_vnx8hi ();
+  check2_vnx16hi ();
+  check2_vnx32hi ();
+  check2_vnx64hi ();
+  check2_vnx4si ();
+  check2_vnx8si ();
+  check2_vnx16si ();
+  check2_vnx32si ();
+  check2_vnx4di ();
+  check2_vnx8di ();
+  check2_vnx16di ();
+  check2_vnx4sf ();
+  check2_vnx8sf ();
+  check2_vnx16sf ();
+  check2_vnx32sf ();
+  check2_vnx4df ();
+  check2_vnx8df ();
+  check2_vnx16df ();
+}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-interleave.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-interleave.c
new file mode 100644
index 00000000000..3e241f01871
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/shuffle-interleave.c
@@ -0,0 +1,69 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -march=rv64gcv -mrvv-max-lmul=m8" } */
+
+#include "perm.h"
+
+#define MASKL_2(x, y) (x), (x + y)
+#define MASKL_4(x, y) MASKL_2 (x, y), MASKL_2 (x + 1, y)
+#define MASKL_8(x, y) MASKL_4 (x, y), MASKL_4 (x + 2, y)
+#define MASKL_16(x, y) MASKL_8 (x, y), MASKL_8 (x + 4, y)
+#define MASKL_32(x, y) MASKL_16 (x, y), MASKL_16 (x + 8, y)
+#define MASKL_64(x, y) MASKL_32 (x, y), MASKL_32 (x + 16, y)
+
+#define MASKH_2(x, y) (x + y / 2), (x + y / 2 + y)
+#define MASKH_4(x, y) MASKH_2 (x, y), MASKH_2 (x + 1, y)
+#define MASKH_8(x, y) MASKH_4 (x, y), MASKH_4 (x + 2, y)
+#define MASKH_16(x, y) MASKH_8 (x, y), MASKH_8 (x + 4, y)
+#define MASKH_32(x, y) MASKH_16 (x, y), MASKH_16 (x + 8, y)
+#define MASKH_64(x, y) MASKH_32 (x, y), MASKH_32 (x + 16, y)
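+
+/* E.g. MASKL_4 (0, 4) expands to 0, 4, 1, 5 (the low interleave) and
+   MASKH_4 (0, 4) to 2, 6, 3, 7 (the high interleave).  */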
+
+#define PERMUTE1(TYPE, NUNITS)                                                \
+  __attribute__ ((noipa)) void permute1_##TYPE (TYPE values1, TYPE values2,   \
+                                               TYPE *out)                     \
+  {                                                                           \
+    TYPE v = __builtin_shufflevector (values1, values2,                       \
+                                     MASKL_##NUNITS (0, NUNITS));             \
+    *(TYPE *) out = v;                                                        \
+  }
+
+#define PERMUTE2(TYPE, NUNITS)                                                \
+  __attribute__ ((noipa)) void permute2_##TYPE (TYPE values1, TYPE values2,   \
+                                               TYPE *out)                     \
+  {                                                                           \
+    TYPE v = __builtin_shufflevector (values1, values2,                       \
+                                     MASKH_##NUNITS (0, NUNITS));             \
+    *(TYPE *) out = v;                                                        \
+  }
+
+#define TEST_ALL(T)                                                           \
+  T (vnx4qi, 4)                                                               \
+  T (vnx8qi, 8)                                                               \
+  T (vnx16qi, 16)                                                             \
+  T (vnx32qi, 32)                                                             \
+  T (vnx64qi, 64)                                                             \
+  T (vnx4hi, 4)                                                               \
+  T (vnx8hi, 8)                                                               \
+  T (vnx16hi, 16)                                                             \
+  T (vnx32hi, 32)                                                             \
+  T (vnx64hi, 64)                                                             \
+  T (vnx4si, 4)                                                               \
+  T (vnx8si, 8)                                                               \
+  T (vnx16si, 16)                                                             \
+  T (vnx32si, 32)                                                             \
+  T (vnx4di, 4)                                                               \
+  T (vnx8di, 8)                                                               \
+  T (vnx16di, 16)                                                             \
+  T (vnx4sf, 4)                                                               \
+  T (vnx8sf, 8)                                                               \
+  T (vnx16sf, 16)                                                             \
+  T (vnx32sf, 32)                                                             \
+  T (vnx4df, 4)                                                               \
+  T (vnx8df, 8)                                                               \
+  T (vnx16df, 16)
+
+TEST_ALL (PERMUTE1)
+TEST_ALL (PERMUTE2)
+
+/* { dg-final { scan-assembler-times "vslideup" 24 } } */
+/* { dg-final { scan-assembler-times "vslidedown" 24 } } */
+/* { dg-final { scan-assembler-times "vrgather" 48 } } */
-- 
2.47.0
