https://gcc.gnu.org/g:30cfa1c174648154945c5494573fb6b5dc829fe6

commit r16-6901-g30cfa1c174648154945c5494573fb6b5dc829fe6
Author: Jeff Law <[email protected]>
Date:   Mon Jan 19 07:44:54 2026 -0700

    [PR target/113666] Simplify VEC_EXTRACT from a uniform vector
    
    This fixes a P3 regression relative to gcc-13 on the RISC-V platform
    for this code:
    
    > unsigned char a;
    >
    > int main() {
    >   short b = a = 0;
    >   for (; a != 19; a++)
    >     if (a)
    >       b = 32872 >> a;
    >
    >   if (b == 0)
    >     return 0;
    >   else
    >     return 1;
    > }
    >
    > -march=rv64gcv_zvl256b -mabi=lp64d -O3 -ftree-vectorize
    
    This doesn't need vectors at all.  Good code generation here looks like:
    
    >         lui     a5,%hi(a)
    >         li      a4,19
    >         sb      a4,%lo(a)(a5)
    >         li      a0,0
    >         ret
    
    gcc-14 and gcc-15 produce horrific code here, roughly 20 instructions,
    over half of which are vector.  It's not even worth posting; it's
    atrocious.
    
    The trunk improves things, but not quite to the quality of gcc-13:
    
    >         vsetivli        zero,8,e16,mf2,ta,ma
    >         vmv.v.i v1,0
    >         lui     a5,%hi(a)
    >         li      a4,19
    >         vslidedown.vi   v1,v1,1
    >         sb      a4,%lo(a)(a5)
    >         vmv.x.s a0,v1
    >         snez    a0,a0
    >         ret
    
    If we look at the .optimized dump, we find this nugget:
    
    >   _26 = .VEC_EXTRACT ({ 0, 0, 0, 0, 0, 0, 0, 0 }, 1);
    
    If we're extracting an element out of a uniform vector, then any
    element will do, and uniform_vector_p conveniently returns one.  So a
    simple match.pd pattern simplifies this to _26 = 0.  That in turn
    allows elimination of all the vector code and simplification of the
    return value to a constant as well, resulting in the desired code
    shown earlier.
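
    To make this concrete, here is a reduced illustration using GNU C
    vector extensions (a sketch for exposition, not the PR testcase):
    extracting any lane of a uniform vector is just the broadcast scalar,
    so the lane access folds to a constant and the vector temporary dies
    with it.

    > typedef short v8hi __attribute__ ((vector_size (16)));
    >
    > short
    > extract_from_uniform (void)
    > {
    >   v8hi v = { 0, 0, 0, 0, 0, 0, 0, 0 };  /* uniform vector */
    >   return v[1];  /* any lane will do; folds to the constant 0 */
    > }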
    
    One could easily argue that this need not be restricted to a uniform
    vector, and I would totally agree.  But given we're in stage4, the
    minimal fix for the regression seems more appropriate, though I could
    certainly be convinced to handle the more general case here.
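
    For reference, here is a sketch of what that more general fold might
    look like, pulling the requested lane out of any constant vector.  The
    helper name and the bounds handling are illustrative assumptions on my
    part, not committed code:

    > /* Sketch only: fold IFN_VEC_EXTRACT (VECTOR_CST, INTEGER_CST) by
    >    returning the selected element when the index is in range.  */
    > static tree
    > fold_const_vec_extract_any (tree, tree arg0, tree arg1)
    > {
    >   if (TREE_CODE (arg0) != VECTOR_CST || !tree_fits_uhwi_p (arg1))
    >     return NULL_TREE;
    >
    >   unsigned HOST_WIDE_INT idx = tree_to_uhwi (arg1);
    >   if (!VECTOR_CST_NELTS (arg0).is_constant ()
    >       || idx >= VECTOR_CST_NELTS (arg0).to_constant ())
    >     return NULL_TREE;
    >
    >   return VECTOR_CST_ELT (arg0, idx);
    > }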
    
    Bootstrapped and regression tested on x86 & riscv64.  Tested across the
    cross configurations as well with no regressions.
    
            PR target/113666
    gcc/
            * fold-const-call.cc (fold_const_vec_extract): New function.
            (fold_const_call, case CFN_VEC_EXTRACT): Call it.
            * match.pd (IFN_VEC_EXTRACT): Handle extraction from a uniform
            vector.
    
    gcc/testsuite/
            * gcc.target/riscv/rvv/base/pr113666.c: New test.
    
    Co-authored-by: Andrew Pinski <[email protected]>

Diff:
---
 gcc/fold-const-call.cc                             | 23 +++++++++++++++++++++
 gcc/match.pd                                       |  5 +++++
 gcc/testsuite/gcc.target/riscv/rvv/base/pr113666.c | 24 ++++++++++++++++++++++
 3 files changed, 52 insertions(+)

diff --git a/gcc/fold-const-call.cc b/gcc/fold-const-call.cc
index 464d162cf550..aa63ced00bba 100644
--- a/gcc/fold-const-call.cc
+++ b/gcc/fold-const-call.cc
@@ -1459,6 +1459,26 @@ fold_const_vec_shl_insert (tree, tree arg0, tree arg1)
   return NULL_TREE;
 }
 
+/* Fold a call to IFN_VEC_EXTRACT (ARG0, ARG1), returning a value
+   of type TYPE.
+
+   Right now this is only handling uniform vectors, so ARG1 is not
+   used.  But it could be easily adjusted in the future to handle
+   non-uniform vectors by extracting the relevant element.  */
+
+static tree
+fold_const_vec_extract (tree, tree arg0, tree)
+{
+  if (TREE_CODE (arg0) != VECTOR_CST)
+    return NULL_TREE;
+
+  /* vec_extract (dup (CST), IDX) -> CST.  */
+  if (tree elem = uniform_vector_p (arg0))
+    return elem;
+
+  return NULL_TREE;
+}
+
 /* Try to evaluate:
 
       *RESULT = FN (*ARG0, *ARG1)
@@ -1865,6 +1885,9 @@ fold_const_call (combined_fn fn, tree type, tree arg0, tree arg1)
     case CFN_VEC_SHL_INSERT:
       return fold_const_vec_shl_insert (type, arg0, arg1);
 
+    case CFN_VEC_EXTRACT:
+      return fold_const_vec_extract (type, arg0, arg1);
+
     case CFN_UBSAN_CHECK_ADD:
     case CFN_ADD_OVERFLOW:
       subcode = PLUS_EXPR;
diff --git a/gcc/match.pd b/gcc/match.pd
index ea840502640b..e0a2399ca65d 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -12226,3 +12226,8 @@ and,
         && TYPE_UNSIGNED (type)
         && @0 == @3)
     (bit_xor (rrotate @0 @4) @2)))
+
+/* Optimize extraction from a uniform vector to a representative element as
+   long as the requested element is within range.  */
+(simplify (IFN_VEC_EXTRACT (vec_duplicate @0) INTEGER_CST@1)
+ @0)
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr113666.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pr113666.c
new file mode 100644
index 000000000000..b1034d7676d0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pr113666.c
@@ -0,0 +1,24 @@
+/* { dg-do compile } */
+/* { dg-options "-march=rv64gcv -mabi=lp64 -O3" { target rv64 } } */
+/* { dg-options "-march=rv32gcv -mabi=ilp32 -O3" { target rv32 } } */
+
+unsigned char a;
+
+int main() {
+  short b = a = 0;
+  for (; a != 19; a++)
+    if (a)
+      b = 32872 >> a;
+
+  if (b == 0)
+    return 0;
+  else
+    return 1;
+}
+
+/* If we vectorized, we should still be able to collapse away the VEC_EXTRACT,
+   leaving zero vector code in the final assembly.  So there should be no
+   vsetivli instructions.  */
+/* { dg-final { scan-assembler-not {vsetivli} } } */
+
+
