Bootstrapped and regtested on aarch64-none-linux-gnu with no issues.
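
For reference, here's a small illustration of the folds this enables.
This is only a sketch based on the included tests (the function names
are made up for the example); the tests themselves check the generated
assembly:

    #include <arm_neon.h>

    /* An immediate shift with an in-range count is now lowered to a
       plain GIMPLE right shift, so this becomes a >> 2 (a single
       ushr), as checked by vshl-opt-1.c.  */
    uint8x8_t
    shr_example (uint8x8_t a)
    {
      return vshr_n_u8 (a, 2);
    }

    /* A vshl by a uniform negative constant becomes a right shift by
       the absolute value, so this becomes a >> 6 (sshr), as checked
       by vshl-opt-5.c.  */
    int64x1_t
    shl_neg_example (int64x1_t a)
    {
      return vshl_s64 (a, vdup_n_s64 (-6));
    }

Out-of-range counts (e.g. vdup_n_s64 (80)) are deliberately not folded
and still use the sshl instruction, as covered by vshl-opt-4.c.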

Ok for master?

Thanks,
Tamar

gcc/ChangeLog:

        * config/aarch64/aarch64-builtins.c
        (aarch64_general_gimple_fold_builtin): Add ashl, sshl, ushl, ashr,
        ashr_simd, lshr, lshr_simd.
        * config/aarch64/aarch64-simd-builtins.def (lshr): Use USHIFTIMM.
        * config/aarch64/arm_neon.h (vshr_n_u8, vshr_n_u16, vshr_n_u32,
        vshrq_n_u8, vshrq_n_u16, vshrq_n_u32, vshrq_n_u64): Fix type hack.


gcc/testsuite/ChangeLog:

        * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-1.c: New test.
        * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-2.c: New test.
        * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-3.c: New test.
        * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-4.c: New test.
        * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-5.c: New test.
        * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-6.c: New test.
        * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-7.c: New test.
        * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-8.c: New test.
        * gcc.target/aarch64/signbit-2.c: New test.

--- inline copy of patch ---

diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
index f6b41d9c200d6300dee65ba60ae94488231a8a38..c362b29186cfc0bf0d39c08c314cfd6a99124cb2 100644
--- a/gcc/config/aarch64/aarch64-builtins.c
+++ b/gcc/config/aarch64/aarch64-builtins.c
@@ -2394,6 +2394,54 @@ aarch64_general_gimple_fold_builtin (unsigned int fcode, gcall *stmt)
                                               1, args[0]);
        gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
        break;
+      BUILTIN_VSDQ_I_DI (BINOP, ashl, 3, NONE)
+       if (TREE_CODE (args[1]) == INTEGER_CST
+           && wi::ltu_p (wi::to_wide (args[1]), element_precision (args[0])))
+         new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
+                                         LSHIFT_EXPR, args[0], args[1]);
+       break;
+      BUILTIN_VSDQ_I_DI (BINOP, sshl, 0, NONE)
+      BUILTIN_VSDQ_I_DI (BINOP_UUS, ushl, 0, NONE)
+       {
+         tree cst = args[1];
+         tree ctype = TREE_TYPE (cst);
+         /* Left shifts can be either scalar or vector, e.g. uint64x1_t is
+            treated as a scalar type not a vector one.  */
+         if ((cst = uniform_integer_cst_p (cst)) != NULL_TREE)
+           {
+             wide_int wcst = wi::to_wide (cst);
+             tree unit_ty = TREE_TYPE (cst);
+
+             wide_int abs_cst = wi::abs (wcst);
+             if (wi::geu_p (abs_cst, element_precision (args[0])))
+               break;
+
+             if (wi::neg_p (wcst, TYPE_SIGN (ctype)))
+               {
+                 tree final_cst;
+                 final_cst = wide_int_to_tree (unit_ty, abs_cst);
+                 if (TREE_CODE (cst) != INTEGER_CST)
+                   final_cst = build_uniform_cst (ctype, final_cst);
+
+                 new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
+                                                 RSHIFT_EXPR, args[0],
+                                                 final_cst);
+               }
+             else
+               new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
+                                               LSHIFT_EXPR, args[0], args[1]);
+           }
+       }
+       break;
+      BUILTIN_VDQ_I (SHIFTIMM, ashr, 3, NONE)
+      VAR1 (SHIFTIMM, ashr_simd, 0, NONE, di)
+      BUILTIN_VDQ_I (USHIFTIMM, lshr, 3, NONE)
+      VAR1 (USHIFTIMM, lshr_simd, 0, NONE, di)
+       if (TREE_CODE (args[1]) == INTEGER_CST
+           && wi::ltu_p (wi::to_wide (args[1]), element_precision (args[0])))
+         new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
+                                         RSHIFT_EXPR, args[0], args[1]);
+       break;
       BUILTIN_GPF (BINOP, fmulx, 0, ALL)
        {
          gcc_assert (nargs == 2);
diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def
index 402453aa9bba5949da43c984c4603196b1efd092..bbe0a4a3c4aea4187e7b1a9f10ab60e79df7b138 100644
--- a/gcc/config/aarch64/aarch64-simd-builtins.def
+++ b/gcc/config/aarch64/aarch64-simd-builtins.def
@@ -409,7 +409,7 @@
 
   BUILTIN_VDQ_I (SHIFTIMM, ashr, 3, NONE)
   VAR1 (SHIFTIMM, ashr_simd, 0, NONE, di)
-  BUILTIN_VDQ_I (SHIFTIMM, lshr, 3, NONE)
+  BUILTIN_VDQ_I (USHIFTIMM, lshr, 3, NONE)
   VAR1 (USHIFTIMM, lshr_simd, 0, NONE, di)
   /* Implemented by aarch64_<sur>shr_n<mode>.  */
   BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n, 0, NONE)
diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 635a223b59eb0f64304351939d444411b697af81..c4ef5f7f7e3658c830893931ef5a874842410e10 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -27400,21 +27400,21 @@ __extension__ extern __inline uint8x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshr_n_u8 (uint8x8_t __a, const int __b)
 {
-  return (uint8x8_t) __builtin_aarch64_lshrv8qi ((int8x8_t) __a, __b);
+  return __builtin_aarch64_lshrv8qi_uus (__a, __b);
 }
 
 __extension__ extern __inline uint16x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshr_n_u16 (uint16x4_t __a, const int __b)
 {
-  return (uint16x4_t) __builtin_aarch64_lshrv4hi ((int16x4_t) __a, __b);
+  return __builtin_aarch64_lshrv4hi_uus (__a, __b);
 }
 
 __extension__ extern __inline uint32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshr_n_u32 (uint32x2_t __a, const int __b)
 {
-  return (uint32x2_t) __builtin_aarch64_lshrv2si ((int32x2_t) __a, __b);
+  return __builtin_aarch64_lshrv2si_uus (__a, __b);
 }
 
 __extension__ extern __inline uint64x1_t
@@ -27456,28 +27456,28 @@ __extension__ extern __inline uint8x16_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrq_n_u8 (uint8x16_t __a, const int __b)
 {
-  return (uint8x16_t) __builtin_aarch64_lshrv16qi ((int8x16_t) __a, __b);
+  return __builtin_aarch64_lshrv16qi_uus (__a, __b);
 }
 
 __extension__ extern __inline uint16x8_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrq_n_u16 (uint16x8_t __a, const int __b)
 {
-  return (uint16x8_t) __builtin_aarch64_lshrv8hi ((int16x8_t) __a, __b);
+  return __builtin_aarch64_lshrv8hi_uus (__a, __b);
 }
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrq_n_u32 (uint32x4_t __a, const int __b)
 {
-  return (uint32x4_t) __builtin_aarch64_lshrv4si ((int32x4_t) __a, __b);
+  return __builtin_aarch64_lshrv4si_uus (__a, __b);
 }
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
 vshrq_n_u64 (uint64x2_t __a, const int __b)
 {
-  return (uint64x2_t) __builtin_aarch64_lshrv2di ((int64x2_t) __a, __b);
+  return __builtin_aarch64_lshrv2di_uus (__a, __b);
 }
 
 __extension__ extern __inline int64_t
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-1.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-1.c
new file mode 100644
index 0000000000000000000000000000000000000000..31cc9e2319aadf5414fa32edc40094c1fc579db5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-1.c
@@ -0,0 +1,11 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "--save-temps" } */
+
+#include <arm_neon.h>
+
+uint8x8_t foo (uint8x8_t a)
+{
+  return vshr_n_u8 (a, 2);
+}
+
+/* { dg-final { scan-assembler-times {\tushr\t.+, 2} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-2.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-2.c
new file mode 100644
index 0000000000000000000000000000000000000000..0b48c67f02e77c2c2ecb469537f8f376b48d9074
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-2.c
@@ -0,0 +1,11 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "--save-temps" } */
+
+#include <arm_neon.h>
+
+int16x8_t foo (int16x8_t a)
+{
+  return vshrq_n_s16 (a, 8);
+}
+
+/* { dg-final { scan-assembler-times {\tsshr\t.+, 8} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-3.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-3.c
new file mode 100644
index 0000000000000000000000000000000000000000..2ea15b4c0fffdb02b8af4c576a5b3e8757fa8b23
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-3.c
@@ -0,0 +1,11 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "--save-temps" } */
+
+#include <arm_neon.h>
+
+int16x8_t foo (int16x8_t a)
+{
+  return vshrq_n_s16 (a, 16);
+}
+
+/* { dg-final { scan-assembler-times {\tsshr\t.+, 16} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-4.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-4.c
new file mode 100644
index 0000000000000000000000000000000000000000..e26a1e422781577bc42ac22e7c993a8093e44925
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-4.c
@@ -0,0 +1,11 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "--save-temps" } */
+
+#include <arm_neon.h>
+
+int64x1_t foo (int64x1_t a)
+{
+  return vshl_s64 (a, vdup_n_s64(80));
+}
+
+/* { dg-final { scan-assembler-times {\tsshl\t.+, d[0-9]+} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-5.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-5.c
new file mode 100644
index 0000000000000000000000000000000000000000..1444f3a6420aef0d88dba57f2334992b183098ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-5.c
@@ -0,0 +1,12 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "--save-temps" } */
+/* { dg-skip-if "no optimizations" { *-*-* } { "-O0" } { "" } } */
+
+#include <arm_neon.h>
+
+int64x1_t foo (int64x1_t a)
+{
+  return vshl_s64 (a, vdup_n_s64(-6));
+}
+
+/* { dg-final { scan-assembler-times {\tsshr\t.+, 6} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-6.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-6.c
new file mode 100644
index 0000000000000000000000000000000000000000..4ff493f96bdd9461042683f3373c05103d4a9d8f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-6.c
@@ -0,0 +1,10 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "--save-temps" } */
+
+#include <arm_neon.h>
+
+int32x4_t foo (int32x4_t x) {
+  return vshlq_s32(x, vdupq_n_s32(256));
+}
+
+/* { dg-final { scan-assembler-times {\tsshl\t.+, v[0-9].4s} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-7.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-7.c
new file mode 100644
index 0000000000000000000000000000000000000000..76a73ba812a853541a93e2024b166e34ff540d08
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-7.c
@@ -0,0 +1,12 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "--save-temps" } */
+/* { dg-skip-if "no optimizations" { *-*-* } { "-O0" } { "" } } */
+
+#include <arm_neon.h>
+
+int32x4_t foo (int32x4_t x) {
+  return vshlq_s32(vdupq_n_s32(1), vdupq_n_s32(10));
+}
+
+/* { dg-final { scan-assembler-not {\tsshl\t} } } */
+/* { dg-final { scan-assembler-times {\tmovi\t} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-8.c b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-8.c
new file mode 100644
index 0000000000000000000000000000000000000000..27de3498f5321eee68fa0f1288b66bb11014c4e3
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/advsimd-intrinsics/vshl-opt-8.c
@@ -0,0 +1,10 @@
+/* { dg-do assemble } */
+/* { dg-additional-options "--save-temps" } */
+
+#include <arm_neon.h>
+
+int32x4_t foo (int32x4_t x) {
+  return vshlq_s32(x, vdupq_n_s32(-64));
+}
+
+/* { dg-final { scan-assembler-times {\tsshl\t.+, v[0-9]+.4s} 1 } } */
diff --git a/gcc/testsuite/gcc.target/aarch64/signbit-2.c b/gcc/testsuite/gcc.target/aarch64/signbit-2.c
new file mode 100644
index 0000000000000000000000000000000000000000..e4e9afc854317cb599fa8118a1117c5a52e6f497
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/signbit-2.c
@@ -0,0 +1,36 @@
+/* { dg-do assemble } */
+/* { dg-options "-O1 --save-temps" } */
+
+#include <arm_neon.h>
+
+int32x2_t foo1 (int32x2_t a)
+{
+  return vshr_n_s32 (vneg_s32 (a), 31);
+}
+
+int32x4_t foo2 (int32x4_t a)
+{
+  return vshrq_n_s32 (vnegq_s32 (a), 31);
+}
+
+int16x8_t foo3 (int16x8_t a)
+{
+  return vshrq_n_s16 (vnegq_s16 (a), 15);
+}
+
+int16x4_t foo4 (int16x4_t a)
+{
+  return vshr_n_s16 (vneg_s16 (a), 15);
+}
+
+int8x16_t foo5 (int8x16_t a)
+{
+  return vshrq_n_s8 (vnegq_s8 (a), 7);
+}
+
+int8x8_t foo6 (int8x8_t a)
+{
+  return vshr_n_s8 (vneg_s8 (a), 7);
+}
+
+/* { dg-final { scan-assembler-times {\tcmgt\t} 6 } } */

> -----Original Message-----
> From: Richard Sandiford <richard.sandif...@arm.com>
> Sent: Monday, October 25, 2021 7:31 PM
> To: Tamar Christina <tamar.christ...@arm.com>
> Cc: gcc-patches@gcc.gnu.org; nd <n...@arm.com>; Richard Earnshaw
> <richard.earns...@arm.com>; Marcus Shawcroft
> <marcus.shawcr...@arm.com>; Kyrylo Tkachov <kyrylo.tkac...@arm.com>
> Subject: Re: [PATCH]AArch64 Lower intrinsics shift to GIMPLE when possible.
> 
> Tamar Christina <tamar.christ...@arm.com> writes:
> >> >>
> >> >> int32x4_t foo(int32x4_t x) {
> >> >>   return vshlq_s32(x, vdupq_n_s32(256)); }
> >> >>
> >> >> should fold to “x” (if we fold it at all).  Similarly:
> >> >>
> >> >> int32x4_t foo(int32x4_t x) {
> >> >>   return vshlq_s32(x, vdupq_n_s32(257)); }
> >> >>
> >> >> should fold to x << 1 (again if we fold it at all).
> >> >>
> >> >> For a shift right:
> >> >>
> >> >> int32x4_t foo(int32x4_t x) {
> >> >>   return vshlq_s32(x, vdupq_n_s32(-64)); }
> >> >>
> >> >> is equivalent to:
> >> >>
> >> >> int32x4_t foo(int32x4_t x) {
> >> >>   return vshrq_n_s32(x, 31);
> >> >> }
> >> >>
> >> >> and so it shouldn't fold to 0.
> >> >
> >> > And here I thought I had read the specs very carefully...
> >> >
> >> > I will punt on them because I don't think those ranges are common at
> >> > all.
> >>
> >> Sounds good.
> >>
> >> There were other review comments further down the message (I should
> >> have been clearer about that, sorry).  Could you have a look at those too?
> >>
> >
> > Yes sorry I had missed those.
> >
> >> > +        }
> >> > +        break;
> >> > +      BUILTIN_VSDQ_I_DI (BINOP, sshl, 0, NONE)
> >> > +      BUILTIN_VSDQ_I_DI (BINOP_UUS, ushl, 0, NONE)
> >> > +        {
> >> > +          tree cst = args[1];
> >> > +          tree ctype = TREE_TYPE (cst);
> >> > +          HOST_WIDE_INT bits = GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (args[0])));
> >> > +          if (INTEGRAL_TYPE_P (ctype)
> >> > +              && TREE_CODE (cst) == INTEGER_CST)
> >>
> >> I don't think this works, since args[1] is a vector rather than a scalar.
> >> E.g. trying locally:
> >
> > The _x1_t types are treated as scalar, not vectors, so both are needed.
> 
> Ah, yeah, sorry for missing that.
> 
> > My original patch tested the scalar variant, which is why this is here.
> > I added the vector one.
> >
> > Bootstrapped Regtested on aarch64-none-linux-gnu and no issues.
> >
> > Ok for master?
> >
> > Thanks,
> > Tamar
> >
> > gcc/ChangeLog:
> >
> >     * config/aarch64/aarch64-builtins.c
> >     (aarch64_general_gimple_fold_builtin): Add ashl, sshl, ushl, ashr,
> >     ashr_simd, lshr, lshr_simd.
> >     * config/aarch64/aarch64-simd-builtins.def (lshr): Use USHIFTIMM.
> >     * config/aarch64/arm_neon.h (vshr_n_u8, vshr_n_u16, vshr_n_u32,
> >     vshrq_n_u8, vshrq_n_u16, vshrq_n_u32, vshrq_n_u64): Fix type
> hack.
> >
> >
> > gcc/testsuite/ChangeLog:
> >
> >     * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-1.c: New test.
> >     * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-2.c: New test.
> >     * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-3.c: New test.
> >     * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-4.c: New test.
> >     * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-5.c: New test.
> >     * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-6.c: New test.
> >     * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-7.c: New test.
> >     * gcc.target/aarch64/advsimd-intrinsics/vshl-opt-8.c: New test.
> >     * gcc.target/aarch64/signbit-2.c: New test.
> >
> > --- inline copy of patch ---
> >
> > diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c
> > index f6b41d9c200d6300dee65ba60ae94488231a8a38..41da13f82f8cfe0de3c56e62fe884ffabf315ef9 100644
> > --- a/gcc/config/aarch64/aarch64-builtins.c
> > +++ b/gcc/config/aarch64/aarch64-builtins.c
> > @@ -2394,6 +2394,89 @@ aarch64_general_gimple_fold_builtin (unsigned int fcode, gcall *stmt)
> >                                            1, args[0]);
> >     gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt));
> >     break;
> > +      BUILTIN_VSDQ_I_DI (BINOP, ashl, 3, NONE)
> > +   {
> > +     tree cst = args[1];
> > +     tree ctype = TREE_TYPE (cst);
> > +     if (TREE_CODE (cst) == INTEGER_CST)
> > +       {
> > +         wide_int wcst = wi::to_wide (cst);
> > +         if (wi::geu_p (wi::abs (wcst), element_precision (args[0])))
> > +           break;
> > +
> > +         if (wi::neg_p (wcst, TYPE_SIGN (ctype)))
> > +           new_stmt =
> > +             gimple_build_assign (gimple_call_lhs (stmt),
> > +                                  RSHIFT_EXPR, args[0],
> > +                                  wide_int_to_tree (ctype,
> > +                                                    wi::abs (wcst)));
> > +         else
> > +           new_stmt =
> > +             gimple_build_assign (gimple_call_lhs (stmt),
> > +                                  LSHIFT_EXPR, args[0], args[1]);
> > +       }
> 
> I don't think we should fold the negative cases here: they're erroneous in the
> same way that shifts by precision are.  E.g. clang gives an error
> for:
> 
>     #include <arm_neon.h>
> 
>     int32x4_t foo(int32x4_t x) {
>       return vshlq_n_s32(x, -1);
>     }
> 
> So I think this simplifies to:
> 
>     if (TREE_CODE (args[1]) == INTEGER_CST
>         && wi::ltu_p (wi::to_wide (args[1]), element_precision (args[0])))
>        new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
>                                      LSHIFT_EXPR, args[0], args[1]);
> 
> along similar lines to the shifts right.
> 
> > +   }
> > +   break;
> > +      BUILTIN_VSDQ_I_DI (BINOP, sshl, 0, NONE)
> > +      BUILTIN_VSDQ_I_DI (BINOP_UUS, ushl, 0, NONE)
> > +   {
> > +     tree cst = args[1];
> > +     tree ctype = TREE_TYPE (cst);
> > +     /* Left shifts can be both scalar or vector, e.g. uint64x1_t is
> > +        treated as a scalar type not a vector one.  */
> > +     if ((VECTOR_INTEGER_TYPE_P (ctype)
> > +          && uniform_vector_p (cst))
> > +         || TREE_CODE (cst) == INTEGER_CST)
> 
> There's a uniform_integer_cst_p — looks like this would be simpler using
> that.
> 
> > +       {
> > +         wide_int wcst;
> > +         tree unit_ty;
> > +         if (TREE_CODE (cst) == INTEGER_CST)
> > +           {
> > +             wcst = wi::to_wide (cst);
> > +             unit_ty = TREE_TYPE (cst);
> > +           }
> > +         else
> > +           {
> > +             tree tmp = vector_cst_elt (cst, 0);
> > +             wcst = wi::to_wide (tmp);
> > +             unit_ty = TREE_TYPE (tmp);
> > +           }
> > +
> > +         wide_int abs_cst = wi::abs (wcst);
> > +         if (wi::geu_p (wi::abs (wcst), element_precision (args[0])))
> 
> Might as well reuse abs_cst here.
> 
> > +           break;
> > +
> > +         if (wi::neg_p (wcst, TYPE_SIGN (ctype)))
> > +           {
> > +             tree final_cst;
> > +             final_cst = wide_int_to_tree (unit_ty, abs_cst);
> > +             if (TREE_CODE (cst) != INTEGER_CST)
> > +               final_cst = build_uniform_cst (ctype, final_cst);
> > +
> > +             new_stmt =
> > +               gimple_build_assign (gimple_call_lhs (stmt),
> > +                                    RSHIFT_EXPR, args[0], final_cst);
> > +           }
> > +         else
> > +           new_stmt =
> > +             gimple_build_assign (gimple_call_lhs (stmt),
> > +                                  LSHIFT_EXPR, args[0], args[1]);
> > +       }
> > +   }
> > +   break;
> > +      BUILTIN_VDQ_I (SHIFTIMM, ashr, 3, NONE)
> > +      VAR1 (SHIFTIMM, ashr_simd, 0, NONE, di)
> > +      BUILTIN_VDQ_I (USHIFTIMM, lshr, 3, NONE)
> > +      VAR1 (USHIFTIMM, lshr_simd, 0, NONE, di)
> > +   {
> > +     tree cst = args[1];
> > +     if (TREE_CODE (cst) == INTEGER_CST
> > +         && wi::leu_p (wi::to_wide (cst), element_precision (args[0]) - 1)
> > +         && wi::geu_p (wi::to_wide (cst), 0))
> 
> The geu_p is redundant: geu_p (x, 0) is true for all x.
> 
> Thanks,
> Richard
