Hi, 

  Here is a first pass at fixing PR86731, an issue introduced in the
gimple folding of the vec_sl() intrinsic: shift amounts outside the
valid range for the element type were not handled correctly.
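
For reference, the reported failure reduces to shifting each vector
element by an all-ones shift count, which is out of range for the
element width.  The core of the reported testcase:

    #include <altivec.h>

    vector unsigned int splat (void)
    {
      /* The splat of -1 gives all-ones elements, used here as both the
         shifted value and the (out of range) shift counts.  */
      vector unsigned int mzero = vec_splat_u32 (-1);
      return vec_sl (mzero, mzero);
    }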
    
This has been sniff tested (successfully) on a power7.  Full regtests
for Linux/PowerPC systems are pending.  I expect I'll need to tweak some
of the testcase scan-assembler stanzas after reviewing those results,
but I wanted to get this out for review sooner rather than later.  :-)
    
Assuming good results, is this OK for trunk and backport to 8?
Thanks,
-Will
    
[gcc]
    
2018-08-14  Will Schmidt  <will_schm...@vnet.ibm.com>

        PR target/86731
        * config/rs6000/rs6000.c (rs6000_gimple_fold_builtin): Update the
        logic for folding vec_sl() to handle out-of-range shift values.
    
[testsuite]

2018-08-14  Will Schmidt  <will_schm...@vnet.ibm.com>

        PR target/86731
        * gcc.target/powerpc/fold-vec-shift-altivectest-1.c: New test.
        * gcc.target/powerpc/fold-vec-shift-altivectest-2.c: New test.
        * gcc.target/powerpc/fold-vec-shift-altivectest-3.c: New test.

diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index ec92e6a..0a84290 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -15481,20 +15481,48 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
       builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}.  */
     case ALTIVEC_BUILTIN_VSLB:
     case ALTIVEC_BUILTIN_VSLH:
     case ALTIVEC_BUILTIN_VSLW:
     case P8V_BUILTIN_VSLD:
-      arg0 = gimple_call_arg (stmt, 0);
-      if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
-         && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
-       return false;
-      arg1 = gimple_call_arg (stmt, 1);
-      lhs = gimple_call_lhs (stmt);
-      g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
-      gimple_set_location (g, gimple_location (stmt));
-      gsi_replace (gsi, g, true);
-      return true;
+      {
+       location_t loc;
+       gimple_seq stmts = NULL;
+       arg0 = gimple_call_arg (stmt, 0);
+       tree arg0_type = TREE_TYPE (arg0);
+       if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
+           && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
+         return false;
+       arg1 = gimple_call_arg (stmt, 1);
+       tree arg1_type = TREE_TYPE (arg1);
+       tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
+       tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
+       loc = gimple_location (stmt);
+       lhs = gimple_call_lhs (stmt);
+       /* Force arg1 into the range of valid shift amounts for the arg0 type.  */
+       /* Build a vector consisting of the max valid bit-size values.  */
+       int n_elts = VECTOR_CST_NELTS (arg1);
+       int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
+                               * BITS_PER_UNIT;
+       tree element_size = build_int_cst (unsigned_element_type,
+                                          tree_size_in_bits / n_elts);
+       tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
+       for (int i = 0; i < n_elts; i++)
+         elts.safe_push (element_size);
+       tree modulo_tree = elts.build ();
+       /* Reduce the provided shift values modulo that vector.  */
+       tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
+                                          unsigned_arg1_type, arg1);
+       tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
+                                     unsigned_arg1_type, unsigned_arg1,
+                                     modulo_tree);
+       gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+       /* And finally, do the shift.  */
+       g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
+       gimple_set_location (g, loc);
+       gsi_replace (gsi, g, true);
+       return true;
+      }
     /* Flavors of vector shift right.  */
     case ALTIVEC_BUILTIN_VSRB:
     case ALTIVEC_BUILTIN_VSRH:
     case ALTIVEC_BUILTIN_VSRW:
     case P8V_BUILTIN_VSRD:
diff --git a/gcc/testsuite/gcc.target/powerpc/fold-vec-shift-altivectest-1.c b/gcc/testsuite/gcc.target/powerpc/fold-vec-shift-altivectest-1.c
new file mode 100644
index 0000000..e0546bf
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/fold-vec-shift-altivectest-1.c
@@ -0,0 +1,73 @@
+/* PR86731.  Verify that the rs6000 gimple-folding code handles the
+   left shift operation properly.  This is a testcase variation that
+   explicitly specifies -fwrapv, which is a condition for the
+   gimple folding of the vec_sl() intrinsic.  */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -O3 -fwrapv" } */
+
+#include <altivec.h>
+/* The original test as reported.  */
+vector unsigned int splat(void)
+{
+        vector unsigned int mzero = vec_splat_u32(-1);
+        return (vector unsigned int) vec_sl(mzero, mzero);
+}
+
+/* More testcase variations.  */
+vector unsigned char splatu1(void)
+{
+        vector unsigned char mzero = vec_splat_u8(-1);
+        return (vector unsigned char) vec_sl(mzero, mzero);
+}
+
+vector unsigned short splatu2(void)
+{
+        vector unsigned short mzero = vec_splat_u16(-1);
+        return (vector unsigned short) vec_sl(mzero, mzero);
+}
+
+vector unsigned int splatu3(void)
+{
+        vector unsigned int mzero = vec_splat_u32(-1);
+        return (vector unsigned int) vec_sl(mzero, mzero);
+}
+
+vector unsigned long long splatu4(void)
+{
+        vector unsigned long long mzero = {-1,-1};
+        return (vector unsigned long long) vec_sl(mzero, mzero);
+}
+vector signed char splats1(void)
+{
+        vector unsigned char mzero = vec_splat_u8(-1);
+        return (vector signed char) vec_sl(mzero, mzero);
+}
+
+vector signed short splats2(void)
+{
+        vector unsigned short mzero = vec_splat_u16(-1);
+        return (vector signed short) vec_sl(mzero, mzero);
+}
+
+vector signed int splats3(void)
+{
+        vector unsigned int mzero = vec_splat_u32(-1);
+        return (vector signed int) vec_sl(mzero, mzero);
+}
+
+vector signed long long splats4(void)
+{
+        vector unsigned long long mzero = {-1,-1};
+        return (vector signed long long) vec_sl(mzero, mzero);
+}
+
+/* Codegen will consist of splat and shift instructions for most types.
+   If folding is enabled, the vec_sl tests using the vector long long type
+   will generate an lvx instead of a vspltisw+vsld pair.  */
+
+/* { dg-final { scan-assembler-times {\mvspltisb\M|\mvspltish\M|\mvspltisw\M} 7 } } */
+/* { dg-final { scan-assembler-times {\mvslb\M|\mvslh\M|\mvslw\M|\mvsld\M} 7 } } */
+/* { dg-final { scan-assembler-times {\mlvx\M} 2 } } */
+
diff --git a/gcc/testsuite/gcc.target/powerpc/fold-vec-shift-altivectest-2.c b/gcc/testsuite/gcc.target/powerpc/fold-vec-shift-altivectest-2.c
new file mode 100644
index 0000000..20442ed
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/fold-vec-shift-altivectest-2.c
@@ -0,0 +1,73 @@
+/* PR86731.  Verify that the rs6000 gimple-folding code handles the
+   left shift operation properly.  This is a testcase variation that
+   explicitly disables gimple folding.  */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -O3 -fwrapv -mno-fold-gimple" } */
+/* { dg-prune-output "gimple folding of rs6000 builtins has been disabled." } */
+
+
+#include <altivec.h>
+/* The original test as reported.  */
+vector unsigned int splat(void)
+{
+        vector unsigned int mzero = vec_splat_u32(-1);
+        return (vector unsigned int) vec_sl(mzero, mzero);
+}
+
+/* More testcase variations.  */
+vector unsigned char splatu1(void)
+{
+        vector unsigned char mzero = vec_splat_u8(-1);
+        return (vector unsigned char) vec_sl(mzero, mzero);
+}
+
+vector unsigned short splatu2(void)
+{
+        vector unsigned short mzero = vec_splat_u16(-1);
+        return (vector unsigned short) vec_sl(mzero, mzero);
+}
+
+vector unsigned int splatu3(void)
+{
+        vector unsigned int mzero = vec_splat_u32(-1);
+        return (vector unsigned int) vec_sl(mzero, mzero);
+}
+
+vector unsigned long long splatu4(void)
+{
+        vector unsigned long long mzero = {-1,-1};
+        return (vector unsigned long long) vec_sl(mzero, mzero);
+}
+vector signed char splats1(void)
+{
+        vector unsigned char mzero = vec_splat_u8(-1);
+        return (vector signed char) vec_sl(mzero, mzero);
+}
+
+vector signed short splats2(void)
+{
+        vector unsigned short mzero = vec_splat_u16(-1);
+        return (vector signed short) vec_sl(mzero, mzero);
+}
+
+vector signed int splats3(void)
+{
+        vector unsigned int mzero = vec_splat_u32(-1);
+        return (vector signed int) vec_sl(mzero, mzero);
+}
+
+vector signed long long splats4(void)
+{
+        vector unsigned long long mzero = {-1,-1};
+        return (vector signed long long) vec_sl(mzero, mzero);
+}
+
+/* Codegen will consist of splat and shift instructions for most types.
+   Noted variations:  if gimple folding is disabled, or if -fwrapv is not specified,
+   the long long tests will generate a vspltisw+vsld pair instead of an lvx.  */
+/* { dg-final { scan-assembler-times {\mvspltisb\M|\mvspltish\M|\mvspltisw\M} 9 } } */
+/* { dg-final { scan-assembler-times {\mvslb\M|\mvslh\M|\mvslw\M|\mvsld\M} 9 } } */
+/* { dg-final { scan-assembler-times {\mlvx\M} 0 } } */
+
diff --git a/gcc/testsuite/gcc.target/powerpc/fold-vec-shift-altivectest-3.c b/gcc/testsuite/gcc.target/powerpc/fold-vec-shift-altivectest-3.c
new file mode 100644
index 0000000..df0b88c
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/fold-vec-shift-altivectest-3.c
@@ -0,0 +1,71 @@
+/* PR86731.  Verify that the rs6000 gimple-folding code handles the
+   left shift properly.  */
+
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_altivec_ok } */
+/* { dg-options "-maltivec -O3" } */
+
+#include <altivec.h>
+/* The original test as reported.  */
+vector unsigned int splat(void)
+{
+        vector unsigned int mzero = vec_splat_u32(-1);
+        return (vector unsigned int) vec_sl(mzero, mzero);
+}
+
+/* More testcase variations.  */
+vector unsigned char splatu1(void)
+{
+        vector unsigned char mzero = vec_splat_u8(-1);
+        return (vector unsigned char) vec_sl(mzero, mzero);
+}
+
+vector unsigned short splatu2(void)
+{
+        vector unsigned short mzero = vec_splat_u16(-1);
+        return (vector unsigned short) vec_sl(mzero, mzero);
+}
+
+vector unsigned int splatu3(void)
+{
+        vector unsigned int mzero = vec_splat_u32(-1);
+        return (vector unsigned int) vec_sl(mzero, mzero);
+}
+
+vector unsigned long long splatu4(void)
+{
+        vector unsigned long long mzero = {-1,-1};
+        return (vector unsigned long long) vec_sl(mzero, mzero);
+}
+vector signed char splats1(void)
+{
+        vector unsigned char mzero = vec_splat_u8(-1);
+        return (vector signed char) vec_sl(mzero, mzero);
+}
+
+vector signed short splats2(void)
+{
+        vector unsigned short mzero = vec_splat_u16(-1);
+        return (vector signed short) vec_sl(mzero, mzero);
+}
+
+vector signed int splats3(void)
+{
+        vector unsigned int mzero = vec_splat_u32(-1);
+        return (vector signed int) vec_sl(mzero, mzero);
+}
+
+vector signed long long splats4(void)
+{
+        vector unsigned long long mzero = {-1,-1};
+        return (vector signed long long) vec_sl(mzero, mzero);
+}
+
+/* Codegen will consist of splat and shift instructions for most types.
+   Noted variations:  if gimple folding is disabled, or if -fwrapv is not
+   specified, the long long tests will generate a vspltisw+vsld pair,
+   versus generating a single lvx.  */
+/* { dg-final { scan-assembler-times {\mvspltisb\M|\mvspltish\M|\mvspltisw\M} 9 } } */
+/* { dg-final { scan-assembler-times {\mvslb\M|\mvslh\M|\mvslw\M|\mvsld\M} 9 } } */
+/* { dg-final { scan-assembler-times {\mlvx\M} 0 } } */
+

