Hi!

For signed integers with undefined overflow we already optimize x * y / y
into x, but for signed integers with -fwrapv or unsigned integers we don't.
The following patch allows optimizing that into just x if value ranges
prove that x * y will never overflow.
It uses the global SSA_NAME_RANGE_INFO only, because like mentioned
in another PR we don't currently have a way to tell the ranger from match.pd
the use stmt (and we'd need in that case to tell ranger to only follow
SSA_NAME_DEF_STMTs + SSA_NAME_RANGE_INFO and never go in the other
direction, as following immediate uses seems forbidden in match.pd).
Another possibility would be to optimize this during vrp, but on the
other hand the optimization itself is match.pd-ish.

Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?

2020-11-26  Jakub Jelinek  <ja...@redhat.com>

        PR tree-optimization/97997
        * match.pd ((t * 2) / 2 -> t): Optimize even for defined
        overflow if ranges prove there is no overflow.

        * gcc.dg/tree-ssa/pr97997-1.c: New test.
        * gcc.dg/tree-ssa/pr97997-2.c: New test.

--- gcc/match.pd.jj     2020-11-25 16:58:33.840370571 +0100
+++ gcc/match.pd        2020-11-25 21:45:07.487050919 +0100
@@ -650,9 +650,48 @@ (define_operator_list COND_TERNARY
 (for div (trunc_div ceil_div floor_div round_div exact_div)
  (simplify
   (div (mult:c @0 @1) @1)
-  (if (ANY_INTEGRAL_TYPE_P (type)
-       && TYPE_OVERFLOW_UNDEFINED (type))
-   @0)))
+  (if (ANY_INTEGRAL_TYPE_P (type))
+   (if (TYPE_OVERFLOW_UNDEFINED (type))
+    @0
+#if GIMPLE
+    (if (TREE_CODE (@0) == SSA_NAME
+        && (TREE_CODE (@1) == SSA_NAME || TREE_CODE (@1) == INTEGER_CST))
+     (with
+      {
+       bool overflowed = true;
+       wide_int wmin0, wmax0;
+       if (get_range_info (@0, &wmin0, &wmax0) == VR_RANGE)
+         {
+           /* If the multiplication can't overflow/wrap around, then
+              it can be optimized too.  */
+           wide_int wmin1, wmax1;
+           wi::overflow_type min_ovf, max_ovf;
+           if (TREE_CODE (@1) == INTEGER_CST)
+             {
+               wmin1 = wi::to_wide (@1);
+               wi::mul (wmin0, wmin1, TYPE_SIGN (type), &min_ovf);
+               wi::mul (wmax0, wmin1, TYPE_SIGN (type), &max_ovf);
+               if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
+                 overflowed = false;
+             }
+           else if (get_range_info (@1, &wmin1, &wmax1) == VR_RANGE)
+             {
+               wi::mul (wmin0, wmin1, TYPE_SIGN (type), &min_ovf);
+               wi::mul (wmax0, wmax1, TYPE_SIGN (type), &max_ovf);
+               if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
+                 {
+                   wi::mul (wmin0, wmax1, TYPE_SIGN (type), &min_ovf);
+                   wi::mul (wmax0, wmin1, TYPE_SIGN (type), &max_ovf);
+                   if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
+                     overflowed = false;
+                 }
+             }
+         }
+      }
+     (if (!overflowed)
+      @0)))
+#endif
+   ))))
 
 (for op (negate abs)
  /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
--- gcc/testsuite/gcc.dg/tree-ssa/pr97997-1.c.jj        2020-11-25 21:54:29.745748747 +0100
+++ gcc/testsuite/gcc.dg/tree-ssa/pr97997-1.c   2020-11-25 21:59:01.428703547 +0100
@@ -0,0 +1,52 @@
+/* PR tree-optimization/97997 */
+/* { dg-do compile { target { ilp32 || lp64 } } } */
+/* { dg-options "-O2 -fdump-tree-optimized" } */
+/* { dg-final { scan-tree-dump-times "return x_\[0-9]*\\\(D\\\);" 6 "optimized" } } */
+/* { dg-final { scan-tree-dump-not " / " "optimized" } } */
+/* { dg-final { scan-tree-dump-not " \\* " "optimized" } } */
+
+unsigned short
+f1 (unsigned short x)
+{
+  return x * 10 / 10;
+}
+
+unsigned short
+f2 (unsigned short x)
+{
+  int a = x;
+  int b = 10;
+  int c = 10;
+  return a * b / c;
+}
+
+unsigned short
+f3 (unsigned short x)
+{
+  return x * 10U / 10;
+}
+
+unsigned short
+f4 (unsigned short x)
+{
+  unsigned a = x;
+  unsigned b = 10;
+  unsigned c = 10;
+  return a * b / c;
+}
+
+unsigned short
+f5 (unsigned short x, unsigned short y)
+{
+  return (unsigned) x * y / y;
+}
+
+unsigned int
+f6 (unsigned int x, unsigned int y)
+{
+  if (x >= 30000)
+    __builtin_unreachable ();
+  if (y >= ~0U / 30000)
+    __builtin_unreachable ();
+  return x * y / y;
+}
--- gcc/testsuite/gcc.dg/tree-ssa/pr97997-2.c.jj        2020-11-25 21:55:34.322024930 +0100
+++ gcc/testsuite/gcc.dg/tree-ssa/pr97997-2.c   2020-11-25 21:59:08.570623495 +0100
@@ -0,0 +1,41 @@
+/* PR tree-optimization/97997 */
+/* { dg-do compile { target { ilp32 || lp64 } } } */
+/* { dg-options "-O2 -fdump-tree-optimized -fwrapv" } */
+/* { dg-final { scan-tree-dump-times "return x_\[0-9]*\\\(D\\\);" 4 "optimized" } } */
+/* { dg-final { scan-tree-dump-not " / " "optimized" } } */
+/* { dg-final { scan-tree-dump-not " \\* " "optimized" } } */
+
+unsigned short
+f1 (unsigned short x)
+{
+  return x * 10 / 10;
+}
+
+unsigned short
+f2 (unsigned short x)
+{
+  int a = x;
+  int b = 10;
+  int c = 10;
+  return a * b / c;
+}
+
+short
+f3 (short x, short y)
+{
+  return x * y / y;
+}
+
+int
+f4 (int x, int y)
+{
+  if (x >= 30000)
+    __builtin_unreachable ();
+  if (x <= -30000)
+    __builtin_unreachable ();
+  if (y >= __INT_MAX__ / 30000)
+    __builtin_unreachable ();
+  if (y <= -__INT_MAX__ / 30000)
+    __builtin_unreachable ();
+  return x * y / y;
+}

        Jakub

Reply via email to