https://gcc.gnu.org/g:4e59fe15c8fef138ccd2022f22ca46676814494c

commit 4e59fe15c8fef138ccd2022f22ca46676814494c
Author: Alexandre Oliva <ol...@gnu.org>
Date:   Thu Nov 21 22:37:04 2024 -0300

    drop decode_field_reference subroutines

Diff:
---
 gcc/gimple-fold.cc                    | 159 ++++++++++++----------------------
 gcc/testsuite/gcc.dg/field-merge-11.c |  32 +++++++
 2 files changed, 87 insertions(+), 104 deletions(-)

diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index 1c9bdd0452b3..6e948c13cbea 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -70,7 +70,6 @@ along with GCC; see the file COPYING3.  If not see
 #include "varasm.h"
 #include "internal-fn.h"
 #include "gimple-range.h"
-#include "tree-ssa-loop-niter.h" // stmt_dominates_stmt_p
 
 enum strlen_range_kind {
   /* Compute the exact constant string length.  */
@@ -7392,77 +7391,6 @@ extern bool gimple_bit_and_cst (tree, tree *, tree (*)(tree));
 extern bool gimple_bit_xor_cst (tree, tree *, tree (*)(tree));
 extern bool gimple_rshift_cst (tree, tree *, tree (*)(tree));
 
-/* Follow substitutable SSA DEFs for *NAME, including type casts,
-   adjusting *NAME to the single rhs or the type cast operand along
-   the way.  Return the target type of the earliest type cast
-   found.  */
-
-static tree
-is_cast_p (tree *name)
-{
-  tree type = 0;
-  tree res_ops[1];
-
-  while (gimple_any_convert (*name, res_ops, follow_all_ssa_edges))
-    {
-      if (!type)
-       type = TREE_TYPE (*name);
-      *name = res_ops[0];
-    }
-
-  return type;
-}
-
-/* If *R_ARG is a constant zero, and L_ARG is a possibly masked
-   BIT_XOR_EXPR, return 1 and set *r_arg to l_arg.
-   Otherwise, return 0.
-
-   The returned value should be passed to decode_field_reference for it
-   to handle l_arg, and then doubled for r_arg.  */
-
-static int
-prepare_xor (tree l_arg, tree *r_arg)
-{
-  int ret = 0;
-
-  if (!integer_zerop (*r_arg))
-    return ret;
-
-  tree exp = l_arg;
-  tree res_ops[2];
-
-  if (gimple_bit_and_cst (exp, res_ops, follow_all_ssa_edges))
-    exp = res_ops[0];
-
-  if (gimple_bit_xor_cst (exp, res_ops, follow_all_ssa_edges))
-    {
-      *r_arg = l_arg;
-      return 1;
-    }
-
-  return ret;
-}
-
-/* If EXP is a SSA_NAME whose DEF is a load stmt, set *LOAD to it and
-   return its RHS, otherwise return EXP.  */
-
-static tree
-follow_load (tree exp, gimple **load)
-{
-  if (TREE_CODE (exp) == SSA_NAME
-      && !SSA_NAME_IS_DEFAULT_DEF (exp))
-    {
-      gimple *def = SSA_NAME_DEF_STMT (exp);
-      if (gimple_assign_load_p (def))
-       {
-         *load = def;
-         exp = gimple_assign_rhs1 (def);
-       }
-    }
-
-  return exp;
-}
-
 /* Subroutine for fold_truth_andor_1: decode a field reference.
 
    If EXP is a comparison reference, we return the innermost reference.
@@ -7485,9 +7413,11 @@ follow_load (tree exp, gimple **load)
 
    *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.
 
-   XOR_WHICH is 1 or 2 if EXP was found to be a (possibly masked)
-   BIT_XOR_EXPR compared with zero.  We're to take the first or second
-   operand thereof if so.  It should be zero otherwise.
+   *XOR_P is to be FALSE if EXP might be a XOR used in a compare, in which
+   case, if XOR_CMP_OP is a zero constant, it will be overridden with *EXP_,
+   *XOR_P will be set to TRUE, and the left-hand operand of the XOR will be
+   decoded.  If *XOR_P is TRUE, XOR_CMP_OP is supposed to be NULL, and then the
+   right-hand operand of the XOR will be decoded.
 
    *LOAD is set to the load stmt of the innermost reference, if any,
    *and NULL otherwise.
@@ -7499,7 +7429,8 @@ static tree
 decode_field_reference (tree *exp_, HOST_WIDE_INT *pbitsize,
                        HOST_WIDE_INT *pbitpos, machine_mode *pmode,
                        bool *punsignedp, bool *preversep, bool *pvolatilep,
-                       wide_int *pmask, wide_int *pand_mask, int xor_which,
+                       wide_int *pmask, wide_int *pand_mask,
+                       bool *xor_p, tree *xor_cmp_op,
                        gimple **load)
 {
   tree exp = *exp_;
@@ -7509,6 +7440,7 @@ decode_field_reference (tree *exp_, HOST_WIDE_INT *pbitsize,
   unsigned int precision;
   int shiftrt = 0;
   wide_int mask;
+  tree res_ops[2];
 
   *load = NULL;
 
@@ -7518,36 +7450,44 @@ decode_field_reference (tree *exp_, HOST_WIDE_INT *pbitsize,
   if (! INTEGRAL_TYPE_P (TREE_TYPE (exp)))
     return NULL_TREE;
 
-  /* We are interested in the bare arrangement of bits, so strip everything
-     that doesn't affect the machine mode.  However, record the type of the
-     outermost expression if it may matter below.  */
-  outer_type = is_cast_p (&exp);
+  /* Drop casts, only save the outermost type.  We need not worry about
+     narrowing then widening casts, or vice-versa, for those that are not
+     essential for the compare have already been optimized out at this
+     point.  */
+  while (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
+    {
+      if (!outer_type)
+       outer_type = TREE_TYPE (exp);
+      exp = res_ops[0];
+    }
 
-  tree res_ops[2];
   if (gimple_bit_and_cst (exp, res_ops, follow_all_ssa_edges))
     {
       exp = res_ops[0];
       and_mask = wi::to_wide (res_ops[1]);
     }
 
-  if (xor_which)
+  if (xor_p && gimple_bit_xor_cst (exp, res_ops, follow_all_ssa_edges))
     {
-      if (!gimple_bit_xor_cst (exp, res_ops, follow_all_ssa_edges))
-       gcc_unreachable ();
-      switch (xor_which)
+      if (*xor_p)
        {
-       case 1:
-       case 2:
-         exp = res_ops[xor_which - 1];
-         break;
-       default:
-         gcc_unreachable ();
+         exp = res_ops[1];
+         gcc_checking_assert (!xor_cmp_op);
+       }
+      else
+       {
+         *xor_p = true;
+         exp = res_ops[0];
+         *xor_cmp_op = exp_;
        }
     }
 
-  if (tree t = is_cast_p (&exp))
-    if (!outer_type)
-      outer_type = t;
+  while (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
+    {
+      if (!outer_type)
+       outer_type = TREE_TYPE (exp);
+      exp = res_ops[0];
+    }
 
   if (gimple_rshift_cst (exp, res_ops, follow_all_ssa_edges))
     {
@@ -7557,11 +7497,23 @@ decode_field_reference (tree *exp_, HOST_WIDE_INT *pbitsize,
        return NULL_TREE;
     }
 
-  if (tree t = is_cast_p (&exp))
-    if (!outer_type)
-      outer_type = t;
+  while (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
+    {
+      if (!outer_type)
+       outer_type = TREE_TYPE (exp);
+      exp = res_ops[0];
+    }
 
-  exp = follow_load (exp, load);
+  if (TREE_CODE (exp) == SSA_NAME
+      && !SSA_NAME_IS_DEFAULT_DEF (exp))
+    {
+      gimple *def = SSA_NAME_DEF_STMT (exp);
+      if (gimple_assign_load_p (def))
+       {
+         *load = def;
+         exp = gimple_assign_rhs1 (def);
+       }
+    }
 
   poly_int64 poly_bitsize, poly_bitpos;
   int unsignedp, reversep = *preversep, volatilep = *pvolatilep;
@@ -7940,27 +7892,26 @@ fold_truth_andor_maybe_separate (location_t loc,
 
   ll_reversep = lr_reversep = rl_reversep = rr_reversep = 0;
   volatilep = 0;
-  int l_xor = prepare_xor (ll_arg, &lr_arg);
+  bool l_xor, r_xor = false;
   ll_inner = decode_field_reference (&ll_arg,
                                     &ll_bitsize, &ll_bitpos, &ll_mode,
                                     &ll_unsignedp, &ll_reversep, &volatilep,
-                                    &ll_mask, &ll_and_mask, l_xor,
+                                    &ll_mask, &ll_and_mask, &l_xor, &lr_arg,
                                     &ll_load);
   lr_inner = decode_field_reference (&lr_arg,
                                     &lr_bitsize, &lr_bitpos, &lr_mode,
                                     &lr_unsignedp, &lr_reversep, &volatilep,
-                                    &lr_mask, &lr_and_mask, 2 * l_xor,
+                                    &lr_mask, &lr_and_mask, &l_xor, 0,
                                     &lr_load);
-  int r_xor = prepare_xor (rl_arg, &rr_arg);
   rl_inner = decode_field_reference (&rl_arg,
                                     &rl_bitsize, &rl_bitpos, &rl_mode,
                                     &rl_unsignedp, &rl_reversep, &volatilep,
-                                    &rl_mask, &rl_and_mask, r_xor,
+                                    &rl_mask, &rl_and_mask, &r_xor, &rr_arg,
                                     &rl_load);
   rr_inner = decode_field_reference (&rr_arg,
                                     &rr_bitsize, &rr_bitpos, &rr_mode,
                                     &rr_unsignedp, &rr_reversep, &volatilep,
-                                    &rr_mask, &rr_and_mask, 2 * r_xor,
+                                    &rr_mask, &rr_and_mask, &r_xor, 0,
                                     &rr_load);
 
   /* It must be true that the inner operation on the lhs of each
diff --git a/gcc/testsuite/gcc.dg/field-merge-11.c b/gcc/testsuite/gcc.dg/field-merge-11.c
new file mode 100644
index 000000000000..fe627cddd7fd
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/field-merge-11.c
@@ -0,0 +1,32 @@
+/* { dg-do run } */
+/* { dg-options "-O" } */
+
+/* Check that narrowing casts aren't ignored, and that same-field tests at
+   different widths aren't misoptimized.  */
+
+struct s {
+  short a;
+  unsigned short b;
+  int c;
+} __attribute__ ((aligned (4)));
+
+struct s p = { 42, (short)(0xef1 - 0x1000), 0x12345678 };
+
+void f (void) {
+  if (0
+      || (p.a & 0xcc) != 8
+      || p.a != 42
+      || (int)(signed char)p.b != (int)(signed char)(0xef1 - 0x1000)
+      || (unsigned)(unsigned char)p.b != (unsigned)(unsigned char)(0xef1 - 0x1000)
+      || (unsigned)p.b != (unsigned short)(0xef1 - 0x1000)
+      || (int)(short)p.b != (int)(0xef1 - 0x1000)
+      || (long)(unsigned char)(p.c >> 8) != (long)(unsigned char)0x123456
+      || p.c != 0x12345678
+      )
+    __builtin_abort ();
+}
+
+int main () {
+  f ();
+  return 0;
+}
