https://gcc.gnu.org/g:17fd3555e2c6a747adb166cafa01b794d3ef6ed5

commit 17fd3555e2c6a747adb166cafa01b794d3ef6ed5
Author: Alexandre Oliva <ol...@gnu.org>
Date:   Sun Dec 1 06:58:50 2024 -0300

    ifcombine: simplify and check for build error

Diff:
---
 gcc/gimple-fold.cc                    | 117 +++++++++++++---------------------
 gcc/testsuite/gcc.dg/field-merge-12.c |  33 ++++++++++
 2 files changed, 79 insertions(+), 71 deletions(-)
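
The change drops the *PMODE and *PMASK outputs of decode_field_reference, lets
callers pass a null PAND_MASK when BIT_AND_EXPR recognition isn't wanted, and
has fold_truth_andor_for_ifcombine derive each field's mask itself: from the
recorded BIT_AND_EXPR mask when there is one, otherwise from the field's
position and width via wi::shifted_mask.  A minimal plain-C model of that
selection follows; the 64-bit type, function name and field positions are
illustrative only, not the actual wide_int-based code:

#include <stdint.h>
#include <stdio.h>

/* Pick the mask for one field of the combined word: shift the recorded
   BIT_AND_EXPR mask into place if there was one, else cover the whole
   field of BITSIZE bits starting at BITPOS.  */
static uint64_t
field_mask (int have_and_mask, uint64_t and_mask, int bitpos, int bitsize)
{
  if (have_and_mask)
    return and_mask << bitpos;   /* like wi::lshift (wide_int::from (...)) */
  uint64_t full = (bitsize >= 64 ? ~(uint64_t) 0
                   : ((uint64_t) 1 << bitsize) - 1);
  return full << bitpos;         /* like wi::shifted_mask (bitpos, bitsize, ...) */
}

int
main (void)
{
  /* One comparison was masked with 7 at bit 0; the other compared a whole
     8-bit field at bit 8, so no mask was recorded for it.  */
  uint64_t m = field_mask (1, 7, 0, 3) | field_mask (0, 0, 8, 8);
  printf ("0x%llx\n", (unsigned long long) m);   /* prints 0xff07 */
  return 0;
}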

diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index 988e552180c9..fa86aa9665f7 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -7446,9 +7446,6 @@ maybe_fold_comparisons_from_match_pd (tree type, enum tree_code code,
    *PBITSIZE is set to the number of bits in the reference, *PBITPOS is
    set to the starting bit number.
 
-   If the innermost field can be completely contained in a mode-sized
-   unit, *PMODE is set to that mode.  Otherwise, it is set to VOIDmode.
-
    *PVOLATILEP is set to 1 if the any expression encountered is volatile;
    otherwise it is not changed.
 
@@ -7456,10 +7453,8 @@ maybe_fold_comparisons_from_match_pd (tree type, enum tree_code code,
 
    *PREVERSEP is set to the storage order of the field.
 
-   *PMASK is set to the mask used.  This is either contained in a
-   BIT_AND_EXPR or derived from the width of the field.
-
    *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.
+   If PAND_MASK is NULL, BIT_AND_EXPR is not recognized.
 
    *XOR_P is to be FALSE if EXP might be a XOR used in a compare, in which
    case, if XOR_CMP_OP is a zero constant, it will be overridden with *PEXP,
@@ -7478,10 +7473,9 @@ maybe_fold_comparisons_from_match_pd (tree type, enum tree_code code,
 
 static tree
 decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
-                       HOST_WIDE_INT *pbitpos, machine_mode *pmode,
+                       HOST_WIDE_INT *pbitpos,
                        bool *punsignedp, bool *preversep, bool *pvolatilep,
-                       wide_int *pmask, wide_int *pand_mask,
-                       bool *xor_p, tree *xor_cmp_op,
+                       wide_int *pand_mask, bool *xor_p, tree *xor_cmp_op,
                        gimple **load, location_t loc[4])
 {
   /* These are from match.pd.  */
@@ -7498,6 +7492,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
   int shiftrt = 0;
   wide_int mask;
   tree res_ops[2];
+  machine_mode mode;
 
   *load = NULL;
 
@@ -7522,7 +7517,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
     }
 
   /* Recognize and save a masking operation.  */
-  if (gimple_bit_and_cst (exp, res_ops, follow_all_ssa_edges))
+  if (pand_mask && gimple_bit_and_cst (exp, res_ops, follow_all_ssa_edges))
     {
       loc[1] = gimple_location (SSA_NAME_DEF_STMT (exp));
       exp = res_ops[0];
@@ -7600,11 +7595,10 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
   poly_int64 poly_bitsize, poly_bitpos;
   int unsignedp, reversep = *preversep, volatilep = *pvolatilep;
   inner = get_inner_reference (exp, &poly_bitsize, &poly_bitpos, &offset,
-                              pmode, &unsignedp, &reversep, &volatilep);
+                              &mode, &unsignedp, &reversep, &volatilep);
 
   HOST_WIDE_INT bs, bp;
-  if ((inner == exp && !and_mask.get_precision ())
-      || !poly_bitsize.is_constant (&bs)
+  if (!poly_bitsize.is_constant (&bs)
       || !poly_bitpos.is_constant (&bp)
       || bs <= shiftrt
       || offset != 0
@@ -7649,13 +7643,13 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
   /* Compute the mask to access the bitfield.  */
   precision = *pbitsize;
 
-  mask = wi::mask (*pbitsize, false, precision);
-
   /* Merge it with the mask we found in the BIT_AND_EXPR, if any.  */
   if (and_mask.get_precision () != 0)
-    mask &= wide_int::from (and_mask, precision, UNSIGNED);
+    {
+      and_mask = wide_int::from (and_mask, precision, UNSIGNED);
+      mask &= and_mask;
+    }
 
-  *pmask = mask;
   *pand_mask = and_mask;
   return inner;
 }
@@ -7913,10 +7907,8 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
   HOST_WIDE_INT rnbitsize, rnbitpos, rnprec;
   bool ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
   bool ll_reversep, lr_reversep, rl_reversep, rr_reversep;
-  machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
   scalar_int_mode lnmode, lnmode2, rnmode;
-  wide_int ll_mask, lr_mask, rl_mask, rr_mask;
-  wide_int ll_and_mask, lr_and_mask, rl_and_mask, rr_and_mask;
+  wide_int ll_and_mask, rl_and_mask;
   wide_int l_const, r_const;
   tree lntype, rntype, result;
   HOST_WIDE_INT first_bit, end_bit;
@@ -7987,25 +7979,21 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
   ll_reversep = lr_reversep = rl_reversep = rr_reversep = 0;
   volatilep = 0;
   bool l_xor = false, r_xor = false;
-  ll_inner = decode_field_reference (&ll_arg,
-                                    &ll_bitsize, &ll_bitpos, &ll_mode,
+  ll_inner = decode_field_reference (&ll_arg, &ll_bitsize, &ll_bitpos,
                                     &ll_unsignedp, &ll_reversep, &volatilep,
-                                    &ll_mask, &ll_and_mask, &l_xor, &lr_arg,
+                                    &ll_and_mask, &l_xor, &lr_arg,
                                     &ll_load, ll_loc);
-  lr_inner = decode_field_reference (&lr_arg,
-                                    &lr_bitsize, &lr_bitpos, &lr_mode,
+  lr_inner = decode_field_reference (&lr_arg, &lr_bitsize, &lr_bitpos,
                                     &lr_unsignedp, &lr_reversep, &volatilep,
-                                    &lr_mask, &lr_and_mask, &l_xor, 0,
+                                    0, &l_xor, 0,
                                     &lr_load, lr_loc);
-  rl_inner = decode_field_reference (&rl_arg,
-                                    &rl_bitsize, &rl_bitpos, &rl_mode,
+  rl_inner = decode_field_reference (&rl_arg, &rl_bitsize, &rl_bitpos,
                                     &rl_unsignedp, &rl_reversep, &volatilep,
-                                    &rl_mask, &rl_and_mask, &r_xor, &rr_arg,
+                                    &rl_and_mask, &r_xor, &rr_arg,
                                     &rl_load, rl_loc);
-  rr_inner = decode_field_reference (&rr_arg,
-                                    &rr_bitsize, &rr_bitpos, &rr_mode,
+  rr_inner = decode_field_reference (&rr_arg, &rr_bitsize, &rr_bitpos,
                                     &rr_unsignedp, &rr_reversep, &volatilep,
-                                    &rr_mask, &rr_and_mask, &r_xor, 0,
+                                    0, &r_xor, 0,
                                     &rr_load, rr_loc);
 
   /* It must be true that the inner operation on the lhs of each
@@ -8038,12 +8026,7 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
 
   if (lsignbit)
     {
-      wide_int sign = wi::mask (ll_bitsize - 1, true,
-                               TYPE_PRECISION (TREE_TYPE (ll_arg)));
-      if (!ll_mask.get_precision ())
-       ll_mask = sign;
-      else
-       ll_mask &= sign;
+      wide_int sign = wi::mask (ll_bitsize - 1, true, ll_bitsize);
       if (!ll_and_mask.get_precision ())
        ll_and_mask = sign;
       else
@@ -8052,12 +8035,7 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
 
   if (rsignbit)
     {
-      wide_int sign = wi::mask (rl_bitsize - 1, true,
-                               TYPE_PRECISION (TREE_TYPE (rl_arg)));
-      if (!rl_mask.get_precision ())
-       rl_mask = sign;
-      else
-       rl_mask &= sign;
+      wide_int sign = wi::mask (rl_bitsize - 1, true, rl_bitsize);
       if (!rl_and_mask.get_precision ())
        rl_and_mask = sign;
       else
@@ -8073,13 +8051,13 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
     {
       if (l_const.get_precision ()
          && l_const == 0
-         && wi::popcount (ll_mask) == 1)
+         && wi::popcount (ll_and_mask) == 1)
        {
          /* Make the left operand unsigned, since we are only interested
             in the value of one bit.  Otherwise we are doing the wrong
             thing below.  */
          ll_unsignedp = 1;
-         l_const = ll_mask;
+         l_const = ll_and_mask;
        }
       else
        return 0;
@@ -8090,10 +8068,10 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
     {
       if (r_const.get_precision ()
          && r_const == 0
-         && wi::popcount (rl_mask) == 1)
+         && wi::popcount (rl_and_mask) == 1)
        {
          rl_unsignedp = 1;
-         r_const = rl_mask;
+         r_const = rl_and_mask;
        }
       else
        return 0;
@@ -8231,10 +8209,17 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
     }
 
   /* Adjust masks to match the positions in the combined lntype.  */
-  ll_mask = wi::lshift (wide_int::from (ll_mask, lnprec, UNSIGNED),
-                       xll_bitpos);
-  rl_mask = wi::lshift (wide_int::from (rl_mask, lnprec, UNSIGNED),
-                       xrl_bitpos);
+  wide_int ll_mask, rl_mask, r_mask;
+  if (ll_and_mask.get_precision ())
+    ll_mask = wi::lshift (wide_int::from (ll_and_mask, lnprec, UNSIGNED),
+                         xll_bitpos);
+  else
+    ll_mask = wi::shifted_mask (xll_bitpos, ll_bitsize, false, lnprec);
+  if (rl_and_mask.get_precision ())
+    rl_mask = wi::lshift (wide_int::from (rl_and_mask, lnprec, UNSIGNED),
+                         xrl_bitpos);
+  else
+    rl_mask = wi::shifted_mask (xrl_bitpos, rl_bitsize, false, lnprec);
 
   /* Adjust right-hand constants in both original comparisons to match width
      and bit position.  */
@@ -8242,12 +8227,8 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
     {
       l_const = wide_int::from (l_const, lnprec,
                                TYPE_SIGN (TREE_TYPE (lr_arg)));
-      if (!TYPE_UNSIGNED (TREE_TYPE (lr_arg)))
-       {
-         l_const = wi::zext (l_const, TYPE_PRECISION (TREE_TYPE (lr_arg)));
-         if (ll_and_mask.get_precision ())
-           l_const &= wide_int::from (ll_and_mask, lnprec, UNSIGNED);
-       }
+      if (ll_and_mask.get_precision ())
+       l_const &= wide_int::from (ll_and_mask, lnprec, UNSIGNED);
       l_const <<= xll_bitpos;
       if ((l_const & ~ll_mask) != 0)
        {
@@ -8262,12 +8243,8 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
 
       r_const = wide_int::from (r_const, lnprec,
                                TYPE_SIGN (TREE_TYPE (rr_arg)));
-      if (!TYPE_UNSIGNED (TREE_TYPE (rr_arg)))
-       {
-         r_const = wi::zext (r_const, TYPE_PRECISION (TREE_TYPE (rr_arg)));
-         if (rl_and_mask.get_precision ())
-           r_const &= wide_int::from (rl_and_mask, lnprec, UNSIGNED);
-       }
+      if (rl_and_mask.get_precision ())
+       r_const &= wide_int::from (rl_and_mask, lnprec, UNSIGNED);
       r_const <<= xrl_bitpos;
       if ((r_const & ~rl_mask) != 0)
        {
@@ -8306,7 +8283,7 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
                          lr_reversep);
 
       /* No masking needed, we know the full constants.  */
-      lr_mask = wi::mask (0, true, lnprec);
+      r_mask = wi::mask (0, true, lnprec);
 
       /* If the compiler thinks this is used uninitialized below, it's
         because it can't realize that parts can only be 2 when
@@ -8391,10 +8368,8 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
        }
 
       /* Adjust the masks to match the combined type, and combine them.  */
-      lr_mask = wide_int::from (lr_mask, rnprec, UNSIGNED) << xlr_bitpos;
-      rr_mask = wide_int::from (rr_mask, rnprec, UNSIGNED) << xrr_bitpos;
-
-      lr_mask |= rr_mask;
+      r_mask = (wi::shifted_mask (xlr_bitpos, lr_bitsize, false, rnprec)
+               | wi::shifted_mask (xrr_bitpos, rr_bitsize, false, rnprec));
 
       /* Load the right-hand operand of the combined compare.  */
       toshift[1][0] = MIN (xlr_bitpos, xrr_bitpos);
@@ -8431,7 +8406,7 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
     }
 
   /* Now issue the loads for the left-hand combined operand/s.  */
-  ll_mask |= rl_mask;
+  wide_int l_mask = ll_mask | rl_mask;
   toshift[0][0] = MIN (xll_bitpos, xrl_bitpos);
   shifted[0][0] = 0;
 
@@ -8467,7 +8442,7 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
   for (int i = 0; i < parts; i++)
     {
       tree op[2] = { ld_arg[0][i], ld_arg[1][i] };
-      wide_int mask[2] = { ll_mask, lr_mask };
+      wide_int mask[2] = { l_mask, r_mask };
       location_t *locs[2] = { i ? rl_loc : ll_loc, i ? rr_loc : lr_loc };
 
       /* Figure out the masks, and unshare the original operands.  */
diff --git a/gcc/testsuite/gcc.dg/field-merge-12.c b/gcc/testsuite/gcc.dg/field-merge-12.c
new file mode 100644
index 000000000000..7056eb607e90
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/field-merge-12.c
@@ -0,0 +1,33 @@
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+
+/* Check that we don't crash when trying to handle masks that don't match the
+   width of the original type.  */
+
+struct s {
+  long long q;
+};
+
+struct s x1 = { 1 };
+struct s xm1 = { -1 };
+struct s x8 = { 8 };
+struct s x0 = { 0 };
+
+bool f(struct s *p)
+{
+  int q = (int)p->q;
+  return (q < 0) || (q & 7);
+}
+
+int main ()
+{
+  if (!f (&x1))
+    __builtin_abort ();
+  if (!f (&xm1))
+    __builtin_abort ();
+  if (f (&x8))
+    __builtin_abort ();
+  if (f (&x0))
+    __builtin_abort ();
+  return 0;
+}

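For context, the condition in the new test pairs a sign-bit check with a
low-bits check of the same loaded value, which is the shape this code aims to
fold into a single masked compare.  A hand-written equivalent of that folded
form (illustrative only, not the GIMPLE the pass actually emits) would be:

/* Roughly what merging the two tests in f() above amounts to: one masked
   compare.  0x80000000 is the sign bit of the 32-bit int, 7 its low three
   bits.  */
struct s { long long q; };

_Bool f_combined (struct s *p)
{
  int q = (int) p->q;
  return (q & 0x80000007) != 0;
}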