https://gcc.gnu.org/g:186c7798d62200521bdb385a39365e424ccf6b09

commit 186c7798d62200521bdb385a39365e424ccf6b09
Author: Alexandre Oliva <ol...@gnu.org>
Date:   Thu Nov 21 23:30:38 2024 -0300

    rework locations in fold_truth_andor_for_ifcombine

Diff:
---
 gcc/fold-const.h          | 10 +++---
 gcc/gimple-fold.cc        | 81 ++++++++++++++++++++++++++++++++---------------
 gcc/tree-ssa-ifcombine.cc |  7 ++--
 3 files changed, 65 insertions(+), 33 deletions(-)

diff --git a/gcc/fold-const.h b/gcc/fold-const.h
index 6372dee74e9e..77a5c916cbd8 100644
--- a/gcc/fold-const.h
+++ b/gcc/fold-const.h
@@ -261,10 +261,12 @@ extern void clear_type_padding_in_mask (tree, unsigned char *);
 extern bool clear_padding_type_may_have_padding_p (tree);
 extern bool arith_overflowed_p (enum tree_code, const_tree, const_tree,
                                const_tree);
-extern tree fold_truth_andor_maybe_separate (location_t, enum tree_code, tree,
-                                            enum tree_code, tree, tree,
-                                            enum tree_code, tree, tree,
-                                            tree *);
+extern tree fold_truth_andor_for_ifcombine (enum tree_code, tree,
+                                           location_t, enum tree_code,
+                                           tree, tree,
+                                           location_t, enum tree_code,
+                                           tree, tree,
+                                           tree *);
 
 
 /* Class used to compare gimple operands.  */
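
For illustration, the call shape under the new prototype, sketched from the
tree-ssa-ifcombine.cc hunk at the end of this patch: each condition now
contributes its own location instead of the single location_t the old
fold_truth_andor_maybe_separate took.  Here outer_cond and inner_cond are the
two GIMPLE_CONDs being combined, outer_cond_code and inner_cond_code the
(possibly inverted) compare codes the caller computes beforehand, and the
trailing NULL is the SEPARATEP out-parameter.

  tree t = fold_truth_andor_for_ifcombine
    (TRUTH_ANDIF_EXPR, boolean_type_node,
     gimple_location (outer_cond), outer_cond_code,
     gimple_cond_lhs (outer_cond), gimple_cond_rhs (outer_cond),
     gimple_location (inner_cond), inner_cond_code,
     gimple_cond_lhs (inner_cond), gimple_cond_rhs (inner_cond),
     NULL);
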
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index 6e948c13cbea..f79a7594fdc4 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -7422,6 +7422,9 @@ extern bool gimple_rshift_cst (tree, tree *, tree (*)(tree));
    *LOAD is set to the load stmt of the innermost reference, if any,
   and NULL otherwise.
 
+   LOC[0..3] are filled in as conversion, masking, shifting and loading
+   operations are located.
+
    Return 0 if this is not a component reference or is one that we can't
    do anything with.  */
 
@@ -7431,7 +7434,7 @@ decode_field_reference (tree *exp_, HOST_WIDE_INT *pbitsize,
                        bool *punsignedp, bool *preversep, bool *pvolatilep,
                        wide_int *pmask, wide_int *pand_mask,
                        bool *xor_p, tree *xor_cmp_op,
-                       gimple **load)
+                       gimple **load, location_t loc[4])
 {
   tree exp = *exp_;
   tree outer_type = 0;
@@ -7454,21 +7457,27 @@ decode_field_reference (tree *exp_, HOST_WIDE_INT *pbitsize,
      narrowing then widening casts, or vice-versa, for those that are not
      essential for the compare have already been optimized out at this
      point.  */
-  while (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
+  if (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
     {
       if (!outer_type)
-       outer_type = TREE_TYPE (exp);
+       {
+         outer_type = TREE_TYPE (exp);
+         loc[0] = gimple_location (SSA_NAME_DEF_STMT (exp));
+       }
       exp = res_ops[0];
     }
 
   if (gimple_bit_and_cst (exp, res_ops, follow_all_ssa_edges))
     {
+      loc[1] = gimple_location (SSA_NAME_DEF_STMT (exp));
       exp = res_ops[0];
       and_mask = wi::to_wide (res_ops[1]);
     }
 
   if (xor_p && gimple_bit_xor_cst (exp, res_ops, follow_all_ssa_edges))
     {
+      /* No location recorded for this one; it's entirely subsumed by the
+        compare.  */
       if (*xor_p)
        {
          exp = res_ops[1];
@@ -7478,29 +7487,36 @@ decode_field_reference (tree *exp_, HOST_WIDE_INT *pbitsize,
        {
          *xor_p = true;
          exp = res_ops[0];
-         *xor_cmp_op = exp_;
+         *xor_cmp_op = *exp_;
        }
     }
 
-  while (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
+  if (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
     {
       if (!outer_type)
-       outer_type = TREE_TYPE (exp);
+       {
+         outer_type = TREE_TYPE (exp);
+         loc[0] = gimple_location (SSA_NAME_DEF_STMT (exp));
+       }
       exp = res_ops[0];
     }
 
   if (gimple_rshift_cst (exp, res_ops, follow_all_ssa_edges))
     {
+      loc[2] = gimple_location (SSA_NAME_DEF_STMT (exp));
       exp = res_ops[0];
       shiftrt = tree_to_shwi (res_ops[1]);
       if (shiftrt <= 0)
        return NULL_TREE;
     }
 
-  while (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
+  if (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
     {
       if (!outer_type)
-       outer_type = TREE_TYPE (exp);
+       {
+         outer_type = TREE_TYPE (exp);
+         loc[0] = gimple_location (SSA_NAME_DEF_STMT (exp));
+       }
       exp = res_ops[0];
     }
 
@@ -7510,6 +7526,7 @@ decode_field_reference (tree *exp_, HOST_WIDE_INT *pbitsize,
       gimple *def = SSA_NAME_DEF_STMT (exp);
       if (gimple_assign_load_p (def))
        {
+         loc[3] = gimple_location (def);
          *load = def;
          exp = gimple_assign_rhs1 (def);
        }
@@ -7630,6 +7647,9 @@ make_bit_field_load (location_t loc, tree inner, tree orig_inner, tree type,
                     HOST_WIDE_INT bitsize, poly_int64 bitpos,
                     bool unsignedp, bool reversep, gimple *point)
 {
+  if (point && loc == UNKNOWN_LOCATION)
+    loc = gimple_location (point);
+
   tree ref = make_bit_field_ref (loc, unshare_expr (inner),
                                 unshare_expr (orig_inner),
                                 type, bitsize, bitpos,
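
The guard added at the top of make_bit_field_load is the safety net for the
new plumbing: the load slot is the only one the callers below seed with
UNKNOWN_LOCATION, so a load synthesized without a recorded location borrows
the location of POINT, the original load stmt it is modeled on.  A hedged
sketch of the effect (the ll_loc reference and new_load are illustrative,
not part of the patch):

  location_t loc = ll_loc[3];        /* seeded as UNKNOWN_LOCATION */
  if (point && loc == UNKNOWN_LOCATION)
    loc = gimple_location (point);   /* borrow the original load's location */
  gimple_set_location (new_load, loc);
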
@@ -7799,11 +7819,12 @@ reuse_split_load (tree /* in[0] out[1] */ ln_arg[2],
    We return the simplified tree or 0 if no optimization is possible.  */
 
 tree
-fold_truth_andor_maybe_separate (location_t loc,
-                                enum tree_code code, tree truth_type,
-                                enum tree_code lcode, tree ll_arg, tree lr_arg,
-                                enum tree_code rcode, tree rl_arg, tree rr_arg,
-                                tree *separatep)
+fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
+                               location_t lloc, enum tree_code lcode,
+                               tree ll_arg, tree lr_arg,
+                               location_t rloc, enum tree_code rcode,
+                               tree rl_arg, tree rr_arg,
+                               tree *separatep)
 {
   /* If this is the "or" of two comparisons, we can do something if
      the comparisons are NE_EXPR.  If this is the "and", we can do something
@@ -7835,6 +7856,12 @@ fold_truth_andor_maybe_separate (location_t loc,
   bool volatilep;
   bool l_split_load;
 
+  /* These are indexed by: conv, mask, shft, load.  */
+  location_t ll_loc[4] = { lloc, lloc, lloc, UNKNOWN_LOCATION };
+  location_t lr_loc[4] = { lloc, lloc, lloc, UNKNOWN_LOCATION };
+  location_t rl_loc[4] = { rloc, rloc, rloc, UNKNOWN_LOCATION };
+  location_t rr_loc[4] = { rloc, rloc, rloc, UNKNOWN_LOCATION };
+
   gcc_checking_assert (!separatep || !*separatep);
 
   /* Start by getting the comparison codes.  Fail if anything is volatile.
@@ -7897,22 +7924,22 @@ fold_truth_andor_maybe_separate (location_t loc,
                                     &ll_bitsize, &ll_bitpos, &ll_mode,
                                     &ll_unsignedp, &ll_reversep, &volatilep,
                                     &ll_mask, &ll_and_mask, &l_xor, &lr_arg,
-                                    &ll_load);
+                                    &ll_load, ll_loc);
   lr_inner = decode_field_reference (&lr_arg,
                                     &lr_bitsize, &lr_bitpos, &lr_mode,
                                     &lr_unsignedp, &lr_reversep, &volatilep,
                                     &lr_mask, &lr_and_mask, &l_xor, 0,
-                                    &lr_load);
+                                    &lr_load, lr_loc);
   rl_inner = decode_field_reference (&rl_arg,
                                     &rl_bitsize, &rl_bitpos, &rl_mode,
                                     &rl_unsignedp, &rl_reversep, &volatilep,
                                     &rl_mask, &rl_and_mask, &r_xor, &rr_arg,
-                                    &rl_load);
+                                    &rl_load, rl_loc);
   rr_inner = decode_field_reference (&rr_arg,
                                     &rr_bitsize, &rr_bitpos, &rr_mode,
                                     &rr_unsignedp, &rr_reversep, &volatilep,
                                     &rr_mask, &rr_and_mask, &r_xor, 0,
-                                    &rr_load);
+                                    &rr_load, rr_loc);
 
   /* It must be true that the inner operation on the lhs of each
      comparison must be the same if we are to be able to do anything.
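
Putting the pieces together, the protocol of these four calls can be
sketched as follows; this paraphrases the hunks above, with the slot order
conv, mask, shft, load taken from the patch's own comment, and only the lhs
operand shown:

  /* Seed the conversion, mask and shift slots with the condition's own
     location; only the load slot starts out unknown.  */
  location_t ll_loc[4] = { lloc, lloc, lloc, UNKNOWN_LOCATION };
  ll_inner = decode_field_reference (&ll_arg,
                                     &ll_bitsize, &ll_bitpos, &ll_mode,
                                     &ll_unsignedp, &ll_reversep, &volatilep,
                                     &ll_mask, &ll_and_mask, &l_xor, &lr_arg,
                                     &ll_load, ll_loc);
  /* On return, each slot whose operation was found has been overwritten
     with the defining stmt's location: ll_loc[0] for a conversion,
     ll_loc[1] for the BIT_AND mask, ll_loc[2] for a right shift, and
     ll_loc[3] for the load itself.  */
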
@@ -8245,7 +8272,7 @@ fold_truth_andor_maybe_separate (location_t loc,
        {
          bitpos[1][0] = rnbitpos;
          bitsiz[1][0] = rnbitsize;
-         ld_arg[1][0] = make_bit_field_load (loc, lr_inner, lr_arg,
+         ld_arg[1][0] = make_bit_field_load (ll_loc[3], lr_inner, lr_arg,
                                              rntype, rnbitsize, rnbitpos,
                                              lr_unsignedp || rr_unsignedp,
                                              lr_reversep, lr_load);
@@ -8259,7 +8286,7 @@ fold_truth_andor_maybe_separate (location_t loc,
              point[0] = lr_load;
              point[1] = rr_load;
              build_split_load (ld_arg[1], bitpos[1], bitsiz[1], toshift[1],
-                               shifted[1], loc, lr_inner, lr_arg,
+                               shifted[1], rl_loc[3], lr_inner, lr_arg,
                                rnmode, rnmode2, rnbitpos, lr_reversep, point);
            }
          else
@@ -8320,7 +8347,7 @@ fold_truth_andor_maybe_separate (location_t loc,
     {
       bitpos[0][0] = lnbitpos;
       bitsiz[0][0] = lnbitsize;
-      ld_arg[0][0] = make_bit_field_load (loc, ll_inner, ll_arg,
+      ld_arg[0][0] = make_bit_field_load (ll_loc[3], ll_inner, ll_arg,
                                          lntype, lnbitsize, lnbitpos,
                                          ll_unsignedp || rl_unsignedp,
                                          ll_reversep, ll_load);
@@ -8334,7 +8361,7 @@ fold_truth_andor_maybe_separate (location_t loc,
              point[0] = ll_load;
              point[1] = rl_load;
              build_split_load (ld_arg[0], bitpos[0], bitsiz[0], toshift[0],
-                               shifted[0], loc, ll_inner, ll_arg,
+                               shifted[0], rl_loc[3], ll_inner, ll_arg,
                                lnmode, lnmode2, lnbitpos, ll_reversep, point);
            }
       else
@@ -8348,6 +8375,7 @@ fold_truth_andor_maybe_separate (location_t loc,
     {
       tree op[2] = { ld_arg[0][i], ld_arg[1][i] };
       wide_int mask[2] = { ll_mask, lr_mask };
+      location_t *locs[2] = { i ? rl_loc : ll_loc, i ? rr_loc : lr_loc };
 
       for (int j = 0; j < 2; j++)
        {
@@ -8380,7 +8408,7 @@ fold_truth_andor_maybe_separate (location_t loc,
            }
 
          tree shiftsz = bitsize_int (shift);
-         op[j] = fold_build2_loc (loc, RSHIFT_EXPR, TREE_TYPE (op[j]),
+         op[j] = fold_build2_loc (locs[j][1], RSHIFT_EXPR, TREE_TYPE (op[j]),
                                   op[j], shiftsz);
          mask[j] = wi::lrshift (mask[j], shift);
        }
@@ -8394,16 +8422,17 @@ fold_truth_andor_maybe_separate (location_t loc,
                   < TYPE_PRECISION (TREE_TYPE (op[1])));
          if (!j)
            type = TREE_TYPE (op[1]);
-         op[j] = fold_convert_loc (loc, type, op[j]);
+         op[j] = fold_convert_loc (locs[j][0], type, op[j]);
          mask[j] = wide_int::from (mask[j], TYPE_PRECISION (type), UNSIGNED);
        }
 
       for (int j = 0; j < 2; j++)
        if (mask[j] != wi::mask (0, true, mask[j].get_precision ()))
-         op[j] = build2_loc (loc, BIT_AND_EXPR, type,
+         op[j] = build2_loc (locs[j][2], BIT_AND_EXPR, type,
                              op[j], wide_int_to_tree (type, mask[j]));
 
-      cmp[i] = build2_loc (loc, wanted_code, truth_type, op[0], op[1]);
+      cmp[i] = build2_loc (i ? rloc : lloc, wanted_code, truth_type,
+                          op[0], op[1]);
     }
 
   if (first1)
@@ -8412,7 +8441,7 @@ fold_truth_andor_maybe_separate (location_t loc,
   if (parts == 1)
     result = cmp[0];
   else if (!separatep || !maybe_separate)
-    result = build2_loc (loc, orig_code, truth_type, cmp[0], cmp[1]);
+    result = build2_loc (rloc, orig_code, truth_type, cmp[0], cmp[1]);
   else
     {
       result = cmp[0];
diff --git a/gcc/tree-ssa-ifcombine.cc b/gcc/tree-ssa-ifcombine.cc
index 6cb2edeb66eb..e87c82a6f011 100644
--- a/gcc/tree-ssa-ifcombine.cc
+++ b/gcc/tree-ssa-ifcombine.cc
@@ -973,12 +973,13 @@ ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
                                            gimple_cond_lhs (outer_cond),
                                            gimple_cond_rhs (outer_cond),
                                            gimple_bb (outer_cond)))
-         && !(t = (fold_truth_andor_maybe_separate
-                   (UNKNOWN_LOCATION, TRUTH_ANDIF_EXPR,
-                    boolean_type_node,
+         && !(t = (fold_truth_andor_for_ifcombine
+                   (TRUTH_ANDIF_EXPR, boolean_type_node,
+                    gimple_location (outer_cond),
                     outer_cond_code,
                     gimple_cond_lhs (outer_cond),
                     gimple_cond_rhs (outer_cond),
+                    gimple_location (inner_cond),
                     inner_cond_code,
                     gimple_cond_lhs (inner_cond),
                     gimple_cond_rhs (inner_cond),
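
To close the loop, a condensed view of where the recorded slots end up when
the combined compare is rebuilt, collected from the gimple-fold.cc hunks
above; locs[j] selects the slot array for whichever operand feeds position j
of compare i:

  location_t *locs[2] = { i ? rl_loc : ll_loc, i ? rr_loc : lr_loc };

  /* As committed, the rebuilt shift takes locs[j][1], the conversion
     locs[j][0], the masking BIT_AND locs[j][2], and the compare itself
     the location of the cond it replaces.  */
  op[j] = fold_build2_loc (locs[j][1], RSHIFT_EXPR, TREE_TYPE (op[j]),
                           op[j], shiftsz);
  op[j] = fold_convert_loc (locs[j][0], type, op[j]);
  op[j] = build2_loc (locs[j][2], BIT_AND_EXPR, type,
                      op[j], wide_int_to_tree (type, mask[j]));
  cmp[i] = build2_loc (i ? rloc : lloc, wanted_code, truth_type,
                       op[0], op[1]);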
