On Sun, Dec 8, 2024 at 11:17 AM Alexandre Oliva <ol...@adacore.com> wrote:
>
> Here's yet another incremental patchlet, with the changes made in
> response to your review of v2.
>
> I'll post v3 containing it momentarily, but this incremental is supposed
> to make it easier to review.
>
> diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt
> index 0d255561b1bdc..be7916e957d66 100644
> --- a/gcc/c-family/c.opt
> +++ b/gcc/c-family/c.opt
> @@ -1477,10 +1477,6 @@ Wtemplates
>  C++ ObjC++ Var(warn_templates) Warning
>  Warn on primary template declaration.
>
> -Wtautological-compare
> -C ObjC C++ ObjC++ Var(warn_tautological_compare) Warning LangEnabledBy(C ObjC C++ ObjC++,Wall)
> -Warn if a comparison always evaluates to true or false.
> -
>  Wtemplate-body
>  C++ ObjC++ Var(warn_template_body) Warning Init(1)
>  Diagnose errors when parsing a template.
> diff --git a/gcc/common.opt b/gcc/common.opt
> index bb226ac61e6a1..915ce5bffb4e0 100644
> --- a/gcc/common.opt
> +++ b/gcc/common.opt
> @@ -812,6 +812,10 @@ Wsystem-headers
>  Common Var(warn_system_headers) Warning
>  Do not suppress warnings from system headers.
>
> +Wtautological-compare
> +Common Var(warn_tautological_compare) Warning LangEnabledBy(C ObjC C++ ObjC++,Wall)
> +Warn if a comparison always evaluates to true or false.
> +
>  Wtrampolines
>  Common Var(warn_trampolines) Warning
>  Warn whenever a trampoline is generated.
> diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
> index 2c33cdfb1b29a..3ba4307769dd8 100644
> --- a/gcc/gimple-fold.cc
> +++ b/gcc/gimple-fold.cc
> @@ -7439,6 +7439,53 @@ maybe_fold_comparisons_from_match_pd (tree type, enum tree_code code,
>    return NULL_TREE;
>  }
>
> +/* Return TRUE and set op[0] if T, following all SSA edges, is a type
> +   conversion.  */
> +

Like the binop helper below, can you name this one gimple_convert_def_p ()?
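For instance (untested sketch of the rename I have in mind; the body stays
exactly as in your patch):

/* Return TRUE and set op[0] if T, following all SSA edges, is
   defined by a type conversion or view-conversion.  */

static bool
gimple_convert_def_p (tree t, tree op[1])
{
  if (TREE_CODE (t) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (t))
    if (gassign *def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (t)))
      switch (gimple_assign_rhs_code (def))
        {
        CASE_CONVERT:
          op[0] = gimple_assign_rhs1 (def);
          return true;
        case VIEW_CONVERT_EXPR:
          op[0] = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
          return true;
        default:
          break;
        }
  return false;
}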

> +static bool
> +gimple_fold_follow_convert (tree t, tree op[1])
> +{
> +  if (TREE_CODE (t) == SSA_NAME
> +      && !SSA_NAME_IS_DEFAULT_DEF (t))
> +    if (gassign *def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (t)))
> +      switch (gimple_assign_rhs_code (def))
> +       {
> +       CASE_CONVERT:
> +         op[0] = gimple_assign_rhs1 (def);
> +         return true;
> +       case VIEW_CONVERT_EXPR:
> +         op[0] = TREE_OPERAND (gimple_assign_rhs1 (def), 0);
> +         return true;
> +       default:
> +         break;
> +       }
> +  return false;
> +}
> +
> +/* Return TRUE and set op[*] if T, following all SSA edges, resolves to a
> +   binary expression with code CODE.  */
> +
> +static bool
> +gimple_fold_binop_cst (enum tree_code code, tree t, tree op[2])

Since it doesn't actually fold anything, can you name it

static bool
gimple_binop_def_p (...)

and ...

> +{
> +  if (TREE_CODE (t) == SSA_NAME
> +      && !SSA_NAME_IS_DEFAULT_DEF (t))
> +    if (gimple *def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (t)))
> +      if (gimple_assign_rhs_code (def) == code)
> +       {
> +         tree op0 = gimple_assign_rhs1 (def);
> +         tree op1 = gimple_assign_rhs2 (def);
> +         if (tree_swap_operands_p (op0, op1))
> +           std::swap (op0, op1);

All stmts are in canonical form, so you shouldn't need to swap operands here.

> +         if (uniform_integer_cst_p (op1))

... and put this check in the caller so the helper could be made more universal eventually?
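Something like this (an untested sketch; the constant check moves to the use
sites, and the operand swap goes away since gimple has already canonicalized
constants to rhs2):

/* Return TRUE and set op[0..1] if T, following all SSA edges, is
   defined by a binary expression with code CODE.  */

static bool
gimple_binop_def_p (enum tree_code code, tree t, tree op[2])
{
  if (TREE_CODE (t) == SSA_NAME
      && !SSA_NAME_IS_DEFAULT_DEF (t))
    if (gassign *def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (t)))
      if (gimple_assign_rhs_code (def) == code)
        {
          /* Stmts are canonical, so a constant operand, if any, is
             already gimple_assign_rhs2.  */
          op[0] = gimple_assign_rhs1 (def);
          op[1] = gimple_assign_rhs2 (def);
          return true;
        }
  return false;
}

with the decode_field_reference uses then looking like, e.g.

  /* Recognize and save a masking operation.  */
  if (pand_mask
      && gimple_binop_def_p (BIT_AND_EXPR, exp, res_ops)
      && uniform_integer_cst_p (res_ops[1]))

(and likewise for the BIT_XOR_EXPR and RSHIFT_EXPR cases), with the bodies
unchanged.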

Otherwise the incremental patch looks OK.

Richard.

> +           {
> +             op[0] = op0;
> +             op[1] = op1;
> +             return true;
> +           }
> +       }
> +  return false;
> +}
>  /* Subroutine for fold_truth_andor_1: decode a field reference.
>
>     If *PEXP is a comparison reference, we return the innermost reference.
> @@ -7478,12 +7525,6 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>                         wide_int *pand_mask, bool *xor_p, tree *xor_cmp_op,
>                         gimple **load, location_t loc[4])
>  {
> -  /* These are from match.pd.  */
> -  extern bool gimple_any_convert (tree, tree *, tree (*)(tree));
> -  extern bool gimple_bit_and_cst (tree, tree *, tree (*)(tree));
> -  extern bool gimple_bit_xor_cst (tree, tree *, tree (*)(tree));
> -  extern bool gimple_rshift_cst (tree, tree *, tree (*)(tree));
> -
>    tree exp = *pexp;
>    tree outer_type = 0;
>    wide_int and_mask;
> @@ -7504,7 +7545,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>       narrowing then widening casts, or vice-versa, for those that are not
>       essential for the compare have already been optimized out at this
>       point.  */
> -  if (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
> +  if (gimple_fold_follow_convert (exp, res_ops))
>      {
>        if (!outer_type)
>         {
> @@ -7515,7 +7556,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>      }
>
>    /* Recognize and save a masking operation.  */
> -  if (pand_mask && gimple_bit_and_cst (exp, res_ops, follow_all_ssa_edges))
> +  if (pand_mask && gimple_fold_binop_cst (BIT_AND_EXPR, exp, res_ops))
>      {
>        loc[1] = gimple_location (SSA_NAME_DEF_STMT (exp));
>        exp = res_ops[0];
> @@ -7523,7 +7564,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>      }
>
>    /* Turn (a ^ b) [!]= 0 into a [!]= b.  */
> -  if (xor_p && gimple_bit_xor_cst (exp, res_ops, follow_all_ssa_edges))
> +  if (xor_p && gimple_fold_binop_cst (BIT_XOR_EXPR, exp, res_ops))
>      {
>        /* No location recorded for this one, it's entirely subsumed by the
>          compare.  */
> @@ -7545,7 +7586,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>      }
>
>    /* Another chance to drop conversions.  */
> -  if (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
> +  if (gimple_fold_follow_convert (exp, res_ops))
>      {
>        if (!outer_type)
>         {
> @@ -7556,17 +7597,19 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>      }
>
>    /* Take note of shifts.  */
> -  if (gimple_rshift_cst (exp, res_ops, follow_all_ssa_edges))
> +  if (gimple_fold_binop_cst (RSHIFT_EXPR, exp, res_ops))
>      {
>        loc[2] = gimple_location (SSA_NAME_DEF_STMT (exp));
>        exp = res_ops[0];
> +      if (!tree_fits_shwi_p (res_ops[1]))
> +       return NULL_TREE;
>        shiftrt = tree_to_shwi (res_ops[1]);
>        if (shiftrt <= 0)
>         return NULL_TREE;
>      }
>
>    /* Yet another chance to drop conversions.  */
> -  if (gimple_any_convert (exp, res_ops, follow_all_ssa_edges))
> +  if (gimple_fold_follow_convert (exp, res_ops))
>      {
>        if (!outer_type)
>         {
> @@ -7712,17 +7755,26 @@ make_bit_field_load (location_t loc, tree inner, tree orig_inner, tree type,
>    if (!point)
>      return ref;
>
> -  gimple_stmt_iterator gsi = gsi_for_stmt (point);
> -  tree ret = force_gimple_operand_gsi (&gsi, ref,
> -                                      true, NULL_TREE,
> -                                      true, GSI_NEW_STMT);
> +  gimple_seq stmts = NULL;
> +  tree ret = force_gimple_operand (ref, &stmts, true, NULL_TREE);
> +
>    /* We know the vuse is supposed to end up being the same as that at the
>       original load at the insertion point, but if we don't set it, it will be a
>       generic placeholder that only the global SSA update at the end of the pass
>       would make equal, too late for us to use in further combinations.  So go
>       ahead and copy the vuse.  */
> -  use_operand_p use_p = gimple_vuse_op (gsi_stmt (gsi));
> -  SET_USE (use_p, gimple_vuse (point));
> +
> +  tree reaching_vuse = gimple_vuse (point);
> +  for (gimple_stmt_iterator i = gsi_start (stmts);
> +       !gsi_end_p (i); gsi_next (&i))
> +    {
> +      gimple *new_stmt = gsi_stmt (i);
> +      if (gimple_has_mem_ops (new_stmt))
> +       gimple_set_vuse (new_stmt, reaching_vuse);
> +    }
> +
> +  gimple_stmt_iterator gsi = gsi_for_stmt (point);
> +  gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
>    return ret;
>  }
>
> @@ -7748,12 +7800,18 @@ build_split_load (tree /* out */ ln_arg[2],
>                   HOST_WIDE_INT bit_pos, bool reversep,
>                   gimple *point[2])
>  {
> +  scalar_int_mode modes[2] = { mode, mode2 };
>    bitsiz[0] = GET_MODE_BITSIZE (mode);
>    bitsiz[1] = GET_MODE_BITSIZE (mode2);
>
>    for (int i = 0; i < 2; i++)
>      {
> -      tree type = lang_hooks.types.type_for_size (bitsiz[i], 1);
> +      tree type = lang_hooks.types.type_for_mode (modes[i], 1);
> +      if (!type)
> +       {
> +         type = build_nonstandard_integer_type (bitsiz[0], 1);
> +         gcc_assert (type);
> +       }
>        bitpos[i] = bit_pos;
>        ln_arg[i] = make_bit_field_load (loc, inner, orig_inner,
>                                        type, bitsiz[i],
> @@ -7921,14 +7979,12 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
>       If one operand is a BIT_AND_EXPR with the constant one, treat it as if
>       it were surrounded with a NE_EXPR.  */
>
> -  if (TREE_SIDE_EFFECTS (ll_arg) || TREE_SIDE_EFFECTS (lr_arg)
> -      || TREE_SIDE_EFFECTS (rl_arg) || TREE_SIDE_EFFECTS (rr_arg))
> -    return 0;
> -
>    if (TREE_CODE_CLASS (lcode) != tcc_comparison
>        || TREE_CODE_CLASS (rcode) != tcc_comparison)
>      return 0;
>
> +  /* We don't normally find TRUTH_*IF_EXPR in gimple, but these codes may be
> +     given by our caller to denote conditions from different blocks.  */
>    switch (code)
>      {
>      case TRUTH_AND_EXPR:
> @@ -8196,12 +8252,9 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
>       make it wide enough to hold both.  */
>    if (l_split_load)
>      lnbitsize += GET_MODE_BITSIZE (lnmode2);
> -  lntype = lang_hooks.types.type_for_size (lnbitsize, 1);
> +  lntype = build_nonstandard_integer_type (lnbitsize, 1);
>    if (!lntype)
> -    {
> -      gcc_checking_assert (l_split_load);
> -      lntype = build_nonstandard_integer_type (lnbitsize, 1);
> -    }
> +    return NULL_TREE;
>    lnprec = TYPE_PRECISION (lntype);
>    xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
>
> @@ -8237,7 +8290,8 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
>        l_const <<= xll_bitpos;
>        if ((l_const & ~ll_mask) != 0)
>         {
> -         warning (0, "comparison is always %d", wanted_code == NE_EXPR);
> +         warning_at (lloc, OPT_Wtautological_compare,
> +                     "comparison is always %d", wanted_code == NE_EXPR);
>
>           return constant_boolean_node (wanted_code == NE_EXPR, truth_type);
>         }
> @@ -8252,7 +8306,8 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
>        r_const <<= xrl_bitpos;
>        if ((r_const & ~rl_mask) != 0)
>         {
> -         warning (0, "comparison is always %d", wanted_code == NE_EXPR);
> +         warning_at (rloc, OPT_Wtautological_compare,
> +                     "comparison is always %d", wanted_code == NE_EXPR);
>
>           return constant_boolean_node (wanted_code == NE_EXPR, truth_type);
>         }
> @@ -8355,12 +8410,9 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
>        rnbitpos = first_bit & ~ (rnbitsize - 1);
>        if (r_split_load)
>         rnbitsize += GET_MODE_BITSIZE (rnmode2);
> -      rntype = lang_hooks.types.type_for_size (rnbitsize, 1);
> +      rntype = build_nonstandard_integer_type (rnbitsize, 1);
>        if (!rntype)
> -       {
> -         gcc_checking_assert (r_split_load);
> -         rntype = build_nonstandard_integer_type (rnbitsize, 1);
> -       }
> +       return  0;
>        rnprec = TYPE_PRECISION (rntype);
>        xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
>
> diff --git a/gcc/match.pd b/gcc/match.pd
> index 09569d1926458..2dd67b69cf1f5 100644
> --- a/gcc/match.pd
> +++ b/gcc/match.pd
> @@ -201,17 +201,6 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
>  (match (maybe_bit_not @0)
>   (bit_xor_cst@0 @1 @2))
>
> -/* These are used by ifcombine fold_truth_andor.  */
> -(match (any_convert @0)
> - (convert @0))
> -(match (any_convert @0)
> - (view_convert @0))
> -(match (bit_and_cst @0 @1)
> - (bit_and @0 uniform_integer_cst_p@1))
> -(match (rshift_cst @0 @1)
> - (rshift @0 uniform_integer_cst_p@1)
> - (if (tree_fits_shwi_p (@1))))
> -
>  #if GIMPLE
>  (match (maybe_truncate @0)
>   (convert @0)
>
>
> --
> Alexandre Oliva, happy hacker            https://FSFLA.org/blogs/lxo/
>    Free Software Activist                   GNU Toolchain Engineer
> More tolerance and less prejudice are key for inclusion and diversity
> Excluding neuro-others for not behaving ""normal"" is *not* inclusive
