On Tue, Jan 14, 2025 at 6:05 AM Alexandre Oliva <ol...@adacore.com> wrote:
>
>
> Arrange for decode_field_reference to use local variables throughout,
> to modify the out parms only when we're about to return non-NULL, and
> to drop the unused case of a NULL pand_mask, which had a latent
> failure to detect signbit masking.
>
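For illustration only, a minimal stand-alone sketch of the "locals
throughout, out parms only on success" convention the patch adopts
(hypothetical helper, not from the patch):

  #include <stddef.h>

  /* Compute into locals first; copy to the out parameters only on
     the success path, so a NULL return never leaves the caller with
     half-updated results.  */
  static const char *
  parse_uint (const char *s, unsigned long *pvalue, int *pdigits)
  {
    unsigned long value = 0;
    int digits = 0;

    for (; *s >= '0' && *s <= '9'; s++, digits++)
      value = value * 10 + (unsigned long) (*s - '0');

    if (digits == 0)
      return NULL;      /* Bail out; *pvalue and *pdigits untouched.  */

    *pvalue = value;    /* Commit the out parms only when succeeding.  */
    *pdigits = digits;
    return s;
  }
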
> Regstrapped on x86_64-linux-gnu along with the PR118456 patch.
> Ok to install?

OK.

Thanks,
Richard.

>
> for  gcc/ChangeLog
>
>         * gimple-fold.cc (decode_field_reference): Robustify to set
>         out parms only when returning non-NULL.
>         (fold_truth_andor_for_ifcombine): Bail if
>         decode_field_reference returns NULL.  Add complementary assert
>         on r_const's not being set when l_const isn't.
> ---
>  gcc/gimple-fold.cc | 155 +++++++++++++++++++++++++++-------------------------
>  1 file changed, 80 insertions(+), 75 deletions(-)
>
> diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
> index 5b1fbe6db1df3..3c971a29ef045 100644
> --- a/gcc/gimple-fold.cc
> +++ b/gcc/gimple-fold.cc
> @@ -7510,18 +7510,17 @@ gimple_binop_def_p (enum tree_code code, tree t, tree op[2])
>     *PREVERSEP is set to the storage order of the field.
>
>     *PAND_MASK is set to the mask found in a BIT_AND_EXPR, if any.  If
> -   PAND_MASK *is NULL, BIT_AND_EXPR is not recognized.  If *PAND_MASK
> -   is initially set to a mask with nonzero precision, that mask is
> +   *PAND_MASK is initially set to a mask with nonzero precision, that mask is
>     combined with the found mask, or adjusted in precision to match.
>
>     *PSIGNBIT is set to TRUE if, before clipping to *PBITSIZE, the mask
>     encompassed bits that corresponded to extensions of the sign bit.
>
> -   *XOR_P is to be FALSE if EXP might be a XOR used in a compare, in which
> -   case, if XOR_CMP_OP is a zero constant, it will be overridden with *PEXP,
> -   *XOR_P will be set to TRUE, *XOR_PAND_MASK will be copied from *PAND_MASK,
> -   and the left-hand operand of the XOR will be decoded.  If *XOR_P is TRUE,
> -   XOR_CMP_OP and XOR_PAND_MASK are supposed to be NULL, and then the
> +   *PXORP is to be FALSE if EXP might be a XOR used in a compare, in which
> +   case, if PXOR_CMP_OP is a zero constant, it will be overridden with *PEXP,
> +   *PXORP will be set to TRUE, *PXOR_AND_MASK will be copied from *PAND_MASK,
> +   and the left-hand operand of the XOR will be decoded.  If *PXORP is TRUE,
> +   PXOR_CMP_OP and PXOR_AND_MASK are supposed to be NULL, and then the
>     right-hand operand of the XOR will be decoded.
>
>     *LOAD is set to the load stmt of the innermost reference, if any,
> @@ -7538,8 +7537,8 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>                         HOST_WIDE_INT *pbitpos,
>                         bool *punsignedp, bool *preversep, bool *pvolatilep,
>                         wide_int *pand_mask, bool *psignbit,
> -                       bool *xor_p, tree *xor_cmp_op, wide_int *xor_pand_mask,
> -                       gimple **load, location_t loc[4])
> +                       bool *pxorp, tree *pxor_cmp_op, wide_int *pxor_and_mask,
> +                       gimple **pload, location_t loc[4])
>  {
>    tree exp = *pexp;
>    tree outer_type = 0;
> @@ -7549,9 +7548,11 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>    tree res_ops[2];
>    machine_mode mode;
>    bool convert_before_shift = false;
> -
> -  *load = NULL;
> -  *psignbit = false;
> +  bool signbit = false;
> +  bool xorp = false;
> +  tree xor_cmp_op;
> +  wide_int xor_and_mask;
> +  gimple *load = NULL;
>
>    /* All the optimizations using this function assume integer fields.
>       There are problems with FP fields since the type_for_size call
> @@ -7576,7 +7577,7 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>
>    /* Recognize and save a masking operation.  Combine it with an
>       incoming mask.  */
> -  if (pand_mask && gimple_binop_def_p (BIT_AND_EXPR, exp, res_ops)
> +  if (gimple_binop_def_p (BIT_AND_EXPR, exp, res_ops)
>        && TREE_CODE (res_ops[1]) == INTEGER_CST)
>      {
>        loc[1] = gimple_location (SSA_NAME_DEF_STMT (exp));
> @@ -7596,29 +7597,29 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>             and_mask &= wide_int::from (*pand_mask, prec_op, UNSIGNED);
>         }
>      }
> -  else if (pand_mask)
> +  else
>      and_mask = *pand_mask;
>
>    /* Turn (a ^ b) [!]= 0 into a [!]= b.  */
> -  if (xor_p && gimple_binop_def_p (BIT_XOR_EXPR, exp, res_ops))
> +  if (pxorp && gimple_binop_def_p (BIT_XOR_EXPR, exp, res_ops))
>      {
>        /* No location recorded for this one, it's entirely subsumed by the
>          compare.  */
> -      if (*xor_p)
> +      if (*pxorp)
>         {
>           exp = res_ops[1];
> -         gcc_checking_assert (!xor_cmp_op && !xor_pand_mask);
> +         gcc_checking_assert (!pxor_cmp_op && !pxor_and_mask);
>         }
> -      else if (!xor_cmp_op)
> +      else if (!pxor_cmp_op)
>         /* Not much we can do when xor appears in the right-hand compare
>            operand.  */
>         return NULL_TREE;
> -      else if (integer_zerop (*xor_cmp_op))
> +      else if (integer_zerop (*pxor_cmp_op))
>         {
> -         *xor_p = true;
> +         xorp = true;
>           exp = res_ops[0];
> -         *xor_cmp_op = *pexp;
> -         *xor_pand_mask = *pand_mask;
> +         xor_cmp_op = *pexp;
> +         xor_and_mask = *pand_mask;
>         }
>      }
>
> @@ -7646,12 +7647,12 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>    /* Yet another chance to drop conversions.  This one is allowed to
>       match a converting load, subsuming the load identification block
>       below.  */
> -  if (!outer_type && gimple_convert_def_p (exp, res_ops, load))
> +  if (!outer_type && gimple_convert_def_p (exp, res_ops, &load))
>      {
>        outer_type = TREE_TYPE (exp);
>        loc[0] = gimple_location (SSA_NAME_DEF_STMT (exp));
> -      if (*load)
> -       loc[3] = gimple_location (*load);
> +      if (load)
> +       loc[3] = gimple_location (load);
>        exp = res_ops[0];
>        /* This looks backwards, but we're going back the def chain, so if we
>          find the conversion here, after finding a shift, that's because the
> @@ -7662,14 +7663,13 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>      }
>
>    /* Identify the load, if there is one.  */
> -  if (!(*load) && TREE_CODE (exp) == SSA_NAME
> -      && !SSA_NAME_IS_DEFAULT_DEF (exp))
> +  if (!load && TREE_CODE (exp) == SSA_NAME && !SSA_NAME_IS_DEFAULT_DEF (exp))
>      {
>        gimple *def = SSA_NAME_DEF_STMT (exp);
>        if (gimple_assign_load_p (def))
>         {
>           loc[3] = gimple_location (def);
> -         *load = def;
> +         load = def;
>           exp = gimple_assign_rhs1 (def);
>         }
>      }
> @@ -7694,20 +7694,14 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>           && !type_has_mode_precision_p (TREE_TYPE (inner))))
>      return NULL_TREE;
>
> -  *pbitsize = bs;
> -  *pbitpos = bp;
> -  *punsignedp = unsignedp;
> -  *preversep = reversep;
> -  *pvolatilep = volatilep;
> -
>    /* Adjust shifts...  */
>    if (convert_before_shift
> -      && outer_type && *pbitsize > TYPE_PRECISION (outer_type))
> +      && outer_type && bs > TYPE_PRECISION (outer_type))
>      {
> -      HOST_WIDE_INT excess = *pbitsize - TYPE_PRECISION (outer_type);
> -      if (*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
> -       *pbitpos += excess;
> -      *pbitsize -= excess;
> +      HOST_WIDE_INT excess = bs - TYPE_PRECISION (outer_type);
> +      if (reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
> +       bp += excess;
> +      bs -= excess;
>      }
>
>    if (shiftrt)
> @@ -7720,49 +7714,57 @@ decode_field_reference (tree *pexp, HOST_WIDE_INT *pbitsize,
>          shift after a change of signedness would make the extension
>          non-uniform, and we can't deal with that (yet ???).  See
>          gcc.dg/field-merge-22.c for a test that would go wrong.  */
> -      if (*pbitsize <= shiftrt
> +      if (bs <= shiftrt
>           || (convert_before_shift
>               && outer_type && unsignedp != TYPE_UNSIGNED (outer_type)))
>         return NULL_TREE;
> -      if (!*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
> -       *pbitpos += shiftrt;
> -      *pbitsize -= shiftrt;
> +      if (!reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
> +       bp += shiftrt;
> +      bs -= shiftrt;
>      }
>
>    /* ... and bit position.  */
>    if (!convert_before_shift
> -      && outer_type && *pbitsize > TYPE_PRECISION (outer_type))
> +      && outer_type && bs > TYPE_PRECISION (outer_type))
>      {
> -      HOST_WIDE_INT excess = *pbitsize - TYPE_PRECISION (outer_type);
> -      if (*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
> -       *pbitpos += excess;
> -      *pbitsize -= excess;
> +      HOST_WIDE_INT excess = bs - TYPE_PRECISION (outer_type);
> +      if (reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
> +       bp += excess;
> +      bs -= excess;
>      }
>
> -  *pexp = exp;
> -
>    /* If the number of bits in the reference is the same as the bitsize of
>       the outer type, then the outer type gives the signedness. Otherwise
>       (in case of a small bitfield) the signedness is unchanged.  */
> -  if (outer_type && *pbitsize == TYPE_PRECISION (outer_type))
> -    *punsignedp = TYPE_UNSIGNED (outer_type);
> +  if (outer_type && bs == TYPE_PRECISION (outer_type))
> +    unsignedp = TYPE_UNSIGNED (outer_type);
>
> -  if (pand_mask)
> +  /* Make the mask the expected width.  */
> +  if (and_mask.get_precision () != 0)
>      {
> -      /* Make the mask the expected width.  */
> -      if (and_mask.get_precision () != 0)
> -       {
> -         /* If the AND_MASK encompasses bits that would be extensions of
> -            the sign bit, set *PSIGNBIT.  */
> -         if (!unsignedp
> -             && and_mask.get_precision () > *pbitsize
> -             && (and_mask
> -                 & wi::mask (*pbitsize, true, and_mask.get_precision ())) != 0)
> -           *psignbit = true;
> -         and_mask = wide_int::from (and_mask, *pbitsize, UNSIGNED);
> -       }
> +      /* If the AND_MASK encompasses bits that would be extensions of
> +        the sign bit, set SIGNBIT.  */
> +      if (!unsignedp
> +         && and_mask.get_precision () > bs
> +         && (and_mask & wi::mask (bs, true, and_mask.get_precision ())) != 0)
> +       signbit = true;
> +      and_mask = wide_int::from (and_mask, bs, UNSIGNED);
> +    }
>
> -      *pand_mask = and_mask;
> +  *pexp = exp;
> +  *pload = load;
> +  *pbitsize = bs;
> +  *pbitpos = bp;
> +  *punsignedp = unsignedp;
> +  *preversep = reversep;
> +  *pvolatilep = volatilep;
> +  *psignbit = signbit;
> +  *pand_mask = and_mask;
> +  if (xorp)
> +    {
> +      *pxorp = xorp;
> +      *pxor_cmp_op = xor_cmp_op;
> +      *pxor_and_mask = xor_and_mask;
>      }
>
>    return inner;
> @@ -8168,19 +8170,27 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
>                                      &ll_and_mask, &ll_signbit,
>                                      &l_xor, &lr_arg, &lr_and_mask,
>                                      &ll_load, ll_loc);
> +  if (!ll_inner)
> +    return 0;
>    lr_inner = decode_field_reference (&lr_arg, &lr_bitsize, &lr_bitpos,
>                                      &lr_unsignedp, &lr_reversep, &volatilep,
>                                      &lr_and_mask, &lr_signbit, &l_xor, 0, 0,
>                                      &lr_load, lr_loc);
> +  if (!lr_inner)
> +    return 0;
>    rl_inner = decode_field_reference (&rl_arg, &rl_bitsize, &rl_bitpos,
>                                      &rl_unsignedp, &rl_reversep, &volatilep,
>                                      &rl_and_mask, &rl_signbit,
>                                      &r_xor, &rr_arg, &rr_and_mask,
>                                      &rl_load, rl_loc);
> +  if (!rl_inner)
> +    return 0;
>    rr_inner = decode_field_reference (&rr_arg, &rr_bitsize, &rr_bitpos,
>                                      &rr_unsignedp, &rr_reversep, &volatilep,
>                                      &rr_and_mask, &rr_signbit, &r_xor, 0, 0,
>                                      &rr_load, rr_loc);
> +  if (!rr_inner)
> +    return 0;
>
>    /* It must be true that the inner operation on the lhs of each
>       comparison must be the same if we are to be able to do anything.
> @@ -8188,16 +8198,13 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
>       the rhs's.  If one is a load and the other isn't, we have to be
>       conservative and avoid the optimization, otherwise we could get
>       SRAed fields wrong.  */
> -  if (volatilep
> -      || ll_reversep != rl_reversep
> -      || ll_inner == 0 || rl_inner == 0)
> +  if (volatilep || ll_reversep != rl_reversep)
>      return 0;
>
>    if (! operand_equal_p (ll_inner, rl_inner, 0))
>      {
>        /* Try swapping the operands.  */
>        if (ll_reversep != rr_reversep
> -         || !rr_inner
>           || !operand_equal_p (ll_inner, rr_inner, 0))
>         return 0;
>
> @@ -8266,7 +8273,6 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
>        lr_reversep = ll_reversep;
>      }
>    else if (lr_reversep != rr_reversep
> -          || lr_inner == 0 || rr_inner == 0
>            || ! operand_equal_p (lr_inner, rr_inner, 0)
>            || ((lr_load && rr_load)
>                ? gimple_vuse (lr_load) != gimple_vuse (rr_load)
> @@ -8520,6 +8526,9 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
>    else
>      rl_mask = wi::shifted_mask (xrl_bitpos, rl_bitsize, false, lnprec);
>
> +  /* When we set l_const, we also set r_const.  */
> +  gcc_checking_assert (!l_const.get_precision () == !r_const.get_precision ());
> +
>    /* Adjust right-hand constants in both original comparisons to match width
>       and bit position.  */
>    if (l_const.get_precision ())
> @@ -8550,10 +8559,6 @@ fold_truth_andor_for_ifcombine (enum tree_code code, tree truth_type,
>           return constant_boolean_node (wanted_code == NE_EXPR, truth_type);
>         }
>
> -      /* When we set l_const, we also set r_const, so we need not test it
> -        again.  */
> -      gcc_checking_assert (r_const.get_precision ());
> -
>        /* Before clipping upper bits of the right-hand operand of the compare,
>          check that they're sign or zero extensions, depending on how the
>          left-hand operand would be extended.  */
>
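As an aside, the identity behind the "Turn (a ^ b) [!]= 0 into
a [!]= b" comment above can be checked with a stand-alone sketch
(hypothetical function, not from the patch):

  #include <assert.h>

  /* a ^ b is zero exactly when a == b, so comparing the XOR result
     against zero is equivalent to comparing the operands directly.  */
  static void
  xor_compare_identity (unsigned a, unsigned b)
  {
    assert (((a ^ b) == 0) == (a == b));
    assert (((a ^ b) != 0) == (a != b));
  }
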
>
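Likewise, a tiny sketch (hypothetical names, not from the patch) of
the !x == !y idiom in the new gcc_checking_assert: logical negation
normalizes each operand to 0 or 1, so the equality holds exactly when
the two precisions are both zero or both nonzero:

  #include <assert.h>

  static void
  check_both_or_neither (unsigned l_prec, unsigned r_prec)
  {
    /* Fires exactly when one precision is set without the other.  */
    assert (!l_prec == !r_prec);
  }
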
> --
> Alexandre Oliva, happy hacker            https://FSFLA.org/blogs/lxo/
>    Free Software Activist                   GNU Toolchain Engineer
> More tolerance and less prejudice are key for inclusion and diversity
> Excluding neuro-others for not behaving "normal" is *not* inclusive
