https://gcc.gnu.org/g:b92022845a5d1099702aa026f64095339c96bd22
commit b92022845a5d1099702aa026f64095339c96bd22
Author: Alexandre Oliva <ol...@gnu.org>
Date:   Thu Nov 21 22:37:00 2024 -0300

    switch to wide_int for masks and constants

Diff:
---
 gcc/gimple-fold.cc | 291 +++++++++++++++++++++++------------------------------
 1 file changed, 127 insertions(+), 164 deletions(-)

diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index b579f9789c9d..1c9bdd0452b3 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -7496,19 +7496,19 @@ follow_load (tree exp, gimple **load)
    do anything with.  */
 
 static tree
-decode_field_reference (location_t loc, tree *exp_, HOST_WIDE_INT *pbitsize,
+decode_field_reference (tree *exp_, HOST_WIDE_INT *pbitsize,
                         HOST_WIDE_INT *pbitpos, machine_mode *pmode,
-                        int *punsignedp, int *preversep, int *pvolatilep,
-                        tree *pmask, tree *pand_mask, int xor_which,
+                        bool *punsignedp, bool *preversep, bool *pvolatilep,
+                        wide_int *pmask, wide_int *pand_mask, int xor_which,
                         gimple **load)
 {
   tree exp = *exp_;
   tree outer_type = 0;
-  tree and_mask = 0;
-  tree mask, inner, offset;
-  tree unsigned_type;
+  wide_int and_mask;
+  tree inner, offset;
   unsigned int precision;
-  HOST_WIDE_INT shiftrt = 0;
+  int shiftrt = 0;
+  wide_int mask;
 
   *load = NULL;
 
@@ -7527,7 +7527,7 @@ decode_field_reference (location_t loc, tree *exp_, HOST_WIDE_INT *pbitsize,
   if (gimple_bit_and_cst (exp, res_ops, follow_all_ssa_edges))
     {
      exp = res_ops[0];
-      and_mask = res_ops[1];
+      and_mask = wi::to_wide (res_ops[1]);
     }
 
   if (xor_which)
@@ -7564,21 +7564,29 @@ decode_field_reference (location_t loc, tree *exp_, HOST_WIDE_INT *pbitsize,
   exp = follow_load (exp, load);
 
   poly_int64 poly_bitsize, poly_bitpos;
+  int unsignedp, reversep = *preversep, volatilep = *pvolatilep;
   inner = get_inner_reference (exp, &poly_bitsize, &poly_bitpos, &offset,
-                               pmode, punsignedp, preversep, pvolatilep);
+                               pmode, &unsignedp, &reversep, &volatilep);
 
-  if ((inner == exp && and_mask == 0)
-      || !poly_bitsize.is_constant (pbitsize)
-      || !poly_bitpos.is_constant (pbitpos)
-      || *pbitsize <= shiftrt
+  HOST_WIDE_INT bs, bp;
+  if ((inner == exp && !and_mask.get_precision ())
+      || !poly_bitsize.is_constant (&bs)
+      || !poly_bitpos.is_constant (&bp)
+      || bs <= shiftrt
      || offset != 0
      || TREE_CODE (inner) == PLACEHOLDER_EXPR
      /* Reject out-of-bound accesses (PR79731).  */
      || (! AGGREGATE_TYPE_P (TREE_TYPE (inner))
          && compare_tree_int (TYPE_SIZE (TREE_TYPE (inner)),
-                              *pbitpos + *pbitsize) < 0))
+                              bp + bs) < 0))
     return NULL_TREE;
 
+  *pbitsize = bs;
+  *pbitpos = bp;
+  *punsignedp = unsignedp;
+  *preversep = reversep;
+  *pvolatilep = volatilep;
+
   if (shiftrt)
     {
      if (!*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
@@ -7594,10 +7602,6 @@ decode_field_reference (location_t loc, tree *exp_, HOST_WIDE_INT *pbitsize,
      *pbitsize -= excess;
     }
 
-  unsigned_type = lang_hooks.types.type_for_size (*pbitsize, 1);
-  if (unsigned_type == NULL_TREE)
-    return NULL_TREE;
-
   *exp_ = exp;
 
   /* If the number of bits in the reference is the same as the bitsize of
@@ -7607,64 +7611,18 @@ decode_field_reference (location_t loc, tree *exp_, HOST_WIDE_INT *pbitsize,
     *punsignedp = TYPE_UNSIGNED (outer_type);
 
   /* Compute the mask to access the bitfield.  */
-  precision = TYPE_PRECISION (unsigned_type);
+  precision = *pbitsize;
 
-  mask = build_int_cst_type (unsigned_type, -1);
-
-  mask = int_const_binop (LSHIFT_EXPR, mask, size_int (precision - *pbitsize));
-  mask = int_const_binop (RSHIFT_EXPR, mask, size_int (precision - *pbitsize));
+  mask = wi::mask (*pbitsize, false, precision);
 
   /* Merge it with the mask we found in the BIT_AND_EXPR, if any.  */
-  if (and_mask != 0)
-    mask = fold_build2_loc (loc, BIT_AND_EXPR, unsigned_type,
-                            fold_convert_loc (loc, unsigned_type, and_mask), mask);
+  if (and_mask.get_precision () != 0)
+    mask &= wide_int::from (and_mask, precision, UNSIGNED);
 
   *pmask = mask;
   *pand_mask = and_mask;
   return inner;
 }
-
-/* Subroutine for fold_truth_andor_1: C is an INTEGER_CST interpreted as a P
-   bit value.  Arrange things so the extra bits will be set to zero if and
-   only if C is signed-extended to its full width.  If MASK is nonzero,
-   it is an INTEGER_CST that should be AND'ed with the extra bits.  */
-
-static tree
-unextend (tree c, int p, int unsignedp, tree mask)
-{
-  tree type = TREE_TYPE (c);
-  int modesize = GET_MODE_BITSIZE (SCALAR_INT_TYPE_MODE (type));
-  tree temp;
-
-  if (p == modesize || unsignedp)
-    return c;
-
-  /* We work by getting just the sign bit into the low-order bit, then
-     into the high-order bit, then sign-extend.  We then XOR that value
-     with C.  */
-  temp = build_int_cst (TREE_TYPE (c),
-                        wi::extract_uhwi (wi::to_wide (c), p - 1, 1));
-
-  /* We must use a signed type in order to get an arithmetic right shift.
-     However, we must also avoid introducing accidental overflows, so that
-     a subsequent call to integer_zerop will work.  Hence we must
-     do the type conversion here.  At this point, the constant is either
-     zero or one, and the conversion to a signed type can never overflow.
-     We could get an overflow if this conversion is done anywhere else.  */
-  if (TYPE_UNSIGNED (type))
-    temp = fold_convert (signed_type_for (type), temp);
-
-  temp = int_const_binop (LSHIFT_EXPR, temp, size_int (modesize - 1));
-  temp = int_const_binop (RSHIFT_EXPR, temp, size_int (modesize - p - 1));
-  if (mask != 0)
-    temp = int_const_binop (BIT_AND_EXPR, temp,
-                            fold_convert (TREE_TYPE (c), mask));
-  /* If necessary, convert the type back to match the type of C.  */
-  if (TYPE_UNSIGNED (type))
-    temp = fold_convert (type, temp);
-
-  return fold_convert (type, int_const_binop (BIT_XOR_EXPR, c, temp));
-}
 
 /* Return the one bitpos within bit extents L or R that is at an
    ALIGN-bit alignment boundary, or -1 if there is more than one such
@@ -7718,7 +7676,7 @@ compute_split_boundary_from_align (HOST_WIDE_INT align,
 static tree
 make_bit_field_load (location_t loc, tree inner, tree orig_inner, tree type,
                      HOST_WIDE_INT bitsize, poly_int64 bitpos,
-                     int unsignedp, int reversep, gimple *point)
+                     bool unsignedp, bool reversep, gimple *point)
 {
   tree ref = make_bit_field_ref (loc, unshare_expr (inner),
                                  unshare_expr (orig_inner),
@@ -7728,9 +7686,17 @@ make_bit_field_load (location_t loc, tree inner, tree orig_inner, tree type,
     return ref;
 
   gimple_stmt_iterator gsi = gsi_for_stmt (point);
-  return force_gimple_operand_gsi (&gsi, ref,
-                                   true, NULL_TREE,
-                                   true, GSI_SAME_STMT);
+  tree ret = force_gimple_operand_gsi (&gsi, ref,
+                                       true, NULL_TREE,
+                                       true, GSI_NEW_STMT);
+  /* We know the vuse is supposed to end up being the same as that at the
+     original load at the insertion point, but if we don't set it, it will be a
+     generic placeholder that only the global SSA update at the end of the pass
+     would make equal, too late for us to use in further combinations.  So go
+     ahead and copy the vuse.  */
+  use_operand_p use_p = gimple_vuse_op (gsi_stmt (gsi));
+  SET_USE (use_p, gimple_vuse (point));
+  return ret;
 }
 
 /* Initialize ln_arg[0] and ln_arg[1] to a pair of newly-created (at
@@ -7798,30 +7764,28 @@ reuse_split_load (tree /* in[0] out[1] */ ln_arg[2],
                  HOST_WIDE_INT /* in[0] out[1] */ bitsiz[2],
                  HOST_WIDE_INT /* in[0] out[0..1] */ toshift[2],
                  HOST_WIDE_INT /* out */ shifted[2],
-                  tree /* out */ mask[2],
+                  wide_int /* out */ mask[2],
                  HOST_WIDE_INT boundary, bool reversep)
 {
+  unsigned prec = TYPE_PRECISION (TREE_TYPE (ln_arg[0]));
+
   ln_arg[1] = ln_arg[0];
   bitpos[1] = bitpos[0];
   bitsiz[1] = bitsiz[0];
   shifted[1] = shifted[0] = 0;
 
-  tree basemask = build_int_cst_type (TREE_TYPE (ln_arg[0]), -1);
-
   if (reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
     {
      toshift[1] = toshift[0];
      toshift[0] = bitpos[0] + bitsiz[0] - boundary;
-      mask[0] = int_const_binop (LSHIFT_EXPR, basemask,
-                                 bitsize_int (toshift[0]));
-      mask[1] = int_const_binop (BIT_XOR_EXPR, basemask, mask[0]);
+      mask[0] = wi::mask (toshift[0], true, prec);
+      mask[1] = wi::mask (toshift[0], false, prec);
     }
   else
     {
      toshift[1] = boundary - bitpos[1];
-      mask[1] = int_const_binop (LSHIFT_EXPR, basemask,
-                                 bitsize_int (toshift[1]));
-      mask[0] = int_const_binop (BIT_XOR_EXPR, basemask, mask[1]);
+      mask[1] = wi::mask (toshift[1], true, prec);
+      mask[0] = wi::mask (toshift[1], false, prec);
     }
 }
 
@@ -7905,17 +7869,18 @@ fold_truth_andor_maybe_separate (location_t loc,
   HOST_WIDE_INT ll_bitsize, ll_bitpos, lr_bitsize, lr_bitpos;
   HOST_WIDE_INT rl_bitsize, rl_bitpos, rr_bitsize, rr_bitpos;
   HOST_WIDE_INT xll_bitpos, xlr_bitpos, xrl_bitpos, xrr_bitpos;
-  HOST_WIDE_INT lnbitsize, lnbitpos, rnbitsize, rnbitpos;
-  int ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
-  int ll_reversep, lr_reversep, rl_reversep, rr_reversep;
+  HOST_WIDE_INT lnbitsize, lnbitpos, lnprec;
+  HOST_WIDE_INT rnbitsize, rnbitpos, rnprec;
+  bool ll_unsignedp, lr_unsignedp, rl_unsignedp, rr_unsignedp;
+  bool ll_reversep, lr_reversep, rl_reversep, rr_reversep;
   machine_mode ll_mode, lr_mode, rl_mode, rr_mode;
   scalar_int_mode lnmode, lnmode2, rnmode;
-  tree ll_mask, lr_mask, rl_mask, rr_mask;
-  tree ll_and_mask, lr_and_mask, rl_and_mask, rr_and_mask;
-  tree l_const, r_const;
+  wide_int ll_mask, lr_mask, rl_mask, rr_mask;
+  wide_int ll_and_mask, lr_and_mask, rl_and_mask, rr_and_mask;
+  wide_int l_const, r_const;
   tree lntype, rntype, result;
   HOST_WIDE_INT first_bit, end_bit;
-  int volatilep;
+  bool volatilep;
   bool l_split_load;
 
   gcc_checking_assert (!separatep || !*separatep);
@@ -7976,23 +7941,23 @@ fold_truth_andor_maybe_separate (location_t loc,
   ll_reversep = lr_reversep = rl_reversep = rr_reversep = 0;
   volatilep = 0;
   int l_xor = prepare_xor (ll_arg, &lr_arg);
-  ll_inner = decode_field_reference (loc, &ll_arg,
+  ll_inner = decode_field_reference (&ll_arg,
                                      &ll_bitsize, &ll_bitpos, &ll_mode,
                                      &ll_unsignedp, &ll_reversep, &volatilep,
                                      &ll_mask, &ll_and_mask, l_xor,
                                      &ll_load);
-  lr_inner = decode_field_reference (loc, &lr_arg,
+  lr_inner = decode_field_reference (&lr_arg,
                                      &lr_bitsize, &lr_bitpos, &lr_mode,
                                      &lr_unsignedp, &lr_reversep, &volatilep,
                                      &lr_mask, &lr_and_mask, 2 * l_xor,
                                      &lr_load);
   int r_xor = prepare_xor (rl_arg, &rr_arg);
-  rl_inner = decode_field_reference (loc, &rl_arg,
+  rl_inner = decode_field_reference (&rl_arg,
                                      &rl_bitsize, &rl_bitpos, &rl_mode,
                                      &rl_unsignedp, &rl_reversep, &volatilep,
                                      &rl_mask, &rl_and_mask, r_xor,
                                      &rl_load);
-  rr_inner = decode_field_reference (loc, &rr_arg,
+  rr_inner = decode_field_reference (&rr_arg,
                                      &rr_bitsize, &rr_bitpos, &rr_mode,
                                     &rr_unsignedp, &rr_reversep, &volatilep,
                                     &rr_mask, &rr_and_mask, 2 * r_xor,
@@ -8013,7 +7978,7 @@ fold_truth_andor_maybe_separate (location_t loc,
   if (TREE_CODE (lr_arg) == INTEGER_CST
      && TREE_CODE (rr_arg) == INTEGER_CST)
     {
-      l_const = lr_arg, r_const = rr_arg;
+      l_const = wi::to_wide (lr_arg), r_const = wi::to_wide (rr_arg);
      lr_reversep = ll_reversep;
     }
   else if (lr_reversep != rr_reversep
@@ -8022,37 +7987,33 @@ fold_truth_andor_maybe_separate (location_t loc,
           || (lr_load && rr_load
               && gimple_vuse (lr_load) != gimple_vuse (rr_load)))
     return 0;
-  else
-    l_const = r_const = 0;
 
   if (lsignbit)
     {
-      tree mask = build_int_cst_type (TREE_TYPE (ll_arg), -1);
-      tree sign = int_const_binop (LSHIFT_EXPR, mask,
-                                   bitsize_int (ll_bitsize - 1));
-      if (!ll_mask)
+      wide_int sign = wi::mask (ll_bitsize - 1, true,
+                                TYPE_PRECISION (TREE_TYPE (ll_arg)));
+      if (!ll_mask.get_precision ())
        ll_mask = sign;
      else
-        ll_mask = int_const_binop (BIT_AND_EXPR, ll_mask, sign);
-      if (!ll_and_mask)
+        ll_mask &= sign;
+      if (!ll_and_mask.get_precision ())
        ll_and_mask = sign;
      else
-        ll_and_mask = int_const_binop (BIT_AND_EXPR, ll_and_mask, sign);
+        ll_and_mask &= sign;
     }
 
   if (rsignbit)
     {
-      tree mask = build_int_cst_type (TREE_TYPE (rl_arg), -1);
-      tree sign = int_const_binop (LSHIFT_EXPR, mask,
-                                   bitsize_int (rl_bitsize - 1));
-      if (!rl_mask)
+      wide_int sign = wi::mask (rl_bitsize - 1, true,
+                                TYPE_PRECISION (TREE_TYPE (rl_arg)));
+      if (!rl_mask.get_precision ())
        rl_mask = sign;
      else
-        rl_mask = int_const_binop (BIT_AND_EXPR, rl_mask, sign);
-      if (!rl_and_mask)
+        rl_mask &= sign;
+      if (!rl_and_mask.get_precision ())
        rl_and_mask = sign;
      else
-        rl_and_mask = int_const_binop (BIT_AND_EXPR, rl_and_mask, sign);
+        rl_and_mask &= sign;
     }
 
   /* If either comparison code is not correct for our logical operation,
@@ -8062,7 +8023,9 @@ fold_truth_andor_maybe_separate (location_t loc,
   wanted_code = (code == TRUTH_AND_EXPR ? EQ_EXPR : NE_EXPR);
   if (lcode != wanted_code)
     {
-      if (l_const && integer_zerop (l_const) && integer_pow2p (ll_mask))
+      if (l_const.get_precision ()
+          && l_const == 0
+          && wi::popcount (ll_mask) == 1)
        {
          /* Make the left operand unsigned, since we are only interested
             in the value of one bit.  Otherwise we are doing the wrong
@@ -8077,7 +8040,9 @@ fold_truth_andor_maybe_separate (location_t loc,
   /* This is analogous to the code for l_const above.  */
   if (rcode != wanted_code)
     {
-      if (r_const && integer_zerop (r_const) && integer_pow2p (rl_mask))
+      if (r_const.get_precision ()
+          && r_const == 0
+          && wi::popcount (rl_mask) == 1)
        {
          rl_unsignedp = 1;
          r_const = rl_mask;
@@ -8119,7 +8084,7 @@ fold_truth_andor_maybe_separate (location_t loc,
   /* An additional mask to be applied to LD_ARG, to remove any bits that
      may have been loaded for use in another compare, but that don't belong
      in the corresponding compare.  */
-  tree xmask[2][2] = {};
+  wide_int xmask[2][2] = {};
 
   /* The combined compare or compares.  */
   tree cmp[2];
@@ -8195,6 +8160,7 @@ fold_truth_andor_maybe_separate (location_t loc,
          gcc_checking_assert (l_split_load);
          lntype = build_nonstandard_integer_type (lnbitsize, 1);
        }
+  lnprec = TYPE_PRECISION (lntype);
 
   xll_bitpos = ll_bitpos - lnbitpos, xrl_bitpos = rl_bitpos - lnbitpos;
   if (ll_reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
@@ -8203,39 +8169,41 @@ fold_truth_andor_maybe_separate (location_t loc,
      xll_bitpos = lnbitsize - xll_bitpos - ll_bitsize;
      xrl_bitpos = lnbitsize - xrl_bitpos - rl_bitsize;
     }
 
-  ll_mask = int_const_binop (LSHIFT_EXPR,
-                             fold_convert_loc (loc, lntype, ll_mask),
-                             size_int (xll_bitpos));
-  if (!ll_mask)
-    return 0;
-  rl_mask = int_const_binop (LSHIFT_EXPR,
-                             fold_convert_loc (loc, lntype, rl_mask),
-                             size_int (xrl_bitpos));
-  if (!rl_mask)
-    return 0;
+  ll_mask = wi::lshift (wide_int::from (ll_mask, lnprec, UNSIGNED),
+                        xll_bitpos);
+  rl_mask = wi::lshift (wide_int::from (rl_mask, lnprec, UNSIGNED),
+                        xrl_bitpos);
 
-  if (l_const)
+  if (l_const.get_precision ())
     {
-      l_const = fold_convert_loc (loc, lntype, l_const);
-      l_const = unextend (l_const, ll_bitsize, ll_unsignedp, ll_and_mask);
-      l_const = int_const_binop (LSHIFT_EXPR, l_const, size_int (xll_bitpos));
-      if (! integer_zerop (int_const_binop (BIT_AND_EXPR, l_const,
-                                            fold_build1_loc (loc, BIT_NOT_EXPR,
-                                                             lntype, ll_mask))))
+      l_const = wide_int::from (l_const, lnprec,
+                                TYPE_SIGN (TREE_TYPE (lr_arg)));
+      if (!TYPE_UNSIGNED (TREE_TYPE (lr_arg)))
+        {
+          l_const = wi::zext (l_const, TYPE_PRECISION (TREE_TYPE (lr_arg)));
+          if (ll_and_mask.get_precision ())
+            l_const &= wide_int::from (ll_and_mask, lnprec, UNSIGNED);
+        }
+      l_const <<= xll_bitpos;
+      if ((l_const & ~ll_mask) != 0)
        {
          warning (0, "comparison is always %d", wanted_code == NE_EXPR);
          return constant_boolean_node (wanted_code == NE_EXPR, truth_type);
        }
     }
 
-  if (r_const)
+  if (r_const.get_precision ())
     {
-      r_const = fold_convert_loc (loc, lntype, r_const);
-      r_const = unextend (r_const, rl_bitsize, rl_unsignedp, rl_and_mask);
-      r_const = int_const_binop (LSHIFT_EXPR, r_const, size_int (xrl_bitpos));
-      if (! integer_zerop (int_const_binop (BIT_AND_EXPR, r_const,
-                                            fold_build1_loc (loc, BIT_NOT_EXPR,
-                                                             lntype, rl_mask))))
+      r_const = wide_int::from (r_const, lnprec,
+                                TYPE_SIGN (TREE_TYPE (rr_arg)));
+      if (!TYPE_UNSIGNED (TREE_TYPE (rr_arg)))
+        {
+          r_const = wi::zext (r_const, TYPE_PRECISION (TREE_TYPE (rr_arg)));
+          if (rl_and_mask.get_precision ())
+            r_const &= wide_int::from (rl_and_mask, lnprec, UNSIGNED);
+        }
+      r_const <<= xrl_bitpos;
+      if ((r_const & ~rl_mask) != 0)
        {
          warning (0, "comparison is always %d", wanted_code == NE_EXPR);
@@ -8246,7 +8214,7 @@ fold_truth_andor_maybe_separate (location_t loc,
   /* If the right sides are not constant, do the same for it.  Also,
      disallow this optimization if a size, signedness or storage order
      mismatch occurs between the left and right sides.  */
-  if (l_const == 0)
+  if (l_const.get_precision () == 0)
     {
      if (ll_bitsize != lr_bitsize || rl_bitsize != rr_bitsize
          || ll_unsignedp != lr_unsignedp || rl_unsignedp != rr_unsignedp
@@ -8305,6 +8273,7 @@ fold_truth_andor_maybe_separate (location_t loc,
          gcc_checking_assert (r_split_load);
          rntype = build_nonstandard_integer_type (rnbitsize, 1);
        }
+  rnprec = TYPE_PRECISION (rntype);
 
   xlr_bitpos = lr_bitpos - rnbitpos, xrr_bitpos = rr_bitpos - rnbitpos;
   if (lr_reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
@@ -8313,14 +8282,10 @@ fold_truth_andor_maybe_separate (location_t loc,
          xlr_bitpos = rnbitsize - xlr_bitpos - lr_bitsize;
          xrr_bitpos = rnbitsize - xrr_bitpos - rr_bitsize;
        }
 
-      lr_mask = int_const_binop (LSHIFT_EXPR,
-                                 fold_convert_loc (loc, rntype, lr_mask),
-                                 size_int (xlr_bitpos));
-      rr_mask = int_const_binop (LSHIFT_EXPR,
-                                 fold_convert_loc (loc, rntype, rr_mask),
-                                 size_int (xrr_bitpos));
+      lr_mask = wide_int::from (lr_mask, rnprec, UNSIGNED) << xlr_bitpos;
+      rr_mask = wide_int::from (rr_mask, rnprec, UNSIGNED) << xrr_bitpos;
 
-      lr_mask = int_const_binop (BIT_IOR_EXPR, lr_mask, rr_mask);
+      lr_mask |= rr_mask;
 
      toshift[1][0] = MIN (xlr_bitpos, xrr_bitpos);
      shifted[1][0] = 0;
@@ -8360,12 +8325,9 @@ fold_truth_andor_maybe_separate (location_t loc,
         constants must be the same.  If not, the condition is always false.
         Test for this to avoid generating incorrect code below.  */
-      result = int_const_binop (BIT_AND_EXPR, ll_mask, rl_mask);
-      if (! integer_zerop (result)
-          && simple_cst_equal (int_const_binop (BIT_AND_EXPR,
-                                                result, l_const),
-                               int_const_binop (BIT_AND_EXPR,
-                                                result, r_const)) != 1)
+      wide_int mask = ll_mask & rl_mask;
+      if (mask != 0
+          && (l_const & mask) != (r_const & mask))
        {
          if (wanted_code == NE_EXPR)
            return constant_boolean_node (true, truth_type);
@@ -8378,7 +8340,7 @@ fold_truth_andor_maybe_separate (location_t loc,
 
      /* The constants are combined so as to line up with the loaded field, so
         use the same parameters.  */
-      ld_arg[1][0] = int_const_binop (BIT_IOR_EXPR, l_const, r_const);
+      ld_arg[1][0] = wide_int_to_tree (lntype, l_const | r_const);
      toshift[1][0] = MIN (xll_bitpos, xrl_bitpos);
      shifted[1][0] = 0;
      bitpos[1][0] = lnbitpos;
@@ -8390,16 +8352,16 @@ fold_truth_andor_maybe_separate (location_t loc,
                            lnbitpos + GET_MODE_BITSIZE (lnmode),
                            lr_reversep);
 
-          lr_mask = build_int_cst_type (TREE_TYPE (ld_arg[1][0]), -1);
+          lr_mask = wi::mask (0, true, lnprec);
          /* If the compiler thinks this is used uninitialized below, it's
             because it can't realize that parts can only be 2 when
-             comparing wiht constants if l_split_load is also true.  This
+             comparing with constants if l_split_load is also true.  This
             just silences the warning.  */
          rnbitpos = 0;
        }
 
-      ll_mask = int_const_binop (BIT_IOR_EXPR, ll_mask, rl_mask);
+      ll_mask |= rl_mask;
 
      toshift[0][0] = MIN (xll_bitpos, xrl_bitpos);
      shifted[0][0] = 0;
@@ -8434,22 +8396,23 @@ fold_truth_andor_maybe_separate (location_t loc,
   for (int i = 0; i < parts; i++)
     {
      tree op[2] = { ld_arg[0][i], ld_arg[1][i] };
-      tree mask[2] = { ll_mask, lr_mask };
+      wide_int mask[2] = { ll_mask, lr_mask };
 
      for (int j = 0; j < 2; j++)
        {
+          unsigned prec = TYPE_PRECISION (TREE_TYPE (op[j]));
          op[j] = unshare_expr (op[j]);
 
          /* Mask out the bits belonging to the other part.  */
-          if (xmask[j][i])
-            mask[j] = int_const_binop (BIT_AND_EXPR, mask[j], xmask[j][i]);
+          if (xmask[j][i].get_precision ())
+            mask[j] &= xmask[j][i];
 
          if (shifted[j][i])
            {
-              tree shiftsz = bitsize_int (shifted[j][i]);
-              mask[j] = int_const_binop (RSHIFT_EXPR, mask[j], shiftsz);
+              wide_int shift = wide_int::from (shifted[j][i], prec, UNSIGNED);
+              mask[j] = wi::lrshift (mask[j], shift);
            }
-          mask[j] = fold_convert_loc (loc, TREE_TYPE (op[j]), mask[j]);
+          mask[j] = wide_int::from (mask[j], prec, UNSIGNED);
        }
 
      HOST_WIDE_INT shift = (toshift[0][i] - toshift[1][i]);
@@ -8468,7 +8431,7 @@ fold_truth_andor_maybe_separate (location_t loc,
              tree shiftsz = bitsize_int (shift);
              op[j] = fold_build2_loc (loc, RSHIFT_EXPR, TREE_TYPE (op[j]),
                                       op[j], shiftsz);
-              mask[j] = int_const_binop (RSHIFT_EXPR, mask[j], shiftsz);
+              mask[j] = wi::lrshift (mask[j], shift);
            }
 
          /* Convert to the smaller type before masking out unwanted
@@ -8481,13 +8444,13 @@ fold_truth_andor_maybe_separate (location_t loc,
              if (!j)
                type = TREE_TYPE (op[1]);
              op[j] = fold_convert_loc (loc, type, op[j]);
-              mask[j] = fold_convert_loc (loc, type, mask[j]);
+              mask[j] = wide_int::from (mask[j], TYPE_PRECISION (type), UNSIGNED);
            }
 
          for (int j = 0; j < 2; j++)
-            if (! integer_all_onesp (mask[j]))
+            if (mask[j] != wi::mask (0, true, mask[j].get_precision ()))
              op[j] = build2_loc (loc, BIT_AND_EXPR, type,
-                                  op[j], mask[j]);
+                                  op[j], wide_int_to_tree (type, mask[j]));
 
          cmp[i] = build2_loc (loc, wanted_code, truth_type, op[0], op[1]);
        }
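
Note on the API the patch switches to: masks and right-hand constants are now carried as wide_int values with an explicit precision instead of INTEGER_CST trees, and a default-constructed wide_int (precision 0) stands in for "no mask", which is why the NULL_TREE tests become and_mask.get_precision () / l_const.get_precision () checks.  The snippet below is not part of the commit; it is a minimal standalone model of that arithmetic using uint64_t and std::optional, and the helper name low_mask is invented purely to illustrate what wi::mask (width, false, precision) produces (the low WIDTH bits set within a PRECISION-bit value; passing true instead sets the complementary high bits).

  // Standalone model of the new mask handling; uint64_t stands in for
  // GCC's wide_int, std::optional for the precision-0 "no mask" sentinel.
  #include <cassert>
  #include <cstdint>
  #include <cstdio>
  #include <optional>

  // Rough equivalent of wi::mask (width, /*negate_p=*/false, precision):
  // the low WIDTH bits set, truncated to PRECISION bits.
  static uint64_t low_mask (unsigned width, unsigned precision)
  {
    assert (width <= precision && precision <= 64);
    uint64_t m = width == 64 ? ~UINT64_C (0) : (UINT64_C (1) << width) - 1;
    uint64_t p = precision == 64 ? ~UINT64_C (0) : (UINT64_C (1) << precision) - 1;
    return m & p;
  }

  int main ()
  {
    // decode_field_reference: the field mask covers the BITSIZE bits of
    // the access, at a precision equal to the field size.
    unsigned bitsize = 5;
    unsigned precision = bitsize;
    uint64_t mask = low_mask (bitsize, precision);        // wi::mask (bitsize, false, precision)

    // If the load was wrapped in a BIT_AND_EXPR, its constant operand is
    // folded into the mask; absence of an AND is the "empty" sentinel.
    std::optional<uint64_t> and_mask = UINT64_C (0x13);
    if (and_mask)                                         // and_mask.get_precision () != 0
      mask &= *and_mask;                                  // mask &= wide_int::from (...)

    // reuse_split_load now derives the two complementary masks of a split
    // access directly: wi::mask (n, false, prec) vs. wi::mask (n, true, prec).
    unsigned prec = 32, n = 12;
    uint64_t lo = low_mask (n, prec);                     // low n bits
    uint64_t hi = low_mask (prec, prec) & ~lo;            // remaining high bits
    assert ((lo ^ hi) == low_mask (prec, prec));

    std::printf ("field mask: %#llx\n", (unsigned long long) mask);  // prints 0x13
    return 0;
  }

With this model in mind, the deleted unextend helper becomes unnecessary: sign-extension and truncation of the constants are expressed directly through wide_int::from with TYPE_SIGN and wi::zext, as the new l_const/r_const handling in the diff shows.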