This patch is like the earlier GET_MODE_UNIT_SIZE one, but for precisions rather than sizes. There is one behavioural change in expand_debug_expr: we shouldn't use lowpart subregs for non-scalar truncations, since that would just reinterpret some of the scalars and drop the rest. (This probably doesn't trigger in practice.) Using TRUNCATE is fine for scalars, since simplify_gen_unary knows when a subreg can be used.
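To illustrate the point about non-scalar truncations, here is a minimal standalone sketch (not GCC code; the values, array sizes and little-endian byte order are all assumptions made up for the example). It contrasts an elementwise V4SI->V4HI truncation with a lowpart reinterpretation of the same value, showing how the latter just rereads the halves of the first scalars and drops the rest:

/* Standalone illustration, not GCC internals: why a lowpart subreg is not
   a truncation for vector modes.  Assumes a little-endian host.  */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main (void)
{
  uint32_t src[4] = { 0x00010002, 0x00030004, 0x00050006, 0x00070008 };

  /* Elementwise truncation, i.e. what (truncate:V4HI ...) means:
     each 32-bit lane is narrowed to its low 16 bits.  */
  uint16_t truncated[4];
  for (int i = 0; i < 4; i++)
    truncated[i] = (uint16_t) src[i];

  /* Lowpart reinterpretation, i.e. what a lowpart subreg would do:
     take the first 8 bytes of the wide value and reread them as four
     16-bit lanes.  On little-endian this keeps both halves of src[0]
     and src[1] and drops src[2] and src[3] entirely.  */
  uint16_t lowpart[4];
  memcpy (lowpart, src, sizeof lowpart);

  for (int i = 0; i < 4; i++)
    printf ("lane %d: truncate = %#06x, lowpart subreg = %#06x\n",
            i, truncated[i], lowpart[i]);
  return 0;
}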
Tested on aarch64-linux-gnu and x86_64-linux-gnu, and by making sure
that there were no differences in testsuite assembly output for one
target per CPU.  OK to install?

Richard


2017-08-23  Richard Sandiford  <richard.sandif...@linaro.org>
	    Alan Hayward  <alan.hayw...@arm.com>
	    David Sherwood  <david.sherw...@arm.com>

gcc/
	* cfgexpand.c (expand_debug_expr): Use GET_MODE_UNIT_PRECISION.
	(expand_debug_source_expr): Likewise.
	* combine.c (combine_simplify_rtx): Likewise.
	* cse.c (fold_rtx): Likewise.
	* optabs.c (expand_float): Likewise.
	* simplify-rtx.c (simplify_unary_operation_1): Likewise.
	(simplify_binary_operation_1): Likewise.

Index: gcc/cfgexpand.c
===================================================================
--- gcc/cfgexpand.c	2017-08-23 10:48:32.452867722 +0100
+++ gcc/cfgexpand.c	2017-08-23 10:49:04.239202074 +0100
@@ -4355,9 +4355,12 @@ expand_debug_expr (tree exp)
 	  else
 	    op0 = simplify_gen_unary (FIX, mode, op0, inner_mode);
 	}
-      else if (CONSTANT_P (op0)
-	       || GET_MODE_PRECISION (mode) <= GET_MODE_PRECISION (inner_mode))
+      else if (GET_MODE_UNIT_PRECISION (mode)
+	       == GET_MODE_UNIT_PRECISION (inner_mode))
 	op0 = lowpart_subreg (mode, op0, inner_mode);
+      else if (GET_MODE_UNIT_PRECISION (mode)
+	       < GET_MODE_UNIT_PRECISION (inner_mode))
+	op0 = simplify_gen_unary (TRUNCATE, mode, op0, inner_mode);
       else if (UNARY_CLASS_P (exp)
	       ? TYPE_UNSIGNED (TREE_TYPE (TREE_OPERAND (exp, 0)))
	       : unsignedp)
@@ -5214,9 +5217,12 @@ expand_debug_source_expr (tree exp)
       else
 	op0 = simplify_gen_unary (FIX, mode, op0, inner_mode);
     }
-  else if (CONSTANT_P (op0)
-	   || GET_MODE_BITSIZE (mode) <= GET_MODE_BITSIZE (inner_mode))
+  else if (GET_MODE_UNIT_PRECISION (mode)
+	   == GET_MODE_UNIT_PRECISION (inner_mode))
     op0 = lowpart_subreg (mode, op0, inner_mode);
+  else if (GET_MODE_UNIT_PRECISION (mode)
+	   < GET_MODE_UNIT_PRECISION (inner_mode))
+    op0 = simplify_gen_unary (TRUNCATE, mode, op0, inner_mode);
   else if (TYPE_UNSIGNED (TREE_TYPE (exp)))
     op0 = simplify_gen_unary (ZERO_EXTEND, mode, op0, inner_mode);
   else
Index: gcc/combine.c
===================================================================
--- gcc/combine.c	2017-08-23 10:48:32.453862693 +0100
+++ gcc/combine.c	2017-08-23 10:49:04.240219975 +0100
@@ -5825,7 +5825,7 @@ combine_simplify_rtx (rtx x, machine_mod

       if (GET_CODE (temp) == ASHIFTRT
	  && CONST_INT_P (XEXP (temp, 1))
-	  && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
+	  && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));

Index: gcc/cse.c
===================================================================
--- gcc/cse.c	2017-08-23 10:48:32.454857664 +0100
+++ gcc/cse.c	2017-08-23 10:49:04.241237877 +0100
@@ -3606,7 +3606,7 @@ fold_rtx (rtx x, rtx_insn *insn)
	  enum rtx_code associate_code;

	  if (is_shift
-	      && (INTVAL (const_arg1) >= GET_MODE_PRECISION (mode)
+	      && (INTVAL (const_arg1) >= GET_MODE_UNIT_PRECISION (mode)
		  || INTVAL (const_arg1) < 0))
	    {
	      if (SHIFT_COUNT_TRUNCATED)
@@ -3655,7 +3655,7 @@ fold_rtx (rtx x, rtx_insn *insn)
		break;

	      if (is_shift
-		  && (INTVAL (inner_const) >= GET_MODE_PRECISION (mode)
+		  && (INTVAL (inner_const) >= GET_MODE_UNIT_PRECISION (mode)
		      || INTVAL (inner_const) < 0))
		{
		  if (SHIFT_COUNT_TRUNCATED)
@@ -3686,7 +3686,7 @@ fold_rtx (rtx x, rtx_insn *insn)

	      if (is_shift
		  && CONST_INT_P (new_const)
-		  && INTVAL (new_const) >= GET_MODE_PRECISION (mode))
+		  && INTVAL (new_const) >= GET_MODE_UNIT_PRECISION (mode))
		{
		  /* As an exception, we can turn an ASHIFTRT of this
		     form into a shift of the number of bits - 1.  */
Index: gcc/optabs.c
===================================================================
--- gcc/optabs.c	2017-08-23 10:47:48.126586565 +0100
+++ gcc/optabs.c	2017-08-23 10:49:04.242255779 +0100
@@ -4658,7 +4658,8 @@ expand_float (rtx to, rtx from, int unsi
       int doing_unsigned = unsignedp;

       if (fmode != GET_MODE (to)
-	  && significand_size (fmode) < GET_MODE_PRECISION (GET_MODE (from)))
+	  && (significand_size (fmode)
+	      < GET_MODE_UNIT_PRECISION (GET_MODE (from))))
	continue;

       icode = can_float_p (fmode, imode, unsignedp);
Index: gcc/simplify-rtx.c
===================================================================
--- gcc/simplify-rtx.c	2017-08-23 10:47:48.127582392 +0100
+++ gcc/simplify-rtx.c	2017-08-23 10:49:04.242255779 +0100
@@ -1130,7 +1130,7 @@ simplify_unary_operation_1 (enum rtx_cod
	 C is equal to the width of MODE minus 1.  */
       if (GET_CODE (op) == ASHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
-	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
+	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

@@ -1138,7 +1138,7 @@ simplify_unary_operation_1 (enum rtx_cod
	 C is equal to the width of MODE minus 1.  */
       if (GET_CODE (op) == LSHIFTRT
	  && CONST_INT_P (XEXP (op, 1))
-	  && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
+	  && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

@@ -1438,19 +1438,21 @@ simplify_unary_operation_1 (enum rtx_cod

	      if (lcode == ASHIFTRT)
		/* Number of bits not shifted off the end.  */
-		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+		bits = (GET_MODE_UNIT_PRECISION (lmode)
+			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == SIGN_EXTEND */
		/* Size of inner mode.  */
-		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == ASHIFTRT)
-		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+		bits += (GET_MODE_UNIT_PRECISION (rmode)
+			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == SIGN_EXTEND */
-		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematiclly
		 equivalent.  I.e. if overflow was impossible.  */
-	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			   (MULT, mode,
			    simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
@@ -1475,8 +1477,8 @@ simplify_unary_operation_1 (enum rtx_cod
	 (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
       if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
	{
-	  gcc_assert (GET_MODE_PRECISION (mode)
-		      > GET_MODE_PRECISION (GET_MODE (op)));
+	  gcc_assert (GET_MODE_UNIT_PRECISION (mode)
+		      > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
	  return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
				     GET_MODE (XEXP (op, 0)));
	}
@@ -1575,19 +1577,21 @@ simplify_unary_operation_1 (enum rtx_cod

	      if (lcode == LSHIFTRT)
		/* Number of bits not shifted off the end.  */
-		bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
+		bits = (GET_MODE_UNIT_PRECISION (lmode)
+			- INTVAL (XEXP (lhs, 1)));
	      else /* lcode == ZERO_EXTEND */
		/* Size of inner mode.  */
-		bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));
+		bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));

	      if (rcode == LSHIFTRT)
-		bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
+		bits += (GET_MODE_UNIT_PRECISION (rmode)
+			 - INTVAL (XEXP (rhs, 1)));
	      else /* rcode == ZERO_EXTEND */
-		bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));
+		bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));

	      /* We can only widen multiplies if the result is mathematiclly
		 equivalent.  I.e. if overflow was impossible.  */
-	      if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
+	      if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
		return simplify_gen_binary
			   (MULT, mode,
			    simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
@@ -2133,7 +2137,6 @@ simplify_binary_operation_1 (enum rtx_co
 {
   rtx tem, reversed, opleft, opright;
   HOST_WIDE_INT val;
-  unsigned int width = GET_MODE_PRECISION (mode);

   /* Even if we can't compute a constant result,
      there are some cases worth simplifying.  */
@@ -2698,7 +2701,7 @@ simplify_binary_operation_1 (enum rtx_co
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
-	      == GET_MODE_PRECISION (mode)))
+	      == GET_MODE_UNIT_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));

      /* Same, but for ashift that has been "simplified" to a wider mode
@@ -3337,11 +3340,12 @@ simplify_binary_operation_1 (enum rtx_co
 #if defined(HAVE_rotate) && defined(HAVE_rotatert)
       if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
-		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
-		       GET_MODE_PRECISION (mode) - 1))
+		       GET_MODE_UNIT_PRECISION (mode) / 2 + (code == ROTATE),
+		       GET_MODE_UNIT_PRECISION (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
-				    mode, op0, GEN_INT (GET_MODE_PRECISION (mode)
-							- INTVAL (trueop1)));
+				    mode, op0,
+				    GEN_INT (GET_MODE_UNIT_PRECISION (mode)
+					     - INTVAL (trueop1)));
 #endif
       /* FALLTHRU */
     case ASHIFTRT:
@@ -3395,7 +3399,7 @@ simplify_binary_operation_1 (enum rtx_co

      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
-	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
+	  val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
@@ -3419,7 +3423,7 @@ simplify_binary_operation_1 (enum rtx_co
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
-	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
+	  && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;