smallest_int_mode_for_size now returns an optional mode rather than aborting on failure. This patch adjusts a couple of callers so that they fail gracefully when no mode exists.
There should be no behavioural change, since anything that triggers the new return paths would previously have aborted. I just think this is how the code would have been written if the option had been available earlier. Tested on aarch64-linux-gnu. OK to install? Richard gcc/ * dse.cc (find_shift_sequence): Allow smallest_int_mode_for_size to fail. * optabs.cc (expand_twoval_binop_libfunc): Likewise. --- gcc/dse.cc | 16 ++++++++-------- gcc/optabs.cc | 6 ++++-- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/gcc/dse.cc b/gcc/dse.cc index c3feff06f86..75825a44cb9 100644 --- a/gcc/dse.cc +++ b/gcc/dse.cc @@ -1717,12 +1717,12 @@ dump_insn_info (const char * start, insn_info_t insn_info) line up, we need to extract the value from lower part of the rhs of the store, shift it, and then put it into a form that can be shoved into the read_insn. This function generates a right SHIFT of a - value that is at least ACCESS_SIZE bytes wide of READ_MODE. The + value that is at least ACCESS_BYTES bytes wide of READ_MODE. The shift sequence is returned or NULL if we failed to find a shift. */ static rtx -find_shift_sequence (poly_int64 access_size, +find_shift_sequence (poly_int64 access_bytes, store_info *store_info, machine_mode read_mode, poly_int64 shift, bool speed, bool require_cst) @@ -1734,11 +1734,11 @@ find_shift_sequence (poly_int64 access_size, /* If a constant was stored into memory, try to simplify it here, otherwise the cost of the shift might preclude this optimization e.g. at -Os, even when no actual shift will be needed. 
*/ + auto access_bits = access_bytes * BITS_PER_UNIT; if (store_info->const_rhs - && known_le (access_size, GET_MODE_SIZE (MAX_MODE_INT))) + && known_le (access_bytes, GET_MODE_SIZE (MAX_MODE_INT)) + && smallest_int_mode_for_size (access_bits).exists (&new_mode)) { - auto new_mode = smallest_int_mode_for_size - (access_size * BITS_PER_UNIT).require (); auto byte = subreg_lowpart_offset (new_mode, store_mode); rtx ret = simplify_subreg (new_mode, store_info->const_rhs, store_mode, byte); @@ -1810,7 +1810,7 @@ find_shift_sequence (poly_int64 access_size, } } - if (maybe_lt (GET_MODE_SIZE (new_mode), access_size)) + if (maybe_lt (GET_MODE_SIZE (new_mode), access_bytes)) continue; new_reg = gen_reg_rtx (new_mode); @@ -1839,8 +1839,8 @@ find_shift_sequence (poly_int64 access_size, of the arguments and could be precomputed. It may not be worth doing so. We could precompute if worthwhile or at least cache the results. The result - technically depends on both SHIFT and ACCESS_SIZE, - but in practice the answer will depend only on ACCESS_SIZE. */ + technically depends on both SHIFT and ACCESS_BYTES, + but in practice the answer will depend only on ACCESS_BYTES. */ if (cost > COSTS_N_INSNS (1)) continue; diff --git a/gcc/optabs.cc b/gcc/optabs.cc index ded9cc3d947..2bcb3f7b47a 100644 --- a/gcc/optabs.cc +++ b/gcc/optabs.cc @@ -2551,8 +2551,10 @@ expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1, /* The value returned by the library function will have twice as many bits as the nominal MODE. */ - libval_mode - = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode)).require (); + auto return_size = 2 * GET_MODE_BITSIZE (mode); + if (!smallest_int_mode_for_size (return_size).exists (&libval_mode)) + return false; + start_sequence (); libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, libval_mode, -- 2.25.1