https://gcc.gnu.org/bugzilla/show_bug.cgi?id=112758
--- Comment #16 from Jakub Jelinek <jakub at gcc dot gnu.org> ---
Here is what I'd propose, but I can't really test it on any
WORD_REGISTER_OPERATIONS target.

2023-12-21  Jakub Jelinek  <ja...@redhat.com>

	PR rtl-optimization/112758
	* combine.cc (make_compound_operation_int): Optimize AND of a SUBREG
	based on nonzero_bits of SUBREG_REG and constant mask on
	WORD_REGISTER_OPERATIONS targets only if it is a zero extending
	MEM load.

	* gcc.c-torture/execute/pr112758.c: New test.

--- gcc/combine.cc.jj	2023-12-11 23:52:03.528513943 +0100
+++ gcc/combine.cc	2023-12-21 20:25:45.461737423 +0100
@@ -8227,12 +8227,20 @@ make_compound_operation_int (scalar_int_
 	  int sub_width;
 	  if ((REG_P (sub) || MEM_P (sub))
 	      && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
-	      && sub_width < mode_width)
+	      && sub_width < mode_width
+	      && (!WORD_REGISTER_OPERATIONS
+		  || sub_width >= BITS_PER_WORD
+		  /* On WORD_REGISTER_OPERATIONS targets the bits
+		     beyond sub_mode aren't considered undefined,
+		     so optimize only if it is a MEM load when MEM loads
+		     zero extend, because then the upper bits are all zero.  */
+		  || (MEM_P (sub)
+		      && load_extend_op (sub_mode) == ZERO_EXTEND)))
 	    {
 	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
 	      unsigned HOST_WIDE_INT mask;
 
-	      /* original AND constant with all the known zero bits set */
+	      /* Original AND constant with all the known zero bits set.  */
 	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
 	      if ((mask & mode_mask) == mode_mask)
 		{
--- gcc/testsuite/gcc.c-torture/execute/pr112758.c.jj	2023-12-21 21:01:43.780755959 +0100
+++ gcc/testsuite/gcc.c-torture/execute/pr112758.c	2023-12-21 21:01:30.521940358 +0100
@@ -0,0 +1,15 @@
+/* PR rtl-optimization/112758 */
+
+int a = -__INT_MAX__ - 1;
+
+int
+main ()
+{
+  if (-__INT_MAX__ - 1U == 0x80000000ULL)
+    {
+      unsigned long long b = 0xffff00ffffffffffULL;
+      if ((b & a) != 0xffff00ff80000000ULL)
+	__builtin_abort ();
+    }
+  return 0;
+}
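
For reference only (not part of the patch), a minimal standalone sketch of the
arithmetic the new testcase depends on, assuming 32-bit int and 64-bit long
long: converting 'a' (which holds INT_MIN) to unsigned long long sign-extends
it, so the bits beyond the narrow mode are all ones rather than zero, and the
AND with the constant mask must still be performed in full.

/* Illustrative sketch, not part of the patch.  */
int a = -__INT_MAX__ - 1;	/* INT_MIN, 32-bit pattern 0x80000000.  */

int
main ()
{
  unsigned long long b = 0xffff00ffffffffffULL;
  /* (unsigned long long) a == 0xffffffff80000000ULL via sign extension,
     so the upper 32 bits are not zero and b & a must be
     0xffff00ff80000000ULL; treating those upper bits as zero and
     simplifying the masking away is what the combine change prevents
     on WORD_REGISTER_OPERATIONS targets.  */
  unsigned long long r = b & a;
  __builtin_printf ("%llx\n", r);	/* Prints ffff00ff80000000.  */
  return 0;
}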