Hi!

When compiling gcc.c-torture/execute/pr103376.c we trigger UB in the compiler
in the following code.  n->range is 8 because it is a 64-bit load, and rsize
is 0 because it is a bswap sequence with a load whose result is known to be 0:
  /* Find real size of result (highest non-zero byte).  */
  if (n->base_addr)
    for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_MARKER, rsize++);
  else
    rsize = n->range;
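For the pr103376.c case the result of the sequence is known to be zero, so
every byte marker in n->n is 0 and the loop body never runs.  A tiny
standalone illustration (my own sketch, assuming BITS_PER_MARKER is 8 as in
this file, with <stdint.h> included):
  /* All byte markers are 0 (value known to be zero), so the loop
     condition is false on entry and rsize stays 0.  */
  uint64_t tmpn = 0;   /* stands in for n->n */
  int rsize;
  for (rsize = 0; tmpn; tmpn >>= 8 /* BITS_PER_MARKER */, rsize++)
    ;
  /* Here rsize == 0 while n->range == 8 for the 64-bit load.  */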
The shifts below then shift a uint64_t by 64 bits, which is undefined
behavior.  For this case mask is 0 and we want both *cmpxchg and *cmpnop
to be 0: the operation can be done as both a nop and a bswap, and callers
will prefer the nop.
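Here is a minimal self-contained sketch of the guarded variant (my own
illustration of the idea, not the GCC function itself; the hypothetical
finalize_sketch helper hard-codes BITS_PER_MARKER == 8 and mirrors only the
first branch of the hunk below, the other branch is symmetric):
  #include <stdint.h>

  /* Assumes 0 <= rsize < range <= 8, as guaranteed by the caller.  */
  static void
  finalize_sketch (int range, int rsize, uint64_t *cmpxchg, uint64_t *cmpnop)
  {
    uint64_t mask = ((uint64_t) 1 << (rsize * 8)) - 1;
    *cmpxchg &= mask;                   /* mask == 0 when rsize == 0.  */
    if (range - rsize == 8)
      *cmpnop = 0;                      /* shifting by 64 bits would be UB.  */
    else
      *cmpnop >>= (range - rsize) * 8;
  }
With range == 8 and rsize == 0 both outputs end up 0, which is the behavior
the patch wants.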

Bootstrapped/regtested on x86_64-linux and i686-linux, ok for trunk?

2021-11-27  Jakub Jelinek  <ja...@redhat.com>

        PR tree-optimization/103435
        * gimple-ssa-store-merging.c (find_bswap_or_nop_finalize): Avoid UB
        when n->range - rsize == 8; just clear both *cmpnop and *cmpxchg in
        that case.

--- gcc/gimple-ssa-store-merging.c.jj   2021-11-25 10:47:07.000000000 +0100
+++ gcc/gimple-ssa-store-merging.c      2021-11-26 10:54:11.959800560 +0100
@@ -871,12 +871,18 @@ find_bswap_or_nop_finalize (struct symbo
        {
          mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
          *cmpxchg &= mask;
-         *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
+         if (n->range - rsize == sizeof (int64_t))
+           *cmpnop = 0;
+         else
+           *cmpnop >>= (n->range - rsize) * BITS_PER_MARKER;
        }
       else
        {
          mask = ((uint64_t) 1 << (rsize * BITS_PER_MARKER)) - 1;
-         *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
+         if (n->range - rsize == sizeof (int64_t))
+           *cmpxchg = 0;
+         else
+           *cmpxchg >>= (n->range - rsize) * BITS_PER_MARKER;
          *cmpnop &= mask;
        }
       n->range = rsize;

        Jakub
