https://gcc.gnu.org/bugzilla/show_bug.cgi?id=71240

--- Comment #6 from Jakub Jelinek <jakub at gcc dot gnu.org> ---
Perhaps a better testcase that also shows a missed optimization:
struct L { unsigned int l[2]; };
union U { double a; struct L l; } u;

void
foo (double a, struct L *p)
{
  u.a = a;
  struct L l = u.l, m;
  m.l[0] = (((l.l[1] & 0xff000000) >> 24)
            | ((l.l[1] & 0x00ff0000) >> 8)
            | ((l.l[1] & 0x0000ff00) << 8)
            | ((l.l[1] & 0x000000ff) << 24));
  m.l[1] = (((l.l[0] & 0xff000000) >> 24)
            | ((l.l[0] & 0x00ff0000) >> 8)
            | ((l.l[0] & 0x0000ff00) << 8)
            | ((l.l[0] & 0x000000ff) << 24));
  *p = m;
}

void
bar (double a, struct L *p)
{
  foo (a, p);
}

Ideally we should turn this into a 64-bit bswap, or at least two 32-bit bswaps,
but we only manage to turn it into one 32-bit bswap (the low 32 bits of the
double).
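
For reference, a rough sketch (not current GCC output; foo_expected is just an
illustrative name and the store to the global u is left out) of what the value
stored to *p amounts to, i.e. a single 64-bit byte swap of the double's bit
pattern:

void
foo_expected (double a, struct L *p)
{
  unsigned long long v;
  /* Reinterpret the double as a 64-bit integer, byte-reverse it and store
     it; equivalently m.l[0] = __builtin_bswap32 (l.l[1]) and
     m.l[1] = __builtin_bswap32 (l.l[0]).  */
  __builtin_memcpy (&v, &a, sizeof v);
  v = __builtin_bswap64 (v);
  __builtin_memcpy (p, &v, sizeof v);
}
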
Guess not removing the BIT_FIELD_REF is not the right thing; we could
originally have had BIT_FIELD_REFs picking just 8 bits out of the double
instead, as in the following (which ICEs too):

struct L { unsigned char l[8]; };
struct M { unsigned int m[2]; };
union U { double a; struct L l; } u;

void
foo (double a, struct M *p)
{
  u.a = a;
  struct L l = u.l;
  struct M m;
  m.m[0] = l.l[7] | (l.l[6] << 8) | (l.l[5] << 16) | (l.l[4] << 24);
  m.m[1] = l.l[3] | (l.l[2] << 8) | (l.l[1] << 16) | (l.l[0] << 24);
  *p = m;
}

void
bar (double a, struct M *p)
{
  foo (a, p);
}

Here, we end up with src_stmt that has _7 = BIT_FIELD_REF <a_2(D), 8, 24>;
but we'd still want to use BIT_FIELD_REF <a_2(D), 32, 0>.  And again, we
transform only one of the two bswaps.
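
For reference, a rough sketch (little-endian target assumed; foo_expected is
just an illustrative name and the store to the global u is left out) of the
two 32-bit bswaps we would like to get here:

void
foo_expected (double a, struct M *p)
{
  unsigned long long v;
  /* v is the bit pattern of the double; byte-reverse each 32-bit half.  */
  __builtin_memcpy (&v, &a, sizeof v);
  p->m[0] = __builtin_bswap32 ((unsigned int) (v >> 32));
  p->m[1] = __builtin_bswap32 ((unsigned int) v);
}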

BTW,
struct L { unsigned char l[8]; };
union U { double a; struct L l; } u;

void
foo (double a, unsigned long long *p)
{
  u.a = a;
  struct L l = u.l;
  unsigned long long m;
  m = l.l[7] | (l.l[6] << 8) | (l.l[5] << 16) | (l.l[4] << 24);
  m |= ((unsigned long long) (l.l[3] | (l.l[2] << 8) | (l.l[1] << 16)
                              | (l.l[0] << 24))) << 32;
  *p = m;
}

void
bar (double a, unsigned long long *p)
{
  foo (a, p);
}

isn't detected as a 64-bit bswap either, but it still ICEs.
And
struct L { unsigned char l[8]; };
union U { double a; struct L l; } u;

void
foo (double a, unsigned long long *p)
{
  u.a = a;
  struct L l = u.l;
  unsigned long long m;
  m = l.l[7] | (l.l[6] << 8) | (l.l[5] << 16) | (l.l[4] << 24)
      | ((unsigned long long) l.l[3] << 32)
      | ((unsigned long long) l.l[2] << 40)
      | ((unsigned long long) l.l[1] << 48)
      | ((unsigned long long) l.l[0] << 56);
  *p = m;
}

void
bar (double a, unsigned long long *p)
{
  foo (a, p);
}

isn't detected as a 64-bit bswap either; in fact nothing is detected there at
all.
