https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49244

--- Comment #12 from Jakub Jelinek <jakub at gcc dot gnu.org> ---
The attached patch seems to work (I have only eyeballed the assembly of the
following testcase; I haven't turned it into a runtime testcase that would
verify correct behavior yet).  It should detect the atomic bit
test-and-{set,complement,reset} patterns both for a compile-time-known bit and
for a variable bit, though the latter I guess only for int/long/long long, not
for short (and for char there isn't even a bt?b instruction).  The reason short
is problematic is C integral promotions: for
unsigned short mask = (unsigned short) 1 << cnt; one really doesn't know
whether the mask has exactly one bit set - it can just as well have zero bits
set without invoking undefined behavior, while for int/long/long long,
1 << cnt is non-zero in every case that doesn't invoke undefined behavior.
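
As a concrete illustration of the short problem (hypothetical helper, not part
of the testcase below):

unsigned short
short_mask (int bit)
{
  /* For bit == 16 (with 32-bit int), (unsigned short) 1 is first promoted to
     int, 1 << 16 is 0x10000 with no undefined behavior, and the assignment
     truncates that to 0 - so this "mask" need not have any bit set at all.  */
  unsigned short mask = (unsigned short) 1 << bit;
  return mask;
}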

The patch currently recognizes just the fetch_and_op builtins; for xor it
could in theory also recognize the op_and_fetch case and just invert the
tested bit afterwards (see the sketch below).
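
A sketch of such an op_and_fetch case (not part of the testcase below, the
function name is made up): because __atomic_xor_fetch returns old ^ mask,
testing the bit in its result is the same as testing the inverted bit in the
old value, so the same bit-test-and-complement sequence would work with the
condition flipped.

int
f_xor_fetch (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  /* (new & mask) != 0 is equivalent to (old & mask) == 0, i.e. the negated
     bit test of the pre-xor value.  */
  return (__atomic_xor_fetch (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}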

-O2 -g -m64:

void bar (void);
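
/* f1-f6: bit test-and-set - fetch_or with the tested bit; f1-f3 use a
   variable bit, f4-f6 a constant bit.  */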

int
f1 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__sync_fetch_and_or (a, mask) & mask) != 0;
}

int
f2 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  unsigned int t1 = __atomic_fetch_or (a, mask, __ATOMIC_RELAXED);
  unsigned int t2 = t1 & mask;
  return t2 != 0;
}

long int
f3 (long int *a, int bit)
{
  unsigned long int mask = (1ul << bit);
  return (__atomic_fetch_or (a, mask, __ATOMIC_SEQ_CST) & mask) == 0;
}

int
f4 (int *a)
{
  unsigned int mask = (1u << 7);
  return (__sync_fetch_and_or (a, mask) & mask) != 0;
}

int
f5 (int *a)
{
  unsigned int mask = (1u << 13);
  return (__atomic_fetch_or (a, mask, __ATOMIC_RELAXED) & mask) != 0;
}

int
f6 (int *a)
{
  unsigned int mask = (1u << 0);
  return (__atomic_fetch_or (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
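
/* f7-f12: bit test-and-complement - fetch_xor with the tested bit; f7-f9 use
   a variable bit, f10-f12 a constant bit.  */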

void
f7 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  if ((__sync_fetch_and_xor (a, mask) & mask) != 0)
    bar ();
}

void
f8 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  if ((__atomic_fetch_xor (a, mask, __ATOMIC_RELAXED) & mask) == 0)
    bar ();
}

int
f9 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__atomic_fetch_xor (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

int
f10 (int *a)
{
  unsigned int mask = (1u << 7);
  return (__sync_fetch_and_xor (a, mask) & mask) != 0;
}

int
f11 (int *a)
{
  unsigned int mask = (1u << 13);
  return (__atomic_fetch_xor (a, mask, __ATOMIC_RELAXED) & mask) != 0;
}

int
f12 (int *a)
{
  unsigned int mask = (1u << 0);
  return (__atomic_fetch_xor (a, mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
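
/* f13-f18: bit test-and-reset - fetch_and with the inverted mask; f13-f15 use
   a variable bit, f16-f18 a constant bit.  */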

int
f13 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__sync_fetch_and_and (a, ~mask) & mask) != 0;
}

int
f14 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__atomic_fetch_and (a, ~mask, __ATOMIC_RELAXED) & mask) != 0;
}

int
f15 (int *a, int bit)
{
  unsigned int mask = (1u << bit);
  return (__atomic_fetch_and (a, ~mask, __ATOMIC_SEQ_CST) & mask) != 0;
}

int
f16 (int *a)
{
  unsigned int mask = (1u << 7);
  return (__sync_fetch_and_and (a, ~mask) & mask) != 0;
}

int
f17 (int *a)
{
  unsigned int mask = (1u << 13);
  return (__atomic_fetch_and (a, ~mask, __ATOMIC_RELAXED) & mask) != 0;
}

int
f18 (int *a)
{
  unsigned int mask = (1u << 0);
  return (__atomic_fetch_and (a, ~mask, __ATOMIC_SEQ_CST) & mask) != 0;
}
