As described in the PR, we were performing an unconditional store back to the
EXPECT parameter.  This is fine so long as EXPECT is not in thread-shared
memory, e.g. when it is a local variable.  But the example in the PR keeps it
in shared memory, where the extra store introduces a race condition.
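
To make the failure mode concrete, here is a minimal sketch (not the PR
testcase; the names are illustrative) of a caller that keeps EXPECT in memory
visible to other threads.  Before this patch, the expansion stored the old
value back through the second argument even when the exchange succeeded, and
any thread concurrently reading that location raced with the extra store:

  /* Illustrative only.  EXPECT lives in shared memory, so a store back
     to it is visible to (and races with) other threads, even when the
     CAS succeeds.  */
  int shared_mem;
  int shared_expect;

  _Bool
  try_update (int desired)
  {
    /* C11 semantics: the expected value is written back only on failure.  */
    return __atomic_compare_exchange_n (&shared_mem, &shared_expect, desired,
                                        0 /* weak */, __ATOMIC_SEQ_CST,
                                        __ATOMIC_SEQ_CST);
  }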

I've left a note in the code about revisiting this conditional store once we
have amacleod's gimple atomic work committed, where we'd be able to tell (at
some point during optimization) whether EXPECT is local, e.g. an SSA_NAME.

Tested on x86_64 and i686, and by manually inspecting the generated code.
Any ideas on how to regression test this?


r~
gcc/
        * builtins.c (expand_builtin_atomic_compare_exchange): Make the
        store back to EXPECT conditional on failure.
libatomic/
        * cas_n.c (libat_compare_exchange): Make the store back to EPTR
        conditional on failure.


diff --git a/gcc/builtins.c b/gcc/builtins.c
index f5f55bf..09fefe50 100644
--- a/gcc/builtins.c
+++ b/gcc/builtins.c
@@ -5292,7 +5292,7 @@ static rtx
 expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp, 
                                        rtx target)
 {
-  rtx expect, desired, mem, oldval;
+  rtx expect, desired, mem, oldval, label;
   enum memmodel success, failure;
   tree weak;
   bool is_weak;
@@ -5330,14 +5330,23 @@ expand_builtin_atomic_compare_exchange (enum machine_mode mode, tree exp,
   if (tree_fits_shwi_p (weak) && tree_to_shwi (weak) != 0)
     is_weak = true;
 
+  if (target == const0_rtx)
+    target = NULL;
   oldval = expect;
-  if (!expand_atomic_compare_and_swap ((target == const0_rtx ? NULL : &target),
-                                      &oldval, mem, oldval, desired,
+
+  if (!expand_atomic_compare_and_swap (&target, &oldval, mem, oldval, desired,
                                       is_weak, success, failure))
     return NULL_RTX;
 
-  if (oldval != expect)
-    emit_move_insn (expect, oldval);
+  /* Conditionally store back to EXPECT, lest we create a race condition
+     with an improper store to memory.  */
+  /* ??? With a rearrangement of atomics at the gimple level, we can handle
+     the normal case where EXPECT is totally private, i.e. a register.  At
+     which point the store can be unconditional.  */
+  label = gen_label_rtx ();
+  emit_cmp_and_jump_insns (target, const0_rtx, NE, NULL, VOIDmode, 1, label);
+  emit_move_insn (expect, oldval);
+  emit_label (label);
 
   return target;
 }
diff --git a/libatomic/cas_n.c b/libatomic/cas_n.c
index 39c7833..801262d 100644
--- a/libatomic/cas_n.c
+++ b/libatomic/cas_n.c
@@ -51,10 +51,9 @@ SIZE(libat_compare_exchange) (UTYPE *mptr, UTYPE *eptr, UTYPE newval,
 #if !DONE && N <= WORDSIZE && defined(atomic_compare_exchange_w)
 bool
 SIZE(libat_compare_exchange) (UTYPE *mptr, UTYPE *eptr, UTYPE newval,
-                             int smodel, int fmodel UNUSED)
+                             int smodel, int fmodel)
 {
   UWORD mask, shift, weval, woldval, wnewval, t, *wptr;
-  bool ret = false;
 
   pre_barrier (smodel);
 
@@ -82,12 +81,13 @@ SIZE(libat_compare_exchange) (UTYPE *mptr, UTYPE *eptr, UTYPE newval,
     }
   while (!atomic_compare_exchange_w (wptr, &woldval, t, true,
                                     __ATOMIC_RELAXED, __ATOMIC_RELAXED));
-  ret = true;
+  post_barrier (smodel);
+  return true;
+
  failure:
   *eptr = woldval >> shift;
-
-  post_barrier (smodel);
-  return ret;
+  post_barrier (fmodel);
+  return false;
 }
 
 #define DONE 1
@@ -102,18 +102,17 @@ SIZE(libat_compare_exchange) (UTYPE *mptr, UTYPE *eptr, UTYPE newval,
 {
   UTYPE oldval;
   UWORD magic;
-  bool ret = false;
+  bool ret;
 
   pre_seq_barrier (smodel);
   magic = protect_start (mptr);
 
   oldval = *mptr;
-  if (oldval == *eptr)
-    {
-      *mptr = newval;
-      ret = true;
-    }
-  *eptr = oldval;
+  ret = (oldval == *eptr);
+  if (ret)
+    *mptr = newval;
+  else
+    *eptr = oldval;
 
   protect_end (mptr, magic);
   post_seq_barrier (smodel);
