The zero_or_v7_operand hack could alternatively be handled by
forcing MO_SC for v7 in sparc_options_override.
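
A rough sketch of that alternative (untested, and the spelling of the
memory model variable here is a guess on my part):

  static void
  sparc_options_override (void)
  {
    /* ... existing option processing ...  */

    /* Hypothetical: pretend V7 is sequentially consistent.  With the
       MO_SC case now setting all of the implied bits, atomic-op
       barriers would be elided entirely, and raw barriers would
       degrade to membar 0, which *membar_empty already matches via
       const0_rtx, making the V7 test in the predicate unnecessary.  */
    if (!TARGET_V8 && !TARGET_V9)
      sparc_memory_model = MO_SC;
  }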
---
 gcc/config/sparc/predicates.md |    4 ++++
 gcc/config/sparc/sparc.c       |   14 +++++++++-----
 gcc/config/sparc/sync.md       |   15 +++++++++++++++
 3 files changed, 28 insertions(+), 5 deletions(-)
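
For illustration only (not part of the patch; assumes the new __atomic
builtins), the sync.md change below affects code like this:

  int flag;

  void publish (int *data)
  {
    *data = 42;
    /* A raw barrier now always emits a membar insn.  When the memory
       model already implies all of the requested ordering, or on V7,
       it matches *membar_empty and assembles to nothing while still
       acting as a compiler-level barrier.  */
    __atomic_thread_fence (__ATOMIC_SEQ_CST);
    flag = 1;
  }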

diff --git a/gcc/config/sparc/predicates.md b/gcc/config/sparc/predicates.md
index c886b86..81e5320 100644
--- a/gcc/config/sparc/predicates.md
+++ b/gcc/config/sparc/predicates.md
@@ -111,6 +111,10 @@
 (define_predicate "const_double_or_vector_operand"
   (match_code "const_double,const_vector"))
 
+;; Return true if OP is zero, or if the target is V7.
+(define_predicate "zero_or_v7_operand"
+  (ior (match_test "op == const0_rtx")
+       (match_test "!TARGET_V8 && !TARGET_V9")))
 
 ;; Predicates for symbolic constants.
 
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index 788063d..22755cb 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -10880,8 +10880,9 @@ sparc_emit_membar_for_model (enum memmodel model,
     {
     case MO_SC:
       /* Sequential Consistency.  All memory transactions are immediately
-        visible in sequential execution order.  No barriers needed.  */
-      return;
+        visible in sequential execution order.  */
+      implied = LoadLoad | StoreLoad | LoadStore | StoreStore;
+      break;
 
     case MO_TSO:
       /* Total Store Ordering: all memory transactions with store semantics
@@ -10894,8 +10895,9 @@ sparc_emit_membar_for_model (enum memmodel model,
         are followed by an implied LoadLoad | LoadStore.  */
       implied |= LoadLoad | LoadStore;
 
-      /* If we're not looking for a raw barrer (before+after), then atomic
-        operations get the benefit of being both load and store.  */
+      /* Atomic operations get the benefit of being both load and store.
+        Be careful to only apply this after the atomic op, and not to
+        apply this to raw barriers (before+after == 3).  */
       if (load_store == 3 && before_after == 2)
        implied |= StoreLoad | StoreStore;
       /* FALLTHRU */
@@ -10936,7 +10938,9 @@ sparc_emit_membar_for_model (enum memmodel model,
   /* Remove the bits implied by the system memory model.  */
   mm &= ~implied;
 
-  if (mm)
+  /* For raw barriers (before+after == 3), always emit a barrier.
+     This will become a compile-time barrier if needed.  */
+  if (mm || before_after == 3)
     emit_insn (gen_membar (GEN_INT (mm)));
 }
 
diff --git a/gcc/config/sparc/sync.md b/gcc/config/sparc/sync.md
index 821d59e..bdb10b7 100644
--- a/gcc/config/sparc/sync.md
+++ b/gcc/config/sparc/sync.md
@@ -44,6 +44,21 @@
   MEM_VOLATILE_P (operands[1]) = 1;
 })
 
+;; A compiler-only memory barrier.  Generic code, when checking for the
+;; existence of various named patterns, uses asm("":::"memory") when we
+;; don't need an actual instruction.  Here, it's easiest to pretend that
+;; membar 0 is such a barrier.  Further, this gives us a nice hook to
+;; ignore all such barriers on Sparc V7.
+
+(define_insn "*membar_empty"
+  [(set (match_operand:BLK 0 "" "")
+       (unspec:BLK [(match_dup 0) (match_operand:SI 1 "zero_or_v7_operand")]
+                   UNSPEC_MEMBAR))]
+  ""
+  ""
+  [(set_attr "type" "multi")
+   (set_attr "length" "0")])
+
 ;; In V8, loads are blocking and ordered wrt earlier loads, i.e. every load
 ;; is virtually followed by a load barrier (membar #LoadStore | #LoadLoad).
 ;; In PSO, stbar orders the stores (membar #StoreStore).
-- 
1.7.4.4
