Could either of you comment on the patch below?

All atomic RMW functions that return a value must imply full memory
barrier semantics (see Documentation/atomic_t.txt) -- and that very much
includes acting as a compiler barrier. The m68k inline asm for these
operations currently lacks a "memory" clobber, so the compiler is free
to cache values in registers across the asm statement and to reorder
unrelated loads and stores around it.
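
To illustrate why the clobber matters, here is a minimal sketch (not
from the kernel tree; inc_and_test_noclobber(), publish() and the two
flags are made-up names). The asm is volatile, so it cannot be deleted,
but without a "memory" clobber GCC only knows that *v is touched and
may move unrelated accesses across the statement:

static int data;
static int ready;

/* Like m68k atomic_inc_and_test(), but without the "memory" clobber. */
static inline int inc_and_test_noclobber(int *v)
{
	char c;

	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

void publish(int *counter)
{
	data = 42;	/* may legally be sunk below the asm */
	inc_and_test_noclobber(counter);
	ready = 1;	/* may legally be hoisted above the asm */
}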



---

 arch/m68k/include/asm/atomic.h  | 19 ++++++++++++-------
 arch/m68k/include/asm/cmpxchg.h |  9 ++++++---
 2 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 3e03de7ae33b..062a60417cb9 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -56,7 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
-                       : "g" (i), "2" (atomic_read(v)));               \
+                       : "g" (i), "2" (atomic_read(v))                 \
+                       : "memory");                                    \
        return t;                                                       \
 }
 
@@ -71,7 +72,8 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)	\
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
-                       : "g" (i), "2" (atomic_read(v)));               \
+                       : "g" (i), "2" (atomic_read(v))                 \
+                       : "memory");                                    \
        return tmp;                                                     \
 }
 
@@ -141,7 +143,7 @@ static inline void atomic_dec(atomic_t *v)
 static inline int atomic_dec_and_test(atomic_t *v)
 {
        char c;
-       __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+       __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v) : : 
"memory");
        return c != 0;
 }
 
@@ -151,14 +153,15 @@ static inline int atomic_dec_and_test_lt(atomic_t *v)
        __asm__ __volatile__(
                "subql #1,%1; slt %0"
                : "=d" (c), "=m" (*v)
-               : "m" (*v));
+               : "m" (*v)
+               : "memory");
        return c != 0;
 }
 
 static inline int atomic_inc_and_test(atomic_t *v)
 {
        char c;
-       __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
+       __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v) : : 
"memory");
        return c != 0;
 }
 
@@ -204,7 +207,8 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
        char c;
        __asm__ __volatile__("subl %2,%1; seq %0"
                             : "=d" (c), "+m" (*v)
-                            : ASM_DI (i));
+                            : ASM_DI (i)
+                            : "memory");
        return c != 0;
 }
 
@@ -213,7 +217,8 @@ static inline int atomic_add_negative(int i, atomic_t *v)
        char c;
        __asm__ __volatile__("addl %2,%1; smi %0"
                             : "=d" (c), "+m" (*v)
-                            : ASM_DI (i));
+                            : ASM_DI (i)
+                            : "memory");
        return c != 0;
 }
 
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index 83b1df80f0ac..d8b3d2b48785 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -98,17 +98,20 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
        case 1:
                __asm__ __volatile__ ("casb %0,%2,%1"
                                      : "=d" (old), "=m" (*(char *)p)
-                                     : "d" (new), "0" (old), "m" (*(char *)p));
+                                     : "d" (new), "0" (old), "m" (*(char *)p)
+                                     : "memory");
                break;
        case 2:
                __asm__ __volatile__ ("casw %0,%2,%1"
                                      : "=d" (old), "=m" (*(short *)p)
-                                     : "d" (new), "0" (old), "m" (*(short 
*)p));
+                                     : "d" (new), "0" (old), "m" (*(short *)p)
+                                     : "memory");
                break;
        case 4:
                __asm__ __volatile__ ("casl %0,%2,%1"
                                      : "=d" (old), "=m" (*(int *)p)
-                                     : "d" (new), "0" (old), "m" (*(int *)p));
+                                     : "d" (new), "0" (old), "m" (*(int *)p)
+                                     : "memory");
                break;
        default:
                old = __invalid_cmpxchg_size(p, old, new, size);

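For reference, a typical caller pattern that depends on these operations
being fully ordered (a hedged sketch only; struct obj, put_obj() and the
payload field are invented for illustration):

#include <linux/atomic.h>
#include <linux/slab.h>

struct obj {
	atomic_t refs;
	int payload;
};

void put_obj(struct obj *o)
{
	o->payload = 0;		/* store must not leak past the RMW */
	if (atomic_dec_and_test(&o->refs))
		kfree(o);	/* safe only if the dec is fully ordered */
}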