rwlock: allow arch to override read_unlock() atomic

On x86, LOCK DEC is cheaper than LOCK CMPXCHG and doesn't require a
retry loop around it.

Signed-off-by: Jan Beulich <jbeul...@suse.com>
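
[Not part of the patch: a standalone C sketch of the two unlock
strategies, for reference. The function names are hypothetical, and
GCC's __sync_val_compare_and_swap() builtin stands in for Xen's
cmpxchg() so the sketch compiles on its own.]

#include <stdint.h>

/* Generic fallback: read the counter, attempt a compare-and-swap,
 * and retry whenever another CPU changed the value in between. */
static void dec_via_cmpxchg(volatile uint32_t *counter)
{
    uint32_t x = *counter, y;

    while ( (y = __sync_val_compare_and_swap(counter, x, x - 1)) != x )
        x = y;  /* lost a race; retry with the freshly observed value */
}

/* x86 path: a single LOCKed decrement is one atomic RMW,
 * so there is nothing to retry. */
static void dec_via_lock_dec(volatile uint32_t *counter)
{
    asm volatile ( "lock; decl %0" : "+m" (*counter) :: "memory" );
}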

--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -333,14 +333,18 @@ int _read_trylock(rwlock_t *lock)
     return 1;
 }
 
-void _read_unlock(rwlock_t *lock)
-{
-    uint32_t x, y;
+#ifndef _raw_read_unlock
+# define _raw_read_unlock(l) do {                      \
+    uint32_t x = (l)->lock, y;                         \
+    while ( (y = cmpxchg(&(l)->lock, x, x - 1)) != x ) \
+        x = y;                                         \
+} while (0)
+#endif
 
+inline void _read_unlock(rwlock_t *lock)
+{
     preempt_enable();
-    x = lock->lock;
-    while ( (y = cmpxchg(&lock->lock, x, x-1)) != x )
-        x = y;
+    _raw_read_unlock(lock);
 }
 
 void _read_unlock_irq(rwlock_t *lock)
--- 2014-08-25.orig/xen/include/asm-x86/spinlock.h      2014-10-07 14:42:41.000000000 +0200
+++ 2014-08-25/xen/include/asm-x86/spinlock.h   2014-10-07 15:33:18.000000000 +0200
@@ -31,4 +31,7 @@ static always_inline int _raw_spin_trylo
     return (oldval > 0);
 }
 
+#define _raw_read_unlock(l) \
+    asm volatile ( "lock; dec%z0 %0" : "+m" ((l)->lock) :: "memory" )
+
 #endif /* __ASM_SPINLOCK_H */
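
[Aside, not part of the patch: "%z0" is GCC's x86 operand-size
modifier; it expands to the opcode suffix matching the width of
operand 0, so for the 32-bit lock field the emitted instruction is
"lock; decl". A minimal standalone check, assuming a plain GCC
toolchain:

#include <stdint.h>

void dec32(uint32_t *p)
{
    /* %z0 -> 'l' here, i.e. "lock; decl" */
    asm volatile ( "lock; dec%z0 %0" : "+m" (*p) :: "memory" );
}

void dec64(uint64_t *p)
{
    /* %z0 -> 'q' here, i.e. "lock; decq" */
    asm volatile ( "lock; dec%z0 %0" : "+m" (*p) :: "memory" );
}

Compile with "gcc -O2 -S" and inspect the generated assembly.]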