Doing a cmpxchg to check sem->count causes the cache line to bounce
between CPUs even when the compare fails.  This can become a scalability
issue on a large machine (e.g. an 80-core box).

A plain pre-read of sem->count, so that the atomic operation is only
attempted when it can succeed, mitigates this.
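
For illustration only (not part of the patch), here is a minimal user-space
sketch of the idea using C11 atomics; fake_sem, UNLOCKED_VALUE and
WRITE_BIAS are made-up names standing in for the real rwsem definitions:

#include <stdatomic.h>
#include <stdbool.h>

#define UNLOCKED_VALUE  0L
#define WRITE_BIAS      (-65535L)       /* illustrative value only */

struct fake_sem {
        atomic_long count;
};

/* cmpxchg-only trylock: every attempt pulls the cache line into exclusive
 * state, bouncing it between CPUs even when the lock is clearly held. */
static bool trylock_cmpxchg_only(struct fake_sem *sem)
{
        long expected = UNLOCKED_VALUE;

        return atomic_compare_exchange_strong(&sem->count, &expected,
                                              WRITE_BIAS);
}

/* pre-read trylock: a plain load keeps the line in shared state, and the
 * expensive cmpxchg is only issued when the lock looks free. */
static bool trylock_with_preread(struct fake_sem *sem)
{
        long expected = UNLOCKED_VALUE;

        if (atomic_load(&sem->count) != UNLOCKED_VALUE)
                return false;

        return atomic_compare_exchange_strong(&sem->count, &expected,
                                              WRITE_BIAS);
}

The trade-off is one extra cheap load on the uncontended path in exchange
for skipping a failing cmpxchg (and the cache-line invalidation it causes)
on the contended path.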

Signed-off-by: Alex Shi <alex....@intel.com>
Signed-off-by: Tim Chen <tim.c.c...@linux.intel.com>
---
 include/asm-generic/rwsem.h |    8 ++++----
 lib/rwsem.c                 |   21 +++++++++++++--------
 2 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index bb1e2cd..052d973 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -70,11 +70,11 @@ static inline void __down_write(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-       long tmp;
+       if (unlikely(sem->count != RWSEM_UNLOCKED_VALUE))
+               return 0;
 
-       tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-                     RWSEM_ACTIVE_WRITE_BIAS);
-       return tmp == RWSEM_UNLOCKED_VALUE;
+       return cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+               RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_UNLOCKED_VALUE;
 }
 
 /*
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 19c5fa9..2072af5 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -75,7 +75,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
                         * will block as they will notice the queued writer.
                         */
                        wake_up_process(waiter->task);
-               goto out;
+               return sem;
        }
 
        /* Writers might steal the lock before we grant it to the next reader.
@@ -85,15 +85,21 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
        adjustment = 0;
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
- try_reader_grant:
-               oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
-               if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
-                       /* A writer stole the lock. Undo our reader grant. */
+               while (1) {
+                       /* Bail out if a writer already stole the lock. */
+                       if (sem->count < RWSEM_WAITING_BIAS)
+                               return sem;
+
+                       oldcount = rwsem_atomic_update(adjustment, sem)
+                                                               - adjustment;
+                       if (likely(oldcount >= RWSEM_WAITING_BIAS))
+                               break;
+
+                       /* A writer stole the lock. Undo our reader grant. */
                        if (rwsem_atomic_update(-adjustment, sem) &
                                                RWSEM_ACTIVE_MASK)
-                               goto out;
+                               return sem;
                        /* Last active locker left. Retry waking readers. */
-                       goto try_reader_grant;
                }
        }
 
@@ -136,7 +142,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
        sem->wait_list.next = next;
        next->prev = &sem->wait_list;
 
- out:
        return sem;
 }
 
-- 
1.7.4.4


