From: Oleg Nesterov <o...@redhat.com>

Based on Peter Zijlstra's earlier patch.

Change percpu_down_read() to use __down_read(), this way we can
do rwsem_acquire_read() unconditionally at the start to make this
code more symmetric and clean.

Originally-From: Peter Zijlstra <pet...@infradead.org>
Signed-off-by: Oleg Nesterov <o...@redhat.com>
Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
---
 kernel/locking/percpu-rwsem.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 25b73448929c..61b678d784ce 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -69,14 +69,14 @@ static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
 void percpu_down_read(struct percpu_rw_semaphore *brw)
 {
        might_sleep();
-       if (likely(update_fast_ctr(brw, +1))) {
-               rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+       rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+       if (likely(update_fast_ctr(brw, +1)))
                return;
-       }
 
-       down_read(&brw->rw_sem);
+       /* Avoid rwsem_acquire_read() and rwsem_release() */
+       __down_read(&brw->rw_sem);
        atomic_inc(&brw->slow_read_ctr);
-       /* avoid up_read()->rwsem_release() */
        __up_read(&brw->rw_sem);
 }
 
-- 
1.8.1.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to