Currently, lock_acquire() is called before acquiring the lock and lock_release() is called before releasing the lock. As a result, the execution time of lock_release() is added to the lock hold time, reducing locking throughput, especially for spinlocks and rwlocks, which tend to have much shorter lock hold times than sleeping locks.
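For illustration, the pre-patch spinlock fast path runs the lockdep annotation while the lock is still held; a condensed view of __raw_spin_unlock() (the comments are editorial, not from the source):

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);	/* still inside the hold window */
	do_raw_spin_unlock(lock);			/* lock actually dropped here */
	preempt_enable();
}

Every cycle spent in spin_release() extends the window during which waiting CPUs keep spinning on the lock.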
As lock_release() does not update any shared data that is protected by the lock, we don't actually need to call it before releasing the lock. So the lock_release() calls are now postponed until after the lock has been released for spinlocks and rwlocks.

Signed-off-by: Waiman Long <long...@redhat.com>
---
 include/linux/rwlock_api_smp.h   | 16 ++++++++--------
 include/linux/spinlock_api_smp.h |  8 ++++----
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 86ebb4bf9c6e..b026940a0962 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -215,63 +215,63 @@ static inline void __raw_write_lock(rwlock_t *lock)
 
 static inline void __raw_write_unlock(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_write_unlock(lock);
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	preempt_enable();
 }
 
 static inline void __raw_read_unlock(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_read_unlock(lock);
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	preempt_enable();
 }
 
 static inline void
 __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_read_unlock(lock);
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	local_irq_restore(flags);
 	preempt_enable();
 }
 
 static inline void __raw_read_unlock_irq(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_read_unlock(lock);
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	local_irq_enable();
 	preempt_enable();
 }
 
 static inline void __raw_read_unlock_bh(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_read_unlock(lock);
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 }
 
 static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
						 unsigned long flags)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_write_unlock(lock);
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	local_irq_restore(flags);
 	preempt_enable();
 }
 
 static inline void __raw_write_unlock_irq(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_write_unlock(lock);
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	local_irq_enable();
 	preempt_enable();
 }
 
 static inline void __raw_write_unlock_bh(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_write_unlock(lock);
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 }

diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42dfab89e740..fcb84df0678b 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -147,32 +147,32 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_spin_unlock(lock);
+	spin_release(&lock->dep_map, 1, _RET_IP_);
 	preempt_enable();
 }
 
 static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_spin_unlock(lock);
+	spin_release(&lock->dep_map, 1, _RET_IP_);
 	local_irq_restore(flags);
 	preempt_enable();
 }
 
 static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_spin_unlock(lock);
+	spin_release(&lock->dep_map, 1, _RET_IP_);
 	local_irq_enable();
 	preempt_enable();
 }
 
 static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
 	do_raw_spin_unlock(lock);
+	spin_release(&lock->dep_map, 1, _RET_IP_);
 	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 }
-- 
2.18.0
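For reference, the resulting fast path after this patch, condensed from the diff above (the comments are editorial, not from the source):

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	do_raw_spin_unlock(lock);			/* lock dropped first; waiters can proceed */
	spin_release(&lock->dep_map, 1, _RET_IP_);	/* lockdep bookkeeping off the hold path */
	preempt_enable();
}

Because spin_release() only updates lockdep's own bookkeeping and touches no data guarded by the lock, moving it past do_raw_spin_unlock() changes only when waiters can acquire the lock, not correctness.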