This patch removes redundant ww_mutex code in __mutex_lock_common(): the ww_mutex pointer is now computed once with container_of() at the top of the function and reused in both the fastpath and the slowpath, instead of being recomputed at each site that needs it.
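
For illustration only (not part of the patch), the change amounts to hoisting a
repeated container_of() computation into a single pointer that is set up once
and reused under the same use_ww_ctx predicate. A minimal standalone sketch of
that pattern follows; the struct names and the lock_outer() helper are made up
for this sketch and are not kernel code:

	#include <stddef.h>
	#include <stdio.h>

	/* Same idea as the kernel macro: recover the enclosing struct. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct inner {
		int locked;
	};

	struct outer {
		int ctx;
		struct inner base;	/* embedded, like ww_mutex::base */
	};

	/*
	 * Before the refactor, each branch that needed the enclosing
	 * struct re-derived it with container_of(). After the refactor
	 * (shown here), the pointer is computed once and reused.
	 */
	static int lock_outer(struct inner *lock, int use_ctx)
	{
		struct outer *o = NULL;

		if (use_ctx)
			o = container_of(lock, struct outer, base);

		lock->locked = 1;		/* "acquire" the inner lock */

		if (use_ctx)
			o->ctx = 1;		/* reuse o, no second container_of() */

		return 0;
	}

	int main(void)
	{
		struct outer o = { .ctx = 0, .base = { .locked = 0 } };

		lock_outer(&o.base, 1);
		printf("locked=%d ctx=%d\n", o.base.locked, o.ctx);
		return 0;
	}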
Signed-off-by: Waiman Long <waiman.l...@hpe.com>
---
 kernel/locking/mutex.c | 13 ++++---------
 1 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 0f8a3e6..4a0e16e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -559,10 +559,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned long flags;
+	struct ww_mutex *ww;
 	int ret;
 
 	if (use_ww_ctx) {
-		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+		ww = container_of(lock, struct ww_mutex, base);
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
 	}
@@ -573,12 +574,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	if (__mutex_trylock(lock) || mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx) {
-			struct ww_mutex *ww;
-			ww = container_of(lock, struct ww_mutex, base);
-
+		if (use_ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
-		}
 		preempt_enable();
 		return 0;
 	}
@@ -649,10 +646,8 @@ skip_wait:
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
 
-	if (use_ww_ctx) {
-		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+	if (use_ww_ctx)
 		ww_mutex_set_context_slowpath(ww, ww_ctx);
-	}
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	preempt_enable();
-- 
1.7.1