The lock usage bit initialization is consolidated into a single function,
mark_usage(). This is a trivial readability improvement; no functional change.

Signed-off-by: Yuyang Du <duyuy...@gmail.com>
---
 kernel/locking/lockdep.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9d2728c..79bc6cd 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3459,8 +3459,12 @@ void trace_softirqs_off(unsigned long ip)
                debug_atomic_inc(redundant_softirqs_off);
 }
 
-static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
+static int
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
 {
+       if (!check)
+               goto lock_used;
+
        /*
         * If non-trylock use in a hardirq or softirq context, then
         * mark the lock as used in these contexts:
@@ -3504,6 +3508,11 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
                }
        }
 
+lock_used:
+       /* mark it as used: */
+       if (!mark_lock(curr, hlock, LOCK_USED))
+               return 0;
+
        return 1;
 }
 
@@ -3545,8 +3554,8 @@ int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
        return 1;
 }
 
-static inline int mark_irqflags(struct task_struct *curr,
-               struct held_lock *hlock)
+static inline int
+mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
 {
        return 1;
 }
@@ -3832,11 +3841,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 #endif
        hlock->pin_count = pin_count;
 
-       if (check && !mark_irqflags(curr, hlock))
-               return 0;
-
-       /* mark it as used: */
-       if (!mark_lock(curr, hlock, LOCK_USED))
+       /* Initialize the lock usage bit */
+       if (!mark_usage(curr, hlock, check))
                return 0;
 
        /*
-- 
1.8.3.1

Reply via email to