After the BFS search, we check whether an error occurred. These checks are
mutually exclusive, so we can use "else if" instead of "if", which results in
slightly tighter code. While at it, annotate the error branches with
unlikely(), since a BFS error is not expected in normal operation.

No functional change.
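
For illustration only, a minimal sketch of the resulting pattern;
bfs_search(), handle_bfs_error() and do_more_work() are hypothetical
placeholders, not the lockdep functions touched by this patch:

#include <linux/compiler.h>	/* for unlikely() */

/* Hypothetical helpers, stand-ins for the real lockdep routines: */
extern int bfs_search(void);		/* < 0: error, 1: match, 0: no match */
extern void handle_bfs_error(int err);
extern int do_more_work(void);

static int check_result_sketch(void)
{
	int ret = bfs_search();

	if (unlikely(ret < 0)) {	/* BFS error: rare, so annotated */
		handle_bfs_error(ret);
		return 0;
	}
	else if (ret == 1)		/* exclusive with the error case */
		return ret;

	/* ret == 0: neither error nor match, keep going */
	return do_more_work();
}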

Signed-off-by: Yuyang Du <duyuy...@gmail.com>
---
 kernel/locking/lockdep.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1d38bf6..3efc00e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1950,21 +1950,21 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
        this.class = hlock_class(prev);
        ret = find_usage_backwards(&this, bit_backwards, &target_entry);
-       if (ret < 0) {
+       if (unlikely(ret < 0)) {
                print_bfs_bug(ret);
                return 0;
        }
-       if (ret == 1)
+       else if (ret == 1)
                return ret;
 
        that.parent = NULL;
        that.class = hlock_class(next);
        ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
-       if (ret < 0) {
+       if (unlikely(ret < 0)) {
                print_bfs_bug(ret);
                return 0;
        }
-       if (ret == 1)
+       else if (ret == 1)
                return ret;
 
        print_bad_irq_dependency(curr, &this, &that,
@@ -2282,7 +2282,7 @@ static void print_deadlock_scenario(struct held_lock *nxt,
                debug_atomic_inc(nr_redundant);
                return 2;
        }
-       if (ret < 0) {
+       else if (unlikely(ret < 0)) {
                print_bfs_bug(ret);
                return 0;
        }
@@ -2967,11 +2967,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
        root.parent = NULL;
        root.class = hlock_class(this);
        ret = find_usage_forwards(&root, bit, &target_entry);
-       if (ret < 0) {
+       if (unlikely(ret < 0)) {
                print_bfs_bug(ret);
                return 0;
        }
-       if (ret == 1)
+       else if (ret == 1)
                return ret;
 
        print_irq_inversion_bug(curr, &root, target_entry,
@@ -2994,11 +2994,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
        root.parent = NULL;
        root.class = hlock_class(this);
        ret = find_usage_backwards(&root, bit, &target_entry);
-       if (ret < 0) {
+       if (unlikely(ret < 0)) {
                print_bfs_bug(ret);
                return 0;
        }
-       if (ret == 1)
+       else if (ret == 1)
                return ret;
 
        print_irq_inversion_bug(curr, &root, target_entry,
-- 
1.8.3.1
