Commit a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer
in use") changed the behavior of lockdep_free_key_range() from
unconditionally zapping lock classes into only zapping lock classes if
debug_locks == true. Not zapping lock classes if debug_locks == false
leaves dangling pointers in several lockdep data structures, e.g.
lock_class::name in the all_lock_classes list. The shell command
"cat /proc/lockdep" makes the kernel iterate over the all_lock_classes
list, hence the "unable to handle kernel paging request" crash that
Shenghui encountered by running that command. Since the new behavior can
cause the kernel to crash, restore the pre-v5.1 behavior.
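
For context: the zapping is skipped because graph_lock() refuses to take
the graph lock once debug_locks has been cleared. A simplified sketch of
that behavior (paraphrased from kernel/locking/lockdep.c around v5.1, not
the verbatim source):

static inline int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * If an earlier lockdep splat already cleared debug_locks,
	 * refuse the lock so callers skip further graph modifications.
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;	/* callers typically bail out here */
	}
	/* Prevent recursion into lockdep from the code it protects. */
	current->lockdep_recursion++;
	return 1;
}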

This patch prevents the following crash, triggered by running
cat /proc/lockdep with debug_locks == false:

BUG: unable to handle kernel paging request at fffffbfff40ca448
PGD 13bfde067 P4D 13bfde067 PUD 13bf7a067 PMD 1167d3067 PTE 0
Oops: 0000 [#1] PREEMPT SMP KASAN
CPU: 4 PID: 4529 Comm: cat Tainted: G    B   W  O      5.1.0-rc1-dbg+ #4
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1 04/01/2014
RIP: 0010:__asan_load1+0x28/0x50
Call Trace:
 string+0xac/0x180
 vsnprintf+0x23e/0x820
 seq_vprintf+0x82/0xc0
 seq_printf+0x92/0xb0
 print_name+0x34/0xb0
 l_show+0x184/0x200
 seq_read+0x59e/0x6c0
 proc_reg_read+0x11f/0x170
 __vfs_read+0x4d/0x90
 vfs_read+0xc5/0x1f0
 ksys_read+0xab/0x130
 __x64_sys_read+0x43/0x50
 do_syscall_64+0x71/0x210
 entry_SYSCALL_64_after_hwframe+0x49/0xbe

Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Will Deacon <will.dea...@arm.com>
Cc: Waiman Long <long...@redhat.com>
Cc: shenghui <shh...@foxmail.com>
Reported-by: shenghui <shh...@foxmail.com>
Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use") # v5.1-rc1
Signed-off-by: Bart Van Assche <bvanass...@acm.org>
---
Changes compared to v1:
- Restored lockdep recursion protection.
- Made patch description more detailed.
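
For reviewers: the locking pattern introduced below is
graph_lock()/graph_unlock() open-coded without the debug_locks bail-out,
so the zapping runs even after a lockdep splat has cleared debug_locks.
Schematically (a sketch of the pattern, not standalone compilable code):

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	current->lockdep_recursion = 1;	/* suppress recursive lockdep calls */

	/* ... zap lock classes and queue the RCU callback ... */

	current->lockdep_recursion = 0;
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);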

 kernel/locking/lockdep.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 34cdcbedda49..e16766ff184b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4689,8 +4689,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
                return;
 
        raw_local_irq_save(flags);
-       if (!graph_lock())
-               goto out_irq;
+       arch_spin_lock(&lockdep_lock);
+       current->lockdep_recursion = 1;
 
        /* closed head */
        pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4702,8 +4702,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
         */
        call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-       graph_unlock();
-out_irq:
+       current->lockdep_recursion = 0;
+       arch_spin_unlock(&lockdep_lock);
        raw_local_irq_restore(flags);
 }
 
@@ -4744,21 +4744,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
        struct pending_free *pf;
        unsigned long flags;
-       int locked;
 
        init_data_structures_once();
 
        raw_local_irq_save(flags);
-       locked = graph_lock();
-       if (!locked)
-               goto out_irq;
-
+       arch_spin_lock(&lockdep_lock);
+       current->lockdep_recursion = 1;
        pf = get_pending_free();
        __lockdep_free_key_range(pf, start, size);
        call_rcu_zapped(pf);
-
-       graph_unlock();
-out_irq:
+       current->lockdep_recursion = 0;
+       arch_spin_unlock(&lockdep_lock);
        raw_local_irq_restore(flags);
 
        /*
@@ -4911,9 +4907,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
                return;
 
        raw_local_irq_save(flags);
-       if (!graph_lock())
-               goto out_irq;
-
+       arch_spin_lock(&lockdep_lock);
+       current->lockdep_recursion = 1;
        pf = get_pending_free();
        hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
                if (k == key) {
@@ -4925,8 +4920,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
        WARN_ON_ONCE(!found);
        __lockdep_free_key_range(pf, key, 1);
        call_rcu_zapped(pf);
-       graph_unlock();
-out_irq:
+       current->lockdep_recursion = 0;
+       arch_spin_unlock(&lockdep_lock);
        raw_local_irq_restore(flags);
 
        /* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
-- 
2.21.0.196.g041f5ea1cf98