The bitmaps keep track of which lock classes are irqsafe. Update the
bitmaps when a new irqsafe usage is marked and when an irqsafe lock
class is zapped.
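
For reference, below is a minimal userspace sketch of how a USED_IN usage
bit selects one of the four bitmaps. It assumes the lock_usage_bit layout
generated from lockdep_states.h (bit 0 = READ, bit 1 = DIR, bits >= 2
select the irq state) and is illustrative only, not part of the patch:

#include <stdio.h>

#define LOCK_USAGE_READ_MASK 1

enum lock_usage_bit {
        LOCK_USED_IN_HARDIRQ = 0,
        LOCK_USED_IN_HARDIRQ_READ,
        LOCK_ENABLED_HARDIRQ,
        LOCK_ENABLED_HARDIRQ_READ,
        LOCK_USED_IN_SOFTIRQ,
        LOCK_USED_IN_SOFTIRQ_READ,
};

static const char *const bitmap_names[4] = {
        "lock_classes_hardirq_safe",
        "lock_classes_hardirq_safe_read",
        "lock_classes_softirq_safe",
        "lock_classes_softirq_safe_read",
};

int main(void)
{
        int bits[] = { LOCK_USED_IN_HARDIRQ, LOCK_USED_IN_HARDIRQ_READ,
                       LOCK_USED_IN_SOFTIRQ, LOCK_USED_IN_SOFTIRQ_READ };
        unsigned int i;

        for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
                int new_bit = bits[i];
                int read = new_bit & LOCK_USAGE_READ_MASK;
                /* state index (hardirq=0, softirq=1) times 2, plus 1 for _READ */
                int index = ((new_bit >> 2) << 1) + read;

                printf("usage bit %d -> %s\n", new_bit, bitmap_names[index]);
        }
        return 0;
}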

Signed-off-by: Yuyang Du <[email protected]>
---
 kernel/locking/lockdep.c | 39 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 38 insertions(+), 1 deletion(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 291cc9c..1b78216 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3107,6 +3107,7 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
        int excl_bit = exclusive_bit(new_bit);
        int read = new_bit & LOCK_USAGE_READ_MASK;
        int dir = new_bit & LOCK_USAGE_DIR_MASK;
+       struct lock_class *lock = hlock_class(this);
 
        /*
         * mark USED_IN has to look forwards -- to ensure no dependency
@@ -3119,6 +3120,25 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
                check_usage_backwards : check_usage_forwards;
 
        /*
+        * The usage bit has already been set in the usage mask, so
+        * update the bitmaps before validation.
+        */
+       if (!dir) {
+               unsigned long *bitmaps[4] = {
+                       lock_classes_hardirq_safe,
+                       lock_classes_hardirq_safe_read,
+                       lock_classes_softirq_safe,
+                       lock_classes_softirq_safe_read
+               };
+               int index = (new_bit >> 2) << 1;
+
+               if (read)
+                       index += 1;
+
+               __set_bit(lock - lock_classes, bitmaps[index]);
+       }
+
+       /*
         * Validate that this particular lock does not have conflicting
         * usage states.
         */
@@ -3146,7 +3166,7 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
                        return 0;
        }
 
-       if (state_verbose(new_bit, hlock_class(this)))
+       if (state_verbose(new_bit, lock))
                return 2;
 
        return 1;
@@ -4650,6 +4670,22 @@ static void remove_class_from_lock_chains(struct pending_free *pf,
        }
 }
 
+static inline void remove_irqsafe_lock_bitmap(struct lock_class *class)
+{
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+       unsigned long usage = class->usage_mask;
+
+       if (usage & LOCKF_USED_IN_HARDIRQ)
+               __clear_bit(class - lock_classes, lock_classes_hardirq_safe);
+       if (usage & LOCKF_USED_IN_HARDIRQ_READ)
+               __clear_bit(class - lock_classes, lock_classes_hardirq_safe_read);
+       if (usage & LOCKF_USED_IN_SOFTIRQ)
+               __clear_bit(class - lock_classes, lock_classes_softirq_safe);
+       if (usage & LOCKF_USED_IN_SOFTIRQ_READ)
+               __clear_bit(class - lock_classes, lock_classes_softirq_safe_read);
+#endif
+}
+
 /*
  * Remove all references to a lock class. The caller must hold the graph lock.
  */
@@ -4680,6 +4716,7 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
                WRITE_ONCE(class->name, NULL);
                nr_lock_classes--;
                __clear_bit(class - lock_classes, lock_classes_in_use);
+               remove_irqsafe_lock_bitmap(class);
        } else {
                WARN_ONCE(true, "%s() failed for class %s\n", __func__,
                          class->name);
-- 
1.8.3.1
