Linus,

Please pull the latest locking-urgent-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking-urgent-for-linus

   # HEAD: b483cf3bc249d7af706390efa63d6671e80d1c09 locking/lockdep: Disable cross-release features for now

Two lockdep fixes for bugs introduced by the cross-release dependency tracking 
feature - plus a commit that disables it because performance regressed in an 
abysmal fashion on some systems.
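
For illustration, here is a minimal, self-contained sketch of the "save the
stack trace at most once" idiom the stacktrace fix switches to. The struct
layout mirrors the kernel's stack_trace, but save_trace() and the callers
around it are simplified stand-ins, not the lockdep code itself:

   #include <stddef.h>

   /*
    * trace->entries == NULL doubles as a "not captured yet" flag, so
    * repeated calls share one saved trace instead of juggling a save
    * function pointer that gets NULL-ed after its first use.
    */
   struct stack_trace {
           unsigned int    nr_entries;
           unsigned int    max_entries;
           unsigned long   *entries;
           int             skip;
   };

   static unsigned long stack_buf[64];

   static int save_trace(struct stack_trace *trace)
   {
           /* Stand-in: the real helper walks and records the stack. */
           trace->entries     = stack_buf;
           trace->max_entries = 64;
           trace->nr_entries  = 1;
           return 1;
   }

   static int add_dependency(struct stack_trace *trace)
   {
           /* Capture the trace only if nobody has captured it yet. */
           if (!trace->entries && !save_trace(trace))
                   return 0;
           /* ... record the dependency using the shared trace ... */
           return 2;
   }

   static int add_dependencies(void)
   {
           struct stack_trace trace = {
                   .nr_entries  = 0,
                   .max_entries = 0,
                   .entries     = NULL,
                   .skip        = 0,
           };
           int i, ret = 1;

           /* save_trace() fires at most once across the whole loop. */
           for (i = 0; i < 3 && ret; i++)
                   ret = add_dependency(&trace);

           return ret;
   }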

 Thanks,

        Ingo

------------------>
Ingo Molnar (1):
      locking/lockdep: Disable cross-release features for now

Peter Zijlstra (2):
      locking/lockdep: Fix stacktrace mess
      locking/selftest: Avoid false BUG report


 kernel/locking/lockdep.c | 48 ++++++++++++++++++++----------------------------
 lib/Kconfig.debug        |  4 ++--
 lib/locking-selftest.c   |  2 ++
 3 files changed, 24 insertions(+), 30 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 44c8d0d17170..e36e652d996f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1873,10 +1873,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
               struct held_lock *next, int distance, struct stack_trace *trace,
               int (*save)(struct stack_trace *trace))
 {
+       struct lock_list *uninitialized_var(target_entry);
        struct lock_list *entry;
-       int ret;
        struct lock_list this;
-       struct lock_list *uninitialized_var(target_entry);
+       int ret;
 
        /*
         * Prove that the new <prev> -> <next> dependency would not
@@ -1890,8 +1890,17 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
        this.class = hlock_class(next);
        this.parent = NULL;
        ret = check_noncircular(&this, hlock_class(prev), &target_entry);
-       if (unlikely(!ret))
+       if (unlikely(!ret)) {
+               if (!trace->entries) {
+                       /*
+                        * If @save fails here, the printing might trigger
+                        * a WARN but because of the !nr_entries it should
+                        * not do bad things.
+                        */
+                       save(trace);
+               }
                return print_circular_bug(&this, target_entry, next, prev, trace);
+       }
        else if (unlikely(ret < 0))
                return print_bfs_bug(ret);
 
@@ -1938,7 +1947,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
                return print_bfs_bug(ret);
 
 
-       if (save && !save(trace))
+       if (!trace->entries && !save(trace))
                return 0;
 
        /*
@@ -1958,20 +1967,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
        if (!ret)
                return 0;
 
-       /*
-        * Debugging printouts:
-        */
-       if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
-               graph_unlock();
-               printk("\n new dependency: ");
-               print_lock_name(hlock_class(prev));
-               printk(KERN_CONT " => ");
-               print_lock_name(hlock_class(next));
-               printk(KERN_CONT "\n");
-               dump_stack();
-               if (!graph_lock())
-                       return 0;
-       }
        return 2;
 }
 
@@ -1986,8 +1981,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
        int depth = curr->lockdep_depth;
        struct held_lock *hlock;
-       struct stack_trace trace;
-       int (*save)(struct stack_trace *trace) = save_trace;
+       struct stack_trace trace = {
+               .nr_entries = 0,
+               .max_entries = 0,
+               .entries = NULL,
+               .skip = 0,
+       };
 
        /*
         * Debugging checks.
@@ -2018,18 +2017,11 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                         */
                        if (hlock->read != 2 && hlock->check) {
                                int ret = check_prev_add(curr, hlock, next,
-                                                        distance, &trace, save);
+                                                        distance, &trace, save_trace);
                                if (!ret)
                                        return 0;
 
                                /*
-                                * Stop saving stack_trace if save_trace() was
-                                * called at least once:
-                                */
-                               if (save && ret == 2)
-                                       save = NULL;
-
-                               /*
                                 * Stop after the first non-trylock entry,
                                 * as non-trylock entries have added their
                                 * own direct dependencies already, so this
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2689b7c50c52..e2705843c524 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1092,8 +1092,8 @@ config PROVE_LOCKING
        select DEBUG_MUTEXES
        select DEBUG_RT_MUTEXES if RT_MUTEXES
        select DEBUG_LOCK_ALLOC
-       select LOCKDEP_CROSSRELEASE
-       select LOCKDEP_COMPLETIONS
+       select LOCKDEP_CROSSRELEASE if BROKEN
+       select LOCKDEP_COMPLETIONS if BROKEN
        select TRACE_IRQFLAGS
        default n
        help
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index cd0b5c964bd0..2b827b8a1d8c 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2031,11 +2031,13 @@ void locking_selftest(void)
        print_testname("mixed read-lock/lock-write ABBA");
        pr_cont("             |");
        dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK);
+#ifdef CONFIG_PROVE_LOCKING
        /*
         * Lockdep does indeed fail here, but there's nothing we can do about
         * that now.  Don't kill lockdep for it.
         */
        unexpected_testcase_failures--;
+#endif
 
        pr_cont("             |");
        dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM);
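
As a footnote on the selftest change: the compensating decrement has to live
under the same CONFIG_PROVE_LOCKING condition that makes the testcase fail in
the first place, otherwise the counter underflows on kernels without
PROVE_LOCKING and the selftest prints a false BUG. A minimal sketch of that
symmetry, with an illustrative stub standing in for dotest():

   static unsigned int unexpected_testcase_failures;

   static void dotest_stub(int failed)
   {
           if (failed)
                   unexpected_testcase_failures++;
   }

   static void run_rlock_abba_case(void)
   {
   #ifdef CONFIG_PROVE_LOCKING
           dotest_stub(1);         /* only fails when PROVE_LOCKING=y */
           /* Known failure: compensate so it is not counted as a BUG. */
           unexpected_testcase_failures--;
   #else
           dotest_stub(0);         /* passes, so nothing to compensate */
   #endif
   }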
