With the new lockdep cross-release feature, CPU hotplug reports the
following deadlock:

  takedown_cpu()
    irq_lock_sparse()
    wait_for_completion(&st->done)

                                cpuhp_thread_fun
                                  cpuhp_up_callback
                                    cpuhp_invoke_callback
                                      irq_affinity_online_cpu
                                        irq_lock_sparse()
                                        irq_unlock_sparse()
                                  complete(&st->done)

However, CPU-up and CPU-down are globally serialized against each other
(both run with cpu_add_remove_lock held), so the two sides of the chain
above can never execute concurrently and the reported deadlock cannot in
fact happen.
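For reference, a minimal sketch of that serialization; it mirrors the
actual kernel/cpu.c pattern (see the "Requires cpu_add_remove_lock to be
held" comment in the diff below), with error handling omitted, so the
bodies are illustrative rather than the exact upstream code:

  static DEFINE_MUTEX(cpu_add_remove_lock);

  /* Both hotplug directions funnel through this global mutex. */
  void cpu_maps_update_begin(void)
  {
          mutex_lock(&cpu_add_remove_lock);
  }

  void cpu_maps_update_done(void)
  {
          mutex_unlock(&cpu_add_remove_lock);
  }

  static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
  {
          int err;

          cpu_maps_update_begin();  /* excludes any concurrent CPU-up */
          err = _cpu_down(cpu, 0, target);
          cpu_maps_update_done();
          return err;
  }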

Annotate this by splitting the st->done dependency chain for up and
down.
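
The trick works because, with cross-release, the lockdep class of a
completion is keyed by its init_completion() call site. A rough sketch
of that annotation, approximating the cross-release completion code of
the time (the exact macro body may differ):

  /*
   * Each expansion of the macro provides its own static
   * lock_class_key, so completions (re)initialized from different
   * call sites land in different lockdep classes.
   */
  #define init_completion(x)                                        \
  do {                                                              \
          static struct lock_class_key __key;                       \
          lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \
                                     "(complete)" #x, &__key, 0);   \
          __init_completion(x);                                     \
  } while (0)

This is also why lockdep_reinit_st_done() below must itself be a macro:
expanding it once in _cpu_down() and once in _cpu_up() yields two
distinct keys, and thus two separate st->done dependency chains.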

Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Byungchul Park <max.byungchul.p...@gmail.com>
Cc: Sebastian Andrzej Siewior <bige...@linutronix.de>
Reported-by: Mike Galbraith <efa...@gmx.de>
Tested-by: Mike Galbraith <efa...@gmx.de>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 kernel/cpu.c | 35 +++++++++++++++++++++++++++++------
 1 file changed, 29 insertions(+), 6 deletions(-)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index acf5308fad51..0f44ddf64f24 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -533,6 +533,28 @@ void __init cpuhp_threads_init(void)
        kthread_unpark(this_cpu_read(cpuhp_state.thread));
 }
 
+/*
+ * _cpu_down() and _cpu_up() have different lock ordering wrt st->done, but
+ * because these two functions are globally serialized and st->done is private
+ * to them, we can simply re-init st->done for each of them to separate the
+ * lock chains.
+ *
+ * Must be macro to ensure we have two different call sites.
+ */
+#ifdef CONFIG_LOCKDEP
+#define lockdep_reinit_st_done()                               \
+do {                                                           \
+       int __cpu;                                              \
+       for_each_possible_cpu(__cpu) {                          \
+               struct cpuhp_cpu_state *st =                    \
+                       per_cpu_ptr(&cpuhp_state, __cpu);       \
+               init_completion(&st->done);                     \
+       }                                                       \
+} while (0)
+#else
+#define lockdep_reinit_st_done()
+#endif
+
 #ifdef CONFIG_HOTPLUG_CPU
 /**
  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
@@ -676,12 +698,6 @@ void cpuhp_report_idle_dead(void)
                                 cpuhp_complete_idle_dead, st, 0);
 }
 
-#else
-#define takedown_cpu           NULL
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-
 /* Requires cpu_add_remove_lock to be held */
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                           enum cpuhp_state target)
@@ -697,6 +713,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 
        cpus_write_lock();
 
+       lockdep_reinit_st_done();
+
        cpuhp_tasks_frozen = tasks_frozen;
 
        prev_state = st->state;
@@ -759,6 +777,9 @@ int cpu_down(unsigned int cpu)
        return do_cpu_down(cpu, CPUHP_OFFLINE);
 }
 EXPORT_SYMBOL(cpu_down);
+
+#else
+#define takedown_cpu           NULL
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /**
@@ -806,6 +827,8 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 
        cpus_write_lock();
 
+       lockdep_reinit_st_done();
+
        if (!cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;

