[ Upstream commit 471ba0e686cb13752bc1ff3216c54b69a2d250ea ]

The QEMU PowerPC/PSeries machine model was not expecting a self-IPI,
and sending one may be a surprising thing to do in general, so have
irq_work_queue_on() do local queueing when the target is the current
CPU.
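
For illustration only (not part of the patch), a minimal kernel-context
sketch of a caller affected by this change; example_fn, example_work
and example_queue_on are hypothetical names:

#include <linux/irq_work.h>
#include <linux/smp.h>
#include <linux/printk.h>

/* Hypothetical callback; runs from hard interrupt context. */
static void example_fn(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work example_work;

static void example_queue_on(int target_cpu)
{
	init_irq_work(&example_work, example_fn);

	/*
	 * With this change, if target_cpu is the current CPU the work
	 * is queued locally (as irq_work_queue() would do) instead of
	 * raising a self-IPI via arch_send_call_function_single_ipi().
	 */
	irq_work_queue_on(&example_work, target_cpu);
}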

Suggested-by: Steven Rostedt <rost...@goodmis.org>
Reported-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
Signed-off-by: Nicholas Piggin <npig...@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Reviewed-by: Frederic Weisbecker <frede...@kernel.org>
Acked-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Cc: Cédric Le Goater <c...@kaod.org>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Paul Mackerras <pau...@samba.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Suraj Jitindar Singh <sjitindarsi...@gmail.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Link: https://lkml.kernel.org/r/20190409093403.20994-1-npig...@gmail.com
[ Simplified the preprocessor comments.
  Fixed unbalanced curly brackets pointed out by Thomas. ]
Signed-off-by: Ingo Molnar <mi...@kernel.org>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 kernel/irq_work.c | 75 ++++++++++++++++++++++++++---------------------
 1 file changed, 42 insertions(+), 33 deletions(-)

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 6b7cdf17ccf89..73288914ed5e7 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
         */
 }
 
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
 {
-       /* All work should have been flushed before going offline */
-       WARN_ON_ONCE(cpu_is_offline(cpu));
-
-#ifdef CONFIG_SMP
-
-       /* Arch remote IPI send/receive backend aren't NMI safe */
-       WARN_ON_ONCE(in_nmi());
+       /* If the work is "lazy", handle it from next tick if any */
+       if (work->flags & IRQ_WORK_LAZY) {
+               if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+                   tick_nohz_tick_stopped())
+                       arch_irq_work_raise();
+       } else {
+               if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+                       arch_irq_work_raise();
+       }
+}
 
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;
 
-       if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-               arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-       irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
+       /* Queue the entry and raise the IPI if needed. */
+       preempt_disable();
+       __irq_work_queue_local(work);
+       preempt_enable();
 
        return true;
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+#ifndef CONFIG_SMP
+       return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
+       /* All work should have been flushed before going offline */
+       WARN_ON_ONCE(cpu_is_offline(cpu));
+
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;
 
-       /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
-
-       /* If the work is "lazy", handle it from next tick if any */
-       if (work->flags & IRQ_WORK_LAZY) {
-               if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-                   tick_nohz_tick_stopped())
-                       arch_irq_work_raise();
+       if (cpu != smp_processor_id()) {
+               /* Arch remote IPI send/receive backend aren't NMI safe */
+               WARN_ON_ONCE(in_nmi());
+               if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+                       arch_send_call_function_single_ipi(cpu);
        } else {
-               if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-                       arch_irq_work_raise();
+               __irq_work_queue_local(work);
        }
-
        preempt_enable();
 
        return true;
+#endif /* CONFIG_SMP */
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
+
 
 bool irq_work_needs_cpu(void)
 {
-- 
2.20.1
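
A second hedged sketch, reusing the hypothetical example_fn from above:
the IRQ_WORK_LAZY path, now handled by the new __irq_work_queue_local()
helper, defers the callback to the next tick unless the tick is stopped:

/* Hypothetical lazy work item; kernel-context sketch only. */
static struct irq_work example_lazy_work = {
	.flags = IRQ_WORK_LAZY,
	.func  = example_fn,
};

static void example_queue_lazy(void)
{
	/*
	 * Lands on the per-CPU lazy_list; arch_irq_work_raise() is
	 * only called when the nohz tick is stopped, otherwise the
	 * work runs from the next timer tick.
	 */
	irq_work_queue(&example_lazy_work);
}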