From: Sebastian Andrzej Siewior <bige...@linutronix.de>

Joe Korty reported that __irq_set_affinity_locked() schedules a work
item while holding a raw spinlock, which results in a might_sleep()
warning.
This patch moves the invocation into process context so that only
wake_up_process() is called while holding the lock.
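
On PREEMPT_RT the workqueue internals take a spinlock_t, which is a
sleeping lock there, so schedule_work() must not be called under a
raw_spinlock_t such as desc->lock. wake_up_process() never sleeps, so
handing the notifier over to a helper thread is safe. In condensed
form (a simplified sketch of the scheme, not the exact code from the
diff below):

	/* caller holds the raw desc->lock */
	raw_spin_lock(&affinity_list_lock);
	if (list_empty(&notify->list))
		list_add_tail(&notify->list, &affinity_list);
	raw_spin_unlock(&affinity_list_lock);
	wake_up_process(set_affinity_helper);	/* does not sleep */

	/* the helper thread later drains affinity_list and invokes the
	 * notifier callbacks in plain process context */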

Cc: stable...@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bige...@linutronix.de>
---
 include/linux/interrupt.h |    1 +
 kernel/irq/manage.c       |   79 +++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 77 insertions(+), 3 deletions(-)

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f70a65b..42f79fb 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -271,6 +271,7 @@ struct irq_affinity_notify {
        unsigned int irq;
        struct kref kref;
        struct work_struct work;
+       struct list_head list;
        void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
        void (*release)(struct kref *ref);
 };
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2e8ca37..88a4ff5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -141,6 +141,62 @@ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void _irq_affinity_notify(struct irq_affinity_notify *notify);
+static struct task_struct *set_affinity_helper;
+static LIST_HEAD(affinity_list);
+static DEFINE_RAW_SPINLOCK(affinity_list_lock);
+
+static int set_affinity_thread(void *unused)
+{
+       while (1) {
+               struct irq_affinity_notify *notify;
+               int empty;
+
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               raw_spin_lock_irq(&affinity_list_lock);
+               empty = list_empty(&affinity_list);
+               raw_spin_unlock_irq(&affinity_list_lock);
+
+               if (empty)
+                       schedule();
+               if (kthread_should_stop())
+                       break;
+               set_current_state(TASK_RUNNING);
+try_next:
+               notify = NULL;
+
+               raw_spin_lock_irq(&affinity_list_lock);
+               if (!list_empty(&affinity_list)) {
+                       notify = list_first_entry(&affinity_list,
+                                       struct irq_affinity_notify, list);
+                       list_del_init(&notify->list);
+               }
+               raw_spin_unlock_irq(&affinity_list_lock);
+
+               if (!notify)
+                       continue;
+               _irq_affinity_notify(notify);
+               goto try_next;
+       }
+       return 0;
+}
+
+static void init_helper_thread(void)
+{
+       if (set_affinity_helper)
+               return;
+       set_affinity_helper = kthread_run(set_affinity_thread, NULL,
+                       "affinity-cb");
+       WARN_ON(IS_ERR(set_affinity_helper));
+}
+#else
+
+static inline void init_helper_thread(void) { }
+
+#endif
+
 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
        struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -166,7 +222,17 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 
        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+               raw_spin_lock(&affinity_list_lock);
+               if (list_empty(&desc->affinity_notify->list))
+			list_add_tail(&desc->affinity_notify->list,
+					&affinity_list);
+               raw_spin_unlock(&affinity_list_lock);
+               wake_up_process(set_affinity_helper);
+#else
                schedule_work(&desc->affinity_notify->work);
+#endif
        }
        irqd_set(data, IRQD_AFFINITY_SET);
 
@@ -207,10 +273,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 
-static void irq_affinity_notify(struct work_struct *work)
+static void _irq_affinity_notify(struct irq_affinity_notify *notify)
 {
-       struct irq_affinity_notify *notify =
-               container_of(work, struct irq_affinity_notify, work);
        struct irq_desc *desc = irq_to_desc(notify->irq);
        cpumask_var_t cpumask;
        unsigned long flags;
@@ -232,6 +296,13 @@ out:
        kref_put(&notify->kref, notify->release);
 }
 
+static void irq_affinity_notify(struct work_struct *work)
+{
+       struct irq_affinity_notify *notify =
+               container_of(work, struct irq_affinity_notify, work);
+       _irq_affinity_notify(notify);
+}
+
 /**
  *     irq_set_affinity_notifier - control notification of IRQ affinity changes
  *     @irq:           Interrupt for which to enable/disable notification
@@ -261,6 +332,8 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
                notify->irq = irq;
                kref_init(&notify->kref);
                INIT_WORK(&notify->work, irq_affinity_notify);
+               INIT_LIST_HEAD(&notify->list);
+               init_helper_thread();
        }
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-- 
1.7.10.4

