The rtmutex code may sleep while holding a spinlock.

The function call path (from bottom to top) in Linux-4.16 is:

[FUNC] schedule
kernel/locking/rtmutex.c, 1223: schedule in rt_mutex_handle_deadlock
kernel/locking/rtmutex.c, 1273: rt_mutex_handle_deadlock in rt_mutex_slowlock
kernel/locking/rtmutex.c, 1249: _raw_spin_lock_irqsave in rt_mutex_slowlock

To fix this bug, the spinlock is released before schedule() is called and
re-acquired afterwards.
This bug was found by my static analysis tool (DSAC).
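
For illustration only, here is a minimal user-space analogue of the locking
rule enforced by this patch (using POSIX spinlocks instead of the kernel's
raw_spinlock_t; all names below are made up for the sketch and are not part
of the patch):

/*
 * Sketch: never block while holding a spinlock.  Drop the lock, block,
 * then take the lock again before touching the protected state, just as
 * the hunk below does around schedule().
 */
#include <pthread.h>
#include <unistd.h>

static pthread_spinlock_t lock;
static int shared_state;

/* Caller holds 'lock' on entry and on return, but not while sleeping. */
static void wait_for_event_locked(void)
{
	pthread_spin_unlock(&lock);	/* like raw_spin_unlock_irq() */
	sleep(1);			/* stands in for schedule() */
	pthread_spin_lock(&lock);	/* like raw_spin_lock_irq() */
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);

	pthread_spin_lock(&lock);
	shared_state = 1;
	wait_for_event_locked();	/* sleeps without the spinlock held */
	shared_state = 2;
	pthread_spin_unlock(&lock);

	pthread_spin_destroy(&lock);
	return 0;
}

(Build with "gcc -pthread" if you want to try it.)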

Signed-off-by: Jia-Ju Bai <baijiaju1...@gmail.com>
---
 kernel/locking/rtmutex.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2823d4163a37..af03e162f812 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1205,7 +1205,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 }
 
 static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
-                                    struct rt_mutex_waiter *w)
+                                    struct rt_mutex_waiter *w, struct rt_mutex *lock)
 {
        /*
         * If the result is not -EDEADLOCK or the caller requested
@@ -1219,8 +1219,10 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
         */
        rt_mutex_print_deadlock(w);
        while (1) {
+               raw_spin_unlock_irq(&lock->wait_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
+               raw_spin_lock_irq(&lock->wait_lock);
        }
 }
 
@@ -1269,7 +1271,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
        if (unlikely(ret)) {
                __set_current_state(TASK_RUNNING);
                remove_waiter(lock, &waiter);
-               rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+               rt_mutex_handle_deadlock(ret, chwalk, &waiter, lock);
        }
 
        /*
-- 
2.17.0
