From: Jerome Glisse <jglisse@redhat.com>

Lock/unlock mutex in proper order to avoid deadlock in case
of GPU reset triggered from VM code path.

Cc: stable@vger.kernel.org [3.5]
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/radeon/radeon_gart.c |   11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index b372005..7eabb59 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -508,14 +508,19 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
        while (vm->fence) {
                int r;
                r = radeon_fence_wait(vm->fence, false);
-               if (r)
+               if (r) {
                        DRM_ERROR("error while waiting for fence: %d\n", r);
+               }
                if (r == -EDEADLK) {
+                       /* release mutex and lock in right order */
                        mutex_unlock(&rdev->vm_manager.lock);
+                       mutex_unlock(&vm->mutex);
                        r = radeon_gpu_reset(rdev);
                        mutex_lock(&rdev->vm_manager.lock);
-                       if (!r)
+                       mutex_lock(&vm->mutex);
+                       if (!r) {
                                continue;
+                       }
                }
                break;
        }
@@ -551,7 +556,9 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
        mutex_lock(&rdev->vm_manager.lock);
        /* unbind all active vm */
        list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+               mutex_lock(&vm->mutex);
                radeon_vm_unbind_locked(rdev, vm);
+               mutex_unlock(&vm->mutex);
        }
        rdev->vm_manager.funcs->fini(rdev);
        mutex_unlock(&rdev->vm_manager.lock);
-- 
1.7.10.4

Reply via email to