There is no guarantee that the next entry on the ddestroy list stays on
the list when we drop the locks.

Completely rework this mess by moving processed entries on a temporary
list.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 77 ++++++++++++++------------------------------
 1 file changed, 25 insertions(+), 52 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 7c1eac4f4b4b..ad0afdd71f21 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -572,71 +572,47 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
  * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
  * encountered buffers.
  */
-
-static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
        struct ttm_bo_global *glob = bdev->glob;
-       struct ttm_buffer_object *entry = NULL;
-       int ret = 0;
-
-       spin_lock(&glob->lru_lock);
-       if (list_empty(&bdev->ddestroy))
-               goto out_unlock;
+       struct list_head removed;
+       bool empty;
 
-       entry = list_first_entry(&bdev->ddestroy,
-               struct ttm_buffer_object, ddestroy);
-       kref_get(&entry->list_kref);
+       INIT_LIST_HEAD(&removed);
 
-       for (;;) {
-               struct ttm_buffer_object *nentry = NULL;
-
-               if (entry->ddestroy.next != &bdev->ddestroy) {
-                       nentry = list_first_entry(&entry->ddestroy,
-                               struct ttm_buffer_object, ddestroy);
-                       kref_get(&nentry->list_kref);
-               }
-
-               ret = reservation_object_trylock(entry->resv) ? 0 : -EBUSY;
-               if (remove_all && ret) {
-                       spin_unlock(&glob->lru_lock);
-                       ret = reservation_object_lock(entry->resv, NULL);
-                       spin_lock(&glob->lru_lock);
-               }
+       spin_lock(&glob->lru_lock);
+       while (!list_empty(&bdev->ddestroy)) {
+               struct ttm_buffer_object *bo;
 
-               if (!ret)
-                       ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
-                                                 true);
-               else
-                       spin_unlock(&glob->lru_lock);
+               bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
+                                     ddestroy);
+               kref_get(&bo->list_kref);
+               list_move_tail(&bo->ddestroy, &removed);
+               spin_unlock(&glob->lru_lock);
 
-               kref_put(&entry->list_kref, ttm_bo_release_list);
-               entry = nentry;
+               reservation_object_lock(bo->resv, NULL);
 
-               if (ret || !entry)
-                       goto out;
+               spin_lock(&glob->lru_lock);
+               ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 
+               kref_put(&bo->list_kref, ttm_bo_release_list);
                spin_lock(&glob->lru_lock);
-               if (list_empty(&entry->ddestroy))
-                       break;
        }
-
-out_unlock:
+       list_splice_tail(&removed, &bdev->ddestroy);
+       empty = list_empty(&bdev->ddestroy);
        spin_unlock(&glob->lru_lock);
-out:
-       if (entry)
-               kref_put(&entry->list_kref, ttm_bo_release_list);
-       return ret;
+
+       return empty;
 }
 
 static void ttm_bo_delayed_workqueue(struct work_struct *work)
 {
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);
+       unsigned long delay = ((HZ / 100) < 1) ? 1 : HZ / 100;
 
-       if (ttm_bo_delayed_delete(bdev, false)) {
-               schedule_delayed_work(&bdev->wq,
-                                     ((HZ / 100) < 1) ? 1 : HZ / 100);
-       }
+       if (!ttm_bo_delayed_delete(bdev, false))
+               schedule_delayed_work(&bdev->wq, delay);
 }
 
 static void ttm_bo_release(struct kref *kref)
@@ -1573,13 +1549,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 
        cancel_delayed_work_sync(&bdev->wq);
 
-       while (ttm_bo_delayed_delete(bdev, true))
-               ;
-
-       spin_lock(&glob->lru_lock);
-       if (list_empty(&bdev->ddestroy))
+       if (ttm_bo_delayed_delete(bdev, true))
                TTM_DEBUG("Delayed destroy list was clean\n");
 
+       spin_lock(&glob->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                if (list_empty(&bdev->man[0].lru[0]))
                        TTM_DEBUG("Swap list %d was clean\n", i);
-- 
2.11.0

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to