Signed-off-by: Maarten Lankhorst <maarten.lankhorst at canonical.com>
---
 drivers/gpu/drm/ttm/ttm_execbuf_util.c  | 141 +++++++++++---------------------
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c |   1 -
 include/drm/ttm/ttm_execbuf_util.h      |   3 -
 3 files changed, 50 insertions(+), 95 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 9198755df086..ec36206da95a 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,20 +32,12 @@
 #include <linux/sched.h>
 #include <linux/module.h>

-static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
+                                             struct ttm_validate_buffer *entry)
 {
-       struct ttm_validate_buffer *entry;
-
-       list_for_each_entry(entry, list, head) {
+       list_for_each_entry_continue_reverse(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
-               if (!entry->reserved)
-                       continue;

-               entry->reserved = false;
-               if (entry->removed) {
-                       ttm_bo_add_to_lru(bo);
-                       entry->removed = false;
-               }
                ww_mutex_unlock(&bo->resv->lock);
        }
 }
@@ -56,27 +48,9 @@ static void ttm_eu_del_from_lru_locked(struct list_head *list)

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
-               if (!entry->reserved)
-                       continue;
-
-               if (!entry->removed) {
-                       entry->put_count = ttm_bo_del_from_lru(bo);
-                       entry->removed = true;
-               }
-       }
-}
-
-static void ttm_eu_list_ref_sub(struct list_head *list)
-{
-       struct ttm_validate_buffer *entry;
-
-       list_for_each_entry(entry, list, head) {
-               struct ttm_buffer_object *bo = entry->bo;
+               unsigned put_count = ttm_bo_del_from_lru(bo);

-               if (entry->put_count) {
-                       ttm_bo_list_ref_sub(bo, entry->put_count, true);
-                       entry->put_count = 0;
-               }
+               ttm_bo_list_ref_sub(bo, put_count, true);
        }
 }

@@ -91,11 +65,18 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
+
        spin_lock(&glob->lru_lock);
-       ttm_eu_backoff_reservation_locked(list);
+       list_for_each_entry(entry, list, head) {
+               struct ttm_buffer_object *bo = entry->bo;
+
+               ttm_bo_add_to_lru(bo);
+               ww_mutex_unlock(&bo->resv->lock);
+       }
+       spin_unlock(&glob->lru_lock);
+
        if (ticket)
                ww_acquire_fini(ticket);
-       spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);

@@ -121,64 +102,56 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
        if (list_empty(list))
                return 0;

-       list_for_each_entry(entry, list, head) {
-               entry->reserved = false;
-               entry->put_count = 0;
-               entry->removed = false;
-       }
-
        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

        if (ticket)
                ww_acquire_init(ticket, &reservation_ww_class);
-retry:
+
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

-               /* already slowpath reserved? */
-               if (entry->reserved)
-                       continue;
-
                ret = ttm_bo_reserve_nolru(bo, intr, (ticket == NULL), true,
                                           ticket);

-               if (ret == -EDEADLK) {
-                       /* uh oh, we lost out, drop every reservation and try
-                        * to only reserve this buffer, then start over if
-                        * this succeeds.
-                        */
-                       BUG_ON(ticket == NULL);
-                       spin_lock(&glob->lru_lock);
-                       ttm_eu_backoff_reservation_locked(list);
-                       spin_unlock(&glob->lru_lock);
-                       ttm_eu_list_ref_sub(list);
-
-                       if (intr) {
-                               ret = 
ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-                                                                      ticket);
-                               if (unlikely(ret != 0)) {
-                                       if (ret == -EINTR)
-                                               ret = -ERESTARTSYS;
-                                       goto err_fini;
-                               }
-                       } else
-                               ww_mutex_lock_slow(&bo->resv->lock, ticket);
-
-                       entry->reserved = true;
-                       if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-                               ret = -EBUSY;
-                               goto err;
-                       }
-                       goto retry;
-               } else if (ret)
-                       goto err;
+               if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+                       ww_mutex_unlock(&bo->resv->lock);

-               entry->reserved = true;
-               if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                        ret = -EBUSY;
-                       goto err;
                }
+
+               if (!ret)
+                       continue;
+
+               /* uh oh, we lost out, drop every reservation and try
+                * to only reserve this buffer, then start over if
+                * this succeeds.
+                */
+               ttm_eu_backoff_reservation_reverse(list, entry);
+
+               if (ret == -EDEADLK && intr) {
+                       ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+                                                              ticket);
+               } else if (ret == -EDEADLK) {
+                       ww_mutex_lock_slow(&bo->resv->lock, ticket);
+                       ret = 0;
+               }
+
+               if (unlikely(ret != 0)) {
+                       if (ret == -EINTR)
+                               ret = -ERESTARTSYS;
+                       if (ticket) {
+                               ww_acquire_done(ticket);
+                               ww_acquire_fini(ticket);
+                       }
+                       return ret;
+               }
+
+               /* move this item to the front of the list,
+                * forces correct iteration of the loop without keeping track
+                */
+               list_del(&entry->head);
+               list_add(&entry->head, list);
        }

        if (ticket)
@@ -186,20 +159,7 @@ retry:
        spin_lock(&glob->lru_lock);
        ttm_eu_del_from_lru_locked(list);
        spin_unlock(&glob->lru_lock);
-       ttm_eu_list_ref_sub(list);
        return 0;
-
-err:
-       spin_lock(&glob->lru_lock);
-       ttm_eu_backoff_reservation_locked(list);
-       spin_unlock(&glob->lru_lock);
-       ttm_eu_list_ref_sub(list);
-err_fini:
-       if (ticket) {
-               ww_acquire_done(ticket);
-               ww_acquire_fini(ticket);
-       }
-       return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);

@@ -229,7 +189,6 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                bo->sync_obj = driver->sync_obj_ref(sync_obj);
                ttm_bo_add_to_lru(bo);
                ww_mutex_unlock(&bo->resv->lock);
-               entry->reserved = false;
        }
        spin_unlock(&bdev->fence_lock);
        spin_unlock(&glob->lru_lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7ca48db74a09..8d87631b3eb8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -264,7 +264,6 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(bo);
-               val_buf->reserved = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
        }

diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index fd95fd569ca3..8490cb8ee0d8 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -48,9 +48,6 @@
 struct ttm_validate_buffer {
        struct list_head head;
        struct ttm_buffer_object *bo;
-       bool reserved;
-       bool removed;
-       int put_count;
        void *old_sync_obj;
 };

-- 
1.8.5.2

Reply via email to