As we have already plugged the w->dma into the reservation_object, and
have set ourselves up to automatically signal the request and w->dma on
completion, we do not need to export the rq->fence directly and can just
use the w->dma fence.

This avoids having to take the reservation_lock inside the worker, which
cross-release lockdep would complain about. :)

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.a...@intel.com>
Reviewed-by: Matthew Auld <matthew.a...@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_client_blt.c     | 14 +++++++-------
 .../drm/i915/gem/selftests/i915_gem_client_blt.c   | 11 -----------
 2 files changed, 7 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c 
b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 1fdab0767a47..9b01c3b5b31d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -72,7 +72,6 @@ static struct i915_sleeve *create_sleeve(struct 
i915_address_space *vm,
        vma->ops = &proxy_vma_ops;
 
        sleeve->vma = vma;
-       sleeve->obj = i915_gem_object_get(obj);
        sleeve->pages = pages;
        sleeve->page_sizes = *page_sizes;
 
@@ -85,7 +84,6 @@ static struct i915_sleeve *create_sleeve(struct 
i915_address_space *vm,
 
 static void destroy_sleeve(struct i915_sleeve *sleeve)
 {
-       i915_gem_object_put(sleeve->obj);
        kfree(sleeve);
 }
 
@@ -155,7 +153,7 @@ static void clear_pages_worker(struct work_struct *work)
 {
        struct clear_pages_work *w = container_of(work, typeof(*w), work);
        struct drm_i915_private *i915 = w->ce->gem_context->i915;
-       struct drm_i915_gem_object *obj = w->sleeve->obj;
+       struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
        struct i915_vma *vma = w->sleeve->vma;
        struct i915_request *rq;
        int err = w->dma.error;
@@ -193,10 +191,12 @@ static void clear_pages_worker(struct work_struct *work)
                        goto out_request;
        }
 
-       /* XXX: more feverish nightmares await */
-       i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-       i915_vma_unlock(vma);
+       /*
+        * w->dma is already exported via (vma|obj)->resv we need only
+        * keep track of the GPU activity within this vma/request, and
+        * propagate the signal from the request to w->dma.
+        */
+       err = i915_active_ref(&vma->active, rq->fence.context, rq);
        if (err)
                goto out_request;
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index f3a5eb807c1c..855481252bda 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -63,17 +63,6 @@ static int igt_client_fill(void *arg)
                if (err)
                        goto err_unpin;
 
-               /*
-                * XXX: For now do the wait without the object resv lock to
-                * ensure we don't deadlock.
-                */
-               err = i915_gem_object_wait(obj,
-                                          I915_WAIT_INTERRUPTIBLE |
-                                          I915_WAIT_ALL,
-                                          MAX_SCHEDULE_TIMEOUT);
-               if (err)
-                       goto err_unpin;
-
                i915_gem_object_lock(obj);
                err = i915_gem_object_set_to_cpu_domain(obj, false);
                i915_gem_object_unlock(obj);
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to