That should now be handled by the common dma_resv framework.
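
For reference, a minimal sketch of the pattern this switches to: the object's
reservation object already tracks kernel-internal (eviction/migration) fences
under DMA_RESV_USAGE_KERNEL, so a driver can query or wait on them directly
instead of keeping a private bo->moving pointer. The example_* helper names
below are illustrative only and are not part of this patch:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/sched.h>

/*
 * Illustrative only: return one fence summarising all kernel-internal
 * activity (eviction/migration) on the reservation object. *fence is
 * set to NULL when the object is idle.
 */
static int example_get_moving_fence(struct dma_resv *resv,
                                    struct dma_fence **fence)
{
        return dma_resv_get_singleton(resv, DMA_RESV_USAGE_KERNEL, fence);
}

/*
 * Illustrative only: wait for all kernel fences to signal and squash
 * the remaining-jiffies return value of dma_resv_wait_timeout() into
 * a plain 0 / negative errno.
 */
static int example_wait_moving_fence(struct dma_resv *resv, bool intr)
{
        long ret;

        ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL, intr,
                                    MAX_SCHEDULE_TIMEOUT);
        return ret < 0 ? ret : 0;
}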

Signed-off-by: Christian König <christian.koe...@amd.com>
Reviewed-by: Daniel Vetter <daniel.vet...@ffwll.ch>
Cc: intel-...@lists.freedesktop.org
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    | 41 ++++---------------
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  8 +---
 drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c  | 15 +------
 .../drm/i915/gem/selftests/i915_gem_migrate.c |  3 +-
 .../drm/i915/gem/selftests/i915_gem_mman.c    |  3 +-
 drivers/gpu/drm/i915/i915_vma.c               |  9 +++-
 6 files changed, 21 insertions(+), 58 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 372bc220faeb..ffde7bc0a95d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -741,30 +741,19 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
 /**
  * i915_gem_object_get_moving_fence - Get the object's moving fence if any
  * @obj: The object whose moving fence to get.
+ * @fence: The resulting fence
  *
  * A non-signaled moving fence means that there is an async operation
  * pending on the object that needs to be waited on before setting up
  * any GPU- or CPU PTEs to the object's pages.
  *
- * Return: A refcounted pointer to the object's moving fence if any,
- * NULL otherwise.
+ * Return: Negative error code or 0 for success.
  */
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+                                    struct dma_fence **fence)
 {
-       return dma_fence_get(i915_gem_to_ttm(obj)->moving);
-}
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
-                                     struct dma_fence *fence)
-{
-       struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
-
-       if (*moving == fence)
-               return;
-
-       dma_fence_put(*moving);
-       *moving = dma_fence_get(fence);
+       return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+                                     fence);
 }
 
 /**
@@ -782,23 +771,9 @@ void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
                                      bool intr)
 {
-       struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
-       int ret;
-
        assert_object_held(obj);
-       if (!fence)
-               return 0;
-
-       ret = dma_fence_wait(fence, intr);
-       if (ret)
-               return ret;
-
-       if (fence->error)
-               return fence->error;
-
-       i915_gem_to_ttm(obj)->moving = NULL;
-       dma_fence_put(fence);
-       return 0;
+       return dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+                                    intr, MAX_SCHEDULE_TIMEOUT);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 02c37fe4a535..e11d82a9f7c3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -520,12 +520,8 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
        i915_gem_object_unpin_pages(obj);
 }
 
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
-                                     struct dma_fence *fence);
-
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+                                    struct dma_fence **fence);
 int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
                                      bool intr);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 438b8a95b3d1..a10716f4e717 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -467,19 +467,6 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
        return fence;
 }
 
-static int
-prev_deps(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
-         struct i915_deps *deps)
-{
-       int ret;
-
-       ret = i915_deps_add_dependency(deps, bo->moving, ctx);
-       if (!ret)
-               ret = i915_deps_add_resv(deps, bo->base.resv, ctx);
-
-       return ret;
-}
-
 /**
  * i915_ttm_move - The TTM move callback used by i915.
  * @bo: The buffer object.
@@ -534,7 +521,7 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
                struct i915_deps deps;
 
                i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
-               ret = prev_deps(bo, ctx, &deps);
+               ret = i915_deps_add_resv(&deps, bo->base.resv, ctx);
                if (ret) {
                        i915_refct_sgt_put(dst_rsgt);
                        return ret;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
index 4997ed18b6e4..0ad443a90c8b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -219,8 +219,7 @@ static int __igt_lmem_pages_migrate(struct intel_gt *gt,
                        err = dma_resv_reserve_fences(obj->base.resv, 1);
                        if (!err)
                                dma_resv_add_fence(obj->base.resv, &rq->fence,
-                                                  DMA_RESV_USAGE_WRITE);
-                       i915_gem_object_set_moving_fence(obj, &rq->fence);
+                                                  DMA_RESV_USAGE_KERNEL);
                        i915_request_put(rq);
                }
                if (err)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 3a6e3f6d239f..dfc34cc2ef8c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1221,8 +1221,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
        i915_gem_object_unpin_pages(obj);
        if (rq) {
                dma_resv_add_fence(obj->base.resv, &rq->fence,
-                                  DMA_RESV_USAGE_WRITE);
-               i915_gem_object_set_moving_fence(obj, &rq->fence);
+                                  DMA_RESV_USAGE_KERNEL);
                i915_request_put(rq);
        }
        i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 524477d8939e..d077f7b9eaad 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1357,10 +1357,17 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
        if (err)
                return err;
 
+       if (vma->obj) {
+               err = i915_gem_object_get_moving_fence(vma->obj, &moving);
+               if (err)
+                       return err;
+       } else {
+               moving = NULL;
+       }
+
        if (flags & PIN_GLOBAL)
                wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
 
-       moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
        if (flags & vma->vm->bind_async_flags || moving) {
                /* lock VM */
                err = i915_vm_lock_objects(vma->vm, ww);
-- 
2.25.1
