Switch the buffer object reservation over to the new drm_exec
infrastructure, as was already done for other drivers.

No intentional functional change, but note that this is only compile
tested.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 56 +++++++++++++++++++++-
 drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 41 ++--------------
 2 files changed, 59 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e7625b3f71e0..34436504fcdb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -817,6 +817,59 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx,
        return 0;
 }
 
+/**
+ * vmw_validation_bo_reserve - Reserve buffer objects registered with a
+ * validation context
+ * @ctx: The validation context
+ * @intr: Perform waits interruptible
+ *
+ * Return: Zero on success, -ERESTARTSYS when interrupted, negative error
+ * code on failure
+ */
+int vmw_validation_bo_reserve(struct vmw_validation_context *ctx, bool intr)
+{
+       struct vmw_validation_bo_node *entry;
+       int ret;
+
+       drm_exec_init(&ctx->exec, intr ? DRM_EXEC_INTERRUPTIBLE_WAIT : 0, 0);
+       drm_exec_until_all_locked(&ctx->exec) {
+               list_for_each_entry(entry, &ctx->bo_list, base.head) {
+                       ret = drm_exec_prepare_obj(&ctx->exec,
+                                                  &entry->base.bo->base, 1);
+                       drm_exec_retry_on_contention(&ctx->exec);
+                       if (ret)
+                               goto error;
+               }
+       }
+       return 0;
+
+error:
+       drm_exec_fini(&ctx->exec);
+       return ret;
+}
+
+/**
+ * vmw_validation_bo_fence - Unreserve and fence buffer objects
+ * @ctx: The validation context
+ * @fence: The fence object with which to fence the buffer objects
+ *
+ * This function unreserves the buffer objects previously reserved using
+ * vmw_validation_bo_reserve, and fences them with a fence object.
+ */
+void vmw_validation_bo_fence(struct vmw_validation_context *ctx,
+                            struct vmw_fence_obj *fence)
+{
+       struct vmw_validation_bo_node *entry;
+
+       list_for_each_entry(entry, &ctx->bo_list, base.head) {
+               dma_resv_add_fence(entry->base.bo->base.resv, &fence->base,
+                                  DMA_RESV_USAGE_READ);
+       }
+       drm_exec_fini(&ctx->exec);
+}
+
+
+
 /**
  * vmw_validation_bo_backoff - Unreserve buffer objects registered with a
  * validation context
@@ -842,6 +895,5 @@ void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
                                vmw_bo_dirty_release(vbo);
                }
        }
-
-       ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list);
+       drm_exec_fini(&ctx->exec);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 353d837907d8..55a7d8b68d5c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -31,8 +31,7 @@
 #include <linux/list.h>
 #include <linux/hashtable.h>
 #include <linux/ww_mutex.h>
-
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/drm_exec.h>
 
 #define VMW_RES_DIRTY_NONE 0
 #define VMW_RES_DIRTY_SET BIT(0)
@@ -59,7 +58,7 @@ struct vmw_validation_context {
        struct list_head resource_ctx_list;
        struct list_head bo_list;
        struct list_head page_list;
-       struct ww_acquire_ctx ticket;
+       struct drm_exec exec;
        struct mutex *res_mutex;
        unsigned int merge_dups;
        unsigned int mem_size_left;
@@ -106,39 +105,6 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
        return !list_empty(&ctx->bo_list);
 }
 
-/**
- * vmw_validation_bo_reserve - Reserve buffer objects registered with a
- * validation context
- * @ctx: The validation context
- * @intr: Perform waits interruptible
- *
- * Return: Zero on success, -ERESTARTSYS when interrupted, negative error
- * code on failure
- */
-static inline int
-vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
-                         bool intr)
-{
-       return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
-                                     NULL);
-}
-
-/**
- * vmw_validation_bo_fence - Unreserve and fence buffer objects registered
- * with a validation context
- * @ctx: The validation context
- *
- * This function unreserves the buffer objects previously reserved using
- * vmw_validation_bo_reserve, and fences them with a fence object.
- */
-static inline void
-vmw_validation_bo_fence(struct vmw_validation_context *ctx,
-                       struct vmw_fence_obj *fence)
-{
-       ttm_eu_fence_buffer_objects(&ctx->ticket, &ctx->bo_list,
-                                   (void *) fence);
-}
-
 /**
  * vmw_validation_align - Align a validation memory allocation
  * @val: The size to be aligned
@@ -185,6 +151,9 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx,
                               unsigned int size);
 void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
                                  void *val_private, u32 dirty);
+int vmw_validation_bo_reserve(struct vmw_validation_context *ctx, bool intr);
+void vmw_validation_bo_fence(struct vmw_validation_context *ctx,
+                            struct vmw_fence_obj *fence);
 void vmw_validation_bo_backoff(struct vmw_validation_context *ctx);
 
 #endif
-- 
2.34.1

Reply via email to