Similar to how TTM vm does this, convert the drm/xe fault
handler to use drm_exec locking.

Cc: Christian König <christian.koe...@amd.com>
Cc: Somalapuram Amaranath <amaranath.somalapu...@amd.com>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: <dri-devel@lists.freedesktop.org>
Signed-off-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
---
 drivers/gpu/drm/xe/xe_bo.c | 38 +++++++++++++++++++++++---------------
 1 file changed, 23 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 3c56858e0751..27d7d36401b5 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1217,29 +1217,37 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
        struct xe_device *xe = to_xe_device(ddev);
        struct xe_bo *bo = ttm_to_xe_bo(tbo);
        bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
+       struct drm_exec exec;
        vm_fault_t ret;
-       int idx;
+       int idx, err;
 
        if (needs_rpm)
                xe_pm_runtime_get(xe);
 
-       ret = ttm_bo_vm_reserve(tbo, vmf, NULL);
-       if (ret)
-               goto out;
+       drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 16);
+       drm_exec_until_all_locked(&exec) {
+               ret = ttm_bo_vm_reserve(tbo, vmf, &exec);
+               err = drm_exec_retry_on_contention(&exec, 0);
+               if (err)
+                       ret = VM_FAULT_NOPAGE;
+               if (ret)
+                       goto out;
 
-       if (drm_dev_enter(ddev, &idx)) {
-               trace_xe_bo_cpu_fault(bo);
+               if (drm_dev_enter(ddev, &idx)) {
+                       trace_xe_bo_cpu_fault(bo);
 
-               ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-                                              TTM_BO_VM_NUM_PREFAULT,
-                                              NULL);
-               drm_dev_exit(idx);
-       } else {
-               ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+                       ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+                                                      TTM_BO_VM_NUM_PREFAULT,
+                                                      &exec);
+                       drm_dev_exit(idx);
+                       err = drm_exec_retry_on_contention(&exec, 0);
+                       if (err)
+                               ret = VM_FAULT_NOPAGE;
+               } else {
+                       ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+               }
        }
 
-       if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-               goto out;
        /*
         * ttm_bo_vm_reserve() already has dma_resv_lock.
         */
@@ -1250,8 +1258,8 @@ static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
                mutex_unlock(&xe->mem_access.vram_userfault.lock);
        }
 
-       dma_resv_unlock(tbo->base.resv);
 out:
+       drm_exec_fini(&exec);
        if (needs_rpm)
                xe_pm_runtime_put(xe);
 
-- 
2.44.0

Reply via email to