We now use the same notifier lock for SVM and userptr; with that we can
combine xe_pt_userptr_pre_commit and xe_pt_svm_pre_commit.

v2: (Matt B)
  - Re-use xe_svm_notifier_lock/unlock for userptr.
  - Combine svm/userptr handling further down into op_check_svm_userptr.

Suggested-by: Matthew Brost <matthew.br...@intel.com>
Signed-off-by: Matthew Auld <matthew.a...@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimi...@intel.com>
Cc: Thomas Hellström <thomas.hellst...@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.br...@intel.com>
---
 drivers/gpu/drm/xe/xe_pt.c       | 123 +++++++++++--------------------
 drivers/gpu/drm/xe/xe_pt_types.h |   2 -
 2 files changed, 44 insertions(+), 81 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ecd9b0be4997..16024353c165 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1416,8 +1416,8 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
        return 0;
 }
 
-static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
-                           struct xe_vm_pgtable_update_ops *pt_update)
+static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+                               struct xe_vm_pgtable_update_ops *pt_update)
 {
        int err = 0;
 
@@ -1439,9 +1439,40 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
        case DRM_GPUVA_OP_UNMAP:
                break;
        case DRM_GPUVA_OP_PREFETCH:
-               err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
-                                       pt_update);
+               if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))) {
+                       struct xe_svm_range *range = op->map_range.range;
+                       unsigned long i;
+
+                       xe_assert(vm->xe,
+                                 xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
+                       xa_for_each(&op->prefetch_range.range, i, range) {
+                               xe_svm_range_debug(range, "PRE-COMMIT");
+
+                               if (!xe_svm_range_pages_valid(range)) {
+                                       xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+                                       return -ENODATA;
+                               }
+                       }
+               } else {
+                       err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), pt_update);
+               }
                break;
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+       case DRM_GPUVA_OP_DRIVER:
+               if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+                       struct xe_svm_range *range = op->map_range.range;
+
+                       xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+
+                       xe_svm_range_debug(range, "PRE-COMMIT");
+
+                       if (!xe_svm_range_pages_valid(range)) {
+                               xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+                               return -EAGAIN;
+                       }
+               }
+               break;
+#endif
        default:
                drm_warn(&vm->xe->drm, "NOT POSSIBLE");
        }
@@ -1449,7 +1480,7 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
        return err;
 }
 
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 {
        struct xe_vm *vm = pt_update->vops->vm;
        struct xe_vma_ops *vops = pt_update->vops;
@@ -1462,12 +1493,12 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
        if (err)
                return err;
 
-       down_read(&vm->svm.gpusvm.notifier_lock);
+       xe_svm_notifier_lock(vm);
 
        list_for_each_entry(op, &vops->list, link) {
-               err = op_check_userptr(vm, op, pt_update_ops);
+               err = op_check_svm_userptr(vm, op, pt_update_ops);
                if (err) {
-                       up_read(&vm->svm.gpusvm.notifier_lock);
+                       xe_svm_notifier_unlock(vm);
                        break;
                }
        }
@@ -1475,58 +1506,6 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
        return err;
 }
 
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
-       struct xe_vm *vm = pt_update->vops->vm;
-       struct xe_vma_ops *vops = pt_update->vops;
-       struct xe_vma_op *op;
-       unsigned long i;
-       int err;
-
-       err = xe_pt_pre_commit(pt_update);
-       if (err)
-               return err;
-
-       xe_svm_notifier_lock(vm);
-
-       list_for_each_entry(op, &vops->list, link) {
-               struct xe_svm_range *range = NULL;
-
-               if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
-                       continue;
-
-               if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
-                       xe_assert(vm->xe,
-                                 xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
-                       xa_for_each(&op->prefetch_range.range, i, range) {
-                               xe_svm_range_debug(range, "PRE-COMMIT");
-
-                               if (!xe_svm_range_pages_valid(range)) {
-                                       xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
-                                       xe_svm_notifier_unlock(vm);
-                                       return -ENODATA;
-                               }
-                       }
-               } else {
-                       xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
-                       xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
-                       range = op->map_range.range;
-
-                       xe_svm_range_debug(range, "PRE-COMMIT");
-
-                       if (!xe_svm_range_pages_valid(range)) {
-                               xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
-                               xe_svm_notifier_unlock(vm);
-                               return -EAGAIN;
-                       }
-               }
-       }
-
-       return 0;
-}
-#endif
-
 struct xe_pt_stage_unbind_walk {
        /** @base: The pagewalk base-class. */
        struct xe_pt_walk base;
@@ -1828,7 +1807,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
                                                 xe_vma_start(vma),
                                                 xe_vma_end(vma));
                ++pt_update_ops->current_op;
-               pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+               pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
 
                /*
                 * If rebind, we have to invalidate TLB on !LR vms to invalidate
@@ -1936,7 +1915,7 @@ static int unbind_op_prepare(struct xe_tile *tile,
        xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
                                         xe_vma_end(vma));
        ++pt_update_ops->current_op;
-       pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+       pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
        pt_update_ops->needs_invalidation = true;
 
        xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
@@ -2323,22 +2302,12 @@ static const struct xe_migrate_pt_update_ops migrate_ops = {
        .pre_commit = xe_pt_pre_commit,
 };
 
-static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
        .populate = xe_vm_populate_pgtable,
        .clear = xe_migrate_clear_pgtable_callback,
-       .pre_commit = xe_pt_userptr_pre_commit,
+       .pre_commit = xe_pt_svm_userptr_pre_commit,
 };
 
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
-       .populate = xe_vm_populate_pgtable,
-       .clear = xe_migrate_clear_pgtable_callback,
-       .pre_commit = xe_pt_svm_pre_commit,
-};
-#else
-static const struct xe_migrate_pt_update_ops svm_migrate_ops;
-#endif
-
 /**
  * xe_pt_update_ops_run() - Run PT update operations
  * @tile: Tile of PT update operations
@@ -2365,9 +2334,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
        int err = 0, i;
        struct xe_migrate_pt_update update = {
                .ops = pt_update_ops->needs_svm_lock ?
-                       &svm_migrate_ops :
-                       pt_update_ops->needs_userptr_lock ?
-                       &userptr_migrate_ops :
+                       &svm_userptr_migrate_ops :
                        &migrate_ops,
                .vops = vops,
                .tile_id = tile->id,
@@ -2502,8 +2469,6 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 
        if (pt_update_ops->needs_svm_lock)
                xe_svm_notifier_unlock(vm);
-       if (pt_update_ops->needs_userptr_lock)
-               up_read(&vm->svm.gpusvm.notifier_lock);
 
        xe_gt_tlb_inval_job_put(mjob);
        xe_gt_tlb_inval_job_put(ijob);
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 17cdd7c7e9f5..881f01e14db8 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -105,8 +105,6 @@ struct xe_vm_pgtable_update_ops {
        u32 current_op;
        /** @needs_svm_lock: Needs SVM lock */
        bool needs_svm_lock;
-       /** @needs_userptr_lock: Needs userptr lock */
-       bool needs_userptr_lock;
        /** @needs_invalidation: Needs invalidation */
        bool needs_invalidation;
        /**
-- 
2.50.1

Reply via email to