We now use the same notifier lock for SVM and userptr, which lets us
combine xe_pt_userptr_pre_commit and xe_pt_svm_pre_commit into a single
pre-commit callback.

v2: (Matt B)
  - Re-use xe_svm_notifier_lock/unlock for userptr.
  - Combine svm/userptr handling further down into op_check_svm_userptr.

Suggested-by: Matthew Brost <matthew.br...@intel.com>
Signed-off-by: Matthew Auld <matthew.a...@intel.com>
Cc: Himal Prasad Ghimiray <himal.prasad.ghimi...@intel.com>
Cc: Thomas Hellström <thomas.hellst...@linux.intel.com>
---
 drivers/gpu/drm/xe/xe_pt.c       | 90 ++++++++++----------------------
 drivers/gpu/drm/xe/xe_pt_types.h |  2 -
 drivers/gpu/drm/xe/xe_svm.h      | 19 +++----
 3 files changed, 39 insertions(+), 72 deletions(-)
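
For reviewers skimming the diff, a condensed sketch of the locking change
(not part of the patch itself; details are taken from the hunks below):

/* Before: the userptr pre-commit path took the gpusvm notifier lock
 * directly, while SVM had its own pre-commit callback. */
down_read(&vm->svm.gpusvm.notifier_lock);
/* ... op_check_userptr() per op ... */
up_read(&vm->svm.gpusvm.notifier_lock);

/* After: userptr and SVM ops go through the same wrappers, which expand
 * to drm_gpusvm_notifier_lock()/unlock() on &vm->svm.gpusvm, and a
 * single op_check_svm_userptr() validates both kinds of op.  It returns
 * -EAGAIN when SVM range pages are no longer valid so the bind can be
 * retried. */
xe_svm_notifier_lock(vm);
/* ... op_check_svm_userptr() per op ... */
xe_svm_notifier_unlock(vm);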

diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index ff898e518afd..a7db65f9913e 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -1393,8 +1393,8 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
        return 0;
 }
 
-static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
-                           struct xe_vm_pgtable_update_ops *pt_update)
+static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+                               struct xe_vm_pgtable_update_ops *pt_update)
 {
        int err = 0;
 
@@ -1419,6 +1419,24 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
                err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
                                        pt_update);
                break;
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+       case DRM_GPUVA_OP_DRIVER:
+               if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+                       struct xe_svm_range *range = op->map_range.range;
+
+                       xe_svm_range_debug(range, "PRE-COMMIT");
+
+                       xe_assert(vm->xe,
+                                 xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+                       xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+
+                       if (!xe_svm_range_pages_valid(range)) {
+                               xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+                               err = -EAGAIN;
+                       }
+               }
+               break;
+#endif
        default:
                drm_warn(&vm->xe->drm, "NOT POSSIBLE");
        }
@@ -1426,7 +1444,7 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
        return err;
 }
 
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
+static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 {
        struct xe_vm *vm = pt_update->vops->vm;
        struct xe_vma_ops *vops = pt_update->vops;
@@ -1439,12 +1457,12 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
        if (err)
                return err;
 
-       down_read(&vm->svm.gpusvm.notifier_lock);
+       xe_svm_notifier_lock(vm);
 
        list_for_each_entry(op, &vops->list, link) {
-               err = op_check_userptr(vm, op, pt_update_ops);
+               err = op_check_svm_userptr(vm, op, pt_update_ops);
                if (err) {
-                       up_read(&vm->svm.gpusvm.notifier_lock);
+                       xe_svm_notifier_unlock(vm);
                        break;
                }
        }
@@ -1452,42 +1470,6 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
        return err;
 }
 
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
-       struct xe_vm *vm = pt_update->vops->vm;
-       struct xe_vma_ops *vops = pt_update->vops;
-       struct xe_vma_op *op;
-       int err;
-
-       err = xe_pt_pre_commit(pt_update);
-       if (err)
-               return err;
-
-       xe_svm_notifier_lock(vm);
-
-       list_for_each_entry(op, &vops->list, link) {
-               struct xe_svm_range *range = op->map_range.range;
-
-               if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
-                       continue;
-
-               xe_svm_range_debug(range, "PRE-COMMIT");
-
-               xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
-               xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
-
-               if (!xe_svm_range_pages_valid(range)) {
-                       xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
-                       xe_svm_notifier_unlock(vm);
-                       return -EAGAIN;
-               }
-       }
-
-       return 0;
-}
-#endif
-
 struct invalidation_fence {
        struct xe_gt_tlb_invalidation_fence base;
        struct xe_gt *gt;
@@ -1858,7 +1840,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
                                                 xe_vma_start(vma),
                                                 xe_vma_end(vma));
                ++pt_update_ops->current_op;
-               pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+               pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
 
                /*
                 * If rebind, we have to invalidate TLB on !LR vms to invalidate
@@ -1966,7 +1948,7 @@ static int unbind_op_prepare(struct xe_tile *tile,
        xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
                                         xe_vma_end(vma));
        ++pt_update_ops->current_op;
-       pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+       pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
        pt_update_ops->needs_invalidation = true;
 
        xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
@@ -2289,22 +2271,12 @@ static const struct xe_migrate_pt_update_ops migrate_ops = {
        .pre_commit = xe_pt_pre_commit,
 };
 
-static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
        .populate = xe_vm_populate_pgtable,
        .clear = xe_migrate_clear_pgtable_callback,
-       .pre_commit = xe_pt_userptr_pre_commit,
+       .pre_commit = xe_pt_svm_userptr_pre_commit,
 };
 
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
-       .populate = xe_vm_populate_pgtable,
-       .clear = xe_migrate_clear_pgtable_callback,
-       .pre_commit = xe_pt_svm_pre_commit,
-};
-#else
-static const struct xe_migrate_pt_update_ops svm_migrate_ops;
-#endif
-
 /**
  * xe_pt_update_ops_run() - Run PT update operations
  * @tile: Tile of PT update operations
@@ -2331,9 +2303,7 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
        int err = 0, i;
        struct xe_migrate_pt_update update = {
                .ops = pt_update_ops->needs_svm_lock ?
-                       &svm_migrate_ops :
-                       pt_update_ops->needs_userptr_lock ?
-                       &userptr_migrate_ops :
+                       &svm_userptr_migrate_ops :
                        &migrate_ops,
                .vops = vops,
                .tile_id = tile->id,
@@ -2455,8 +2425,6 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 
        if (pt_update_ops->needs_svm_lock)
                xe_svm_notifier_unlock(vm);
-       if (pt_update_ops->needs_userptr_lock)
-               up_read(&vm->svm.gpusvm.notifier_lock);
 
        return fence;
 
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 69eab6f37cfe..dc0b2d8c3af8 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -106,8 +106,6 @@ struct xe_vm_pgtable_update_ops {
        u32 current_op;
        /** @needs_svm_lock: Needs SVM lock */
        bool needs_svm_lock;
-       /** @needs_userptr_lock: Needs userptr lock */
-       bool needs_userptr_lock;
        /** @needs_invalidation: Needs invalidation */
        bool needs_invalidation;
        /**
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 923cb074d0cb..696496b52465 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -87,15 +87,6 @@ static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
        return range->base.pages.flags.has_dma_mapping;
 }
 
-#define xe_svm_assert_in_notifier(vm__) \
-       lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
-
-#define xe_svm_notifier_lock(vm__)     \
-       drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
-
-#define xe_svm_notifier_unlock(vm__)   \
-       drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
-
 #else
 #include <linux/interval_tree.h>
 
@@ -187,4 +178,14 @@ static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
 {
 }
 #endif
+
+#define xe_svm_assert_in_notifier(vm__) \
+       lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__)     \
+       drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_unlock(vm__)   \
+       drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
 #endif
-- 
2.49.0
