On Mon, Apr 07, 2025 at 08:29:11AM +0100, Matthew Auld wrote: > On 04/04/2025 18:02, Matthew Brost wrote: > > On Fri, Apr 04, 2025 at 09:19:34AM +0100, Matthew Auld wrote: > > > On 03/04/2025 22:25, Matthew Brost wrote: > > > > On Fri, Mar 28, 2025 at 06:10:36PM +0000, Matthew Auld wrote: > > > > > We now use the same notifier lock for SVM and userptr, with that we > > > > > can > > > > > combine xe_pt_userptr_pre_commit and xe_pt_svm_pre_commit. > > > > > > > > > > Suggested-by: Matthew Brost <matthew.br...@intel.com> > > > > > Signed-off-by: Matthew Auld <matthew.a...@intel.com> > > > > > Cc: Himal Prasad Ghimiray <himal.prasad.ghimi...@intel.com> > > > > > Cc: Thomas Hellström <thomas.hellst...@linux.intel.com> > > > > > --- > > > > > drivers/gpu/drm/xe/xe_pt.c | 95 > > > > > +++++++++++++------------------------- > > > > > 1 file changed, 33 insertions(+), 62 deletions(-) > > > > > > > > > > diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c > > > > > index b097c91e2e02..947b82aa19a6 100644 > > > > > --- a/drivers/gpu/drm/xe/xe_pt.c > > > > > +++ b/drivers/gpu/drm/xe/xe_pt.c > > > > > @@ -1396,7 +1396,7 @@ static int op_check_userptr(struct xe_vm *vm, > > > > > struct xe_vma_op *op, > > > > > return err; > > > > > } > > > > > -static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update > > > > > *pt_update) > > > > > +static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update > > > > > *pt_update) > > > > > { > > > > > struct xe_vm *vm = pt_update->vops->vm; > > > > > struct xe_vma_ops *vops = pt_update->vops; > > > > > @@ -1409,55 +1409,40 @@ static int xe_pt_userptr_pre_commit(struct > > > > > xe_migrate_pt_update *pt_update) > > > > > if (err) > > > > > return err; > > > > > - down_read(&vm->svm.gpusvm.notifier_lock); > > > > > + drm_gpusvm_notifier_lock(&vm->svm.gpusvm); > > > > > > > > Also any reason not use xe_svm_notifier_lock / xe_svm_notifier_unlock > > > > wrappers? 
> > > > > > Just that those are hidden behind CONFIG_DRM_XE_GPUSVM, so looks possible > > > to > > > disable svm, but then I think we should still have working userptr. > > > Should I > > > pull xe_svm_notifier_lock out of CONFIG_DRM_XE_GPUSVM? > > > > > > > How would userptr work without GPU SVM if we're building it on top of it? > > Here I mean CONFIG_DRM_*XE*_GPUSVM, which looks to be specific to everything > around xe_svm.c AFAICT, which userptr doesn't touch. We do now ofc require > DRM_GPUSVM by default, but that's a different config. >
Ah ok, that makes sense I guess. We could pull xe_svm_notifier_lock / xe_svm_notifier_unlock out these functions to not be hidden behind CONFIG_DRM_*XE*_GPUSVM though. Matt > > > > Matt > > > > > > > > > > Matt > > > > > > > > > list_for_each_entry(op, &vops->list, link) { > > > > > - err = op_check_userptr(vm, op, pt_update_ops); > > > > > - if (err) { > > > > > - up_read(&vm->svm.gpusvm.notifier_lock); > > > > > - break; > > > > > + if (pt_update_ops->needs_svm_lock) { > > > > > +#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) > > > > > + struct xe_svm_range *range = > > > > > op->map_range.range; > > > > > + > > > > > + if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) > > > > > + continue; > > > > > + > > > > > + xe_svm_range_debug(range, "PRE-COMMIT"); > > > > > + > > > > > + xe_assert(vm->xe, > > > > > + > > > > > xe_vma_is_cpu_addr_mirror(op->map_range.vma)); > > > > > + xe_assert(vm->xe, op->subop == > > > > > XE_VMA_SUBOP_MAP_RANGE); > > > > > + > > > > > + if (!xe_svm_range_pages_valid(range)) { > > > > > + xe_svm_range_debug(range, "PRE-COMMIT - > > > > > RETRY"); > > > > > + > > > > > drm_gpusvm_notifier_unlock(&vm->svm.gpusvm); > > > > > + return -EAGAIN; > > > > > + } > > > > > +#endif > > > > > + } else { > > > > > + err = op_check_userptr(vm, op, pt_update_ops); > > > > > + if (err) { > > > > > + > > > > > drm_gpusvm_notifier_unlock(&vm->svm.gpusvm); > > > > > + break; > > > > > + } > > > > > } > > > > > } > > > > > return err; > > > > > } > > > > > -#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) > > > > > -static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update > > > > > *pt_update) > > > > > -{ > > > > > - struct xe_vm *vm = pt_update->vops->vm; > > > > > - struct xe_vma_ops *vops = pt_update->vops; > > > > > - struct xe_vma_op *op; > > > > > - int err; > > > > > - > > > > > - err = xe_pt_pre_commit(pt_update); > > > > > - if (err) > > > > > - return err; > > > > > - > > > > > - xe_svm_notifier_lock(vm); > > > > > - > > > > > - list_for_each_entry(op, &vops->list, link) 
{ > > > > > - struct xe_svm_range *range = op->map_range.range; > > > > > - > > > > > - if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) > > > > > - continue; > > > > > - > > > > > - xe_svm_range_debug(range, "PRE-COMMIT"); > > > > > - > > > > > - xe_assert(vm->xe, > > > > > xe_vma_is_cpu_addr_mirror(op->map_range.vma)); > > > > > - xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE); > > > > > - > > > > > - if (!xe_svm_range_pages_valid(range)) { > > > > > - xe_svm_range_debug(range, "PRE-COMMIT - RETRY"); > > > > > - xe_svm_notifier_unlock(vm); > > > > > - return -EAGAIN; > > > > > - } > > > > > - } > > > > > - > > > > > - return 0; > > > > > -} > > > > > -#endif > > > > > - > > > > > struct invalidation_fence { > > > > > struct xe_gt_tlb_invalidation_fence base; > > > > > struct xe_gt *gt; > > > > > @@ -2255,22 +2240,12 @@ static const struct xe_migrate_pt_update_ops > > > > > migrate_ops = { > > > > > .pre_commit = xe_pt_pre_commit, > > > > > }; > > > > > -static const struct xe_migrate_pt_update_ops userptr_migrate_ops = { > > > > > +static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops > > > > > = { > > > > > .populate = xe_vm_populate_pgtable, > > > > > .clear = xe_migrate_clear_pgtable_callback, > > > > > - .pre_commit = xe_pt_userptr_pre_commit, > > > > > + .pre_commit = xe_pt_svm_userptr_pre_commit, > > > > > }; > > > > > -#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM) > > > > > -static const struct xe_migrate_pt_update_ops svm_migrate_ops = { > > > > > - .populate = xe_vm_populate_pgtable, > > > > > - .clear = xe_migrate_clear_pgtable_callback, > > > > > - .pre_commit = xe_pt_svm_pre_commit, > > > > > -}; > > > > > -#else > > > > > -static const struct xe_migrate_pt_update_ops svm_migrate_ops; > > > > > -#endif > > > > > - > > > > > /** > > > > > * xe_pt_update_ops_run() - Run PT update operations > > > > > * @tile: Tile of PT update operations > > > > > @@ -2296,10 +2271,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, > > > > > struct xe_vma_ops 
*vops) > > > > > struct xe_vma_op *op; > > > > > int err = 0, i; > > > > > struct xe_migrate_pt_update update = { > > > > > - .ops = pt_update_ops->needs_svm_lock ? > > > > > - &svm_migrate_ops : > > > > > - pt_update_ops->needs_userptr_lock ? > > > > > - &userptr_migrate_ops : > > > > > + .ops = pt_update_ops->needs_svm_lock || > > > > > pt_update_ops->needs_userptr_lock ? > > > > > + &svm_userptr_migrate_ops : > > > > > &migrate_ops, > > > > > .vops = vops, > > > > > .tile_id = tile->id, > > > > > @@ -2419,10 +2392,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, > > > > > struct xe_vma_ops *vops) > > > > > &ifence->base.base, > > > > > &mfence->base.base); > > > > > } > > > > > - if (pt_update_ops->needs_svm_lock) > > > > > - xe_svm_notifier_unlock(vm); > > > > > - if (pt_update_ops->needs_userptr_lock) > > > > > - up_read(&vm->svm.gpusvm.notifier_lock); > > > > > + if (pt_update_ops->needs_svm_lock || > > > > > pt_update_ops->needs_userptr_lock) > > > > > + drm_gpusvm_notifier_unlock(&vm->svm.gpusvm); > > > > > return fence; > > > > > -- > > > > > 2.48.1 > > > > > > > > >