Introduce xe_svm_ranges_zap_ptes_in_range(), a function to zap page table entries (PTEs) for all SVM ranges within a user-specified address range.
-v2 (Matthew Brost) Lock should be called even for tlb_invalidation v3 (Matthew Brost) - Update comment - s/notifier->itree.start/drm_gpusvm_notifier_start - s/notifier->itree.last + 1/drm_gpusvm_notifier_end - use WRITE_ONCE Cc: Matthew Brost <matthew.br...@intel.com> Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimi...@intel.com> Reviewed-by: Matthew Brost <matthew.br...@intel.com> --- drivers/gpu/drm/xe/xe_pt.c | 14 ++++++++++- drivers/gpu/drm/xe/xe_svm.c | 50 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/xe/xe_svm.h | 8 ++++++ 3 files changed, 71 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index ba7a50bf3a2d..bf50a821853e 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -950,7 +950,19 @@ bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm, struct xe_pt *pt = vm->pt_root[tile->id]; u8 pt_mask = (range->tile_present & ~range->tile_invalidated); - xe_svm_assert_in_notifier(vm); + /* + * Locking rules: + * + * - notifier_lock (write): full protection against page table changes + * and MMU notifier invalidations. + * + * - notifier_lock (read) + vm_lock (write): combined protection against + * invalidations and concurrent page table modifications. 
(e.g., madvise) + * + */ + lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) || + (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) && + lockdep_is_held_type(&vm->lock, 0))); if (!(pt_mask & BIT(tile->id))) return false; diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index ce42100cb753..c2306000f15e 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -1031,6 +1031,56 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range, return err; } +/** + * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range + * @vm: Pointer to the xe_vm structure + * @start: Start of the input range + * @end: End of the input range + * + * This function removes the page table entries (PTEs) associated + * with the svm ranges within the given input start and end + * + * Return: tile_mask for which gt's need to be tlb invalidated. + */ +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end) +{ + struct drm_gpusvm_notifier *notifier; + struct xe_svm_range *range; + u64 adj_start, adj_end; + struct xe_tile *tile; + u8 tile_mask = 0; + u8 id; + + lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) && + lockdep_is_held_type(&vm->lock, 0)); + + drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) { + struct drm_gpusvm_range *r = NULL; + + adj_start = max(start, drm_gpusvm_notifier_start(notifier)); + adj_end = min(end, drm_gpusvm_notifier_end(notifier)); + drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) { + range = to_xe_range(r); + for_each_tile(tile, vm->xe, id) { + if (xe_pt_zap_ptes_range(tile, vm, range)) { + tile_mask |= BIT(id); + /* + * WRITE_ONCE pairs with READ_ONCE in + * xe_vm_has_valid_gpu_mapping(). + * Must not fail after setting + * tile_invalidated and before + * TLB invalidation. 
+ */ + WRITE_ONCE(range->tile_invalidated, + range->tile_invalidated | BIT(id)); + } + } + } + } + + return tile_mask; +} + #if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile) diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h index 184b3f4f0b2a..046a9c4e95c2 100644 --- a/drivers/gpu/drm/xe/xe_svm.h +++ b/drivers/gpu/drm/xe/xe_svm.h @@ -92,6 +92,8 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *v void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end); +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end); + /** * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping * @range: SVM range @@ -310,6 +312,12 @@ void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end) { } +static inline +u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end) +{ + return 0; +} + #define xe_svm_assert_in_notifier(...) do {} while (0) #define xe_svm_range_has_dma_mapping(...) false -- 2.34.1