This should no longer be necessary; TTM no longer locks the BO without holding a reference.
Only compile tested!

Signed-off-by: Christian König <christian.koe...@amd.com>
---
 drivers/gpu/drm/xe/xe_bo.c | 32 +++++---------------------------
 1 file changed, 5 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 9411114c6d5c..250ebd9ff184 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1409,31 +1409,6 @@ static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
 
 static void __xe_bo_vunmap(struct xe_bo *bo);
 
-/*
- * TODO: Move this function to TTM so we don't rely on how TTM does its
- * locking, thereby abusing TTM internals.
- */
-static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
-{
-	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
-	bool locked;
-
-	xe_assert(xe, !kref_read(&ttm_bo->base.refcount));
-
-	/*
-	 * We can typically only race with TTM trylocking under the
-	 * lru_lock, which will immediately be unlocked again since
-	 * the ttm_bo refcount is zero at this point. So trylocking *should*
-	 * always succeed here, as long as we hold the lru lock.
-	 */
-	spin_lock(&ttm_bo->bdev->lru_lock);
-	locked = dma_resv_trylock(ttm_bo->base.resv);
-	spin_unlock(&ttm_bo->bdev->lru_lock);
-	xe_assert(xe, locked);
-
-	return locked;
-}
-
 static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
 {
 	struct dma_resv_iter cursor;
@@ -1454,8 +1429,11 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
 	if (ttm_bo->base.resv != &ttm_bo->base._resv)
 		return;
 
-	if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
-		return;
+	/*
+	 * This should never fail since there are no other references to the BO
+	 * any more.
+	 */
+	WARN_ON(!dma_resv_trylock(ttm_bo->base.resv));
 
 	/*
 	 * Scrub the preempt fences if any. The unbind fence is already
-- 
2.43.0
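
For readers skimming the hunks out of context, here is a minimal sketch, not part of the patch, of how the start of xe_ttm_bo_release_notify() reads once both hunks are applied; unrelated locals and the fence-scrubbing body are elided with /* ... */ comments:

/*
 * Sketch only (editor's illustration, not part of the patch):
 * xe_ttm_bo_release_notify() after this change, unrelated code elided.
 */
static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
{
	struct dma_resv_iter cursor;
	/* ... other locals and early checks elided ... */

	/* Only BOs still using the embedded resv object are handled here. */
	if (ttm_bo->base.resv != &ttm_bo->base._resv)
		return;

	/*
	 * This should never fail since there are no other references to the BO
	 * any more.
	 */
	WARN_ON(!dma_resv_trylock(ttm_bo->base.resv));

	/* ... scrub the preempt fences, then unlock the resv ... */
}

The design point, as the removed helper's comment explains: by the time TTM calls release_notify the BO refcount has already dropped to zero, and the only lock contender used to be TTM trylocking the BO from the LRU walk under lru_lock. With TTM no longer locking unreferenced BOs, holding lru_lock around the trylock buys nothing, so the trylock is expected to always succeed and WARN_ON() merely documents that invariant.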