When a user unmaps a userq VA, the driver must ensure
the queue has no in-flight jobs. If there is pending work,
the kernel should wait for the attached eviction (bookkeeping)
fence to signal before deleting the mapping.

Suggested-by: Christian König <[email protected]>
Signed-off-by: Prike Liang <[email protected]>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 29 ++++++++++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    | 11 +++++++++
 3 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index f90f384d55b6..fb838651c91b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -675,7 +675,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
                }
        }
 
-
        args->out.queue_id = qid;
 
 unlock:
@@ -1212,3 +1211,31 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
        mutex_unlock(&adev->userq_mutex);
        return ret;
 }
+
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+                               struct amdgpu_bo_va_mapping *mapping)
+{
+       u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+       struct amdgpu_bo_va *bo_va = mapping->bo_va;
+       struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
+       int ret;
+
+       if (!ip_mask)
+               return 0;
+
+       /*
+        * The userq VA mapping reservation should include the eviction fence;
+        * if the eviction fence cannot signal during the unmap, warn to flag
+        * the improper unmap of the userq VA. Note: the eviction fence may be
+        * attached to different BOs, so assume it is still unsignaled here.
+        */
+       if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
+               ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
+                                           true, MAX_SCHEDULE_TIMEOUT);
+               if (ret <= 0)
+                       return -EBUSY;
+       }
+
+       return 0;
+
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index 27066906526a..865e5d4f61b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -148,4 +148,6 @@ bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
 int amdgpu_userq_buffer_va_put(struct amdgpu_vm *vm, u64 addr);
 int amdgpu_userq_buffer_vas_put(struct amdgpu_vm *vm,
                        struct amdgpu_usermode_queue *queue);
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+                               struct amdgpu_bo_va_mapping *mapping);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f042372d9f2e..c04eef1e7451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1929,6 +1929,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
        struct amdgpu_bo_va_mapping *mapping;
        struct amdgpu_vm *vm = bo_va->base.vm;
        bool valid = true;
+       int r;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
 
@@ -1949,6 +1950,16 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                        return -ENOENT;
        }
 
+       /* It is unlikely that the userq mapping has not been idled by the
+        * time userspace issues the GEM unmap ioctl, unless userspace is
+        * forcing the unmap.
+        */
+       if (bo_va->queue_refcount) {
+               r = amdgpu_userq_gem_va_unmap_validate(adev, mapping);
+               if (unlikely(r == -EBUSY))
+                       dev_warn(adev->dev, "improper userq VA unmap request from user space\n");
+       }
+
        list_del(&mapping->list);
        amdgpu_vm_it_remove(mapping, &vm->va);
        mapping->bo_va = NULL;
-- 
2.34.1

Reply via email to