+       user_addr = ALIGN_DOWN(addr >> AMDGPU_GPU_PAGE_SHIFT, 0x100);

There is a PAGE_ALIGN_DOWN() macro that can simplify your code logic here.
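
For reference, a minimal sketch of the suggested form (note: PAGE_ALIGN_DOWN(x)
expands to ALIGN_DOWN(x, PAGE_SIZE) in include/linux/mm.h, so this matches the
patch's expression only when 0x100 << AMDGPU_GPU_PAGE_SHIFT equals PAGE_SIZE):

        /* Sketch only: align addr down to a CPU page boundary, then
         * convert it to a GPU pfn.
         */
        user_addr = PAGE_ALIGN_DOWN(addr) >> AMDGPU_GPU_PAGE_SHIFT;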

Best Regards,
Kevin

-----Original Message-----
From: amd-gfx <amd-gfx-boun...@lists.freedesktop.org> On Behalf Of Prike Liang
Sent: Friday, May 30, 2025 15:55
To: amd-gfx@lists.freedesktop.org
Cc: Deucher, Alexander <alexander.deuc...@amd.com>; Koenig, Christian 
<christian.koe...@amd.com>; Lazar, Lijo <lijo.la...@amd.com>; Liang, Prike 
<prike.li...@amd.com>
Subject: [PATCH 5/9] drm/amdgpu: add userq object va track helpers

Add the userq object virtual address get(), mapped() and put() helpers for
tracking userq object VA usage.
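
For illustration, the intended pairing over a queue's lifetime looks roughly
like this (sketch only; the call sites shown here are hypothetical, not taken
from this patch):

        /* on create: take a VA reference for each userq buffer */
        r = amdgpu_userq_buffer_va_get(queue->vm, queue->queue_va);
        if (r)
                return r;

        /* on destroy: drop the VA references on all the queue's buffers */
        amdgpu_userq_buffer_vas_put(queue->vm, queue);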

Signed-off-by: Prike Liang <prike.li...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 120 +++++++++++++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h |  11 ++
 2 files changed, 130 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 32e88064bdea..3854b1696d4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -79,6 +79,122 @@ int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
        return -EINVAL;
 }

+int amdgpu_userq_buffer_va_get(struct amdgpu_vm *vm, u64 addr)
+{
+       struct amdgpu_bo_va_mapping *mapping;
+       u64 user_addr;
+       int r;
+
+       addr &= AMDGPU_GMC_HOLE_MASK;
+       user_addr = ALIGN_DOWN(addr >> AMDGPU_GPU_PAGE_SHIFT, 0x100);
+
+       r = amdgpu_bo_reserve(vm->root.bo, false);
+       if (r)
+               return r;
+
+       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+       if (!mapping)
+               goto out_err;
+
+       /*
+        * Need to unify the following userq VA references:
+        *  mqd  bo
+        *  rptr bo
+        *  wptr bo
+        *  eop  bo
+        *  doorbell bo
+        */
+       //amdgpu_bo_ref(mapping->bo_va->base.bo);
+       mapping->bo_va->queue_refcount++;
+
+       amdgpu_bo_unreserve(vm->root.bo);
+       return 0;
+
+out_err:
+       amdgpu_bo_unreserve(vm->root.bo);
+       return -EINVAL;
+}
+
+bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
+{
+       struct amdgpu_bo_va_mapping *mapping;
+       u64 user_addr;
+       bool r;
+
+       addr &= AMDGPU_GMC_HOLE_MASK;
+       user_addr = ALIGN_DOWN(addr >> AMDGPU_GPU_PAGE_SHIFT, 0x100);
+
+       if (amdgpu_bo_reserve(vm->root.bo, false))
+               return false;
+
+       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+       r = !IS_ERR_OR_NULL(mapping) && mapping->bo_va->queue_refcount > 0;
+       amdgpu_bo_unreserve(vm->root.bo);
+
+       return r;
+}
+
+bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
+                       struct amdgpu_usermode_queue *queue)
+{
+       return amdgpu_userq_buffer_va_mapped(vm, queue->doorbell_handle) ||
+               amdgpu_userq_buffer_va_mapped(vm, queue->queue_va) ||
+               amdgpu_userq_buffer_va_mapped(vm, queue->rptr_va) ||
+               amdgpu_userq_buffer_va_mapped(vm, queue->wptr_va);
+}
+
+int amdgpu_userq_buffer_va_put(struct amdgpu_vm *vm, u64 addr)
+{
+       struct amdgpu_bo_va_mapping *mapping;
+       u64 user_addr;
+       int r;
+
+       addr &= AMDGPU_GMC_HOLE_MASK;
+       user_addr = ALIGN_DOWN(addr >> AMDGPU_GPU_PAGE_SHIFT, 0x100);
+
+       r = amdgpu_bo_reserve(vm->root.bo, false);
+       if (r)
+               return r;
+
+       mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+       if (!mapping)
+               goto out_err;
+       /*
+        * The GEM userq bo is unmapped by amdgpu_vm_bo_unmap(), which is
+        * invoked before the userq is destroyed. So if we want to
+        * reference/dereference the userq vm bo, ensure it is dereferenced
+        * before amdgpu_vm_bo_unmap(), or fall back to unmapping the userq
+        * vm bo in amdgpu_userq_destroy().
+        */
+       //amdgpu_bo_unref(&mapping->bo_va->base.bo);
+
+       mapping->bo_va->queue_refcount--;
+
+       amdgpu_bo_unreserve(vm->root.bo);
+       return 0;
+
+out_err:
+       amdgpu_bo_unreserve(vm->root.bo);
+       return -EINVAL;
+}
+
+int amdgpu_userq_buffer_vas_put(struct amdgpu_vm *vm,
+                       struct amdgpu_usermode_queue *queue)
+{
+       amdgpu_userq_buffer_va_put(vm, queue->doorbell_handle);
+       amdgpu_userq_buffer_va_put(vm, queue->queue_va);
+       amdgpu_userq_buffer_va_put(vm, queue->rptr_va);
+       amdgpu_userq_buffer_va_put(vm, queue->wptr_va);
+
+       return 0;
+}
+
 static int
 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
                          struct amdgpu_usermode_queue *queue)
@@ -451,6 +567,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
        queue->queue_type = args->in.ip_type;
        queue->vm = &fpriv->vm;
        queue->priority = priority;
+       queue->queue_va = args->in.queue_va;
+       queue->rptr_va = args->in.rptr_va;
+       queue->wptr_va = args->in.wptr_va;

        db_info.queue_type = queue->queue_type;
        db_info.doorbell_handle = queue->doorbell_handle;
@@ -481,7 +600,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
                goto unlock;
        }

-
        qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
        if (qid < 0) {
                drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n"); 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index 375fba639c94..65f9bd91ac73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -52,6 +52,10 @@ struct amdgpu_usermode_queue {
        enum amdgpu_userq_state state;
        uint64_t                doorbell_handle;
        uint64_t                doorbell_index;
+       uint64_t                queue_va;
+       uint64_t                rptr_va;
+       uint64_t                wptr_va;
+
        uint64_t                flags;
        struct amdgpu_mqd_prop  *userq_prop;
        struct amdgpu_userq_mgr *userq_mgr;
@@ -134,4 +138,11 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,

 int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
                                        u64 expected_size);
+int amdgpu_userq_buffer_va_get(struct amdgpu_vm *vm, u64 addr);
+bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr);
+bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_vm *vm,
+                       struct amdgpu_usermode_queue *queue);
+int amdgpu_userq_buffer_va_put(struct amdgpu_vm *vm, u64 addr);
+int amdgpu_userq_buffer_vas_put(struct amdgpu_vm *vm,
+                       struct amdgpu_usermode_queue *queue);
 #endif
--
2.34.1
