Validate the userq object virtual addresses (queue, rptr, wptr, EOP, shadow
and CSA) by checking that each of them is backed by a valid VM mapping
before the queue is created.
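
The core of the check can be sketched as follows (illustration only: the
helper added below additionally reserves the VM root BO around the lookup,
and the function name in this sketch is hypothetical):

    /* Is the GPU VA range [addr, addr + size) fully covered by a single VM
     * mapping? The address is canonicalized with AMDGPU_GMC_HOLE_MASK and
     * compared in GPU-page units against amdgpu_bo_va_mapping::start/last.
     */
    static bool userq_va_range_mapped(struct amdgpu_vm *vm, u64 addr, u64 size)
    {
            struct amdgpu_bo_va_mapping *m;
            u64 pfn = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
            u64 npages = size >> AMDGPU_GPU_PAGE_SHIFT;

            m = amdgpu_vm_bo_lookup_mapping(vm, pfn);
            return m && npages && pfn >= m->start && pfn + npages - 1 <= m->last;
    }

Because the size is converted to whole GPU pages, a zero or sub-page size is
rejected as well.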

Signed-off-by: Prike Liang <prike.li...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c  | 44 ++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h  |  2 +
 drivers/gpu/drm/amd/amdgpu/mes_userqueue.c | 25 ++++++++++++
 3 files changed, 71 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index acaea4416ed2..a20f7ccbe98e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -31,6 +31,8 @@
 #include "amdgpu_userq.h"
 #include "amdgpu_userq_fence.h"
 
+#define amdgpu_userq_va_align(va) (((va) & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT)
+
 u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
 {
        int i;
@@ -44,6 +46,36 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
        return userq_ip_mask;
 }
 
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+                               u64 expected_size)
+{
+       struct amdgpu_bo_va_mapping *va_map;
+       u64 user_addr;
+       u64 size;
+       int r;
+
+       user_addr = amdgpu_userq_va_align(addr);
+       size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+       r = amdgpu_bo_reserve(vm->root.bo, false);
+       if (r)
+               return r;
+
+       va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+       if (!va_map)
+               goto out_err;
+       /* Only check that the userq range is fully covered by the VM mapping */
+       if (size != 0 && user_addr >= va_map->start &&
+           user_addr + size - 1 <= va_map->last) {
+               amdgpu_bo_unreserve(vm->root.bo);
+               return 0;
+       }
+
+out_err:
+       amdgpu_bo_unreserve(vm->root.bo);
+       return -EINVAL;
+}
+
 static int
 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
                          struct amdgpu_usermode_queue *queue)
@@ -391,6 +423,14 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
                r = -EINVAL;
                goto unlock;
        }
+       /* Validate the userq virtual addresses */
+       if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
+           amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, PAGE_SIZE) ||
+           amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, PAGE_SIZE)) {
+               drm_file_err(uq_mgr->file, "Usermode queue input virt address is invalid\n");
+               r = -EINVAL;
+               goto unlock;
+       }
 
        queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
        if (!queue) {
@@ -500,6 +540,10 @@ static int amdgpu_userq_input_args_validate(struct drm_device *dev,
                        return -EINVAL;
                }
 
+               /* The queue_va, wptr_va, etc. are userq addresses that are CPU
+                * accessible and also come from a user space IOCTL request, so
+                * they may need an access_ok() check before being accessed.
+                */
                if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
                    args->in.queue_va == 0 ||
                    args->in.queue_size == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index ec040c2fd6c9..704935ca0c36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -132,4 +132,6 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
                                                   u32 idx);
 
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+                       u64 expected_size);
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index dbacdfcb6f7b..4615d3fba530 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -206,6 +206,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
        struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
        struct drm_amdgpu_userq_in *mqd_user = args_in;
        struct amdgpu_mqd_prop *userq_props;
+       struct amdgpu_gfx_shadow_info shadow_info = {};
        int r;
 
        /* Structure to initialize MQD for userqueue using generic MQD init function */
@@ -231,6 +232,8 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
        userq_props->doorbell_index = queue->doorbell_index;
        userq_props->fence_address = queue->fence_drv->gpu_addr;
 
+       if (adev->gfx.funcs->get_gfx_shadow_info)
+               adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
        if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
                struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
 
@@ -247,6 +250,13 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                        goto free_mqd;
                }
 
+               if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
+                                       max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE))) {
+                       drm_file_err(uq_mgr->file, "EOP VA is invalid\n");
+                       r = -EINVAL;
+                       goto free_mqd;
+               }
+
                userq_props->eop_gpu_addr = compute_mqd->eop_va;
                userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
                userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
@@ -274,6 +284,14 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                userq_props->csa_addr = mqd_gfx_v11->csa_va;
                userq_props->tmz_queue =
                        mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+
+               if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
+                                       shadow_info.shadow_size)) {
+                       drm_file_err(uq_mgr->file, "shadow VA is invalid\n");
+                       r = -EINVAL;
+                       goto free_mqd;
+               }
+
                kfree(mqd_gfx_v11);
        } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
                struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
@@ -291,6 +309,13 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                        goto free_mqd;
                }
 
+               if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
+                                       shadow_info.csa_size)) {
+                       drm_file_err(uq_mgr->file, "CSA VA is invalid\n");
+                       r = -EINVAL;
+                       goto free_mqd;
+               }
+
                userq_props->csa_addr = mqd_sdma_v11->csa_va;
                kfree(mqd_sdma_v11);
        }
-- 
2.34.1
