Change the DRM_ERROR to drm_file_err to add the process name and pid to the logging.
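
For illustration, a minimal sketch of the call-site pattern this patch applies (taken from the userqueue BO allocation path below; the exact log prefix and the process details added to it are produced by drm_file_err() itself):

	/* Before: device-scoped error, carries no information about the
	 * process that triggered the failure.
	 */
	DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);

	/* After: per-file error; uq_mgr->file is the struct drm_file of the
	 * process that owns the queue, so the log line can also carry that
	 * process's name and pid.
	 */
	drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);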
Signed-off-by: Sunil Khatri <sunil.kha...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 52 +++++++++++--------
 1 file changed, 29 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
index ea43bcd63feb..4957c7b04fe8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
@@ -123,25 +123,25 @@ int amdgpu_userqueue_create_object(struct amdgpu_userq_mgr *uq_mgr,
 	r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
 	if (r) {
-		DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+		drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
 		return r;
 	}
 
 	r = amdgpu_bo_reserve(userq_obj->obj, true);
 	if (r) {
-		DRM_ERROR("Failed to reserve BO to map (%d)", r);
+		drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
 		goto free_obj;
 	}
 
 	r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
 	if (r) {
-		DRM_ERROR("Failed to alloc GART for userqueue object (%d)", r);
+		drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
 		goto unresv;
 	}
 
 	r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
 	if (r) {
-		DRM_ERROR("Failed to map BO for userqueue (%d)", r);
+		drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
 		goto unresv;
 	}
@@ -177,7 +177,7 @@ amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
 	gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
 	if (gobj == NULL) {
-		DRM_ERROR("Can't find GEM object for doorbell\n");
+		drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
 		return -EINVAL;
 	}
@@ -187,13 +187,15 @@ amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
 	/* Pin the BO before generating the index, unpin in queue destroy */
 	r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
 	if (r) {
-		DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
+		drm_file_err(uq_mgr->file,
+			     "[Usermode queues] Failed to pin doorbell object\n");
 		goto unref_bo;
 	}
 
 	r = amdgpu_bo_reserve(db_obj->obj, true);
 	if (r) {
-		DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
+		drm_file_err(uq_mgr->file,
+			     "[Usermode queues] Failed to pin doorbell object\n");
 		goto unpin_bo;
 	}
@@ -215,7 +217,8 @@ amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
 		break;
 
 	default:
-		DRM_ERROR("[Usermode queues] IP %d not support\n", db_info->queue_type);
+		drm_file_err(uq_mgr->file,
+			     "[Usermode queues] IP %d not support\n", db_info->queue_type);
 		r = -EINVAL;
 		goto unpin_bo;
 	}
@@ -282,7 +285,8 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
 	    args->in.ip_type != AMDGPU_HW_IP_DMA &&
 	    args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
-		DRM_ERROR("Usermode queue doesn't support IP type %u\n", args->in.ip_type);
+		drm_file_err(uq_mgr->file,
+			     "Usermode queue doesn't support IP type %u\n", args->in.ip_type);
 		return -EINVAL;
 	}
@@ -304,14 +308,16 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	uq_funcs = adev->userq_funcs[args->in.ip_type];
 	if (!uq_funcs) {
-		DRM_ERROR("Usermode queue is not supported for this IP (%u)\n", args->in.ip_type);
+		drm_file_err(uq_mgr->file,
+			     "Usermode queue is not supported for this IP (%u)\n",
+			     args->in.ip_type);
 		r = -EINVAL;
 		goto unlock;
 	}
 
 	queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
 	if (!queue) {
-		DRM_ERROR("Failed to allocate memory for queue\n");
+		drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
 		r = -ENOMEM;
 		goto unlock;
 	}
@@ -327,7 +333,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	/* Convert relative doorbell offset into absolute doorbell index */
 	index = amdgpu_userqueue_get_doorbell_index(uq_mgr, &db_info, filp);
 	if (index == (uint64_t)-EINVAL) {
-		DRM_ERROR("Failed to get doorbell for queue\n");
+		drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
 		kfree(queue);
 		goto unlock;
 	}
@@ -336,13 +342,13 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
 	r = amdgpu_userq_fence_driver_alloc(adev, queue);
 	if (r) {
-		DRM_ERROR("Failed to alloc fence driver\n");
+		drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
 		goto unlock;
 	}
 
 	r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
 	if (r) {
-		DRM_ERROR("Failed to create Queue\n");
+		drm_file_err(uq_mgr->file, "Failed to create Queue\n");
 		amdgpu_userq_fence_driver_free(queue);
 		kfree(queue);
 		goto unlock;
@@ -350,7 +356,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
 	if (qid < 0) {
-		DRM_ERROR("Failed to allocate a queue id\n");
+		drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
 		amdgpu_userq_fence_driver_free(queue);
 		uq_funcs->mqd_destroy(uq_mgr, queue);
 		kfree(queue);
@@ -360,7 +366,7 @@ amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
 	r = uq_funcs->map(uq_mgr, queue);
 	if (r) {
-		DRM_ERROR("Failed to map Queue\n");
+		drm_file_err(uq_mgr->file, "Failed to map Queue\n");
 		idr_remove(&uq_mgr->userq_idr, qid);
 		amdgpu_userq_fence_driver_free(queue);
 		uq_funcs->mqd_destroy(uq_mgr, queue);
@@ -388,7 +394,7 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
 			return -EINVAL;
 		r = amdgpu_userqueue_create(filp, args);
 		if (r)
-			DRM_ERROR("Failed to create usermode queue\n");
+			drm_file_err(filp, "Failed to create usermode queue\n");
 		break;
 
 	case AMDGPU_USERQ_OP_FREE:
@@ -406,7 +412,7 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
 			return -EINVAL;
 		r = amdgpu_userqueue_destroy(filp, args->in.queue_id);
 		if (r)
-			DRM_ERROR("Failed to destroy usermode queue\n");
+			drm_file_err(filp, "Failed to destroy usermode queue\n");
 		break;
 
 	default:
@@ -479,7 +485,7 @@ amdgpu_userqueue_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
 		ret = amdgpu_vm_lock_pd(vm, &exec, 2);
 		drm_exec_retry_on_contention(&exec);
 		if (unlikely(ret)) {
-			DRM_ERROR("Failed to lock PD\n");
+			drm_file_err(uq_mgr->file, "Failed to lock PD\n");
 			goto unlock_all;
 		}
@@ -519,7 +525,7 @@ amdgpu_userqueue_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
 		bo = bo_va->base.bo;
 		ret = amdgpu_userqueue_validate_vm_bo(NULL, bo);
 		if (ret) {
-			DRM_ERROR("Failed to validate BO\n");
+			drm_file_err(uq_mgr->file, "Failed to validate BO\n");
 			goto unlock_all;
 		}
@@ -550,7 +556,7 @@ amdgpu_userqueue_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
 	ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
 	if (ret)
-		DRM_ERROR("Failed to replace eviction fence\n");
+		drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
 
 unlock_all:
 	drm_exec_fini(&exec);
@@ -569,13 +575,13 @@ static void amdgpu_userqueue_resume_worker(struct work_struct *work)
 	ret = amdgpu_userqueue_validate_bos(uq_mgr);
 	if (ret) {
-		DRM_ERROR("Failed to validate BOs to restore\n");
+		drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
 		goto unlock;
 	}
 
 	ret = amdgpu_userqueue_resume_all(uq_mgr);
 	if (ret) {
-		DRM_ERROR("Failed to resume all queues\n");
+		drm_file_err(uq_mgr->file, "Failed to resume all queues\n");
 		goto unlock;
 	}
-- 
2.34.1