[Public] The series is Reviewed-by: Prike Liang <prike.li...@amd.com>
Regards, Prike > -----Original Message----- > From: amd-gfx <amd-gfx-boun...@lists.freedesktop.org> On Behalf Of Alex > Deucher > Sent: Thursday, April 17, 2025 2:47 AM > To: amd-gfx@lists.freedesktop.org > Cc: Deucher, Alexander <alexander.deuc...@amd.com> > Subject: [PATCH 2/2] drm/amdgpu/userq: move waiting for last fence before umap > > Need to wait for the last fence before unmapping. This also fixes a memory > leak > in amdgpu_userqueue_cleanup() when the fence isn't signalled. > > Fixes: 5b1163621548 ("drm/amdgpu/userq: rework front end call sequence") > Signed-off-by: Alex Deucher <alexander.deuc...@amd.com> > --- > drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 23 ++++++++++++------- > 1 file changed, 15 insertions(+), 8 deletions(-) > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c > b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c > index b449c685302e1..06e41023a04ba 100644 > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c > @@ -109,22 +109,27 @@ amdgpu_userqueue_map_helper(struct > amdgpu_userq_mgr *uq_mgr, } > > static void > -amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr, > - struct amdgpu_usermode_queue *queue, > - int queue_id) > +amdgpu_userqueue_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr, > + struct amdgpu_usermode_queue *queue) > { > struct amdgpu_device *adev = uq_mgr->adev; > - const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue- > >queue_type]; > struct dma_fence *f = queue->last_fence; > int ret; > > if (f && !dma_fence_is_signaled(f)) { > ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100)); > - if (ret <= 0) { > - DRM_ERROR("Timed out waiting for fence f=%p\n", f); > - return; > - } > + if (ret <= 0) > + dev_err(adev->dev, "Timed out waiting for fence > f=%p\n", f); > } > +} > + > +static void > +amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr, > + struct amdgpu_usermode_queue *queue, > + int queue_id) > +{ > + struct 
amdgpu_device *adev = uq_mgr->adev; > + const struct amdgpu_userq_funcs *uq_funcs = > +adev->userq_funcs[queue->queue_type]; > > uq_funcs->mqd_destroy(uq_mgr, queue); > queue->fence_drv->fence_drv_xa_ptr = NULL; @@ -330,6 +335,7 @@ > amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id) > mutex_unlock(&uq_mgr->userq_mutex); > return -EINVAL; > } > + amdgpu_userqueue_wait_for_last_fence(uq_mgr, queue); > r = amdgpu_userqueue_unmap_helper(uq_mgr, queue); > amdgpu_bo_unpin(queue->db_obj.obj); > amdgpu_bo_unref(&queue->db_obj.obj); > @@ -818,6 +824,7 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr > *userq_mgr) > > mutex_lock(&userq_mgr->userq_mutex); > idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) { > + amdgpu_userqueue_wait_for_last_fence(userq_mgr, queue); > amdgpu_userqueue_unmap_helper(userq_mgr, queue); > amdgpu_userqueue_cleanup(userq_mgr, queue, queue_id); > } > -- > 2.49.0