Re-emit the unprocessed state after resetting the queue.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f699c8b0f7488..f56354a1a8a96 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -7167,6 +7167,8 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
 
+       amdgpu_ring_backup_unprocessed_commands(ring, &job->hw_fence.base, true);
+
        spin_lock_irqsave(&kiq->ring_lock, flags);
 
        if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
@@ -7216,19 +7218,25 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
        }
        kiq->pmf->kiq_map_queues(kiq_ring, ring);
        amdgpu_ring_commit(kiq_ring);
-       spin_unlock_irqrestore(&kiq->ring_lock, flags);
        r = amdgpu_ring_test_ring(kiq_ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
        if (r) {
                DRM_ERROR("fail to remap queue\n");
                return r;
        }
-
        r = amdgpu_ring_test_ring(ring);
        if (r)
                return r;
+
        dma_fence_set_error(&job->base.s_fence->finished, -ETIME);
-       amdgpu_fence_driver_force_completion(ring);
+       /* signal the fence of the bad job */
+       amdgpu_fence_driver_guilty_force_completion(&job->hw_fence.base);
        atomic_inc(&ring->adev->gpu_reset_counter);
+       r = amdgpu_ring_reemit_unprocessed_commands(ring);
+       if (r)
+               /* if we fail to reemit, force complete all fences */
+               amdgpu_fence_driver_force_completion(ring);
+
        return 0;
 }
 
-- 
2.49.0

Reply via email to