We need to know the wptr and sequence number associated
with a job so that we can re-emit the unprocessed state
after a ring reset.  Pre-allocate storage space for
the ring buffer contents and add a helper to save off
the unprocessed state so that it can be re-emitted
after the queue is reset.

Add a helper that ring reset callbacks can use to verify
that the ring has reset successfully and to re-emit any
unprocessed ring contents from subsequent jobs.

Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 15 ++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c    | 13 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   | 11 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c  | 92 +++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h  | 15 ++++
 5 files changed, 143 insertions(+), 3 deletions(-)
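
Two notes for reviewers follow; neither is part of the patch.  First,
the wrap-aware copy in amdgpu_ring_backup_unprocessed_job() below can
be exercised in user space with this sketch.  backup_range() is a
hypothetical stand-in, RING_DWORDS and the sample wptr values are
invented, and a real ring's buf_mask is ring_size / 4 - 1 since the
wptr counts dwords:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 16u			/* must be a power of two */
#define BUF_MASK    (RING_DWORDS - 1)

/* Copy the dwords between two wptr values, handling buffer wrap. */
static unsigned int backup_range(const uint32_t *ring, uint32_t *backup,
				 unsigned int idx, uint64_t start_wptr,
				 uint64_t end_wptr)
{
	unsigned int first = start_wptr & BUF_MASK;
	unsigned int last = end_wptr & BUF_MASK;
	unsigned int i, j, n, copied = 0;

	if (last < first) {
		/* the contents wrap past the end of the buffer */
		n = RING_DWORDS - first;
		for (i = 0; i < n; i++)
			backup[idx + i] = ring[first + i];
		copied += n;
		for (j = 0; j < last; j++)
			backup[idx + i + j] = ring[j];
		copied += last;
	} else {
		/* contiguous range */
		n = last - first;
		for (i = 0; i < n; i++)
			backup[idx + i] = ring[first + i];
		copied += n;
	}
	return copied;
}

int main(void)
{
	uint32_t ring[RING_DWORDS], backup[RING_DWORDS] = { 0 };
	unsigned int i, n;

	for (i = 0; i < RING_DWORDS; i++)
		ring[i] = 0x100 + i;

	/* a job emitted from wptr 14 to wptr 19 wraps across the end */
	n = backup_range(ring, backup, 0, 14, 19);
	for (i = 0; i < n; i++)
		printf("backup[%u] = 0x%" PRIx32 "\n", i, backup[i]);
	return 0;
}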

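Second, a rough sketch of how a ring reset callback is expected to use
the new helper.  Both example_ring_reset() and do_hw_ring_reset() are
placeholders for an IP-specific reset sequence, and the callback
signature is only assumed here:

static int example_ring_reset(struct amdgpu_ring *ring, unsigned int vmid)
{
	int r;

	/* IP-specific: halt the engine and reset the queue (placeholder) */
	r = do_hw_ring_reset(ring, vmid);
	if (r)
		return r;

	/* Verify the ring works again, force-signal the guilty job's
	 * fence, and replay the dwords that
	 * amdgpu_ring_backup_unprocessed_jobs() saved from innocent jobs.
	 */
	return amdgpu_ring_reemit_unprocessed_jobs(ring);
}
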
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 569e0e5373927..25a664273bf0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -141,6 +141,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd
        }
        fence = &am_fence->base;
        am_fence->ring = ring;
+       am_fence->start_ring_wptr = 0;
+       am_fence->end_ring_wptr = 0;
+       am_fence->context = 0;
 
        seq = ++ring->fence_drv.sync_seq;
        if (job && job->job_run_counter) {
@@ -748,6 +751,18 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
        amdgpu_fence_process(ring);
 }
 
+/**
+ * amdgpu_fence_driver_seq_force_completion - force signal of specified sequence
+ *
+ * @ring: ring to signal the fence on
+ * @seq: sequence number to signal
+ */
+void amdgpu_fence_driver_seq_force_completion(struct amdgpu_ring *ring, u32 seq)
+{
+       amdgpu_fence_write(ring, seq);
+       amdgpu_fence_process(ring);
+}
+
 /*
  * Common fence implementation
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 802743efa3b39..636941697a740 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -126,7 +126,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
                       struct dma_fence **f)
 {
        struct amdgpu_device *adev = ring->adev;
+       u64 start_ring_wptr, end_ring_wptr;
        struct amdgpu_ib *ib = &ibs[0];
+       struct amdgpu_fence *am_fence;
        struct dma_fence *tmp = NULL;
        bool need_ctx_switch;
        struct amdgpu_vm *vm;
@@ -138,7 +140,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
        int vmid = AMDGPU_JOB_GET_VMID(job);
        bool need_pipe_sync = false;
        unsigned int cond_exec;
-
        unsigned int i;
        int r = 0;
 
@@ -187,6 +188,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
                dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
                return r;
        }
+       start_ring_wptr = ring->wptr;
 
        need_ctx_switch = ring->current_ctx != fence_ctx;
        if (ring->funcs->emit_pipeline_sync && job &&
@@ -306,6 +308,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
 
        amdgpu_ring_ib_end(ring);
        amdgpu_ring_commit(ring);
+       /* This must be last for resets to work properly
+        * as we need to save the wptr associated with this
+        * fence.
+        */
+       end_ring_wptr = ring->wptr;
+       am_fence = container_of(*f, struct amdgpu_fence, base);
+       am_fence->start_ring_wptr = start_ring_wptr;
+       am_fence->end_ring_wptr = end_ring_wptr;
+
        return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 23b6a0fc0c691..73c26e2e01647 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -89,8 +89,9 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);
-       struct amdgpu_task_info *ti;
+       struct amdgpu_fence *am_fence = &job->hw_fence;
        struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_task_info *ti;
        bool set_error = false;
        int idx, r;
 
@@ -154,7 +155,8 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
                else
                        is_guilty = true;
 
+               amdgpu_ring_backup_unprocessed_jobs(ring, is_guilty, am_fence);
                if (is_guilty) {
                        dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
                        set_error = true;
                }
@@ -409,6 +411,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct amdgpu_device *adev = ring->adev;
        struct dma_fence *fence = NULL, *finished;
+       struct amdgpu_fence *am_fence;
        struct amdgpu_job *job;
        int r = 0;
 
@@ -433,6 +436,10 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
                                "Error scheduling IBs (%d) in ring(%s)", r,
                                ring->name);
        }
+       if (fence && finished) {
+               am_fence = container_of(fence, struct amdgpu_fence, base);
+               am_fence->context = finished->context;
+       }
 
        job->job_run_counter++;
        amdgpu_job_free_resources(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 426834806fbf2..3a0e0883bd8e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -333,6 +333,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
        /*  Initialize cached_rptr to 0 */
        ring->cached_rptr = 0;
 
+       if (!ring->ring_backup) {
+               ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
+               if (!ring->ring_backup)
+                       return -ENOMEM;
+       }
+
        /* Allocate ring buffer */
        if (ring->ring_obj == NULL) {
                r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
@@ -342,6 +348,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                                            (void **)&ring->ring);
                if (r) {
                        dev_err(adev->dev, "(%d) ring create failed\n", r);
+                       kvfree(ring->ring_backup);
+                       ring->ring_backup = NULL;
                        return r;
                }
                amdgpu_ring_clear_ring(ring);
@@ -385,6 +392,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
        amdgpu_bo_free_kernel(&ring->ring_obj,
                              &ring->gpu_addr,
                              (void **)&ring->ring);
+       kvfree(ring->ring_backup);
+       ring->ring_backup = NULL;
 
        dma_fence_put(ring->vmid_wait);
        ring->vmid_wait = NULL;
@@ -753,3 +762,86 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
 
        return true;
 }
+
+static void amdgpu_ring_backup_unprocessed_job(struct amdgpu_ring *ring,
+                                              unsigned int idx,
+                                              u64 start_wptr, u64 end_wptr)
+{
+       unsigned int first_idx = start_wptr & ring->buf_mask;
+       unsigned int last_idx = end_wptr & ring->buf_mask;
+       unsigned int i, j, entries_to_copy;
+
+       if (last_idx < first_idx) {
+               entries_to_copy = ring->buf_mask + 1 - first_idx;
+               for (i = 0; i < entries_to_copy; i++)
+                       ring->ring_backup[idx + i] = ring->ring[first_idx + i];
+               ring->ring_backup_entries_to_copy += entries_to_copy;
+               entries_to_copy = last_idx;
+               for (j = 0; j < entries_to_copy; j++)
+                       ring->ring_backup[idx + i + j] = ring->ring[j];
+               ring->ring_backup_entries_to_copy += entries_to_copy;
+       } else {
+               entries_to_copy = last_idx - first_idx;
+               for (i = 0; i < entries_to_copy; i++)
+                       ring->ring_backup[idx + i] = ring->ring[first_idx + i];
+               ring->ring_backup_entries_to_copy += entries_to_copy;
+       }
+}
+
+void amdgpu_ring_backup_unprocessed_jobs(struct amdgpu_ring *ring,
+                                        bool is_guilty,
+                                        struct amdgpu_fence *bad_fence)
+{
+       struct amdgpu_fence *fence;
+       struct dma_fence *old, **ptr;
+       int i;
+
+       ring->ring_backup_entries_to_copy = 0;
+       for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
+               ptr = &ring->fence_drv.fences[i];
+               rcu_read_lock();
+               old = rcu_dereference(*ptr);
+
+               if (old && !dma_fence_is_signaled(old)) {
+                       fence = container_of(old, struct amdgpu_fence, base);
+                       /* save everything if the ring is not guilty, otherwise
+                        * just save the content from other contexts.
+                        */
+                       if (!is_guilty || (fence->context != bad_fence->context))
+                               amdgpu_ring_backup_unprocessed_job(ring,
+                                                                  ring->ring_backup_entries_to_copy,
+                                                                  fence->start_ring_wptr,
+                                                                  fence->end_ring_wptr);
+               }
+               rcu_read_unlock();
+       }
+
+       ring->ring_bad_seq = bad_fence->base.seqno;
+}
+
+int amdgpu_ring_reemit_unprocessed_jobs(struct amdgpu_ring *ring)
+{
+       unsigned int i;
+       int r;
+
+       /* verify that the ring is functional */
+       r = amdgpu_ring_test_ring(ring);
+       if (r)
+               return r;
+       /* re-emit the unprocessed ring contents */
+       if (ring->ring_backup_entries_to_copy) {
+               r = amdgpu_ring_alloc(ring, ring->ring_backup_entries_to_copy);
+               if (r)
+                       return r;
+               /* signal the fence of the bad job */
+               amdgpu_fence_driver_seq_force_completion(ring, ring->ring_bad_seq);
+               for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
+                       amdgpu_ring_write(ring, ring->ring_backup[i]);
+               amdgpu_ring_commit(ring);
+       } else {
+               /* signal the fence of the bad job */
+               amdgpu_fence_driver_seq_force_completion(ring, ring->ring_bad_seq);
+       }
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index e1f25218943a4..69b71401adb7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -141,6 +141,12 @@ struct amdgpu_fence {
        /* RB, DMA, etc. */
        struct amdgpu_ring              *ring;
        ktime_t                         start_timestamp;
+
+       /* wptrs for the fence for resets */
+       u64                             start_ring_wptr;
+       u64                             end_ring_wptr;
+       /* fence context for resets */
+       u64                             context;
 };
 
 extern const struct drm_sched_backend_ops amdgpu_sched_ops;
@@ -148,6 +154,8 @@ extern const struct drm_sched_backend_ops amdgpu_sched_ops;
 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
 void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_seq_force_completion(struct amdgpu_ring *ring,
+                                             u32 seq);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -284,6 +292,9 @@ struct amdgpu_ring {
 
        struct amdgpu_bo        *ring_obj;
        uint32_t                *ring;
+       uint32_t                *ring_backup;
+       unsigned int            ring_backup_entries_to_copy;
+       uint64_t                ring_bad_seq;
        unsigned                rptr_offs;
        u64                     rptr_gpu_addr;
        volatile u32            *rptr_cpu_addr;
@@ -550,4 +561,8 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
 bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
+void amdgpu_ring_backup_unprocessed_jobs(struct amdgpu_ring *ring,
+                                        bool is_guilty,
+                                        struct amdgpu_fence *bad_fence);
+int amdgpu_ring_reemit_unprocessed_jobs(struct amdgpu_ring *ring);
 #endif
-- 
2.49.0
