Due to the fun and games in our preempt-to-busy handling, it is possible
for a request to be completed in the background. Be vigilant and avoid
setting an error on an already signaled request, as dma_fence_set_error()
throws a warning in that case.
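
All three cancellation paths are funnelled through a single helper that
only flags the fence if it is still live; in sketch form (matching the
hunk below):

	static void mark_eio(struct i915_request *rq)
	{
		/* dma_fence_set_error() warns on an already signaled fence */
		if (!i915_request_signaled(rq))
			dma_fence_set_error(&rq->fence, -EIO);

		/* Completion is still marked unconditionally */
		i915_request_mark_complete(rq);
	}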

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 0e28263c7b87..3a23a7bda50e 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -234,6 +234,13 @@ static void execlists_init_reg_state(u32 *reg_state,
                                     struct intel_engine_cs *engine,
                                     struct intel_ring *ring);
 
+static void mark_eio(struct i915_request *rq)
+{
+       if (!i915_request_signaled(rq))
+               dma_fence_set_error(&rq->fence, -EIO);
+       i915_request_mark_complete(rq);
+}
+
 static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
 {
        return (i915_ggtt_offset(engine->status_page.vma) +
@@ -2495,12 +2502,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        __execlists_reset(engine, true);
 
        /* Mark all executing requests as skipped. */
-       list_for_each_entry(rq, &engine->active.requests, sched.link) {
-               if (!i915_request_signaled(rq))
-                       dma_fence_set_error(&rq->fence, -EIO);
-
-               i915_request_mark_complete(rq);
-       }
+       list_for_each_entry(rq, &engine->active.requests, sched.link)
+               mark_eio(rq);
 
        /* Flush the queued requests to the timeline list (for retiring). */
        while ((rb = rb_first_cached(&execlists->queue))) {
@@ -2510,8 +2513,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
                priolist_for_each_request_consume(rq, rn, p, i) {
                        list_del_init(&rq->sched.link);
                        __i915_request_submit(rq);
-                       dma_fence_set_error(&rq->fence, -EIO);
-                       i915_request_mark_complete(rq);
+                       mark_eio(rq);
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
@@ -2530,8 +2532,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
                if (ve->request) {
                        ve->request->engine = engine;
                        __i915_request_submit(ve->request);
-                       dma_fence_set_error(&ve->request->fence, -EIO);
-                       i915_request_mark_complete(ve->request);
+                       mark_eio(ve->request);
                        ve->base.execlists.queue_priority_hint = INT_MIN;
                        ve->request = NULL;
                }
-- 
2.23.0
