In the next patch, we will want to start skipping requests on failing to
complete their payloads. So export the utility function currently used to
make requests inoperable following a failed gpu reset.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c     | 25 +++----------------------
 drivers/gpu/drm/i915/i915_request.c | 21 +++++++++++++++++++++
 drivers/gpu/drm/i915/i915_request.h |  2 ++
 3 files changed, 26 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c0a1a959d0b..f516c289647c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3081,25 +3081,6 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
        return err;
 }
 
-static void skip_request(struct i915_request *request)
-{
-       void *vaddr = request->ring->vaddr;
-       u32 head;
-
-       /* As this request likely depends on state from the lost
-        * context, clear out all the user operations leaving the
-        * breadcrumb at the end (so we get the fence notifications).
-        */
-       head = request->head;
-       if (request->postfix < head) {
-               memset(vaddr + head, 0, request->ring->size - head);
-               head = 0;
-       }
-       memset(vaddr + head, 0, request->postfix - head);
-
-       dma_fence_set_error(&request->fence, -EIO);
-}
-
 static void engine_skip_context(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
@@ -3114,10 +3095,10 @@ static void engine_skip_context(struct i915_request *request)
 
        list_for_each_entry_continue(request, &engine->timeline.requests, link)
                if (request->gem_context == hung_ctx)
-                       skip_request(request);
+                       i915_request_skip(request, -EIO);
 
        list_for_each_entry(request, &timeline->requests, link)
-               skip_request(request);
+               i915_request_skip(request, -EIO);
 
        spin_unlock(&timeline->lock);
        spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -3160,7 +3141,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 
        if (stalled) {
                i915_gem_context_mark_guilty(request->gem_context);
-               skip_request(request);
+               i915_request_skip(request, -EIO);
 
                /* If this context is now banned, skip all pending requests. */
                if (i915_gem_context_is_banned(request->gem_context))
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a2f7e9358450..7ae08b68121e 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1013,6 +1013,27 @@ i915_request_await_object(struct i915_request *to,
        return ret;
 }
 
+void i915_request_skip(struct i915_request *rq, int error)
+{
+       void *vaddr = rq->ring->vaddr;
+       u32 head;
+
+       GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+       dma_fence_set_error(&rq->fence, error);
+
+       /*
+        * As this request likely depends on state from the lost
+        * context, clear out all the user operations leaving the
+        * breadcrumb at the end (so we get the fence notifications).
+        */
+       head = rq->infix;
+       if (rq->postfix < head) {
+               memset(vaddr + head, 0, rq->ring->size - head);
+               head = 0;
+       }
+       memset(vaddr + head, 0, rq->postfix - head);
+}
+
 /*
  * NB: This function is not allowed to fail. Doing so would mean the the
  * request is not being tracked for completion but the work itself is
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7ee220ded9c9..a355a081485f 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -258,6 +258,8 @@ void i915_request_add(struct i915_request *rq);
 void __i915_request_submit(struct i915_request *request);
 void i915_request_submit(struct i915_request *request);
 
+void i915_request_skip(struct i915_request *request, int error);
+
 void __i915_request_unsubmit(struct i915_request *request);
 void i915_request_unsubmit(struct i915_request *request);
 
-- 
2.18.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to