Hangcheck, and some of the recent reset code for guilty batches, needs to
know which address space the object was in at the time of the hang. This
is because we use offsets in the (PP|G)GTT to determine this information,
and those offsets can differ depending on which VM the object is bound
into.

Since we still only ever have 1 VM, this code shouldn't yet have any
impact.
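
For reference, the per-VM lookup the patch switches to is, roughly, a walk
of the object's VMA list: the offset comes from whichever VMA binds the
object into the given address space. The sketch below is illustrative only;
obj_offset_in_vm is a hypothetical name standing in for the real helper, and
fields such as vma_list and vma->node.start follow the in-flight VMA series
and may not match the final code exactly.

/* Illustrative sketch only: return an object's offset in the given VM by
 * finding the VMA that binds it there. The same object can be bound at
 * different offsets in different VMs, hence the per-VM lookup.
 */
static unsigned long obj_offset_in_vm(struct drm_i915_gem_object *o,
				      struct i915_address_space *vm)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, &o->vma_list, vma_link) {
		if (vma->vm == vm)
			return vma->node.start;
	}

	WARN(1, "object not bound in requested VM\n");
	return -1;
}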

Signed-off-by: Ben Widawsky <b...@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_gem.c | 30 +++++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dbf72d5..b4c35f0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2110,10 +2110,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
+                                   struct i915_address_space *vm)
 {
-       if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
-           acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
+       if (acthd >= i915_gem_obj_offset(obj, vm) &&
+           acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
                return true;
 
        return false;
@@ -2136,6 +2137,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked,
        return false;
 }
 
+static struct i915_address_space *
+request_to_vm(struct drm_i915_gem_request *request)
+{
+       struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
+       struct i915_address_space *vm;
+
+       vm = &dev_priv->gtt.base;
+
+       return vm;
+}
+
 static bool i915_request_guilty(struct drm_i915_gem_request *request,
                                const u32 acthd, bool *inside)
 {
@@ -2143,9 +2155,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
         * pointing inside the ring, matches the batch_obj address range.
         * However this is extremely unlikely.
         */
-
        if (request->batch_obj) {
-               if (i915_head_inside_object(acthd, request->batch_obj)) {
+               if (i915_head_inside_object(acthd, request->batch_obj,
+                                           request_to_vm(request))) {
                        *inside = true;
                        return true;
                }
@@ -2165,17 +2177,21 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
 {
        struct i915_ctx_hang_stats *hs = NULL;
        bool inside, guilty;
+       unsigned long offset = 0;
 
        /* Innocent until proven guilty */
        guilty = false;
 
+       if (request->batch_obj)
+               offset = i915_gem_obj_offset(request->batch_obj,
+                                            request_to_vm(request));
+
        if (ring->hangcheck.action != wait &&
            i915_request_guilty(request, acthd, &inside)) {
                DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
                          ring->name,
                          inside ? "inside" : "flushing",
-                         request->batch_obj ?
-                         i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
+                         offset,
                          request->ctx ? request->ctx->id : 0,
                          acthd);
 
-- 
1.8.3.4
