Signed-off-by: Ben Widawsky <b...@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_irq.c | 53 ++++++++++++++++++++++++++++-------------
 1 file changed, 37 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 28880bf..e1653fd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1499,6 +1499,7 @@ static void i915_get_extra_instdone(struct drm_device *dev,
 static struct drm_i915_error_object *
 i915_error_object_create_sized(struct drm_i915_private *dev_priv,
                               struct drm_i915_gem_object *src,
+                              struct i915_address_space *vm,
                               const int num_pages)
 {
        struct drm_i915_error_object *dst;
@@ -1512,8 +1513,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
        if (dst == NULL)
                return NULL;
 
-       /* FIXME: must handle per faulty VM */
-       reloc_offset = i915_gem_ggtt_offset(src);
+       reloc_offset = i915_gem_obj_offset(src, vm);
        for (i = 0; i < num_pages; i++) {
                unsigned long flags;
                void *d;
@@ -1565,7 +1565,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
                reloc_offset += PAGE_SIZE;
        }
        dst->page_count = num_pages;
-       dst->gtt_offset = i915_gem_ggtt_offset(src);
+       dst->gtt_offset = i915_gem_obj_offset(src, vm);
 
        return dst;
 
@@ -1575,8 +1575,9 @@ unwind:
        kfree(dst);
        return NULL;
 }
-#define i915_error_object_create(dev_priv, src) \
+#define i915_error_object_create(dev_priv, src, vm) \
        i915_error_object_create_sized((dev_priv), (src), \
+                                      (vm), \
                                       (src)->base.size>>PAGE_SHIFT)
 
 static void
@@ -1617,14 +1618,14 @@ i915_error_state_free(struct kref *error_ref)
        kfree(error);
 }
 static void capture_bo(struct drm_i915_error_buffer *err,
-                      struct drm_i915_gem_object *obj)
+                      struct drm_i915_gem_object *obj,
+                      struct i915_address_space *vm)
 {
        err->size = obj->base.size;
        err->name = obj->base.name;
        err->rseqno = obj->last_read_seqno;
        err->wseqno = obj->last_write_seqno;
-       /* FIXME: plumb the actual context into here to pull the right VM */
-       err->gtt_offset = i915_gem_ggtt_offset(obj);
+       err->gtt_offset = i915_gem_obj_offset(obj, vm);
        err->read_domains = obj->base.read_domains;
        err->write_domain = obj->base.write_domain;
        err->fence_reg = obj->fence_reg;
@@ -1647,7 +1648,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err,
        int i = 0;
 
        list_for_each_entry(vma, head, mm_list) {
-               capture_bo(err++, vma->obj);
+               capture_bo(err++, vma->obj, vma->vm);
                if (++i == count)
                        break;
        }
@@ -1662,10 +1663,14 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
        int i = 0;
 
        list_for_each_entry(obj, head, global_list) {
+               struct i915_vma *vma;
                if (obj->pin_count == 0)
                        continue;
 
-               capture_bo(err++, obj);
+               /* Object may be pinned in multiple VMs, just take the first */
+               vma = list_first_entry(&obj->vma_list, struct i915_vma,
+                                      vma_link);
+               capture_bo(err++, obj, vma->vm);
                if (++i == count)
                        break;
        }
@@ -1713,6 +1718,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
        u32 seqno;
+       u32 pp_db;
 
        if (!ring->get_seqno)
                return NULL;
@@ -1729,11 +1735,19 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
                obj = ring->private;
                if (acthd >= i915_gem_ggtt_offset(obj) &&
                    acthd < i915_gem_ggtt_offset(obj) + obj->base.size)
-                       return i915_error_object_create(dev_priv, obj);
+                       return i915_error_object_create(dev_priv, obj,
+                                                       &dev_priv->gtt.base);
        }
 
+       pp_db = I915_READ(RING_PP_DIR_BASE(ring));
        seqno = ring->get_seqno(ring, false);
+
        list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+               struct i915_hw_ppgtt *ppgtt =
+                       container_of(vm, struct i915_hw_ppgtt, base);
+               if (!is_i915_ggtt(vm) && pp_db >> 10 != ppgtt->pd_offset)
+                       continue;
+
                list_for_each_entry(vma, &vm->active_list, mm_list) {
                        obj = vma->obj;
                        if (obj->ring != ring)
@@ -1749,7 +1763,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
                         * simplest method to avoid being overwritten by
                         * userspace.
                         */
-                       return i915_error_object_create(dev_priv, obj);
+                       return i915_error_object_create(dev_priv, obj, vm);
                }
        }
 
@@ -1806,6 +1820,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
                                           struct drm_i915_error_ring *ering)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct i915_address_space *ggtt = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
 
        /* Currently render ring is the only HW context user */
@@ -1813,9 +1828,14 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
                return;
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               if (!i915_gem_obj_bound(obj, ggtt))
+                       continue;
+
                if ((error->ccid & PAGE_MASK) == i915_gem_ggtt_offset(obj)) {
                        ering->ctx = i915_error_object_create_sized(dev_priv,
-                                                                   obj, 1);
+                                                                   obj,
+                                                                   ggtt,
+                                                                   1);
                }
        }
 }
@@ -1835,8 +1855,8 @@ static void i915_gem_record_rings(struct drm_device *dev,
                        i915_error_first_batchbuffer(dev_priv, ring);
 
                error->ring[i].ringbuffer =
-                       i915_error_object_create(dev_priv, ring->obj);
-
+                       i915_error_object_create(dev_priv, ring->obj,
+                                                &dev_priv->gtt.base);
 
                i915_gem_record_active_context(ring, error, &error->ring[i]);
 
@@ -1912,7 +1932,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
                                     struct drm_i915_error_state *error)
 {
        struct i915_address_space *vm;
-       int cnt = 0;
+       int cnt = 0, i = 0;
 
        list_for_each_entry(vm, &dev_priv->vm_list, global_link)
                cnt++;
@@ -1929,7 +1949,8 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
        error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
                                         GFP_ATOMIC);
 
-       i915_gem_capture_vm(dev_priv, error, vm, 0);
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+               i915_gem_capture_vm(dev_priv, error, vm, i++);
 }
 
 /**
-- 
1.8.3.1
