The mm_list is used for the active/inactive LRUs. Since those LRUs are
per address space, the link should be per VMA (VM area) rather than per object.

Because we'll only ever have one VMA per object before this point in the
patch series, it is not incorrect to defer the change until now, and doing
it here makes the change much easier to understand.

v2: only bump GGTT LRU in i915_gem_object_set_to_gtt_domain (Chris)

Signed-off-by: Ben Widawsky <b...@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_debugfs.c    | 49 ++++++++++++++++++++++------------
 drivers/gpu/drm/i915/i915_drv.h        |  5 ++--
 drivers/gpu/drm/i915/i915_gem.c        | 31 ++++++++++++++-------
 drivers/gpu/drm/i915/i915_gem_evict.c  | 14 +++++-----
 drivers/gpu/drm/i915/i915_gem_stolen.c |  2 +-
 drivers/gpu/drm/i915/i915_irq.c        | 13 +++++----
 6 files changed, 71 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c 
b/drivers/gpu/drm/i915/i915_debugfs.c
index 7d01fb6..60d2a94 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -152,7 +152,7 @@ static int i915_gem_object_list_info(struct seq_file *m, 
void *data)
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        size_t total_obj_size, total_gtt_size;
        int count, ret;
 
@@ -160,6 +160,7 @@ static int i915_gem_object_list_info(struct seq_file *m, 
void *data)
        if (ret)
                return ret;
 
+       /* FIXME: the user of this interface might want more than just GGTT */
        switch (list) {
        case ACTIVE_LIST:
                seq_printf(m, "Active:\n");
@@ -175,13 +176,12 @@ static int i915_gem_object_list_info(struct seq_file *m, 
void *data)
        }
 
        total_obj_size = total_gtt_size = count = 0;
-       list_for_each_entry(obj, head, mm_list) {
+       list_for_each_entry(vma, head, mm_list) {
                seq_printf(m, "   ");
-               describe_obj(m, obj);
+               describe_obj(m, vma->obj);
                seq_printf(m, "\n");
-               total_obj_size += obj->base.size;
-               /* FIXME: Add size of all VMs */
-               total_gtt_size += i915_gem_ggtt_size(obj);
+               total_obj_size += vma->obj->base.size;
+               total_gtt_size += i915_gem_ggtt_size(vma->obj);
                count++;
        }
        mutex_unlock(&dev->struct_mutex);
@@ -229,6 +229,17 @@ static int per_file_stats(int id, void *ptr, void *data)
        return 0;
 }
 
+#define count_vmas(list, member) do { \
+       list_for_each_entry(vma, list, member) { \
+               size += i915_gem_ggtt_size(vma->obj); \
+               ++count; \
+               if (vma->obj->map_and_fenceable) { \
+                       mappable_size += i915_gem_ggtt_size(vma->obj); \
+                       ++mappable_count; \
+               } \
+       } \
+} while (0)
+
 static int i915_gem_object_info(struct seq_file *m, void* data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -238,6 +249,7 @@ static int i915_gem_object_info(struct seq_file *m, void* 
data)
        size_t size, mappable_size, purgeable_size;
        struct drm_i915_gem_object *obj;
        struct drm_file *file;
+       struct i915_vma *vma;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -254,12 +266,12 @@ static int i915_gem_object_info(struct seq_file *m, void* 
data)
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_objects(ggtt_list(active_list), mm_list);
+       count_vmas(ggtt_list(active_list), mm_list);
        seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_objects(ggtt_list(inactive_list), mm_list);
+       count_vmas(ggtt_list(inactive_list), mm_list);
        seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
@@ -1966,6 +1978,8 @@ i915_drop_caches_set(void *data, u64 val)
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;
+       struct i915_address_space *vm;
+       struct i915_vma *vma, *x;
        int ret;
 
        DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
@@ -1986,15 +2000,16 @@ i915_drop_caches_set(void *data, u64 val)
                i915_gem_retire_requests(dev);
 
        if (val & DROP_BOUND) {
-               /* FIXME: Do this for all vms? */
-               list_for_each_entry_safe(obj, next, ggtt_list(inactive_list),
-                                        mm_list)
-                       if (obj->pin_count)
-                               continue;
-
-                       ret = i915_gem_object_unbind(obj, &dev_priv->gtt.base);
-                       if (ret)
-                               goto unlock;
+               list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+                       list_for_each_entry_safe(vma, x, &vm->inactive_list,
+                                                mm_list)
+                               if (vma->obj->pin_count == 0) {
+                                       ret = i915_gem_object_unbind(vma->obj,
+                                                                    vm);
+                                       if (ret)
+                                               goto unlock;
+                               }
+               }
        }
 
        if (val & DROP_UNBOUND) {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b0d1008..cc18349 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -545,6 +545,9 @@ struct i915_vma {
        /* Page aligned offset (helper for stolen) */
        unsigned long deferred_offset;
 
+       /** This object's place on the active/inactive lists */
+       struct list_head mm_list;
+
        struct list_head vma_link; /* Link in the object's VMA list */
 };
 
@@ -1237,9 +1240,7 @@ struct drm_i915_gem_object {
        struct drm_mm_node *stolen;
        struct list_head global_list;
 
-       /** This object's place on the active/inactive lists */
        struct list_head ring_list;
-       struct list_head mm_list;
        /** This object's place in the batchbuffer or on the eviction list */
        struct list_head exec_list;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 83e2eb3..950a14b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1878,6 +1878,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object 
*obj,
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 seqno = intel_ring_get_seqno(ring);
+       struct i915_vma *vma;
 
        BUG_ON(ring == NULL);
        obj->ring = ring;
@@ -1889,7 +1890,8 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object 
*obj,
        }
 
        /* Move from whatever list we were on to the tail of execution. */
-       list_move_tail(&obj->mm_list, &vm->active_list);
+       vma = i915_gem_obj_to_vma(obj, vm);
+       list_move_tail(&vma->mm_list, &vm->active_list);
        list_move_tail(&obj->ring_list, &ring->active_list);
 
        obj->last_read_seqno = seqno;
@@ -1912,10 +1914,13 @@ static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
                                 struct i915_address_space *vm)
 {
+       struct i915_vma *vma;
+
        BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
        BUG_ON(!obj->active);
 
-       list_move_tail(&obj->mm_list, &vm->inactive_list);
+       vma = i915_gem_obj_to_vma(obj, vm);
+       list_move_tail(&vma->mm_list, &vm->inactive_list);
 
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
@@ -2290,9 +2295,9 @@ void i915_gem_restore_fences(struct drm_device *dev)
 bool i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj;
        struct intel_ring_buffer *ring;
        struct i915_address_space *vm;
+       struct i915_vma *vma;
        int i;
        bool ctx_banned = false;
 
@@ -2305,8 +2310,8 @@ bool i915_gem_reset(struct drm_device *dev)
         * necessary invalidation upon reuse.
         */
        list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-               list_for_each_entry(obj, &vm->inactive_list, mm_list)
-                       obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+               list_for_each_entry(vma, &vm->inactive_list, mm_list)
+                       vma->obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 
        /* The fence registers are invalidated so clear them out */
        i915_gem_restore_fences(dev);
@@ -2653,12 +2658,12 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj,
        i915_gem_gtt_finish_object(obj);
        i915_gem_object_unpin_pages(obj);
 
-       list_del(&obj->mm_list);
        /* Avoid an unnecessary call to unbind on rebind. */
        if (is_i915_ggtt(vm))
                obj->map_and_fenceable = true;
 
        vma = i915_gem_obj_to_vma(obj, vm);
+       list_del(&vma->mm_list);
        list_del(&vma->vma_link);
        drm_mm_remove_node(&vma->node);
        i915_gem_vma_destroy(vma);
@@ -3208,7 +3213,7 @@ search_free:
        }
 
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&obj->mm_list, &vm->inactive_list);
+       list_add_tail(&vma->mm_list, &vm->inactive_list);
        /* Keep GGTT vmas first to make debug easier */
        if (is_i915_ggtt(vm))
                list_add(&vma->vma_link, &obj->vma_list);
@@ -3364,8 +3369,14 @@ i915_gem_object_set_to_gtt_domain(struct 
drm_i915_gem_object *obj, bool write)
                                            old_write_domain);
 
        /* And bump the LRU for this access */
-       if (i915_gem_object_is_inactive(obj))
-               list_move_tail(&obj->mm_list, ggtt_list(inactive_list));
+       if (i915_gem_object_is_inactive(obj)) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+                                                          &dev_priv->gtt.base);
+               if (vma)
+                       list_move_tail(&vma->mm_list,
+                                      &dev_priv->gtt.base.inactive_list);
+
+       }
 
        return 0;
 }
@@ -3943,7 +3954,6 @@ unlock:
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
 {
-       INIT_LIST_HEAD(&obj->mm_list);
        INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->exec_list);
@@ -4086,6 +4096,7 @@ struct i915_vma *i915_gem_vma_create(struct 
drm_i915_gem_object *obj,
                return ERR_PTR(-ENOMEM);
 
        INIT_LIST_HEAD(&vma->vma_link);
+       INIT_LIST_HEAD(&vma->mm_list);
        vma->vm = vm;
        vma->obj = obj;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c 
b/drivers/gpu/drm/i915/i915_gem_evict.c
index 7a210b8..028e8b1 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -86,8 +86,7 @@ i915_gem_evict_something(struct drm_device *dev, struct 
i915_address_space *vm,
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
        /* First see if there is a large enough contiguous idle region... */
-       list_for_each_entry(obj, &vm->inactive_list, mm_list) {
-               struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
+       list_for_each_entry(vma, &vm->inactive_list, mm_list) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }
@@ -96,8 +95,7 @@ i915_gem_evict_something(struct drm_device *dev, struct 
i915_address_space *vm,
                goto none;
 
        /* Now merge in the soon-to-be-expired objects... */
-       list_for_each_entry(obj, &vm->active_list, mm_list) {
-               struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
+       list_for_each_entry(vma, &vm->active_list, mm_list) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }
@@ -158,8 +156,8 @@ int
 i915_gem_evict_everything(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj, *next;
        struct i915_address_space *vm;
+       struct i915_vma *vma, *next;
        bool lists_empty = true;
        int ret;
 
@@ -187,9 +185,9 @@ i915_gem_evict_everything(struct drm_device *dev)
 
        /* Having flushed everything, unbind() should never raise an error */
        list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-               list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
-                       if (obj->pin_count == 0)
-                               WARN_ON(i915_gem_object_unbind(obj, vm));
+               list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+                       if (vma->obj->pin_count == 0)
+                               WARN_ON(i915_gem_object_unbind(vma->obj, vm));
        }
 
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c 
b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 4863219..d393298 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -397,7 +397,7 @@ i915_gem_object_create_stolen_for_preallocated(struct 
drm_device *dev,
        obj->has_global_gtt_mapping = 1;
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&obj->mm_list, &gtt_vm->inactive_list);
+       list_add_tail(&vma->mm_list, &gtt_vm->inactive_list);
 
        return obj;
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b786fcd..28880bf 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1643,11 +1643,11 @@ static void capture_bo(struct drm_i915_error_buffer 
*err,
 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
                             int count, struct list_head *head)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        int i = 0;
 
-       list_for_each_entry(obj, head, mm_list) {
-               capture_bo(err++, obj);
+       list_for_each_entry(vma, head, mm_list) {
+               capture_bo(err++, vma->obj);
                if (++i == count)
                        break;
        }
@@ -1710,6 +1710,7 @@ i915_error_first_batchbuffer(struct drm_i915_private 
*dev_priv,
                             struct intel_ring_buffer *ring)
 {
        struct i915_address_space *vm;
+       struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
        u32 seqno;
 
@@ -1733,7 +1734,8 @@ i915_error_first_batchbuffer(struct drm_i915_private 
*dev_priv,
 
        seqno = ring->get_seqno(ring, false);
        list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-               list_for_each_entry(obj, &vm->active_list, mm_list) {
+               list_for_each_entry(vma, &vm->active_list, mm_list) {
+                       obj = vma->obj;
                        if (obj->ring != ring)
                                continue;
 
@@ -1872,11 +1874,12 @@ static void i915_gem_capture_vm(struct drm_i915_private 
*dev_priv,
                                const int ndx)
 {
        struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
+       struct i915_vma *vma;
        struct drm_i915_gem_object *obj;
        int i;
 
        i = 0;
-       list_for_each_entry(obj, &vm->active_list, mm_list)
+       list_for_each_entry(vma, &vm->active_list, mm_list)
                i++;
        error->active_bo_count[ndx] = i;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-- 
1.8.3.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to