On Thu, Jun 27, 2013 at 04:30:27PM -0700, Ben Widawsky wrote:
> for file in `ls drivers/gpu/drm/i915/*.c` ; do sed -i "s/dev_priv->mm.inactive_list/i915_gtt_vm->inactive_list/" $file; done
> for file in `ls drivers/gpu/drm/i915/*.c` ; do sed -i "s/dev_priv->mm.active_list/i915_gtt_vm->active_list/" $file; done
> 
> I've also opted to move the comments out of line a bit so one can get a
> better picture of what the various lists do.

Bikeshed: That makes you inconsistent with all the other in-detail
structure member comments we have. And I don't see how it looks better,
so I'd vote to keep things as-is with per-member comments.
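
For reference, the per-member style I mean would look roughly like this
(just the comments this patch deletes, moved onto the new struct
members; a sketch, not the actual patch):

struct i915_address_space {
	/* ... */

	/**
	 * List of objects currently involved in rendering.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and are
	 * ready to unbind, but are still mapped.
	 *
	 * A reference is not held on the buffer while on this list.
	 */
	struct list_head inactive_list;
};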

> v2: Leave the bound list as a global one. (Chris, indirectly)
> 
> CC: Chris Wilson <ch...@chris-wilson.co.uk>
> Signed-off-by: Ben Widawsky <b...@bwidawsk.net>

The real comment, though, is on the commit message: it fails to explain
why we want to move the active/inactive lists from mm/obj to the
address space/vma pair. I think I understand, but this should be
explained in more depth.

I think the first commit which starts moving those lists and the
execution-tracking state should also mention why some of the state
(e.g. the bound/unbound lists) is not moved.
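
To make sure we mean the same thing, here's roughly how I picture the
end state. This is a sketch only; apart from the lists touched by this
patch the names (i915_vma in particular) are made up:

#include <linux/list.h>

struct drm_i915_gem_object;

/* With multiple address spaces an object can be bound into more than
 * one vm, so active/inactive becomes per-binding state tracked in the
 * address space rather than per-object state in dev_priv->mm.
 */
struct i915_address_space {
	struct list_head active_list;	/* bindings with outstanding GPU work */
	struct list_head inactive_list;	/* bound but idle bindings */
	/* ... */
};

/* Hypothetical per-binding tracking object. */
struct i915_vma {
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct list_head mm_list;	/* links into vm->{active,inactive}_list */
};

/* bound/unbound describe the state of an object's backing pages, which
 * is per-object and not per-vm, hence they stay in i915_gem_mm.
 */
struct i915_gem_mm {
	struct list_head bound_list;	/* objects bound into some vm */
	struct list_head unbound_list;	/* objects with pages but no binding */
	/* ... */
};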

Cheers, Daniel

> ---
>  drivers/gpu/drm/i915/i915_debugfs.c    | 11 ++++----
>  drivers/gpu/drm/i915/i915_drv.h        | 49 ++++++++++++++--------------------
>  drivers/gpu/drm/i915/i915_gem.c        | 24 +++++++----------
>  drivers/gpu/drm/i915/i915_gem_debug.c  |  2 +-
>  drivers/gpu/drm/i915/i915_gem_evict.c  | 10 +++----
>  drivers/gpu/drm/i915/i915_gem_stolen.c |  2 +-
>  drivers/gpu/drm/i915/i915_irq.c        |  6 ++---
>  7 files changed, 46 insertions(+), 58 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index f3c76ab..a0babc7 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -158,11 +158,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
>       switch (list) {
>       case ACTIVE_LIST:
>               seq_printf(m, "Active:\n");
> -             head = &dev_priv->mm.active_list;
> +             head = &i915_gtt_vm->active_list;
>               break;
>       case INACTIVE_LIST:
>               seq_printf(m, "Inactive:\n");
> -             head = &dev_priv->mm.inactive_list;
> +             head = &i915_gtt_vm->inactive_list;
>               break;
>       default:
>               mutex_unlock(&dev->struct_mutex);
> @@ -247,12 +247,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
>                  count, mappable_count, size, mappable_size);
>  
>       size = count = mappable_size = mappable_count = 0;
> -     count_objects(&dev_priv->mm.active_list, mm_list);
> +     count_objects(&i915_gtt_vm->active_list, mm_list);
>       seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
>                  count, mappable_count, size, mappable_size);
>  
>       size = count = mappable_size = mappable_count = 0;
> -     count_objects(&dev_priv->mm.inactive_list, mm_list);
> +     count_objects(&i915_gtt_vm->inactive_list, mm_list);
>       seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
>                  count, mappable_count, size, mappable_size);
>  
> @@ -1977,7 +1977,8 @@ i915_drop_caches_set(void *data, u64 val)
>               i915_gem_retire_requests(dev);
>  
>       if (val & DROP_BOUND) {
> -             list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
> +             list_for_each_entry_safe(obj, next, &i915_gtt_vm->inactive_list,
> +                                      mm_list)
>                       if (obj->pin_count == 0) {
>                               ret = i915_gem_object_unbind(obj);
>                               if (ret)
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index e65cf57..0553410 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -448,6 +448,22 @@ struct i915_address_space {
>       unsigned long start;            /* Start offset always 0 for dri2 */
>       size_t total;           /* size addr space maps (ex. 2GB for ggtt) */
>  
> +/* We use many types of lists for object tracking:
> + *  active_list: List of objects currently involved in rendering.
> + *   Includes buffers having the contents of their GPU caches flushed, not
> + *   necessarily primitives. last_rendering_seqno represents when the
> + *   rendering involved will be completed. A reference is held on the buffer
> + *   while on this list.
> + *  inactive_list: LRU list of objects which are not in the ringbuffer
> + *   and are ready to unbind, but are still mapped.
> + *   last_rendering_seqno is 0 while an object is in this list.
> + *   A reference is not held on the buffer while on this list,
> + *   as merely being GTT-bound shouldn't prevent its being
> + *   freed, and we'll pull it off the list in the free path.
> + */
> +     struct list_head active_list;
> +     struct list_head inactive_list;
> +
>       struct {
>               dma_addr_t addr;
>               struct page *page;
> @@ -835,42 +851,17 @@ struct intel_l3_parity {
>  };
>  
>  struct i915_gem_mm {
> -     /** List of all objects in gtt_space. Used to restore gtt
> -      * mappings on resume */
> -     struct list_head bound_list;
>       /**
> -      * List of objects which are not bound to the GTT (thus
> -      * are idle and not used by the GPU) but still have
> -      * (presumably uncached) pages still attached.
> +      * Lists of objects which are [not] bound to a VM. Unbound objects
> +      * are idle but still have (presumably uncached) pages still
> +      * attached.
>        */
> +     struct list_head bound_list;
>       struct list_head unbound_list;
>  
>       struct shrinker inactive_shrinker;
>       bool shrinker_no_lock_stealing;
>  
> -     /**
> -      * List of objects currently involved in rendering.
> -      *
> -      * Includes buffers having the contents of their GPU caches
> -      * flushed, not necessarily primitives.  last_rendering_seqno
> -      * represents when the rendering involved will be completed.
> -      *
> -      * A reference is held on the buffer while on this list.
> -      */
> -     struct list_head active_list;
> -
> -     /**
> -      * LRU list of objects which are not in the ringbuffer and
> -      * are ready to unbind, but are still in the GTT.
> -      *
> -      * last_rendering_seqno is 0 while an object is in this list.
> -      *
> -      * A reference is not held on the buffer while on this list,
> -      * as merely being GTT-bound shouldn't prevent its being
> -      * freed, and we'll pull it off the list in the free path.
> -      */
> -     struct list_head inactive_list;
> -
>       /** LRU list of objects with fence regs on them. */
>       struct list_head fence_list;
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 608b6b5..7da06df 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1706,7 +1706,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
>       }
>  
>       list_for_each_entry_safe(obj, next,
> -                              &dev_priv->mm.inactive_list,
> +                              &i915_gtt_vm->inactive_list,
>                                mm_list) {
>               if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
>                   i915_gem_object_unbind(obj) == 0 &&
> @@ -1881,7 +1881,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
>       }
>  
>       /* Move from whatever list we were on to the tail of execution. */
> -     list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
> +     list_move_tail(&obj->mm_list, &i915_gtt_vm->active_list);
>       list_move_tail(&obj->ring_list, &ring->active_list);
>  
>       obj->last_read_seqno = seqno;
> @@ -1909,7 +1909,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
>       BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
>       BUG_ON(!obj->active);
>  
> -     list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
> +     list_move_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
>  
>       list_del_init(&obj->ring_list);
>       obj->ring = NULL;
> @@ -2279,12 +2279,8 @@ bool i915_gem_reset(struct drm_device *dev)
>       /* Move everything out of the GPU domains to ensure we do any
>        * necessary invalidation upon reuse.
>        */
> -     list_for_each_entry(obj,
> -                         &dev_priv->mm.inactive_list,
> -                         mm_list)
> -     {
> +     list_for_each_entry(obj, &i915_gtt_vm->inactive_list, mm_list)
>               obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
> -     }
>  
>       /* The fence registers are invalidated so clear them out */
>       i915_gem_restore_fences(dev);
> @@ -3162,7 +3158,7 @@ search_free:
>       }
>  
>       list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
> -     list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
> +     list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
>  
>       obj->gtt_space = node;
>       obj->gtt_offset = node->start;
> @@ -3313,7 +3309,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>  
>       /* And bump the LRU for this access */
>       if (i915_gem_object_is_inactive(obj))
> -             list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
> +             list_move_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
>  
>       return 0;
>  }
> @@ -4291,7 +4287,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
>               return ret;
>       }
>  
> -     BUG_ON(!list_empty(&dev_priv->mm.active_list));
> +     BUG_ON(!list_empty(&i915_gtt_vm->active_list));
>       mutex_unlock(&dev->struct_mutex);
>  
>       ret = drm_irq_install(dev);
> @@ -4352,8 +4348,8 @@ i915_gem_load(struct drm_device *dev)
>                                 SLAB_HWCACHE_ALIGN,
>                                 NULL);
>  
> -     INIT_LIST_HEAD(&dev_priv->mm.active_list);
> -     INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
> +     INIT_LIST_HEAD(&i915_gtt_vm->active_list);
> +     INIT_LIST_HEAD(&i915_gtt_vm->inactive_list);
>       INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
>       INIT_LIST_HEAD(&dev_priv->mm.bound_list);
>       INIT_LIST_HEAD(&dev_priv->mm.fence_list);
> @@ -4652,7 +4648,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
>       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
>               if (obj->pages_pin_count == 0)
>                       cnt += obj->base.size >> PAGE_SHIFT;
> -     list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
> +     list_for_each_entry(obj, &i915_gtt_vm->inactive_list, global_list)
>               if (obj->pin_count == 0 && obj->pages_pin_count == 0)
>                       cnt += obj->base.size >> PAGE_SHIFT;
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
> index 582e6a5..bf945a3 100644
> --- a/drivers/gpu/drm/i915/i915_gem_debug.c
> +++ b/drivers/gpu/drm/i915/i915_gem_debug.c
> @@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev)
>               }
>       }
>  
> -     list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
> +     list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
>               if (obj->base.dev != dev ||
>                   !atomic_read(&obj->base.refcount.refcount)) {
>                       DRM_ERROR("freed inactive %p\n", obj);
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 6e620f86..92856a2 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -86,7 +86,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
>                                cache_level);
>  
>       /* First see if there is a large enough contiguous idle region... */
> -     list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
> +     list_for_each_entry(obj, &i915_gtt_vm->inactive_list, mm_list) {
>               if (mark_free(obj, &unwind_list))
>                       goto found;
>       }
> @@ -95,7 +95,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
>               goto none;
>  
>       /* Now merge in the soon-to-be-expired objects... */
> -     list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
> +     list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list) {
>               if (mark_free(obj, &unwind_list))
>                       goto found;
>       }
> @@ -158,8 +158,8 @@ i915_gem_evict_everything(struct drm_device *dev)
>       bool lists_empty;
>       int ret;
>  
> -     lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
> -                    list_empty(&dev_priv->mm.active_list));
> +     lists_empty = (list_empty(&i915_gtt_vm->inactive_list) &&
> +                    list_empty(&i915_gtt_vm->active_list));
>       if (lists_empty)
>               return -ENOSPC;
>  
> @@ -177,7 +177,7 @@ i915_gem_evict_everything(struct drm_device *dev)
>  
>       /* Having flushed everything, unbind() should never raise an error */
>       list_for_each_entry_safe(obj, next,
> -                              &dev_priv->mm.inactive_list, mm_list)
> +                              &i915_gtt_vm->inactive_list, mm_list)
>               if (obj->pin_count == 0)
>                       WARN_ON(i915_gem_object_unbind(obj));
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index 49e8be7..3f6564d 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -384,7 +384,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
>       obj->has_global_gtt_mapping = 1;
>  
>       list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> -     list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
> +     list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
>  
>       return obj;
>  }
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index 1e25920..5dc055a 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -1722,7 +1722,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
>       }
>  
>       seqno = ring->get_seqno(ring, false);
> -     list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
> +     list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list) {
>               if (obj->ring != ring)
>                       continue;
>  
> @@ -1857,7 +1857,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
>       int i;
>  
>       i = 0;
> -     list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
> +     list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list)
>               i++;
>       error->active_bo_count = i;
>       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
> @@ -1877,7 +1877,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
>               error->active_bo_count =
>                       capture_active_bo(error->active_bo,
>                                         error->active_bo_count,
> -                                       &dev_priv->mm.active_list);
> +                                       &i915_gtt_vm->active_list);
>  
>       if (error->pinned_bo)
>               error->pinned_bo_count =
> -- 
> 1.8.3.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
