On Tue, Aug 06, 2013 at 08:39:50PM +0200, Daniel Vetter wrote:
> On Wed, Jul 31, 2013 at 05:00:11PM -0700, Ben Widawsky wrote:
> > Eviction code, like the rest of the converted code needs to be aware of
> > the address space for which it is evicting (or the everything case, all
> > addresses). With the updated bind/unbind interfaces of the last patch,
> > we can now safely move the eviction code over.
> > 
> > Signed-off-by: Ben Widawsky <b...@bwidawsk.net>
> 
> Two comments below.
> -Daniel
> 
> > ---
> >  drivers/gpu/drm/i915/i915_drv.h       |  4 ++-
> >  drivers/gpu/drm/i915/i915_gem.c       |  2 +-
> >  drivers/gpu/drm/i915/i915_gem_evict.c | 53 
> > +++++++++++++++++++----------------
> >  3 files changed, 33 insertions(+), 26 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h 
> > b/drivers/gpu/drm/i915/i915_drv.h
> > index 0610588..bf1ecef 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -1946,7 +1946,9 @@ static inline void i915_gem_chipset_flush(struct 
> > drm_device *dev)
> >  
> >  
> >  /* i915_gem_evict.c */
> > -int __must_check i915_gem_evict_something(struct drm_device *dev, int 
> > min_size,
> > +int __must_check i915_gem_evict_something(struct drm_device *dev,
> > +                                     struct i915_address_space *vm,
> > +                                     int min_size,
> >                                       unsigned alignment,
> >                                       unsigned cache_level,
> >                                       bool mappable,
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c 
> > b/drivers/gpu/drm/i915/i915_gem.c
> > index 0cb36c2..1013105 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -3159,7 +3159,7 @@ search_free:
> >                                               size, alignment,
> >                                               obj->cache_level, 0, gtt_max);
> >     if (ret) {
> > -           ret = i915_gem_evict_something(dev, size, alignment,
> > +           ret = i915_gem_evict_something(dev, vm, size, alignment,
> >                                            obj->cache_level,
> >                                            map_and_fenceable,
> >                                            nonblocking);
> > diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c 
> > b/drivers/gpu/drm/i915/i915_gem_evict.c
> > index 9205a41..61bf5e2 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> > @@ -32,26 +32,21 @@
> >  #include "i915_trace.h"
> >  
> >  static bool
> > -mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
> > +mark_free(struct i915_vma *vma, struct list_head *unwind)
> >  {
> > -   struct drm_device *dev = obj->base.dev;
> > -   struct drm_i915_private *dev_priv = dev->dev_private;
> > -   struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);
> > -
> > -   if (obj->pin_count)
> > +   if (vma->obj->pin_count)
> >             return false;
> >  
> > -   list_add(&obj->exec_list, unwind);
> > +   list_add(&vma->obj->exec_list, unwind);
> >     return drm_mm_scan_add_block(&vma->node);
> >  }
> >  
> >  int
> > -i915_gem_evict_something(struct drm_device *dev, int min_size,
> > -                    unsigned alignment, unsigned cache_level,
> > +i915_gem_evict_something(struct drm_device *dev, struct i915_address_space 
> > *vm,
> > +                    int min_size, unsigned alignment, unsigned cache_level,
> >                      bool mappable, bool nonblocking)
> >  {
> >     drm_i915_private_t *dev_priv = dev->dev_private;
> > -   struct i915_address_space *vm = &dev_priv->gtt.base;
> >     struct list_head eviction_list, unwind_list;
> >     struct i915_vma *vma;
> >     struct drm_i915_gem_object *obj;
> > @@ -83,16 +78,18 @@ i915_gem_evict_something(struct drm_device *dev, int 
> > min_size,
> >      */
> >  
> >     INIT_LIST_HEAD(&unwind_list);
> > -   if (mappable)
> > +   if (mappable) {
> > +           BUG_ON(!i915_is_ggtt(vm));
> >             drm_mm_init_scan_with_range(&vm->mm, min_size,
> >                                         alignment, cache_level, 0,
> >                                         dev_priv->gtt.mappable_end);
> > -   else
> > +   } else
> >             drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
> >  
> >     /* First see if there is a large enough contiguous idle region... */
> >     list_for_each_entry(obj, &vm->inactive_list, mm_list) {
> > -           if (mark_free(obj, &unwind_list))
> > +           struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
> > +           if (mark_free(vma, &unwind_list))
> >                     goto found;
> >     }
> >  
> > @@ -101,7 +98,8 @@ i915_gem_evict_something(struct drm_device *dev, int 
> > min_size,
> >  
> >     /* Now merge in the soon-to-be-expired objects... */
> >     list_for_each_entry(obj, &vm->active_list, mm_list) {
> > -           if (mark_free(obj, &unwind_list))
> > +           struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
> > +           if (mark_free(vma, &unwind_list))
> >                     goto found;
> >     }
> >  
> > @@ -111,7 +109,7 @@ none:
> >             obj = list_first_entry(&unwind_list,
> >                                    struct drm_i915_gem_object,
> >                                    exec_list);
> > -           vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);
> > +           vma = i915_gem_obj_to_vma(obj, vm);
> >             ret = drm_mm_scan_remove_block(&vma->node);
> >             BUG_ON(ret);
> >  
> > @@ -132,7 +130,7 @@ found:
> >             obj = list_first_entry(&unwind_list,
> >                                    struct drm_i915_gem_object,
> >                                    exec_list);
> > -           vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);
> > +           vma = i915_gem_obj_to_vma(obj, vm);
> >             if (drm_mm_scan_remove_block(&vma->node)) {
> >                     list_move(&obj->exec_list, &eviction_list);
> >                     drm_gem_object_reference(&obj->base);
> > @@ -147,7 +145,7 @@ found:
> >                                    struct drm_i915_gem_object,
> >                                    exec_list);
> >             if (ret == 0)
> > -                   ret = i915_gem_object_ggtt_unbind(obj);
> > +                   ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
> 
> Again I think the ggtt_unbind->vma_unbind conversion seems to leak the
> vma. It feels like vma_unbind should call vma_destroy?
> 

The VMA should also get cleaned up when the object backing the VMA is
destroyed. I agree that at present, unbind and destroy have little
distinction, but I can foresee that changing. Proper faulting is one
such case, off the top of my head.

Anyway, let me know if your leak concern is addressed.

> >  
> >             list_del_init(&obj->exec_list);
> >             drm_gem_object_unreference(&obj->base);
> > @@ -160,13 +158,18 @@ int
> >  i915_gem_evict_everything(struct drm_device *dev)
> >  {
> >     drm_i915_private_t *dev_priv = dev->dev_private;
> > -   struct i915_address_space *vm = &dev_priv->gtt.base;
> > +   struct i915_address_space *vm;
> >     struct drm_i915_gem_object *obj, *next;
> > -   bool lists_empty;
> > +   bool lists_empty = true;
> >     int ret;
> >  
> > -   lists_empty = (list_empty(&vm->inactive_list) &&
> > -                  list_empty(&vm->active_list));
> > +   list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
> > +           lists_empty = (list_empty(&vm->inactive_list) &&
> > +                          list_empty(&vm->active_list));
> > +           if (!lists_empty)
> > +                   lists_empty = false;
> > +   }
> > +
> >     if (lists_empty)
> >             return -ENOSPC;
> >  
> > @@ -183,9 +186,11 @@ i915_gem_evict_everything(struct drm_device *dev)
> >     i915_gem_retire_requests(dev);
> >  
> >     /* Having flushed everything, unbind() should never raise an error */
> > -   list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
> > -           if (obj->pin_count == 0)
> > -                   WARN_ON(i915_gem_object_ggtt_unbind(obj));
> > +   list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
> > +           list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
> > +                   if (obj->pin_count == 0)
> > +                           
> > WARN_ON(i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)));
> > +   }
> 
> The conversion of evict_everything looks a bit strange. Essentially we
> have three callers:
> - ums+gem support code in leavevt to rid the gtt of all gem objects when
>   the userspace X ums ddx stops controlling the hw.
> - When we seriously ran out of memory in, shrink_all.
> - In execbuf when we've fragmented the gtt address space so badly that we
>   need to start over completely fresh.
> 
> With this it imo would make sense to just loop over the global bound
> object lists. But for the execbuf caller adding a vm parameter (and only
> evicting from that special vm, skipping all others) would make sense.
> Other callers would pass NULL since they want everything to get evicted.
> Volunteered for that follow-up?
> 

We need evict_everything for #1. For #2, we already call evict_something
for the vm when we go through the out-of-space path. If that failed,
evicting everything for a specific VM is just the same operation. But
maybe I've glossed over something in the details. Please correct me if I'm
wrong. Is there a case where evict_something might fail with ENOSPC, but
evicting everything in a VM would help?

> >  
> >     return 0;
> >  }
-- 
Ben Widawsky, Intel Open Source Technology Center
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to