Chris Wilson <ch...@chris-wilson.co.uk> writes:

> Previously, we wanted to shrink the pages of freed objects before they
> were RCU collected. However, by removing the struct_mutex serialisation
> around the active reference, we now need to acquire an extra reference
> around the wait. Unfortunately, this means that we have to skip any
> objects that are still awaiting RCU collection.
>
> Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_object.c   | 47 +-------------------
>  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c |  5 +++
>  2 files changed, 6 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index 272ce30ce1d3..1b571fd26ed4 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -149,33 +149,6 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
>       }
>  }
>  
> -static bool discard_backing_storage(struct drm_i915_gem_object *obj)
> -{
> -     /*
> -      * If we are the last user of the backing storage (be it shmemfs
> -      * pages or stolen etc), we know that the pages are going to be
> -      * immediately released. In this case, we can then skip copying
> -      * back the contents from the GPU.
> -      */
> -     if (!i915_gem_object_is_shrinkable(obj))
> -             return false;
> -
> -     if (obj->mm.madv != I915_MADV_WILLNEED)
> -             return false;
> -
> -     if (!obj->base.filp)
> -             return true;
> -
> -     /* At first glance, this looks racy, but then again so would be
> -      * userspace racing mmap against close. However, the first external
> -      * reference to the filp can only be obtained through the
> -      * i915_gem_mmap_ioctl() which safeguards us against the user
> -      * acquiring such a reference whilst we are in the middle of
> -      * freeing the object.
> -      */
> -     return file_count(obj->base.filp) == 1;
> -}
> -
>  static void __i915_gem_free_objects(struct drm_i915_private *i915,
>                                   struct llist_node *freed)
>  {
> @@ -225,8 +198,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
>               if (obj->ops->release)
>                       obj->ops->release(obj);
>  
> -             if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
> -                     atomic_set(&obj->mm.pages_pin_count, 0);
> +             atomic_set(&obj->mm.pages_pin_count, 0);
>               __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
>               GEM_BUG_ON(i915_gem_object_has_pages(obj));
>  
> @@ -324,23 +296,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
>  {
>       struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
>  
> -     if (obj->mm.quirked)
> -             __i915_gem_object_unpin_pages(obj);
> -
> -     if (discard_backing_storage(obj)) {
> -             struct drm_i915_private *i915 = to_i915(obj->base.dev);
> -
> -             obj->mm.madv = I915_MADV_DONTNEED;
> -
> -             if (i915_gem_object_has_pages(obj)) {
> -                     unsigned long flags;
> -
> -                     spin_lock_irqsave(&i915->mm.obj_lock, flags);
> -                     list_move_tail(&obj->mm.link, &i915->mm.purge_list);
> -                     spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
> -             }
> -     }
> -
>       /*
>        * Before we free the object, make sure any pure RCU-only
>        * read-side critical sections are complete, e.g.
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> index c851c4029597..3a926a8755c6 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
> @@ -241,6 +241,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
>                       if (!can_release_pages(obj))
>                               continue;
>  
> +                     if (!kref_get_unless_zero(&obj->base.refcount))
> +                             continue;
> +

The comment above, in this function, now reads a little stale as it
still talks about struct_mutex. Please straighten it up.
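
As an aside, for anyone reading along: the safety argument here boils
down to the try-get pattern, i.e. kref_get_unless_zero() only succeeds
while the object is still live, so anything whose refcount has already
hit zero (and is merely waiting for RCU collection) is skipped rather
than resurrected. A minimal, self-contained userspace sketch of that
pattern using C11 atomics is below; the names (object_get_unless_zero()
and friends) are made up for illustration and are not the kernel
helpers:

/*
 * Userspace sketch (illustrative only, not the kernel implementation)
 * of the "take a reference unless the count is already zero" pattern
 * that kref_get_unless_zero() provides.  An object whose count has
 * reached zero is already on its way to being freed (standing in here
 * for the RCU-deferred free), so a would-be user must skip it instead
 * of resurrecting it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
        atomic_uint refcount;   /* 0 means "being freed, hands off" */
};

/* Analogue of kref_get_unless_zero(): grab a reference only if the
 * object is still live. */
static bool object_get_unless_zero(struct object *obj)
{
        unsigned int old = atomic_load(&obj->refcount);

        do {
                if (old == 0)
                        return false;   /* already dying, skip it */
        } while (!atomic_compare_exchange_weak(&obj->refcount,
                                               &old, old + 1));

        return true;
}

/* Analogue of i915_gem_object_put()/kref_put(): drop our reference. */
static void object_put(struct object *obj)
{
        if (atomic_fetch_sub(&obj->refcount, 1) == 1)
                printf("last reference gone, object would now be freed\n");
}

int main(void)
{
        struct object live  = { .refcount = 1 };  /* still owned */
        struct object dying = { .refcount = 0 };  /* awaiting deferred free */

        if (object_get_unless_zero(&live)) {
                /* ... safe to wait on / unbind / shrink the object ... */
                object_put(&live);
        }

        /* The shrinker path for an object already awaiting collection. */
        printf("dying object acquired: %s\n",
               object_get_unless_zero(&dying) ? "yes" : "no");

        object_put(&live);      /* drop the original reference */
        return 0;
}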

Reviewed-by: Mika Kuoppala <mika.kuopp...@linux.intel.com>

>                       spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
>  
>                       if (unsafe_drop_pages(obj)) {
> @@ -253,7 +256,9 @@ i915_gem_shrink(struct drm_i915_private *i915,
>                               }
>                               mutex_unlock(&obj->mm.lock);
>                       }
> +
>                       scanned += obj->base.size >> PAGE_SHIFT;
> +                     i915_gem_object_put(obj);
>  
>                       spin_lock_irqsave(&i915->mm.obj_lock, flags);
>               }
> -- 
> 2.20.1