Due to the lazy retirement semantics, an object may still hold onto an
active reference even after we have unbound it. So in the debug code,
play it safe and reuse the shrinker code.

v2: Export i915_gem_shrink() rather than open-coding it.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
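
A quick usage sketch for reviewers (not part of the patch): a hypothetical
caller showing how the exported i915_gem_shrink() and its flags compose.
The two-pass pattern mirrors the i915_gem_shrinker_scan() hunk below, and
i915_gem_shrink() returns the number of pages reclaimed; example_reclaim
and nr_pages are illustrative names only.

        /* Hypothetical helper, illustration only: reclaim up to nr_pages,
         * trying objects userspace has already marked purgeable before
         * touching any other idle object on the bound/unbound lists.
         */
        static unsigned long example_reclaim(struct drm_i915_private *dev_priv,
                                             long nr_pages)
        {
                unsigned long freed;

                /* First pass: purgeable (madvise DONTNEED) objects only. */
                freed = i915_gem_shrink(dev_priv, nr_pages,
                                        I915_SHRINK_BOUND |
                                        I915_SHRINK_UNBOUND |
                                        I915_SHRINK_PURGEABLE);

                /* Second pass: any idle object, bound or unbound. */
                if (freed < nr_pages)
                        freed += i915_gem_shrink(dev_priv, nr_pages - freed,
                                                 I915_SHRINK_BOUND |
                                                 I915_SHRINK_UNBOUND);

                return freed;
        }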
 drivers/gpu/drm/i915/i915_debugfs.c |  34 ++---------
 drivers/gpu/drm/i915/i915_drv.h     |   6 ++
 drivers/gpu/drm/i915/i915_gem.c     | 111 +++++++++++++++++++++---------------
 3 files changed, 74 insertions(+), 77 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2cbc85f3b237..063b44817e08 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3826,7 +3826,6 @@ i915_drop_caches_set(void *data, u64 val)
 {
        struct drm_device *dev = data;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj, *next;
        int ret;
 
        DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -3846,36 +3845,11 @@ i915_drop_caches_set(void *data, u64 val)
        if (val & (DROP_RETIRE | DROP_ACTIVE))
                i915_gem_retire_requests(dev);
 
-       if (val & DROP_BOUND) {
-               list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
-                                        global_list) {
-                       struct i915_vma *vma, *v;
+       if (val & DROP_BOUND)
+               i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
 
-                       ret = 0;
-                       drm_gem_object_reference(&obj->base);
-                       list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) {
-                               if (vma->pin_count)
-                                       continue;
-
-                               ret = i915_vma_unbind(vma);
-                               if (ret)
-                                       break;
-                       }
-                       drm_gem_object_unreference(&obj->base);
-                       if (ret)
-                               goto unlock;
-               }
-       }
-
-       if (val & DROP_UNBOUND) {
-               list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
-                                        global_list)
-                       if (obj->pages_pin_count == 0) {
-                               ret = i915_gem_object_put_pages(obj);
-                               if (ret)
-                                       goto unlock;
-                       }
-       }
+       if (val & DROP_UNBOUND)
+               i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
 
 unlock:
        mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 241b2bfda6f2..e3ca8dfa60df 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2365,6 +2365,12 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+                             long target,
+                             unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4ca3a6dcf10b..1ef6700e5d98 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -60,7 +60,6 @@ static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
 static int i915_gem_shrinker_oom(struct notifier_block *nb,
                                 unsigned long event,
                                 void *ptr);
-static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1741,7 +1740,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
         * offsets on purgeable objects by truncating it and marking it purged,
         * which prevents userspace from ever using that object again.
         */
-       i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+       i915_gem_shrink(dev_priv,
+                       obj->base.size >> PAGE_SHIFT,
+                       I915_SHRINK_BOUND |
+                       I915_SHRINK_UNBOUND |
+                       I915_SHRINK_PURGEABLE);
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
                goto out;
@@ -1938,12 +1941,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-static unsigned long
-__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
-                 bool purgeable_only)
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+               long target, unsigned flags)
 {
-       struct list_head still_in_list;
-       struct drm_i915_gem_object *obj;
+       const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
        unsigned long count = 0;
 
        /*
@@ -1965,62 +1967,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count equals 0.
         */
-       INIT_LIST_HEAD(&still_in_list);
-       while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-               obj = list_first_entry(&dev_priv->mm.unbound_list,
-                                      typeof(*obj), global_list);
-               list_move_tail(&obj->global_list, &still_in_list);
+       if (flags & I915_SHRINK_UNBOUND) {
+               struct list_head still_in_list;
 
-               if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-                       continue;
+               INIT_LIST_HEAD(&still_in_list);
+               while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+                       struct drm_i915_gem_object *obj;
 
-               drm_gem_object_reference(&obj->base);
+                       obj = list_first_entry(&dev_priv->mm.unbound_list,
+                                              typeof(*obj), global_list);
+                       list_move_tail(&obj->global_list, &still_in_list);
 
-               if (i915_gem_object_put_pages(obj) == 0)
-                       count += obj->base.size >> PAGE_SHIFT;
+                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                               continue;
 
-               drm_gem_object_unreference(&obj->base);
+                       drm_gem_object_reference(&obj->base);
+
+                       if (i915_gem_object_put_pages(obj) == 0)
+                               count += obj->base.size >> PAGE_SHIFT;
+
+                       drm_gem_object_unreference(&obj->base);
+               }
+               list_splice(&still_in_list, &dev_priv->mm.unbound_list);
        }
-       list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 
-       INIT_LIST_HEAD(&still_in_list);
-       while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
-               struct i915_vma *vma, *v;
+       if (flags & I915_SHRINK_BOUND) {
+               struct list_head still_in_list;
 
-               obj = list_first_entry(&dev_priv->mm.bound_list,
-                                      typeof(*obj), global_list);
-               list_move_tail(&obj->global_list, &still_in_list);
+               INIT_LIST_HEAD(&still_in_list);
+               while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+                       struct drm_i915_gem_object *obj;
+                       struct i915_vma *vma, *v;
 
-               if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-                       continue;
+                       obj = list_first_entry(&dev_priv->mm.bound_list,
+                                              typeof(*obj), global_list);
+                       list_move_tail(&obj->global_list, &still_in_list);
 
-               drm_gem_object_reference(&obj->base);
+                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                               continue;
 
-               list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
-                       if (i915_vma_unbind(vma))
-                               break;
+                       drm_gem_object_reference(&obj->base);
 
-               if (i915_gem_object_put_pages(obj) == 0)
-                       count += obj->base.size >> PAGE_SHIFT;
+                       list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                               if (i915_vma_unbind(vma))
+                                       break;
 
-               drm_gem_object_unreference(&obj->base);
+                       if (i915_gem_object_put_pages(obj) == 0)
+                               count += obj->base.size >> PAGE_SHIFT;
+
+                       drm_gem_object_unreference(&obj->base);
+               }
+               list_splice(&still_in_list, &dev_priv->mm.bound_list);
        }
-       list_splice(&still_in_list, &dev_priv->mm.bound_list);
 
        return count;
 }
 
 static unsigned long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
-{
-       return __i915_gem_shrink(dev_priv, target, true);
-}
-
-static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
        i915_gem_evict_everything(dev_priv->dev);
-       return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+       return i915_gem_shrink(dev_priv, LONG_MAX,
+                              I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 }
 
 static int
@@ -2067,7 +2075,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        for (i = 0; i < page_count; i++) {
                page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                if (IS_ERR(page)) {
-                       i915_gem_purge(dev_priv, page_count);
+                       i915_gem_shrink(dev_priv,
+                                       page_count,
+                                       I915_SHRINK_BOUND |
+                                       I915_SHRINK_UNBOUND |
+                                       I915_SHRINK_PURGEABLE);
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                }
                if (IS_ERR(page)) {
@@ -5261,11 +5273,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
        if (!i915_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;
 
-       freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+       freed = i915_gem_shrink(dev_priv,
+                               sc->nr_to_scan,
+                               I915_SHRINK_BOUND |
+                               I915_SHRINK_UNBOUND |
+                               I915_SHRINK_PURGEABLE);
        if (freed < sc->nr_to_scan)
-               freed += __i915_gem_shrink(dev_priv,
-                                          sc->nr_to_scan - freed,
-                                          false);
+               freed += i915_gem_shrink(dev_priv,
+                                        sc->nr_to_scan - freed,
+                                        I915_SHRINK_BOUND |
+                                        I915_SHRINK_UNBOUND);
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
 
-- 
2.1.0
