The use of the mm_list by deferred-free gets in the way of the following
patches, which extend the range of objects tracked. We can simplify things
if we just make the unbind during free uninterruptible.
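
In outline, the free path after this patch looks roughly like the sketch
below (condensed from the i915_gem_free_object() hunk that follows; the
phys-object detach and mmap-offset release are elided):

    was_interruptible = dev_priv->mm.interruptible;
    dev_priv->mm.interruptible = false;

    /* With interruptible waits disabled, unbind can no longer return
     * -ERESTARTSYS, so there is no need to park the object on a
     * deferred-free list.
     */
    WARN_ON(i915_gem_object_unbind(obj));
    i915_gem_object_put_pages_gtt(obj);

    dev_priv->mm.interruptible = was_interruptible;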

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c |   22 +-
 drivers/gpu/drm/i915/i915_drv.h     |   15 +-
 drivers/gpu/drm/i915/i915_gem.c     |  441 +++++++++++++++++++----------------
 3 files changed, 256 insertions(+), 222 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ba6b8a6..87d2acc 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -46,7 +46,7 @@ enum {
        ACTIVE_LIST,
        FLUSHING_LIST,
        INACTIVE_LIST,
-       DEFERRED_FREE_LIST,
+       UNBOUND_LIST,
 };
 
 static const char *yesno(int v)
@@ -173,17 +173,17 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
                seq_printf(m, "Active:\n");
                head = &dev_priv->mm.active_list;
                break;
-       case INACTIVE_LIST:
-               seq_printf(m, "Inactive:\n");
-               head = &dev_priv->mm.inactive_list;
-               break;
        case FLUSHING_LIST:
                seq_printf(m, "Flushing:\n");
                head = &dev_priv->mm.flushing_list;
                break;
-       case DEFERRED_FREE_LIST:
-               seq_printf(m, "Deferred free:\n");
-               head = &dev_priv->mm.deferred_free_list;
+       case INACTIVE_LIST:
+               seq_printf(m, "Inactive:\n");
+               head = &dev_priv->mm.inactive_list;
+               break;
+       case UNBOUND_LIST:
+               seq_printf(m, "Unbound:\n");
+               head = &dev_priv->mm.unbound_list;
                break;
        default:
                mutex_unlock(&dev->struct_mutex);
@@ -252,8 +252,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
-       count_objects(&dev_priv->mm.deferred_free_list, mm_list);
-       seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
+       count_objects(&dev_priv->mm.unbound_list, mm_list);
+       seq_printf(m, "  %u [%u] unbound objects, %zu [%zu] bytes\n",
                   count, mappable_count, size, mappable_size);
 
        size = count = mappable_size = mappable_count = 0;
@@ -1796,7 +1796,7 @@ static struct drm_info_list i915_debugfs_list[] = {
        {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
        {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) 
FLUSHING_LIST},
        {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) 
INACTIVE_LIST},
-       {"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) 
DEFERRED_FREE_LIST},
+       {"i915_gem_unbound", i915_gem_object_list_info, 0, (void *) 
UNBOUND_LIST},
        {"i915_gem_pageflip", i915_gem_pageflip_info, 0},
        {"i915_gem_request", i915_gem_request_info, 0},
        {"i915_gem_seqno", i915_gem_seqno_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c17aeab..4eee0bf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -645,16 +645,15 @@ typedef struct drm_i915_private {
                 */
                struct list_head inactive_list;
 
-               /** LRU list of objects with fence regs on them. */
-               struct list_head fence_list;
-
                /**
-                * List of objects currently pending being freed.
-                *
-                * These objects are no longer in use, but due to a signal
-                * we were prevented from freeing them at the appointed time.
+                * List of objects which are not bound to the GTT (thus
+                * are idle and not used by the GPU) but still have
+                * their (presumably uncached) pages attached.
                 */
-               struct list_head deferred_free_list;
+               struct list_head unbound_list;
+
+               /** LRU list of objects with fence regs on them. */
+               struct list_head fence_list;
 
                /**
                 * We leave the user IRQ off as much as possible,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 474a34f..3a6b776 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -54,12 +54,19 @@ static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file);
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
                                    struct shrink_control *sc);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
+static struct drm_i915_gem_object *
+first_unbound_bo(struct drm_i915_private *dev_priv)
+{
+       return list_first_entry(&dev_priv->mm.unbound_list,
+                               struct drm_i915_gem_object,
+                               mm_list);
+}
+
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
@@ -1367,59 +1374,55 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
-
-static int
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
-                             gfp_t gfpmask)
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
-       int page_count, i;
-       struct address_space *mapping;
        struct inode *inode;
-       struct page *page;
 
-       /* Get the list of pages out of our struct file.  They'll be pinned
-        * at this point until we release them.
+       /* Our goal here is to return as much of the memory as
+        * is possible back to the system as we are called from OOM.
+        * To do this we must instruct the shmfs to drop all of its
+        * backing pages, *now*.
         */
-       page_count = obj->base.size / PAGE_SIZE;
-       BUG_ON(obj->pages != NULL);
-       obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
-       if (obj->pages == NULL)
-               return -ENOMEM;
-
        inode = obj->base.filp->f_path.dentry->d_inode;
-       mapping = inode->i_mapping;
-       gfpmask |= mapping_gfp_mask(mapping);
-
-       for (i = 0; i < page_count; i++) {
-               page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
-               if (IS_ERR(page))
-                       goto err_pages;
-
-               obj->pages[i] = page;
-       }
-
-       if (i915_gem_object_needs_bit17_swizzle(obj))
-               i915_gem_object_do_bit_17_swizzle(obj);
+       shmem_truncate_range(inode, 0, (loff_t)-1);
 
-       return 0;
+       if (obj->base.map_list.map)
+               drm_gem_free_mmap_offset(&obj->base);
 
-err_pages:
-       while (i--)
-               page_cache_release(obj->pages[i]);
+       obj->madv = __I915_MADV_PURGED;
+}
 
-       drm_free_large(obj->pages);
-       obj->pages = NULL;
-       return PTR_ERR(page);
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
+{
+       return obj->madv == I915_MADV_DONTNEED;
 }
 
-static void
+static int
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
        int page_count = obj->base.size / PAGE_SIZE;
-       int i;
+       int ret, i;
+
+       if (obj->pages == NULL)
+               return 0;
 
        BUG_ON(obj->madv == __I915_MADV_PURGED);
 
+       ret = i915_gem_object_set_to_cpu_domain(obj, 0);
+       if (ret && ret != -EIO)
+               return ret;
+
+       if (ret) {
+               /* In the event of a disaster, abandon all caches and
+                * hope for the best.
+                */
+               i915_gem_clflush_object(obj);
+               obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj);
 
@@ -1439,6 +1442,162 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 
        drm_free_large(obj->pages);
        obj->pages = NULL;
+
+       list_del(&obj->mm_list);
+
+       if (i915_gem_object_is_purgeable(obj))
+               i915_gem_object_truncate(obj);
+
+       return 0;
+}
+
+static void
+i915_gem_shrink_by(struct drm_i915_private *dev_priv,
+                        unsigned long target)
+{
+       struct drm_i915_gem_object *obj, *next;
+       unsigned long count = 0;
+       int ret;
+
+       if (target == -1)
+               ret = i915_gpu_idle(dev_priv->dev, true);
+
+       list_for_each_entry_safe(obj, next,
+                                &dev_priv->mm.unbound_list,
+                                mm_list) {
+               if (i915_gem_object_is_purgeable(obj) &&
+                   i915_gem_object_put_pages_gtt(obj) == 0) {
+                       count += obj->base.size >> PAGE_SHIFT;
+                       if (count >= target)
+                               return;
+               }
+       }
+
+       list_for_each_entry_safe(obj, next,
+                                &dev_priv->mm.inactive_list,
+                                mm_list) {
+               if (i915_gem_object_is_purgeable(obj) &&
+                   i915_gem_object_unbind(obj) == 0 &&
+                   i915_gem_object_put_pages_gtt(obj) == 0) {
+                       count += obj->base.size >> PAGE_SHIFT;
+                       if (count >= target)
+                               return;
+               }
+       }
+}
+
+/* Try to allocate some memory under the struct_mutex by purging some
+ * of our own buffers if necessary.
+ */
+static void *i915_malloc(struct drm_i915_private *dev_priv,
+                        unsigned long size)
+{
+       gfp_t gfp;
+       void *ptr;
+
+       gfp = GFP_KERNEL;
+       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+       gfp &= ~(__GFP_IO | __GFP_WAIT);
+
+       ptr = kmalloc(size, gfp);
+       if (ptr)
+               return ptr;
+
+       if (size <= 2*PAGE_SIZE) {
+               i915_gem_shrink_by(dev_priv, (size >> PAGE_SHIFT) + 1);
+               ptr = kmalloc(size, gfp);
+               if (ptr)
+                       return ptr;
+
+               i915_gem_shrink_by(dev_priv, -1);
+
+               gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
+               gfp |= __GFP_IO | __GFP_WAIT;
+               return kmalloc(size, gfp);
+       } else {
+               gfp |= __GFP_HIGHMEM;
+               ptr =  __vmalloc(size, gfp, PAGE_KERNEL);
+               if (ptr)
+                       return ptr;
+
+               i915_gem_shrink_by(dev_priv, (size >> PAGE_SHIFT) + 1);
+               ptr =  __vmalloc(size, gfp, PAGE_KERNEL);
+               if (ptr)
+                       return ptr;
+
+               i915_gem_shrink_by(dev_priv, -1);
+
+               gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
+               gfp |= __GFP_IO | __GFP_WAIT;
+               return  __vmalloc(size, gfp, PAGE_KERNEL);
+       }
+}
+
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       int page_count, i;
+       struct address_space *mapping;
+       struct page *page;
+       gfp_t gfp;
+
+       if (obj->pages)
+               return 0;
+
+       /* Get the list of pages out of our struct file.  They'll be pinned
+        * at this point until we release them.
+        */
+       page_count = obj->base.size / PAGE_SIZE;
+       BUG_ON(obj->pages != NULL);
+       obj->pages = i915_malloc(dev_priv, page_count*sizeof(struct page *));
+       if (obj->pages == NULL)
+               return -ENOMEM;
+
+       /* Fail silently without starting the shrinker */
+       mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+       gfp = mapping_gfp_mask(mapping);
+       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+       gfp &= ~(__GFP_IO | __GFP_WAIT);
+       for (i = 0; i < page_count; i++) {
+               page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+               if (IS_ERR(page)) {
+                       i915_gem_shrink_by(dev_priv, page_count);
+                       page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+               }
+               if (IS_ERR(page)) {
+                       /* We've tried hard to allocate the memory by reaping
+                        * our own buffer, now let the real VM do its job and
+                        * go down in flames if truly OOM.
+                        */
+                       gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | 
__GFP_NO_KSWAPD);
+                       gfp |= __GFP_IO | __GFP_WAIT;
+
+                       i915_gem_shrink_by(dev_priv, -1);
+                       page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+                       if (IS_ERR(page))
+                               goto err_pages;
+
+                       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+                       gfp &= ~(__GFP_IO | __GFP_WAIT);
+               }
+
+               obj->pages[i] = page;
+       }
+
+       if (i915_gem_object_needs_bit17_swizzle(obj))
+               i915_gem_object_do_bit_17_swizzle(obj);
+
+       list_add_tail(&obj->mm_list, &dev_priv->mm.unbound_list);
+       return 0;
+
+err_pages:
+       while (i--)
+               page_cache_release(obj->pages[i]);
+
+       drm_free_large(obj->pages);
+       obj->pages = NULL;
+       return PTR_ERR(page);
 }
 
 void
@@ -1517,32 +1676,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
        WARN_ON(i915_verify_lists(dev));
 }
 
-/* Immediately discard the backing storage */
-static void
-i915_gem_object_truncate(struct drm_i915_gem_object *obj)
-{
-       struct inode *inode;
-
-       /* Our goal here is to return as much of the memory as
-        * is possible back to the system as we are called from OOM.
-        * To do this we must instruct the shmfs to drop all of its
-        * backing pages, *now*.
-        */
-       inode = obj->base.filp->f_path.dentry->d_inode;
-       shmem_truncate_range(inode, 0, (loff_t)-1);
-
-       if (obj->base.map_list.map)
-               drm_gem_free_mmap_offset(&obj->base);
-
-       obj->madv = __I915_MADV_PURGED;
-}
-
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
-       return obj->madv == I915_MADV_DONTNEED;
-}
-
 static void
 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
                               uint32_t flush_domains)
@@ -1748,6 +1881,9 @@ void i915_gem_reset(struct drm_device *dev)
                obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
        }
 
+       while (!list_empty(&dev_priv->mm.unbound_list))
+               i915_gem_object_put_pages_gtt(first_unbound_bo(dev_priv));
+
        /* The fence registers are invalidated so clear them out */
        i915_gem_reset_fences(dev);
 }
@@ -1829,20 +1965,6 @@ i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;
 
-       if (!list_empty(&dev_priv->mm.deferred_free_list)) {
-           struct drm_i915_gem_object *obj, *next;
-
-           /* We must be careful that during unbind() we do not
-            * accidentally infinitely recurse into retire requests.
-            * Currently:
-            *   retire -> free -> unbind -> wait -> retire_ring
-            */
-           list_for_each_entry_safe(obj, next,
-                                    &dev_priv->mm.deferred_free_list,
-                                    mm_list)
-                   i915_gem_free_object_tail(obj);
-       }
-
        for (i = 0; i < I915_NUM_RINGS; i++)
                i915_gem_retire_requests_ring(&dev_priv->ring[i]);
 }
@@ -2061,7 +2183,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        }
 
        ret = i915_gem_object_finish_gpu(obj);
-       if (ret == -ERESTARTSYS)
+       if (ret && ret != -EIO)
                return ret;
        /* Continue on if we fail due to EIO, the GPU is hung so we
         * should be safe and we need to cleanup or else we might
@@ -2070,25 +2192,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 
        i915_gem_object_finish_gtt(obj);
 
-       /* Move the object to the CPU domain to ensure that
-        * any possible CPU writes while it's not in the GTT
-        * are flushed when we go to remap it.
-        */
-       if (ret == 0)
-               ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-       if (ret == -ERESTARTSYS)
-               return ret;
-       if (ret) {
-               /* In the event of a disaster, abandon all caches and
-                * hope for the best.
-                */
-               i915_gem_clflush_object(obj);
-               obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-       }
-
        /* release the fence reg _after_ flushing */
        ret = i915_gem_object_put_fence(obj);
-       if (ret == -ERESTARTSYS)
+       if (ret && ret != -EIO)
                return ret;
 
        trace_i915_gem_object_unbind(obj);
@@ -2099,10 +2205,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
                obj->has_aliasing_ppgtt_mapping = 0;
        }
 
-       i915_gem_object_put_pages_gtt(obj);
-
        list_del_init(&obj->gtt_list);
-       list_del_init(&obj->mm_list);
+       list_move_tail(&obj->mm_list, &dev_priv->mm.unbound_list);
        /* Avoid an unnecessary call to unbind on rebind. */
        obj->map_and_fenceable = true;
 
@@ -2110,10 +2214,10 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
        obj->gtt_space = NULL;
        obj->gtt_offset = 0;
 
-       if (i915_gem_object_is_purgeable(obj))
-               i915_gem_object_truncate(obj);
+       if (obj->base.read_domains & I915_GEM_DOMAIN_CPU)
+               i915_gem_object_put_pages_gtt(obj);
 
-       return ret;
+       return 0;
 }
 
 int
@@ -2644,7 +2748,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_mm_node *free_space;
-       gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
        int ret;
@@ -2684,6 +2787,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                return -E2BIG;
        }
 
+       ret = i915_gem_object_get_pages_gtt(obj);
+       if (ret)
+               return ret;
+
  search_free:
        if (map_and_fenceable)
                free_space =
@@ -2707,9 +2814,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                                drm_mm_get_block(free_space, size, alignment);
        }
        if (obj->gtt_space == NULL) {
-               /* If the gtt is empty and we're still having trouble
-                * fitting our object in, we're out of memory.
-                */
                ret = i915_gem_evict_something(dev, size, alignment,
                                               map_and_fenceable);
                if (ret)
@@ -2718,44 +2822,15 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                goto search_free;
        }
 
-       ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
-       if (ret) {
-               drm_mm_put_block(obj->gtt_space);
-               obj->gtt_space = NULL;
-
-               if (ret == -ENOMEM) {
-                       /* first try to reclaim some memory by clearing the GTT */
-                       ret = i915_gem_evict_everything(dev, false);
-                       if (ret) {
-                               /* now try to shrink everyone else */
-                               if (gfpmask) {
-                                       gfpmask = 0;
-                                       goto search_free;
-                               }
-
-                               return -ENOMEM;
-                       }
-
-                       goto search_free;
-               }
-
-               return ret;
-       }
-
        ret = i915_gem_gtt_bind_object(obj);
        if (ret) {
-               i915_gem_object_put_pages_gtt(obj);
                drm_mm_put_block(obj->gtt_space);
                obj->gtt_space = NULL;
-
-               if (i915_gem_evict_everything(dev, false))
-                       return ret;
-
-               goto search_free;
+               return ret;
        }
 
        list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
-       list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -3546,9 +3621,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        if (obj->madv != __I915_MADV_PURGED)
                obj->madv = args->madv;
 
-       /* if the object is no longer bound, discard its backing storage */
-       if (i915_gem_object_is_purgeable(obj) &&
-           obj->gtt_space == NULL)
+       /* if the object is no longer attached, discard its backing storage */
+       if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
                i915_gem_object_truncate(obj);
 
        args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3622,24 +3696,29 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        return 0;
 }
 
-static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
+       struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
+       bool was_interruptible;
        int ret;
 
-       ret = i915_gem_object_unbind(obj);
-       if (ret == -ERESTARTSYS) {
-               list_move(&obj->mm_list,
-                         &dev_priv->mm.deferred_free_list);
-               return;
-       }
-
        trace_i915_gem_object_destroy(obj);
 
+       if (obj->phys_obj)
+               i915_gem_detach_phys_object(dev, obj);
+
+       was_interruptible = dev_priv->mm.interruptible;
+       dev_priv->mm.interruptible = false;
+
+       WARN_ON(i915_gem_object_unbind(obj));
+       i915_gem_object_put_pages_gtt(obj);
        if (obj->base.map_list.map)
                drm_gem_free_mmap_offset(&obj->base);
 
+       dev_priv->mm.interruptible = was_interruptible;
+
        drm_gem_object_release(&obj->base);
        i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
@@ -3648,17 +3727,6 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
        kfree(obj);
 }
 
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
-       struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-       struct drm_device *dev = obj->base.dev;
-
-       if (obj->phys_obj)
-               i915_gem_detach_phys_object(dev, obj);
-
-       i915_gem_free_object_tail(obj);
-}
-
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -3900,8 +3968,8 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->mm.active_list);
        INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
        INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+       INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-       INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
        INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
        for (i = 0; i < I915_NUM_RINGS; i++)
                init_ring_lists(&dev_priv->ring[i]);
@@ -4172,60 +4240,27 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
                             struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
-       struct drm_i915_gem_object *obj, *next;
+       struct drm_i915_gem_object *obj;
        int nr_to_scan = sc->nr_to_scan;
        int cnt;
 
        if (!mutex_trylock(&dev->struct_mutex))
                return 0;
 
-       /* "fast-path" to count number of available objects */
-       if (nr_to_scan == 0) {
-               cnt = 0;
-               list_for_each_entry(obj,
-                                   &dev_priv->mm.inactive_list,
-                                   mm_list)
-                       cnt++;
-               mutex_unlock(&dev->struct_mutex);
-               return cnt / 100 * sysctl_vfs_cache_pressure;
-       }
-
-rescan:
-       /* first scan for clean buffers */
-       i915_gem_retire_requests(dev);
+       if (nr_to_scan) {
+               do {
+                       while (nr_to_scan &&
+                              !list_empty(&dev_priv->mm.unbound_list) &&
+                              i915_gem_object_put_pages_gtt(first_unbound_bo(dev_priv)) == 0)
+                               nr_to_scan--;
 
-       list_for_each_entry_safe(obj, next,
-                                &dev_priv->mm.inactive_list,
-                                mm_list) {
-               if (i915_gem_object_is_purgeable(obj)) {
-                       if (i915_gem_object_unbind(obj) == 0 &&
-                           --nr_to_scan == 0)
-                               break;
-               }
+               } while (nr_to_scan && i915_gpu_is_active(dev) && i915_gpu_idle(dev, true) == 0);
        }
 
-       /* second pass, evict/count anything still on the inactive list */
        cnt = 0;
-       list_for_each_entry_safe(obj, next,
-                                &dev_priv->mm.inactive_list,
-                                mm_list) {
-               if (nr_to_scan &&
-                   i915_gem_object_unbind(obj) == 0)
-                       nr_to_scan--;
-               else
-                       cnt++;
-       }
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm_list)
+               cnt++;
 
-       if (nr_to_scan && i915_gpu_is_active(dev)) {
-               /*
-                * We are desperate for pages, so as a last resort, wait
-                * for the GPU to finish and discard whatever we can.
-                * This has a dramatic impact to reduce the number of
-                * OOM-killer events whilst running the GPU aggressively.
-                */
-               if (i915_gpu_idle(dev, true) == 0)
-                       goto rescan;
-       }
        mutex_unlock(&dev->struct_mutex);
        return cnt / 100 * sysctl_vfs_cache_pressure;
 }
-- 
1.7.9.1
