Since we are operating at the global level, we can simply iterate over
the bound list using the robust method developed for the shrinker.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            |  2 +-
 drivers/gpu/drm/i915/i915_gem_evict.c      | 54 +++++++++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 3 files changed, 32 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c48909f6baa2..97372869097f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2994,7 +2994,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
 int __must_check
 i915_gem_evict_range(struct drm_device *dev, struct i915_address_space *vm,
                     unsigned long start, unsigned long end);
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
+int i915_gem_evict_vm(struct i915_address_space *vm);
 int i915_gem_evict_everything(struct drm_device *dev);
 
 /* belongs in i915_gem_gtt.h */
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 9754740edecd..cf33f982da8e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -254,7 +254,6 @@ i915_gem_evict_range(struct drm_device *dev, struct i915_address_space *vm,
 /**
  * i915_gem_evict_vm - Evict all idle vmas from a vm
  * @vm: Address space to cleanse
- * @do_idle: Boolean directing whether to idle first.
  *
- * This function evicts all idles vmas from a vm. If all unpinned vmas should be
- * evicted the @do_idle needs to be set to true.
+ * This function evicts all vmas from a vm, after first idling the GPU and
+ * retiring outstanding requests so that every unpinned vma can be unbound.
@@ -265,7 +264,7 @@ i915_gem_evict_range(struct drm_device *dev, struct i915_address_space *vm,
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+int i915_gem_evict_vm(struct i915_address_space *vm)
 {
        struct i915_vma *vma, *next;
        int ret;
@@ -273,16 +272,14 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
        WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
        trace_i915_gem_evict_vm(vm);
 
-       if (do_idle) {
-               ret = i915_gpu_idle(vm->dev);
-               if (ret)
-                       return ret;
-
-               i915_gem_retire_requests(vm->dev);
+       ret = i915_gpu_idle(vm->dev);
+       if (ret)
+               return ret;
 
-               WARN_ON(!list_empty(&vm->active_list));
-       }
+       i915_gem_retire_requests(vm->dev);
+       WARN_ON(!list_empty(&vm->active_list));
 
+       /* Having flushed everything, unbind() should never raise an error */
        list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
                if (vma->pin_count == 0)
                        WARN_ON(i915_vma_unbind(vma));
@@ -297,23 +294,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
  * This functions tries to evict all gem objects from all address spaces. Used
  * by the shrinker as a last-ditch effort and for suspend, before releasing the
  * backing storage of all unbound objects.
+ *
+ * This is similar to i915_gem_shrink_all() with the important exception that
+ * we keep a reference to the obj->pages after unbinding (so we can avoid
+ * any expensive migration between the CPU and GPU).
  */
 int
 i915_gem_evict_everything(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm, *v;
-       bool lists_empty = true;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct list_head still_in_list;
        int ret;
 
-       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-               lists_empty = (list_empty(&vm->inactive_list) &&
-                              list_empty(&vm->active_list));
-               if (!lists_empty)
-                       lists_empty = false;
-       }
-
-       if (lists_empty)
+       if (list_empty(&dev_priv->mm.bound_list))
                return -ENOSPC;
 
        trace_i915_gem_evict_everything(dev);
@@ -328,9 +321,22 @@ i915_gem_evict_everything(struct drm_device *dev)
 
        i915_gem_retire_requests(dev);
 
-       /* Having flushed everything, unbind() should never raise an error */
-       list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
-               WARN_ON(i915_gem_evict_vm(vm, false));
+       INIT_LIST_HEAD(&still_in_list);
+       while (!list_empty(&dev_priv->mm.bound_list)) {
+               struct drm_i915_gem_object *obj;
+               struct i915_vma *vma, *v;
+
+               obj = list_first_entry(&dev_priv->mm.bound_list,
+                                      typeof(*obj), global_list);
+               list_move_tail(&obj->global_list, &still_in_list);
+
+               drm_gem_object_reference(&obj->base);
+               list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                       if (WARN_ON(i915_vma_unbind(vma)))
+                               break;
+               drm_gem_object_unreference(&obj->base);
+       }
+       list_splice(&still_in_list, &dev_priv->mm.bound_list);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9afd2dcba43b..bd48393fb91f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -820,7 +820,7 @@ err:
                list_for_each_entry(vma, vmas, exec_list)
                        i915_gem_execbuffer_unreserve_vma(vma);
 
-               ret = i915_gem_evict_vm(vm, true);
+               ret = i915_gem_evict_vm(vm);
                if (ret)
                        return ret;
        } while (1);
-- 
2.1.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to