On Thu, Mar 24, 2016 at 11:52:59PM +0530, akash.g...@intel.com wrote:
> +static int
> +unsafe_drop_pages(struct drm_i915_gem_object *obj)
> +{
> +     struct i915_vma *vma, *next;
> +     int ret;
> +
> +     drm_gem_object_reference(&obj->base);
> +     list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
> +             if (i915_vma_unbind(vma))
> +                     break;
> +
> +     ret = i915_gem_object_put_pages(obj);
> +     drm_gem_object_unreference(&obj->base);
> +
> +     return ret;
> +}
> +
> +static int
> +do_migrate_page(struct drm_i915_gem_object *obj)
> +{
> +     struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> +     int ret = 0;
> +
> +     if (!can_migrate_page(obj))
> +             return -EBUSY;
> +
> +     /* HW access would be required for a bound object for which
> +      * device has to be kept runtime active. But a deadlock scenario
> +      * can arise if the attempt is made to resume the device, when
> +      * either a suspend or a resume operation is already happening
> +      * concurrently from some other path and that only actually
> +      * triggered the compaction. So only unbind if the device is
> +      * currently runtime active.
> +      */
> +     if (!intel_runtime_pm_get_if_in_use(dev_priv))
> +             return -EBUSY;
> +
> +     if (!unsafe_drop_pages(obj))
> +             ret = -EBUSY;

Reversed! unsafe_drop_pages() passes along the i915_gem_object_put_pages()
return value, i.e. 0 on success, so this reports -EBUSY exactly when the
pages were successfully dropped and returns 0 when they were not.
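
Something along these lines should do it, assuming unsafe_drop_pages()
keeps the usual 0-on-success convention:

	/* unsafe_drop_pages() returns 0 on success, so only report
	 * -EBUSY when it actually failed to drop the pages.
	 */
	if (unsafe_drop_pages(obj))
		ret = -EBUSY;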

> +
> +     intel_runtime_pm_put(dev_priv);
> +     return ret;
> +}
> +
>  /**
>   * i915_gem_shrink - Shrink buffer object caches
>   * @dev_priv: i915 device
> @@ -156,7 +222,6 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
>               INIT_LIST_HEAD(&still_in_list);
>               while (count < target && !list_empty(phase->list)) {
>                       struct drm_i915_gem_object *obj;
> -                     struct i915_vma *vma, *v;
>  
>                       obj = list_first_entry(phase->list,
>                                              typeof(*obj), global_list);
> @@ -172,18 +237,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
>                       if (!can_release_pages(obj))
>                               continue;
>  
> -                     drm_gem_object_reference(&obj->base);
> -
> -                     /* For the unbound phase, this should be a no-op! */
> -                     list_for_each_entry_safe(vma, v,
> -                                              &obj->vma_list, obj_link)
> -                             if (i915_vma_unbind(vma))
> -                                     break;
> -
> -                     if (i915_gem_object_put_pages(obj) == 0)
> +                     if (unsafe_drop_pages(obj) == 0)
>                               count += obj->base.size >> PAGE_SHIFT;

But the same check is the right way round here :)
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre