On Fri, 2014-11-07 at 12:44 +0000, Chris Wilson wrote:
> Previously, this was restricted to only operate on bound objects - to
> make pointer access through the GTT to the object coherent with writes
> to and from the GPU. A second usecase is drm_intel_bo_wait_rendering()
> which at present does not function unless the object also happens to
> be bound into the GGTT (on current systems that is becoming increasingly
> rare, especially for the typical requests from mesa). A third usecase is
> a future patch wishing to extend the coverage of the GTT domain to
> include objects not bound into the GGTT but still in its coherent cache
> domain. For the latter pair of requests, we need to operate on the
> object regardless of its bind state.
> 
> v2: After discussion with Akash, we came to the conclusion that the
> get-pages was required in order for accurate domain tracking in the
> corner cases (like the shrinker) and also useful for ensuring memory
> coherency with earlier cached CPU mmaps in case userspace uses exotic
> cache bypass (non-temporal) instructions.
> 
> v3: Fix the inactive object check.
> 
> Cc: Akash Goel <akash.g...@intel.com>
> Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_gem.c | 43 ++++++++++++++++++-----------------------
>  1 file changed, 19 insertions(+), 24 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 3528ef749009..23894e573343 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -153,12 +153,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
>       return 0;
>  }
>  
> -static inline bool
> -i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
> -{
> -     return i915_gem_obj_bound_any(obj) && !obj->active;
> -}
> -
>  int
>  i915_gem_init_ioctl(struct drm_device *dev, void *data,
>                   struct drm_file *file)
> @@ -1410,18 +1404,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
>       if (ret)
>               goto unref;
>  
> -     if (read_domains & I915_GEM_DOMAIN_GTT) {
> +     if (read_domains & I915_GEM_DOMAIN_GTT)
>               ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
> -
> -             /* Silently promote "you're not bound, there was nothing to do"
> -              * to success, since the client was just asking us to
> -              * make sure everything was done.
> -              */
> -             if (ret == -EINVAL)
> -                     ret = 0;
> -     } else {
> +     else
>               ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
> -     }
>  
>  unref:
>       drm_gem_object_unreference(&obj->base);
> @@ -3588,15 +3574,10 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
>  int
>  i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>  {
> -     struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> -     struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
>       uint32_t old_write_domain, old_read_domains;
> +     struct i915_vma *vma;
>       int ret;
>  
> -     /* Not valid to be called on unbound objects. */
> -     if (vma == NULL)
> -             return -EINVAL;
> -
>       if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
>               return 0;
>  
> @@ -3605,6 +3586,19 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>               return ret;
>  
>       i915_gem_object_retire(obj);
> +
> +     /* Flush and acquire obj->pages so that we are coherent through
> +      * direct access in memory with previous cached writes through
> +      * shmemfs and that our cache domain tracking remains valid.
> +      * For example, if the obj->filp was moved to swap without us
> +      * being notified and releasing the pages, we would mistakenly
> +      * continue to assume that the obj remained out of the CPU cached
> +      * domain.
> +      */
> +     ret = i915_gem_object_get_pages(obj);
> +     if (ret)
> +             return ret;
> +
>       i915_gem_object_flush_cpu_write_domain(obj, false);
>  
>       /* Serialise direct access to this object with the barriers for
> @@ -3636,9 +3630,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>                                           old_write_domain);
>  
>       /* And bump the LRU for this access */
> -     if (i915_gem_object_is_inactive(obj))
> +     vma = i915_gem_obj_to_ggtt(obj);
> +     if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
>               list_move_tail(&vma->mm_list,
> -                            &dev_priv->gtt.base.inactive_list);
> +                            &to_i915(obj->base.dev)->gtt.base.inactive_list);
>  
>       return 0;
>  }

Reviewed-by: Akash Goel <akash.g...@intel.com>

Best regards
Akash

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to