From: Brad Volkin <bradley.d.vol...@intel.com>

The command parser is going to need the same object synchronization and
page-pinning setup logic that the shmem pread path uses, so factor it
out of i915_gem_shmem_pread() into i915_gem_obj_prepare_shmem_read()
for reuse.
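
For reference, the helper pins the object's pages, so a caller must
pair it with i915_gem_object_unpin_pages() when finished (as the pread
path already does). A minimal usage sketch, illustrative only since the
command parser call site is not part of this patch:

        int needs_clflush = 0;
        int ret;

        ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
        if (ret)
                return ret;

        /* Flush stale cachelines before reading if the object is not
         * coherent with the CPU cache; drm_clflush_sg() is one way to
         * do this for the whole backing store.
         */
        if (needs_clflush)
                drm_clflush_sg(obj->pages);

        /* ... read from the object's shmem-backed pages ... */

        i915_gem_object_unpin_pages(obj);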

v2: Add a check that the object is backed by shmem

Signed-off-by: Brad Volkin <bradley.d.vol...@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h |  3 +++
 drivers/gpu/drm/i915/i915_gem.c | 51 ++++++++++++++++++++++++++++++-----------
 2 files changed, 40 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8c64831..582035b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2097,6 +2097,9 @@ void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+                                   int *needs_clflush);
+
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3618bb0..83990cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -326,6 +326,42 @@ __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
        return 0;
 }
 
+/*
+ * Pins the specified object's pages and synchronizes the object with
+ * GPU accesses. Sets needs_clflush to non-zero if the caller should
+ * flush the object from the CPU cache.
+ */
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+                                   int *needs_clflush)
+{
+       int ret;
+
+       *needs_clflush = 0;
+
+       if (!obj->base.filp)
+               return -EINVAL;
+
+       if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
+               /* If we're not in the cpu read domain, set ourself into the gtt
+                * read domain and manually flush cachelines (if required). This
+                * optimizes for the case when the gpu will dirty the data
+                * anyway again before the next pread happens. */
+               *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
+                                                       obj->cache_level);
+               ret = i915_gem_object_wait_rendering(obj, true);
+               if (ret)
+                       return ret;
+       }
+
+       ret = i915_gem_object_get_pages(obj);
+       if (ret)
+               return ret;
+
+       i915_gem_object_pin_pages(obj);
+
+       return ret;
+}
+
 /* Per-page copy function for the shmem pread fastpath.
  * Flushes invalid cachelines before reading the target if
  * needs_clflush is set. */
@@ -423,23 +459,10 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
-               /* If we're not in the cpu read domain, set ourself into the gtt
-                * read domain and manually flush cachelines (if required). This
-                * optimizes for the case when the gpu will dirty the data
-                * anyway again before the next pread happens. */
-               needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-               ret = i915_gem_object_wait_rendering(obj, true);
-               if (ret)
-                       return ret;
-       }
-
-       ret = i915_gem_object_get_pages(obj);
+       ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
        if (ret)
                return ret;
 
-       i915_gem_object_pin_pages(obj);
-
        offset = args->offset;
 
        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
-- 
1.8.3.2
