To specialise functions depending upon the type of object, we can
attach vfuncs to each object via a new ->ops pointer.

For instance, this will be used in future patches to bind the pages
from a dma-buf only for the duration that the object is used by the
GPU - and so avoid pinning those pages for the entire lifetime of the
object.
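
To make the dispatch concrete, here is a minimal userspace sketch of
the pattern this patch introduces (the ops struct mirrors
drm_i915_gem_object_ops, but the names and function bodies are
illustrative only, not the actual driver code):

  #include <stdio.h>

  struct obj;

  /* per-type vfuncs, analogous to drm_i915_gem_object_ops */
  struct obj_ops {
          int (*get_pages)(struct obj *);
          void (*put_pages)(struct obj *);
  };

  struct obj {
          const struct obj_ops *ops;
  };

  static int shmem_get_pages(struct obj *o)
  {
          printf("acquire backing pages for %p\n", (void *)o);
          return 0;
  }

  static void shmem_put_pages(struct obj *o)
  {
          printf("release backing pages for %p\n", (void *)o);
  }

  static const struct obj_ops shmem_ops = {
          .get_pages = shmem_get_pages,
          .put_pages = shmem_put_pages,
  };

  int main(void)
  {
          struct obj o = { .ops = &shmem_ops };

          /* callers dispatch through obj->ops, not a fixed function */
          if (o.ops->get_pages(&o) == 0)
                  o.ops->put_pages(&o);
          return 0;
  }

A dma-buf backed object can then supply its own get_pages/put_pages
pair in a later patch without the common code needing to know the
difference.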

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h        |   12 +++++-
 drivers/gpu/drm/i915/i915_gem.c        |   71 +++++++++++++++++++++-----------
 drivers/gpu/drm/i915/i915_gem_dmabuf.c |    4 +-
 3 files changed, 60 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f16ab5e..f180874 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -898,9 +898,16 @@ enum i915_cache_level {
        I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
+struct drm_i915_gem_object_ops {
+       int (*get_pages)(struct drm_i915_gem_object *);
+       void (*put_pages)(struct drm_i915_gem_object *);
+};
+
 struct drm_i915_gem_object {
        struct drm_gem_object base;
 
+       const struct drm_i915_gem_object_ops *ops;
+
        /** Current space allocated to this object in the GTT, if any. */
        struct drm_mm_node *gtt_space;
        struct list_head gtt_list;
@@ -1305,7 +1312,8 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_object_init(struct drm_i915_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+                        const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1318,7 +1326,7 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
-int __must_check i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_ring_buffer *to);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 87a64e5..66fbd9f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1650,18 +1650,12 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
        return obj->madv == I915_MADV_DONTNEED;
 }
 
-static int
+static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
        int page_count = obj->base.size / PAGE_SIZE;
        int ret, i;
 
-       BUG_ON(obj->gtt_space);
-
-       if (obj->pages == NULL)
-               return 0;
-
-       BUG_ON(obj->gtt_space);
        BUG_ON(obj->madv == __I915_MADV_PURGED);
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@ -1693,9 +1687,21 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 
        drm_free_large(obj->pages);
        obj->pages = NULL;
+}
 
-       list_del(&obj->gtt_list);
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+       const struct drm_i915_gem_object_ops *ops = obj->ops;
+
+       if (obj->sg_table || obj->pages == NULL)
+               return 0;
+
+       BUG_ON(obj->gtt_space);
 
+       ops->put_pages(obj);
+
+       list_del(&obj->gtt_list);
        if (i915_gem_object_is_purgeable(obj))
                i915_gem_object_truncate(obj);
 
@@ -1712,7 +1718,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
                                 &dev_priv->mm.unbound_list,
                                 gtt_list) {
                if (i915_gem_object_is_purgeable(obj) &&
-                   i915_gem_object_put_pages_gtt(obj) == 0) {
+                   i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)
                                return count;
@@ -1724,7 +1730,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
                                 mm_list) {
                if (i915_gem_object_is_purgeable(obj) &&
                    i915_gem_object_unbind(obj) == 0 &&
-                   i915_gem_object_put_pages_gtt(obj) == 0) {
+                   i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)
                                return count;
@@ -1742,10 +1748,10 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
        i915_gem_evict_everything(dev_priv->dev);
 
        list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
-               i915_gem_object_put_pages_gtt(obj);
+               i915_gem_object_put_pages(obj);
 }
 
-int
+static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -1754,9 +1760,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct page *page;
        gfp_t gfp;
 
-       if (obj->pages || obj->sg_table)
-               return 0;
-
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
@@ -1806,7 +1809,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj);
 
-       list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
        return 0;
 
 err_pages:
@@ -1818,6 +1820,24 @@ err_pages:
        return PTR_ERR(page);
 }
 
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       const struct drm_i915_gem_object_ops *ops = obj->ops;
+       int ret;
+
+       if (obj->sg_table || obj->pages)
+               return 0;
+
+       ret = ops->get_pages(obj);
+       if (ret)
+               return ret;
+
+       list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+       return 0;
+}
+
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *ring,
@@ -2071,7 +2091,6 @@ void i915_gem_reset(struct drm_device *dev)
                obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
        }
 
-
        /* The fence registers are invalidated so clear them out */
        i915_gem_reset_fences(dev);
 }
@@ -2871,7 +2890,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                return -E2BIG;
        }
 
-       ret = i915_gem_object_get_pages_gtt(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;
 
@@ -3610,15 +3629,16 @@ unlock:
        return ret;
 }
 
-void i915_gem_object_init(struct drm_i915_gem_object *obj)
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+                         const struct drm_i915_gem_object_ops *ops)
 {
-       obj->base.driver_private = NULL;
-
        INIT_LIST_HEAD(&obj->mm_list);
        INIT_LIST_HEAD(&obj->gtt_list);
        INIT_LIST_HEAD(&obj->ring_list);
        INIT_LIST_HEAD(&obj->exec_list);
 
+       obj->ops = ops;
+
        obj->fence_reg = I915_FENCE_REG_NONE;
        obj->madv = I915_MADV_WILLNEED;
        /* Avoid an unnecessary call to unbind on the first bind. */
@@ -3627,6 +3647,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj)
        i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
 
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+       .get_pages = i915_gem_object_get_pages_gtt,
+       .put_pages = i915_gem_object_put_pages_gtt,
+};
+
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size)
 {
@@ -3653,7 +3678,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        mapping_set_gfp_mask(mapping, mask);
 
-       i915_gem_object_init(obj);
+       i915_gem_object_init(obj, &i915_gem_object_ops);
 
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3711,7 +3736,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                dev_priv->mm.interruptible = was_interruptible;
        }
 
-       i915_gem_object_put_pages_gtt(obj);
+       i915_gem_object_put_pages(obj);
        i915_gem_object_free_mmap_offset(obj);
 
        drm_gem_object_release(&obj->base);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 43c9530..e4f1141 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -41,7 +41,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
        if (ret)
                return ERR_PTR(ret);
 
-       ret = i915_gem_object_get_pages_gtt(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret) {
                sg = ERR_PTR(ret);
                goto out;
@@ -89,7 +89,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
                goto out_unlock;
        }
 
-       ret = i915_gem_object_get_pages_gtt(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ERR_PTR(ret);
-- 
1.7.10.4
