In order to specialise functions depending upon the type of object, we
can attach vfuncs to each object via their obj->driver_private pointer,
bringing it back to life!

For instance, this will be used in future patches to only bind pages from
a dma-buf for the duration that the object is used by the GPU - and so
prevent them from pinning those pages for the entire lifetime of the object.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h        |   10 ++++-
 drivers/gpu/drm/i915/i915_gem.c        |   65 ++++++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_gem_dmabuf.c |    4 +-
 3 files changed, 56 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bbc51ef..c42190b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -895,6 +895,11 @@ enum i915_cache_level {
        I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
 };
 
+struct drm_i915_gem_object_ops {
+       int (*get_pages)(struct drm_i915_gem_object *);
+       void (*put_pages)(struct drm_i915_gem_object *);
+};
+
 struct drm_i915_gem_object {
        struct drm_gem_object base;
 
@@ -1302,7 +1307,8 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-void i915_gem_object_init(struct drm_i915_gem_object *obj);
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+                        const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -1315,7 +1321,7 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
-int __must_check i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_ring_buffer *to);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9c8787e..ed6a1ec 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1407,15 +1407,12 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
        return obj->madv == I915_MADV_DONTNEED;
 }
 
-static int
+static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
        int page_count = obj->base.size / PAGE_SIZE;
        int ret, i;
 
-       if (obj->pages == NULL)
-               return 0;
-
        BUG_ON(obj->gtt_space);
        BUG_ON(obj->madv == __I915_MADV_PURGED);
 
@@ -1448,9 +1445,19 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 
        drm_free_large(obj->pages);
        obj->pages = NULL;
+}
 
-       list_del(&obj->gtt_list);
+static int
+i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+       const struct drm_i915_gem_object_ops *ops = obj->base.driver_private;
 
+       if (obj->sg_table || obj->pages == NULL)
+               return 0;
+
+       ops->put_pages(obj);
+
+       list_del(&obj->gtt_list);
        if (i915_gem_object_is_purgeable(obj))
                i915_gem_object_truncate(obj);
 
@@ -1467,7 +1474,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
                                 &dev_priv->mm.unbound_list,
                                 gtt_list) {
                if (i915_gem_object_is_purgeable(obj) &&
-                   i915_gem_object_put_pages_gtt(obj) == 0) {
+                   i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)
                                return count;
@@ -1479,7 +1486,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
                                 mm_list) {
                if (i915_gem_object_is_purgeable(obj) &&
                    i915_gem_object_unbind(obj) == 0 &&
-                   i915_gem_object_put_pages_gtt(obj) == 0) {
+                   i915_gem_object_put_pages(obj) == 0) {
                        count += obj->base.size >> PAGE_SHIFT;
                        if (count >= target)
                                return count;
@@ -1497,10 +1504,10 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
        i915_gem_evict_everything(dev_priv->dev);
 
        list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
-               i915_gem_object_put_pages_gtt(obj);
+               i915_gem_object_put_pages(obj);
 }
 
-int
+static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -1509,9 +1516,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct page *page;
        gfp_t gfp;
 
-       if (obj->pages || obj->sg_table)
-               return 0;
-
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
@@ -1561,7 +1565,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj);
 
-       list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
        return 0;
 
 err_pages:
@@ -1573,6 +1576,24 @@ err_pages:
        return PTR_ERR(page);
 }
 
+int
+i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       const struct drm_i915_gem_object_ops *ops = obj->base.driver_private;
+       int ret;
+
+       if (obj->sg_table || obj->pages)
+               return 0;
+
+       ret = ops->get_pages(obj);
+       if (ret)
+               return ret;
+
+       list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
+       return 0;
+}
+
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *ring,
@@ -1828,7 +1849,7 @@ void i915_gem_reset(struct drm_device *dev)
 
 
        list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
-               i915_gem_object_put_pages_gtt(obj);
+               i915_gem_object_put_pages(obj);
 
        /* The fence registers are invalidated so clear them out */
        i915_gem_reset_fences(dev);
@@ -2818,7 +2839,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                return -E2BIG;
        }
 
-       ret = i915_gem_object_get_pages_gtt(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret)
                return ret;
 
@@ -3557,9 +3578,10 @@ unlock:
        return ret;
 }
 
-void i915_gem_object_init(struct drm_i915_gem_object *obj)
+void i915_gem_object_init(struct drm_i915_gem_object *obj,
+                         const struct drm_i915_gem_object_ops *ops)
 {
-       obj->base.driver_private = NULL;
+       obj->base.driver_private = (void *)ops;
 
        INIT_LIST_HEAD(&obj->mm_list);
        INIT_LIST_HEAD(&obj->gtt_list);
@@ -3574,6 +3596,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj)
        i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
 
+static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+       .get_pages = i915_gem_object_get_pages_gtt,
+       .put_pages = i915_gem_object_put_pages_gtt,
+};
+
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size)
 {
@@ -3600,7 +3627,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        mapping_set_gfp_mask(mapping, mask);
 
-       i915_gem_object_init(obj);
+       i915_gem_object_init(obj, &i915_gem_object_ops);
 
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -3658,7 +3685,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                dev_priv->mm.interruptible = was_interruptible;
        }
 
-       i915_gem_object_put_pages_gtt(obj);
+       i915_gem_object_put_pages(obj);
        i915_gem_object_free_mmap_offset(obj);
 
        drm_gem_object_release(&obj->base);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e5f0375..1203460 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -41,7 +41,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
        if (ret)
                return ERR_PTR(ret);
 
-       ret = i915_gem_object_get_pages_gtt(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret) {
                sg = ERR_PTR(ret);
                goto out;
@@ -89,7 +89,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
                goto out_unlock;
        }
 
-       ret = i915_gem_object_get_pages_gtt(obj);
+       ret = i915_gem_object_get_pages(obj);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ERR_PTR(ret);
-- 
1.7.10.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to