Building on the previous patch, which added the new bind/unbind
function pointers to the VM, this patch actually puts those function
pointers to use (a rough sketch of the interface follows).
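
For reference, the hooks exercised here look roughly like this. This
is a sketch only: the field names are taken from the call sites below,
and the exact layout was introduced in the previous patch (the types
involved all live in i915_drv.h):

/* Per-address-space bind/unbind hooks (sketch). flags is 0, or
 * GLOBAL_BIND to also force a mapping into the global GTT. */
struct i915_address_space {
	/* ... existing fields ... */
	void (*map_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	void (*unmap_vma)(struct i915_vma *vma);
};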

Split out as a separate patch to aid review. I'm fine with squashing
it into the previous patch if people request it.

v2: Updated to address the smart ggtt, which can do aliasing as needed.
Make sure we bind to the global gtt when mappable and fenceable; I
thought we could get away without this initially, but we cannot.
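
In sketch form, the pin path now derives the bind flags from
map_and_fenceable and rebinds with GLOBAL_BIND on a later pin if the
first bind went through the aliasing PPGTT only (condensed from the
i915_gem_object_pin() hunk in this patch):

	const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
	...
	vma = i915_gem_obj_to_vma(obj, vm);
	vm->map_vma(vma, obj->cache_level, flags);

	/* A PPGTT-only binding gains a global GTT mapping when a
	 * mappable/fenceable pin comes in later. */
	if (!obj->has_global_gtt_mapping && map_and_fenceable)
		vm->map_vma(vma, obj->cache_level, GLOBAL_BIND);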

Signed-off-by: Ben Widawsky <b...@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h            | 10 ------
 drivers/gpu/drm/i915/i915_gem.c            | 37 +++++++++------------
 drivers/gpu/drm/i915/i915_gem_context.c    |  7 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 29 ++++++++--------
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 53 ++----------------------------
 5 files changed, 37 insertions(+), 99 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f3f2825..8d6aa34 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1933,18 +1933,8 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 
 /* i915_gem_gtt.c */
 void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-                           struct drm_i915_gem_object *obj,
-                           enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-                             struct drm_i915_gem_object *obj);
-
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-/* FIXME: this is never okay with full PPGTT */
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-                               enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
 void i915_gem_init_global_gtt(struct drm_device *dev);
 void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9ea6424..63297d7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2653,12 +2653,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 
        trace_i915_gem_object_unbind(obj, vm);
 
-       if (obj->has_global_gtt_mapping && i915_is_ggtt(vm))
-               i915_gem_gtt_unbind_object(obj);
-       if (obj->has_aliasing_ppgtt_mapping) {
-               i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
-               obj->has_aliasing_ppgtt_mapping = 0;
-       }
+       vma = i915_gem_obj_to_vma(obj, vm);
+       vm->unmap_vma(vma);
+
        i915_gem_gtt_finish_object(obj);
        i915_gem_object_unpin_pages(obj);
 
@@ -2666,7 +2663,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj,
        if (i915_is_ggtt(vm))
                obj->map_and_fenceable = true;
 
-       vma = i915_gem_obj_to_vma(obj, vm);
        list_del(&vma->mm_list);
        list_del(&vma->vma_link);
        drm_mm_remove_node(&vma->node);
@@ -3372,7 +3368,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
 {
        struct drm_device *dev = obj->base.dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
        int ret;
 
@@ -3407,13 +3402,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                return ret;
                }
 
-               if (obj->has_global_gtt_mapping)
-                       i915_gem_gtt_bind_object(obj, cache_level);
-               if (obj->has_aliasing_ppgtt_mapping)
-                       i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-                                              obj, cache_level);
-
-               i915_gem_obj_set_color(obj, vma->vm, cache_level);
+               vm->map_vma(vma, cache_level, 0);
+               i915_gem_obj_set_color(obj, vm, cache_level);
        }
 
        if (cache_level == I915_CACHE_NONE) {
@@ -3695,6 +3685,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
                    bool map_and_fenceable,
                    bool nonblocking)
 {
+       const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
+       struct i915_vma *vma;
        int ret;
 
        if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
@@ -3702,6 +3694,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
        WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
 
+       /* FIXME: Use vma for bounds check */
        if (i915_gem_obj_bound(obj, vm)) {
                if ((alignment &&
                     i915_gem_obj_offset(obj, vm) & (alignment - 1)) ||
@@ -3720,20 +3713,22 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
        }
 
        if (!i915_gem_obj_bound(obj, vm)) {
-               struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
                ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
                                                 map_and_fenceable,
                                                 nonblocking);
                if (ret)
                        return ret;
 
-               if (!dev_priv->mm.aliasing_ppgtt)
-                       i915_gem_gtt_bind_object(obj, obj->cache_level);
-       }
+               vma = i915_gem_obj_to_vma(obj, vm);
+               vm->map_vma(vma, obj->cache_level, flags);
+       } else
+               vma = i915_gem_obj_to_vma(obj, vm);
 
+       /* Objects are created map and fenceable. If we bind an object
+        * the first time, and we had aliasing PPGTT (and didn't request
+        * GLOBAL), we'll need to do this on the second bind. */
        if (!obj->has_global_gtt_mapping && map_and_fenceable)
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+               vm->map_vma(vma, obj->cache_level, GLOBAL_BIND);
 
        obj->pin_count++;
        obj->pin_mappable |= map_and_fenceable;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 873577d..cc7c0b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -417,8 +417,11 @@ static int do_switch(struct i915_hw_context *to)
                return ret;
        }
 
-       if (!to->obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
+       if (!to->obj->has_global_gtt_mapping) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+                                                          &dev_priv->gtt.base);
+               vma->vm->map_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+       }
 
        if (!to->is_initialized || is_default_context(to))
                hw_flags |= MI_RESTORE_INHIBIT;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8d2643b..6359ef2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -197,8 +197,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
            !target_i915_obj->has_global_gtt_mapping)) {
-               i915_gem_gtt_bind_object(target_i915_obj,
-                                        target_i915_obj->cache_level);
+               struct i915_vma *vma = i915_gem_obj_to_vma(target_i915_obj, vm);
+               vma->vm->map_vma(vma, target_i915_obj->cache_level,
+                                GLOBAL_BIND);
        }
 
        /* Validate that the target is in a valid r/w GPU domain */
@@ -404,10 +405,12 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                                   struct i915_address_space *vm,
                                   bool *need_reloc)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        bool need_fence, need_mappable;
+       u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+               !obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
+       struct i915_vma *vma;
        int ret;
 
        need_fence =
@@ -421,6 +424,7 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
+       vma = i915_gem_obj_to_vma(obj, vm);
        entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
        if (has_fenced_gpu_access) {
@@ -436,14 +440,6 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                }
        }
 
-       /* Ensure ppgtt mapping exists if needed */
-       if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-               i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-                                      obj, obj->cache_level);
-
-               obj->has_aliasing_ppgtt_mapping = 1;
-       }
-
        if (entry->offset != i915_gem_obj_offset(obj, vm)) {
                entry->offset = i915_gem_obj_offset(obj, vm);
                *need_reloc = true;
@@ -454,9 +450,7 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
        }
 
-       if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
-           !obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+       vm->map_vma(vma, obj->cache_level, flags);
 
        return 0;
 }
@@ -1047,8 +1041,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but let's be paranoid and do it
         * unconditionally for now. */
-       if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
-               i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+       if (flags & I915_DISPATCH_SECURE &&
+           !batch_obj->has_global_gtt_mapping) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(batch_obj, vm);
+               vm->map_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
+       }
 
        ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
        if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 03e6179..1de49a0 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -414,18 +414,6 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
        dev_priv->mm.aliasing_ppgtt = NULL;
 }
 
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-                           struct drm_i915_gem_object *obj,
-                           enum i915_cache_level cache_level)
-{
-       struct i915_address_space *vm = &ppgtt->base;
-       unsigned long obj_offset = i915_gem_obj_offset(obj, vm);
-
-       vm->insert_entries(vm, obj->pages,
-                          obj_offset >> PAGE_SHIFT,
-                          cache_level);
-}
-
 static void __always_unused gen6_ppgtt_map_vma(struct i915_vma *vma,
                                               enum i915_cache_level cache_level,
                                               u32 flags)
@@ -437,16 +425,6 @@ static void __always_unused gen6_ppgtt_map_vma(struct i915_vma *vma,
        gen6_ppgtt_insert_entries(vma->vm, vma->obj->pages, entry, cache_level);
 }
 
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-                             struct drm_i915_gem_object *obj)
-{
-       struct i915_address_space *vm = &ppgtt->base;
-       unsigned long obj_offset = i915_gem_obj_offset(obj, vm);
-
-       vm->clear_range(vm, obj_offset >> PAGE_SHIFT,
-                       obj->base.size >> PAGE_SHIFT);
-}
-
 static void __always_unused gen6_ppgtt_unmap_vma(struct i915_vma *vma)
 {
        const unsigned long entry = vma->node.start >> PAGE_SHIFT;
@@ -507,8 +485,10 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
                gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+                                                          &dev_priv->gtt.base);
                i915_gem_clflush_object(obj);
-               i915_gem_gtt_bind_object(obj, obj->cache_level);
+               vma->vm->map_vma(vma, obj->cache_level, 0);
        }
 
        i915_gem_chipset_flush(dev);
@@ -664,33 +644,6 @@ static void gen6_ggtt_map_vma(struct i915_vma *vma,
        }
 }
 
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-                             enum i915_cache_level cache_level)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
-       dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
-                                         entry,
-                                         cache_level);
-
-       obj->has_global_gtt_mapping = 1;
-}
-
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
-{
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
-
-       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
-                                      entry,
-                                      obj->base.size >> PAGE_SHIFT);
-
-       obj->has_global_gtt_mapping = 0;
-}
-
 static void gen6_ggtt_unmap_vma(struct i915_vma *vma)
 {
        struct drm_device *dev = vma->vm->dev;
-- 
1.8.3.3
