We want to be able to move objects between different regions, such as
system memory and local memory. In the future, everything should be
just another region.
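
Migration works by creating a donor object in the target region,
blitting the contents across, and then swapping the donor's backing
store in underneath the original object. As a rough sketch of the
intended calling sequence (illustrative only: the wrapper name below
is made up, and both entry points assert that struct_mutex is held):

  static int move_to_smem(struct i915_gem_context *ctx,
                          struct drm_i915_gem_object *obj)
  {
          int err;

          /* Drop mmaps, wait for idle and unbind the object */
          err = i915_gem_object_prepare_move(obj);
          if (err)
                  return err;

          /* Blit into a donor in system memory and adopt its pages */
          return i915_gem_object_migrate(ctx, obj, INTEL_MEMORY_SMEM);
  }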

Signed-off-by: Matthew Auld <matthew.a...@intel.com>
Signed-off-by: Abdiel Janulgue <abdiel.janul...@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahti...@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janul...@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h               |   5 +
 drivers/gpu/drm/i915/i915_gem.c               | 123 +++++++++++++++++
 .../drm/i915/selftests/intel_memory_region.c  | 128 ++++++++++++++++++
 3 files changed, 256 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b9d01caa3430..bdf85e901268 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2822,6 +2822,11 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
 
+int i915_gem_object_prepare_move(struct drm_i915_gem_object *obj);
+int i915_gem_object_migrate(struct i915_gem_context *ctx,
+                           struct drm_i915_gem_object *obj,
+                           enum intel_region_id id);
+
 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
 {
        if (!atomic_read(&i915->mm.free_count))
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 501714fbf92f..33699b3dc395 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4626,6 +4626,129 @@ int i915_gem_object_clear_blt(struct i915_gem_context *ctx,
        return i915_gem_object_fill_blt(ctx, obj, 0);
 }
 
+int i915_gem_object_prepare_move(struct drm_i915_gem_object *obj)
+{
+       int err;
+
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+       if (obj->mm.madv != I915_MADV_WILLNEED)
+               return -EINVAL;
+
+       if (i915_gem_object_needs_bit17_swizzle(obj))
+               return -EINVAL;
+
+       if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
+               return -EBUSY;
+
+       if (obj->pin_global)
+               return -EBUSY;
+
+       i915_gem_release_mmap(obj);
+
+       GEM_BUG_ON(obj->mm.mapping);
+       GEM_BUG_ON(obj->base.filp && mapping_mapped(obj->base.filp->f_mapping));
+
+       err = i915_gem_object_wait(obj,
+                                  I915_WAIT_INTERRUPTIBLE |
+                                  I915_WAIT_LOCKED |
+                                  I915_WAIT_ALL,
+                                  MAX_SCHEDULE_TIMEOUT);
+       if (err)
+               return err;
+
+       return i915_gem_object_unbind(obj);
+}
+
+int i915_gem_object_migrate(struct i915_gem_context *ctx,
+                           struct drm_i915_gem_object *obj,
+                           enum intel_region_id id)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       struct drm_i915_gem_object *donor;
+       struct intel_memory_region *mem;
+       int err = 0;
+
+       lockdep_assert_held(&i915->drm.struct_mutex);
+
+       GEM_BUG_ON(id >= INTEL_MEMORY_UKNOWN);
+       GEM_BUG_ON(obj->memory_region->id == id);
+       GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
+
+       mem = i915->regions[id];
+
+       donor = i915_gem_object_create_region(mem, obj->base.size, 0);
+       if (IS_ERR(donor))
+               return PTR_ERR(donor);
+
+       /* Copy backing-pages if we have to */
+       if (i915_gem_object_has_pages(obj)) {
+               struct sg_table *pages;
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       goto err_put_donor;
+
+               err = i915_gem_object_copy_blt(ctx, obj, donor);
+               if (err)
+                       goto err_put_donor;
+
+               i915_retire_requests(i915);
+
+               i915_gem_object_unbind(donor);
+               err = i915_gem_object_unbind(obj);
+               if (err)
+                       goto err_put_donor;
+
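+               /* Swap the backing store: drop our pages, adopt the donor's */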
+               mutex_lock(&obj->mm.lock);
+
+               pages = fetch_and_zero(&obj->mm.pages);
+               obj->ops->put_pages(obj, pages);
+
+               obj->mm.pages = __i915_gem_object_unset_pages(donor);
+               memcpy(&obj->mm.page_sizes, &donor->mm.page_sizes,
+                      sizeof(struct i915_page_sizes));
+
+               obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
+               obj->mm.get_page.sg_idx = 0;
+               __i915_gem_object_reset_page_iter(obj);
+
+               mutex_unlock(&obj->mm.lock);
+       }
+
+       if (obj->ops->release)
+               obj->ops->release(obj);
+
+       /* We still need a little special casing for shmem */
+       if (obj->base.filp)
+               fput(fetch_and_zero(&obj->base.filp));
+       else
+               obj->base.filp = fetch_and_zero(&donor->base.filp);
+
+       obj->base.size = donor->base.size;
+       obj->memory_region = mem;
+       obj->flags = donor->flags;
+       obj->ops = donor->ops;
+
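+       /* Transfer the donor's list of memory blocks to the object */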
+       list_replace_init(&donor->blocks, &obj->blocks);
+
+       mutex_lock(&mem->obj_lock);
+       list_add(&obj->region_link, &mem->objects);
+       mutex_unlock(&mem->obj_lock);
+
+       GEM_BUG_ON(i915_gem_object_has_pages(donor));
+       GEM_BUG_ON(i915_gem_object_has_pinned_pages(donor));
+
+err_put_donor:
+       i915_gem_object_put(donor);
+       if (i915_gem_object_has_pinned_pages(obj))
+               i915_gem_object_unpin_pages(obj);
+
+       return err;
+}
+
 static void __i915_gem_free_objects(struct drm_i915_private *i915,
                                    struct llist_node *freed)
 {
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 210ec6fec19a..2c07d8d219e5 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -579,6 +579,58 @@ static int igt_lmem_create(void *arg)
        return err;
 }
 
+static int igt_smem_create_migrate(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *i915 = ctx->i915;
+       struct drm_i915_gem_object *obj;
+       int err = 0;
+
+       /* Switch object backing-store on create */
+       obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+       err = i915_gem_object_migrate(ctx, obj, INTEL_MEMORY_SMEM);
+       if (err)
+               goto out_put;
+
+       err = i915_gem_object_pin_pages(obj);
+       if (err)
+               goto out_put;
+
+       i915_gem_object_unpin_pages(obj);
+out_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+static int igt_lmem_create_migrate(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *i915 = ctx->i915;
+       struct drm_i915_gem_object *obj;
+       int err = 0;
+
+       /* Switch object backing-store on create */
+       obj = i915_gem_object_create(i915, PAGE_SIZE);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+       err = i915_gem_object_migrate(ctx, obj, INTEL_MEMORY_LMEM);
+       if (err)
+               goto out_put;
+
+       err = i915_gem_object_pin_pages(obj);
+       if (err)
+               goto out_put;
+
+       i915_gem_object_unpin_pages(obj);
+out_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
 static int igt_lmem_write_gpu(void *arg)
 {
        struct i915_gem_context *ctx = arg;
@@ -608,6 +660,79 @@
        return err;
 }
 
+static int igt_lmem_pages_migrate(void *arg)
+{
+       struct i915_gem_context *ctx = arg;
+       struct drm_i915_private *i915 = ctx->i915;
+       struct drm_i915_gem_object *obj;
+       int err;
+       int i;
+
+       /* From LMEM to shmem and back again */
+
+       obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       err = i915_gem_object_clear_blt(ctx, obj);
+       if (err)
+               goto out_put;
+
+       for (i = 1; i <= 4; ++i) {
+               err = i915_gem_object_prepare_move(obj);
+               if (err)
+                       goto out_put;
+
+               if (i915_gem_object_is_lmem(obj)) {
+                       err = i915_gem_object_migrate(ctx, obj, INTEL_MEMORY_SMEM);
+                       if (err)
+                               goto out_put;
+
+                       if (i915_gem_object_is_lmem(obj)) {
+                               pr_err("object still backed by lmem\n");
+                               err = -EINVAL;
+                       }
+
+                       if (!list_empty(&obj->blocks)) {
+                               pr_err("object leaking memory region\n");
+                               err = -EINVAL;
+                       }
+
+                       if (!i915_gem_object_has_struct_page(obj)) {
+                               pr_err("object not backed by struct page\n");
+                               err = -EINVAL;
+                       }
+
+               } else {
+                       err = i915_gem_object_migrate(ctx, obj, INTEL_MEMORY_LMEM);
+                       if (err)
+                               goto out_put;
+
+                       if (i915_gem_object_has_struct_page(obj)) {
+                               pr_err("object still backed by struct page\n");
+                               err = -EINVAL;
+                       }
+
+                       if (!i915_gem_object_is_lmem(obj)) {
+                               pr_err("object not backed by lmem\n");
+                               err = -EINVAL;
+                       }
+               }
+
+               if (err)
+                       break;
+
+               err = i915_gem_object_fill_blt(ctx, obj, 0xdeadbeaf);
+               if (err)
+                       break;
+       }
+
+out_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
        static const struct i915_subtest tests[] = {
@@ -649,7 +774,10 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_lmem_create),
+               SUBTEST(igt_smem_create_migrate),
+               SUBTEST(igt_lmem_create_migrate),
                SUBTEST(igt_lmem_write_gpu),
+               SUBTEST(igt_lmem_pages_migrate),
        };
        struct i915_gem_context *ctx;
        struct drm_file *file;
-- 
2.20.1
