From: Matthew Auld <matthew.a...@intel.com>

Add a selftest for the gem object migrate functionality, slightly
adapted from Matthew's original to the new interface and the new
fill blit code.

v4:
- Initialize buffers and check contents after migration
  (Suggested by Matthew Auld)
- Perform async migration (if implemented) in the igt_lmem_pages_migrate
  test
- Also test migration to the object's current region.
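
For reference, the core pattern the new subtests exercise is a ww
transaction around the migrate call; roughly (sketch only, the full
versions live in the new selftest file below):

        for_i915_gem_ww(&ww, err, true) {
                err = i915_gem_object_lock(obj, &ww);
                if (err)
                        continue;

                /* Refuse placements the object cannot migrate to. */
                if (!i915_gem_object_can_migrate(obj, dst)) {
                        err = -EINVAL;
                        continue;
                }

                /* May be async; dst names an intel_region_id. */
                err = i915_gem_object_migrate(obj, &ww, dst);
                if (err)
                        continue;

                /* Sync against the migration before checking contents. */
                err = i915_gem_object_wait_migration(obj, true);
        }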

Co-developed-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
Signed-off-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.a...@intel.com>
Reviewed-by: Michael J. Ruhl <michael.j.r...@intel.com> #v3
---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |   1 +
 .../drm/i915/gem/selftests/i915_gem_migrate.c | 258 ++++++++++++++++++
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 3 files changed, 260 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 225b77fb4314..547cc9dad90d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -665,6 +665,7 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/huge_gem_object.c"
 #include "selftests/huge_pages.c"
+#include "selftests/i915_gem_migrate.c"
 #include "selftests/i915_gem_object.c"
 #include "selftests/i915_gem_coherency.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
new file mode 100644
index 000000000000..ced6e3a814a2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#include "gt/intel_migrate.h"
+
+static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
+                                bool fill)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       unsigned int i, count = obj->base.size / sizeof(u32);
+       enum i915_map_type map_type =
+               i915_coherent_map_type(i915, obj, false);
+       u32 *cur;
+       int err = 0;
+
+       assert_object_held(obj);
+       cur = i915_gem_object_pin_map(obj, map_type);
+       if (IS_ERR(cur))
+               return PTR_ERR(cur);
+
+       if (fill)
+               for (i = 0; i < count; ++i)
+                       *cur++ = i;
+       else
+               for (i = 0; i < count; ++i)
+                       if (*cur++ != i) {
+                               pr_err("Object content mismatch at location %d of %d\n", i, count);
+                               err = -EINVAL;
+                               break;
+                       }
+
+       i915_gem_object_unpin_map(obj);
+
+       return err;
+}
+
+static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
+                             enum intel_region_id dst)
+{
+       struct drm_i915_private *i915 = gt->i915;
+       struct intel_memory_region *src_mr = i915->mm.regions[src];
+       struct drm_i915_gem_object *obj;
+       struct i915_gem_ww_ctx ww;
+       int err = 0;
+
+       GEM_BUG_ON(!src_mr);
+
+       /* Switch object backing-store on create */
+       obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       for_i915_gem_ww(&ww, err, true) {
+               err = i915_gem_object_lock(obj, &ww);
+               if (err)
+                       continue;
+
+               err = igt_fill_check_buffer(obj, true);
+               if (err)
+                       continue;
+
+               if (!i915_gem_object_can_migrate(obj, dst)) {
+                       err = -EINVAL;
+                       continue;
+               }
+
+               err = i915_gem_object_migrate(obj, &ww, dst);
+               if (err)
+                       continue;
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       continue;
+
+               if (i915_gem_object_can_migrate(obj, src))
+                       err = -EINVAL;
+
+               i915_gem_object_unpin_pages(obj);
+               err = i915_gem_object_wait_migration(obj, true);
+               if (err)
+                       continue;
+
+               err = igt_fill_check_buffer(obj, false);
+       }
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+static int igt_smem_create_migrate(void *arg)
+{
+       return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_SMEM);
+}
+
+static int igt_lmem_create_migrate(void *arg)
+{
+       return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM);
+}
+
+static int igt_same_create_migrate(void *arg)
+{
+       return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_LMEM);
+}
+
+static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
+                                 struct drm_i915_gem_object *obj)
+{
+       int err;
+
+       err = i915_gem_object_lock(obj, ww);
+       if (err)
+               return err;
+
+       if (i915_gem_object_is_lmem(obj)) {
+               if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM)) {
+                       pr_err("object can't migrate to smem.\n");
+                       return -EINVAL;
+               }
+
+               err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
+               if (err) {
+                       pr_err("Object failed migration to smem\n");
+                       if (err)
+                               return err;
+               }
+
+               if (i915_gem_object_is_lmem(obj)) {
+                       pr_err("object still backed by lmem\n");
+                       err = -EINVAL;
+               }
+
+               if (!i915_gem_object_has_struct_page(obj)) {
+                       pr_err("object not backed by struct page\n");
+                       err = -EINVAL;
+               }
+
+       } else {
+               if (!i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
+                       pr_err("object can't migrate to lmem.\n");
+                       return -EINVAL;
+               }
+
+               err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM);
+               if (err) {
+                       pr_err("Object failed migration to lmem\n");
+                       if (err)
+                               return err;
+               }
+
+               if (i915_gem_object_has_struct_page(obj)) {
+                       pr_err("object still backed by struct page\n");
+                       err = -EINVAL;
+               }
+
+               if (!i915_gem_object_is_lmem(obj)) {
+                       pr_err("object not backed by lmem\n");
+                       err = -EINVAL;
+               }
+       }
+
+       return err;
+}
+
+static int igt_lmem_pages_migrate(void *arg)
+{
+       struct intel_gt *gt = arg;
+       struct drm_i915_private *i915 = gt->i915;
+       struct drm_i915_gem_object *obj;
+       struct i915_gem_ww_ctx ww;
+       struct i915_request *rq;
+       int err;
+       int i;
+
+       /* From LMEM to shmem and back again */
+
+       obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       /* Initial GPU fill, sync, CPU initialization. */
+       for_i915_gem_ww(&ww, err, true) {
+               err = i915_gem_object_lock(obj, &ww);
+               if (err)
+                       continue;
+
+               err = ____i915_gem_object_get_pages(obj);
+               if (err)
+                       continue;
+
+               err = intel_migrate_clear(&gt->migrate, &ww, NULL,
+                                         obj->mm.pages->sgl, obj->cache_level,
+                                         i915_gem_object_is_lmem(obj),
+                                         0xdeadbeaf, &rq);
+               if (rq) {
+                       dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+                       i915_request_put(rq);
+               }
+               if (err)
+                       continue;
+
+               err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE,
+                                          5 * HZ);
+               if (err)
+                       continue;
+
+               err = igt_fill_check_buffer(obj, true);
+               if (err)
+                       continue;
+       }
+       if (err)
+               goto out_put;
+
+       /*
+        * Migrate to and from smem without explicitly syncing.
+        * Finalize with data in smem for fast readout.
+        */
+       for (i = 1; i <= 5; ++i) {
+               for_i915_gem_ww(&ww, err, true)
+                       err = lmem_pages_migrate_one(&ww, obj);
+               if (err)
+                       goto out_put;
+       }
+
+       err = i915_gem_object_lock_interruptible(obj, NULL);
+       if (err)
+               goto out_put;
+
+       /* Finally sync migration and check content. */
+       err = i915_gem_object_wait_migration(obj, true);
+       if (err)
+               goto out_unlock;
+
+       err = igt_fill_check_buffer(obj, false);
+
+out_unlock:
+       i915_gem_object_unlock(obj);
+out_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_smem_create_migrate),
+               SUBTEST(igt_lmem_create_migrate),
+               SUBTEST(igt_same_create_migrate),
+               SUBTEST(igt_lmem_pages_migrate),
+       };
+
+       if (!HAS_LMEM(i915))
+               return 0;
+
+       return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index a68197cf1044..e2fd1b61af71 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -40,6 +40,7 @@ selftest(hugepages, i915_gem_huge_page_live_selftests)
 selftest(gem_contexts, i915_gem_context_live_selftests)
 selftest(gem_execbuf, i915_gem_execbuffer_live_selftests)
 selftest(client, i915_gem_client_blt_live_selftests)
+selftest(gem_migrate, i915_gem_migrate_live_selftests)
 selftest(reset, intel_reset_live_selftests)
 selftest(memory_region, intel_memory_region_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)
-- 
2.31.1
