Allocate objects with varying numbers of pages (which should hopefully
consist of a mixture of contiguous page chunks and hence coalesced sg
lists) and check that the sg walkers in insert_pages cope.

v2: Check both small <-> large

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 362 ++++++++++++++++++++++++++
 1 file changed, 362 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 5c09dc920cb8..4cd55fc0820a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -22,8 +22,101 @@
  *
  */
 
+#include <linux/prime_numbers.h>
+
 #include "../i915_selftest.h"
 
+#include "mock_drm.h"
+
+static void fake_free_pages(struct drm_i915_gem_object *obj,
+                           struct sg_table *pages)
+{
+       sg_free_table(pages);
+       kfree(pages);
+}
+
+static struct sg_table *
+fake_get_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+#define PFN_BIAS 0x1000
+       struct sg_table *pages;
+       struct scatterlist *sg;
+       typeof(obj->base.size) rem;
+
+       pages = kmalloc(sizeof(*pages), GFP);
+       if (!pages)
+               return ERR_PTR(-ENOMEM);
+
+       rem = round_up(obj->base.size, BIT(31)) >> 31;
+       if (sg_alloc_table(pages, rem, GFP)) {
+               kfree(pages);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       rem = obj->base.size;
+       for (sg = pages->sgl; sg; sg = sg_next(sg)) {
+               unsigned long len = min_t(typeof(rem), rem, BIT(31));
+
+               sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
+               sg_dma_address(sg) = page_to_phys(sg_page(sg));
+               sg_dma_len(sg) = len;
+
+               rem -= len;
+       }
+
+       return pages;
+#undef GFP
+}
+
+static void fake_put_pages(struct drm_i915_gem_object *obj,
+                          struct sg_table *pages)
+{
+       fake_free_pages(obj, pages);
+       obj->mm.dirty = false;
+}
+
+static void fake_release(struct drm_i915_gem_object *obj)
+{
+       __i915_gem_object_unpin_pages(obj);
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = {
+       .get_pages = fake_get_pages,
+       .put_pages = fake_put_pages,
+       .release = fake_release
+};
+
+static struct drm_i915_gem_object *
+fake_dma_object(struct drm_i915_private *i915, u64 size)
+{
+       struct drm_i915_gem_object *obj;
+
+       GEM_BUG_ON(!size);
+       GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+
+       if (overflows_type(size, obj->base.size))
+               return ERR_PTR(-E2BIG);
+
+       obj = i915_gem_object_alloc(i915);
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+
+       drm_gem_private_object_init(&i915->drm, &obj->base, size);
+       i915_gem_object_init(obj, &fake_ops);
+
+       obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+       obj->cache_level = I915_CACHE_NONE;
+
+       if (i915_gem_object_pin_pages(obj)) {
+               i915_gem_object_free(obj);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return obj;
+}
+
 static int igt_ppgtt_alloc(void *arg)
 {
        struct drm_i915_private *dev_priv = arg;
@@ -87,10 +180,279 @@ static int igt_ppgtt_alloc(void *arg)
        return err;
 }
 
+static void close_object_list(struct list_head *objects,
+                             struct i915_address_space *vm)
+{
+       struct drm_i915_gem_object *obj, *on;
+
+       list_for_each_entry_safe(obj, on, objects, batch_pool_link) {
+               struct i915_vma *vma;
+
+               vma = i915_vma_instance(obj, vm, NULL);
+               if (!IS_ERR(vma))
+                       i915_vma_close(vma);
+
+               list_del(&obj->batch_pool_link);
+               i915_gem_object_put(obj);
+       }
+}
+
+static int fill_hole(struct drm_i915_private *i915,
+                    struct i915_address_space *vm,
+                    u64 hole_start, u64 hole_end,
+                    unsigned long end_time)
+{
+#define FLAGS (PIN_USER | PIN_OFFSET_FIXED)
+       const u64 hole_size = hole_end - hole_start;
+       struct drm_i915_gem_object *obj;
+       const unsigned long max_pages =
+               min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
+       const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
+       unsigned long npages, prime;
+       struct i915_vma *vma;
+       LIST_HEAD(objects);
+       int err;
+
+       /* Try binding many VMA working inwards from either edge */
+
+       for_each_prime_number_from(prime, 2, max_step) {
+               for (npages = 1; npages <= max_pages; npages *= prime) {
+                       const u64 full_size = npages << PAGE_SHIFT;
+                       const struct {
+                               const char *name;
+                               u64 offset;
+                               int step;
+                       } phases[] = {
+                               { "top-down", hole_end, -1, },
+                               { "bottom-up", hole_start, 1, },
+                               { }
+                       }, *p;
+
+                       obj = fake_dma_object(i915, full_size);
+                       if (IS_ERR(obj))
+                               break;
+
+                       list_add(&obj->batch_pool_link, &objects);
+
+                       /* Align differing sized objects against the edges, and
+                        * check we don't walk off into the void when binding
+                        * them into the GTT.
+                        */
+                       for (p = phases; p->name; p++) {
+                               u64 offset;
+
+                               offset = p->offset;
+                               list_for_each_entry(obj, &objects, batch_pool_link) {
+                                       vma = i915_vma_instance(obj, vm, NULL);
+                                       if (IS_ERR(vma))
+                                               continue;
+
+                                       if (p->step < 0) {
+                                               if (offset < hole_start + obj->base.size)
+                                                       break;
+                                               offset -= obj->base.size;
+                                       }
+
+                                       err = i915_vma_pin(vma, 0, 0, offset | FLAGS);
+                                       if (err) {
+                                               pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
+                                                      __func__, p->name, err, npages, prime, offset);
+                                               goto err;
+                                       }
+
+                                       if (!drm_mm_node_allocated(&vma->node) ||
+                                           i915_vma_misplaced(vma, 0, 0, offset | FLAGS)) {
+                                               pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+                                                      __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+                                                      offset);
+                                               err = -EINVAL;
+                                               goto err;
+                                       }
+
+                                       i915_vma_unpin(vma);
+
+                                       if (p->step > 0) {
+                                               if (offset + obj->base.size > hole_end)
+                                                       break;
+                                               offset += obj->base.size;
+                                       }
+                               }
+
+                               offset = p->offset;
+                               list_for_each_entry(obj, &objects, batch_pool_link) {
+                                       vma = i915_vma_instance(obj, vm, NULL);
+                                       if (IS_ERR(vma))
+                                               continue;
+
+                                       if (p->step < 0) {
+                                               if (offset < hole_start + obj->base.size)
+                                                       break;
+                                               offset -= obj->base.size;
+                                       }
+
+                                       if (!drm_mm_node_allocated(&vma->node) ||
+                                           i915_vma_misplaced(vma, 0, 0, offset | FLAGS)) {
+                                               pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
+                                                      __func__, p->name, vma->node.start, vma->node.size,
+                                                      offset);
+                                               err = -EINVAL;
+                                               goto err;
+                                       }
+
+                                       err = i915_vma_unbind(vma);
+                                       if (err) {
+                                               pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
+                                                      __func__, p->name, vma->node.start, vma->node.size,
+                                                      err);
+                                               goto err;
+                                       }
+
+                                       if (p->step > 0) {
+                                               if (offset + obj->base.size > hole_end)
+                                                       break;
+                                               offset += obj->base.size;
+                                       }
+                               }
+
+                               offset = p->offset;
+                               list_for_each_entry_reverse(obj, &objects, batch_pool_link) {
+                                       vma = i915_vma_instance(obj, vm, NULL);
+                                       if (IS_ERR(vma))
+                                               continue;
+
+                                       if (p->step < 0) {
+                                               if (offset < hole_start + obj->base.size)
+                                                       break;
+                                               offset -= obj->base.size;
+                                       }
+
+                                       err = i915_vma_pin(vma, 0, 0, offset | FLAGS);
+                                       if (err) {
+                                               pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
+                                                      __func__, p->name, err, npages, prime, offset);
+                                               goto err;
+                                       }
+
+                                       if (!drm_mm_node_allocated(&vma->node) ||
+                                           i915_vma_misplaced(vma, 0, 0, offset | FLAGS)) {
+                                               pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+                                                      __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+                                                      offset);
+                                               err = -EINVAL;
+                                               goto err;
+                                       }
+
+                                       i915_vma_unpin(vma);
+
+                                       if (p->step > 0) {
+                                               if (offset + obj->base.size > hole_end)
+                                                       break;
+                                               offset += obj->base.size;
+                                       }
+                               }
+
+                               offset = p->offset;
+                               list_for_each_entry_reverse(obj, &objects, batch_pool_link) {
+                                       vma = i915_vma_instance(obj, vm, NULL);
+                                       if (IS_ERR(vma))
+                                               continue;
+
+                                       if (p->step < 0) {
+                                               if (offset < hole_start + obj->base.size)
+                                                       break;
+                                               offset -= obj->base.size;
+                                       }
+
+                                       if (!drm_mm_node_allocated(&vma->node) ||
+                                           i915_vma_misplaced(vma, 0, 0, offset | FLAGS)) {
+                                               pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+                                                      __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+                                                      offset);
+                                               err = -EINVAL;
+                                               goto err;
+                                       }
+
+                                       err = i915_vma_unbind(vma);
+                                       if (err) {
+                                               pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
+                                                      __func__, p->name, vma->node.start, vma->node.size,
+                                                      err);
+                                               goto err;
+                                       }
+
+                                       if (p->step > 0) {
+                                               if (offset + obj->base.size > hole_end)
+                                                       break;
+                                               offset += obj->base.size;
+                                       }
+                               }
+                       }
+
+                       if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
+                                       __func__, npages, prime)) {
+                               err = -EINTR;
+                               goto err;
+                       }
+               }
+
+               close_object_list(&objects, vm);
+       }
+
+       return 0;
+
+err:
+       close_object_list(&objects, vm);
+       return err;
+#undef FLAGS
+}
+
+static int exercise_ppgtt(struct drm_i915_private *dev_priv,
+                         int (*func)(struct drm_i915_private *i915,
+                                     struct i915_address_space *vm,
+                                     u64 hole_start, u64 hole_end,
+                                     unsigned long end_time))
+{
+       struct drm_file *file;
+       struct i915_hw_ppgtt *ppgtt;
+       IGT_TIMEOUT(end_time);
+       int err;
+
+       if (!USES_FULL_PPGTT(dev_priv))
+               return 0;
+
+       file = mock_file(dev_priv);
+       if (IS_ERR(file))
+               return PTR_ERR(file);
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
+       if (IS_ERR(ppgtt)) {
+               err = PTR_ERR(ppgtt);
+               goto out_unlock;
+       }
+       GEM_BUG_ON(offset_in_page(ppgtt->base.total));
+
+       err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+
+       i915_ppgtt_close(&ppgtt->base);
+       i915_ppgtt_put(ppgtt);
+out_unlock:
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       mock_file_free(dev_priv, file);
+       return err;
+}
+
+static int igt_ppgtt_fill(void *arg)
+{
+       return exercise_ppgtt(arg, fill_hole);
+}
+
 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_ppgtt_alloc),
+               SUBTEST(igt_ppgtt_fill),
        };
 
        return i915_subtests(tests, i915);
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to