Exercise drm_mm_reserve_node(), check that we can't reserve an already
occupied range and that the lists are correct after reserving/removing.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/selftests/drm_mm_selftests.h |   1 +
 drivers/gpu/drm/selftests/test-drm_mm.c      | 132 +++++++++++++++++++++++++++
 2 files changed, 133 insertions(+)

diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index fddf2c01c97f..639913a69101 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -5,6 +5,7 @@
  *
  * Tests are executed in reverse order by igt/drm_mm
  */
+selftest(reserve, igt_reserve)
 selftest(debug, igt_debug)
 selftest(init, igt_init)
 selftest(sanitycheck, igt_sanitycheck) /* keep last */
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 1fe104cc05d5..13b2cfdb4d44 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -11,6 +11,9 @@
 
 #include <drm/drm_mm.h>
 
+#include "../lib/drm_rand.h"
+#include "../lib/drm_prime_numbers.h"
+
 #define TESTS "drm_mm_selftests.h"
 #include "drm_selftest.h"
 
@@ -105,6 +108,135 @@ static int igt_debug(void *ignored)
        return 0;
 }
 
+static int __igt_reserve(int count, u64 size)
+{
+       u32 lcg_state = random_seed;
+       struct drm_mm mm;
+       struct drm_mm_node *node, *next;
+       int *order, n;
+       int ret;
+
+       /* Fill a range with lots of nodes, check it doesn't fail too early */
+
+       ret = -ENOMEM;
+       order = drm_random_order(count, &lcg_state);
+       if (!order)
+               goto err;
+
+       ret = -EINVAL;
+       drm_mm_init(&mm, 0, count * size);
+       if (!drm_mm_clean(&mm)) {
+               pr_err("mm not empty on creation\n");
+               goto out;
+       }
+
+       for (n = 0; n < count; n++) {
+               int err;
+
+               node = kzalloc(sizeof(*node), GFP_KERNEL);
+               if (!node) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               node->start = order[n] * size;
+               node->size = size;
+
+               err = drm_mm_reserve_node(&mm, node);
+               if (err) {
+                       pr_err("reserve failed, step %d, start %llu\n",
+                              n, node->start);
+                       ret = err;
+                       goto out;
+               }
+
+               if (!drm_mm_node_allocated(node)) {
+                       pr_err("reserved node not allocated! step %d, start %llu\n",
+                              n, node->start);
+                       goto out;
+               }
+       }
+
+       /* Repeated use should then fail */
+       drm_random_reorder(order, count, &lcg_state);
+       for (n = 0; n < count; n++) {
+               struct drm_mm_node tmp = {
+                       .start = order[n] * size,
+                       .size = 1
+               };
+
+               if (!drm_mm_reserve_node(&mm, &tmp)) {
+                       drm_mm_remove_node(&tmp);
+                       pr_err("impossible reserve succeeded, step %d, start %llu\n",
+                              n, tmp.start);
+                       goto out;
+               }
+       }
+
+       /* Overlapping use should then fail */
+       for (n = 0; n < count; n++) {
+               struct drm_mm_node tmp = {
+                       .start = 0,
+                       .size = size * count,
+               };
+
+               if (!drm_mm_reserve_node(&mm, &tmp)) {
+                       drm_mm_remove_node(&tmp);
+                       pr_err("impossible reserve succeeded, step %d, start %llu\n",
+                              n, tmp.start);
+                       goto out;
+               }
+       }
+       for (n = 0; n < count; n++) {
+               struct drm_mm_node tmp = {
+                       .start = size * n,
+                       .size = size * (count - n),
+               };
+
+               if (!drm_mm_reserve_node(&mm, &tmp)) {
+                       drm_mm_remove_node(&tmp);
+                       pr_err("impossible reserve succeeded, step %d, start %llu\n",
+                              n, tmp.start);
+                       goto out;
+               }
+       }
+
+       ret = 0;
+out:
+       drm_mm_for_each_node_safe(node, next, &mm) {
+               drm_mm_remove_node(node);
+               kfree(node);
+       }
+       drm_mm_takedown(&mm);
+       kfree(order);
+err:
+       return ret;
+}
+
+static int igt_reserve(void *ignored)
+{
+       const unsigned int count = BIT(10);
+       int n, ret;
+
+       drm_for_each_prime(n, 54) {
+               u64 size = BIT_ULL(n);
+
+               ret = __igt_reserve(count, size - 1);
+               if (ret)
+                       return ret;
+
+               ret = __igt_reserve(count, size);
+               if (ret)
+                       return ret;
+
+               ret = __igt_reserve(count, size + 1);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 #include "drm_selftest.c"
 
 static int __init test_drm_mm_init(void)
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to