Check that we can invalidate an object created for an imported dmabuf
(i.e. that we release obj->mm.pages, and with it the associated mapping
of the dmabuf) under memory pressure, by invoking the shrinker as if
for direct reclaim.
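
For reference, the shrinker invocation is faked from process context
using the fs_reclaim lockdep annotations, so that lockdep records the
same lock dependencies it would see under real direct reclaim. A
minimal sketch of the pattern used in the patch below (the helper name
is illustrative only):

	#include <linux/sched/mm.h>

	/*
	 * Pretend we are in the direct-reclaim path: any lock taken
	 * between acquire/release is noted by lockdep as a potential
	 * reclaim-time dependency.
	 */
	static void fake_direct_reclaim(struct drm_i915_private *i915)
	{
		fs_reclaim_acquire(GFP_KERNEL);
		i915_gem_shrink_all(i915);
		fs_reclaim_release(GFP_KERNEL);
	}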

This uses our mock_dmabuf as the exporter, so while we may miss the
intricate lock dependencies of a real device, we should still capture
all of the core dependencies.
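
For context, the core of such a mock exporter is roughly a handful of
dma_buf_ops backed by plain pages. The sketch below is illustrative
only, not the actual mock_dmabuf.c: the fake_* names are made up, and
the page allocation, dma_buf_export() boilerplate and the mandatory
kmap/mmap hooks are omitted for brevity.

	#include <linux/dma-buf.h>
	#include <linux/dma-mapping.h>
	#include <linux/mm.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	struct fake_dmabuf {
		unsigned int npages;
		struct page *pages[];
	};

	/* Build and dma-map an sg_table over the backing pages. */
	static struct sg_table *
	fake_map_dma_buf(struct dma_buf_attachment *attach,
			 enum dma_data_direction dir)
	{
		struct fake_dmabuf *f = attach->dmabuf->priv;
		struct sg_table *st;
		int err;

		st = kmalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			return ERR_PTR(-ENOMEM);

		err = sg_alloc_table_from_pages(st, f->pages, f->npages, 0,
						(unsigned long)f->npages << PAGE_SHIFT,
						GFP_KERNEL);
		if (err)
			goto err_free;

		if (!dma_map_sg(attach->dev, st->sgl, st->nents, dir)) {
			err = -ENOMEM;
			goto err_sg;
		}

		return st;

	err_sg:
		sg_free_table(st);
	err_free:
		kfree(st);
		return ERR_PTR(err);
	}

	static void fake_unmap_dma_buf(struct dma_buf_attachment *attach,
				       struct sg_table *st,
				       enum dma_data_direction dir)
	{
		dma_unmap_sg(attach->dev, st->sgl, st->nents, dir);
		sg_free_table(st);
		kfree(st);
	}

	/* Drop the backing pages when the last dma-buf reference goes. */
	static void fake_release(struct dma_buf *dma_buf)
	{
		struct fake_dmabuf *f = dma_buf->priv;
		unsigned int i;

		for (i = 0; i < f->npages; i++)
			put_page(f->pages[i]);
		kfree(f);
	}

	static const struct dma_buf_ops fake_dmabuf_ops = {
		.map_dma_buf = fake_map_dma_buf,
		.unmap_dma_buf = fake_unmap_dma_buf,
		.release = fake_release,
	};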

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
Cc: Daniel Vetter <dan...@ffwll.ch>
---
 .../gpu/drm/i915/selftests/i915_gem_dmabuf.c  | 70 +++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index a7055b12e53c..105eaeb09ca7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -22,6 +22,8 @@
  *
  */
 
+#include <linux/sched/mm.h>
+
 #include "../i915_selftest.h"
 
 #include "mock_gem_device.h"
@@ -370,6 +372,73 @@ static int igt_dmabuf_export_kmap(void *arg)
        return err;
 }
 
+static int igt_dmabuf_import_shrink(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct drm_i915_gem_object *obj;
+       struct dma_buf *dmabuf;
+       bool has_pages;
+       int err;
+
+       dmabuf = mock_dmabuf(1);
+       if (IS_ERR(dmabuf))
+               return PTR_ERR(dmabuf);
+
+       obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
+       dma_buf_put(dmabuf);
+       if (IS_ERR(obj)) {
+               pr_err("i915_gem_prime_import failed with err=%d\n",
+                      (int)PTR_ERR(obj));
+               return PTR_ERR(obj);
+       }
+
+       if (obj->base.dev != &i915->drm) {
+               pr_err("i915_gem_prime_import created a non-i915 object!\n");
+               err = -EINVAL;
+               goto out_obj;
+       }
+
+       if (obj->base.size != PAGE_SIZE) {
+               pr_err("i915_gem_prime_import is wrong size: found %lld, expected %ld\n",
+                      (long long)obj->base.size, PAGE_SIZE);
+               err = -EINVAL;
+               goto out_obj;
+       }
+
+       err = i915_gem_object_pin_pages(obj);
+       if (err)
+               goto out_obj;
+
+       i915_gem_object_unpin_pages(obj);
+
+       if (!i915_gem_object_has_pages(obj)) {
+               pr_err("Failed to acquire dma-buf pages\n");
+               err = -EINVAL;
+               goto out_obj;
+       }
+
+       /* Fake the shrinker invocation for direct-reclaim */
+       fs_reclaim_acquire(GFP_KERNEL);
+       i915_gem_shrink_all(i915);
+       fs_reclaim_release(GFP_KERNEL);
+
+       has_pages = i915_gem_object_has_pages(obj);
+       if (has_pages == i915_gem_object_is_shrinkable(obj)) {
+               if (has_pages)
+                       pr_err("Failed to release dma-buf from shrinker!\n");
+               else
+                       pr_err("Released unshrinkable dma-buf from shrinker!\n");
+
+               err = -EINVAL;
+               goto out_obj;
+       }
+
+       err = 0;
+out_obj:
+       i915_gem_object_put(obj);
+       return err;
+}
+
 int i915_gem_dmabuf_mock_selftests(void)
 {
        static const struct i915_subtest tests[] = {
@@ -397,6 +466,7 @@ int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_dmabuf_export),
+               SUBTEST(igt_dmabuf_import_shrink),
        };
 
        return i915_subtests(tests, i915);
-- 
2.18.0
