As we need to use a mutex to serialise i915_active activation
(because we want to allow the callback to sleep), we need to push the
i915_active.retire into a worker callback in case we need to retire
from an atomic context.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_active.c       | 38 +++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_active_types.h |  3 ++
 drivers/gpu/drm/i915/i915_vma.c          |  1 +
 3 files changed, 35 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_active.c 
b/drivers/gpu/drm/i915/i915_active.c
index 4888ef745c6b..f7ffa6e7bd9a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -30,18 +30,13 @@ struct active_node {
 };
 
 static void
-active_retire(struct i915_active *ref)
+__active_retire(struct i915_active *ref)
 {
        struct rb_root root = RB_ROOT;
        struct active_node *it, *n;
        bool retire = false;
 
-       GEM_BUG_ON(!atomic_read(&ref->count));
-       if (atomic_add_unless(&ref->count, -1, 1))
-               return;
-
-       /* One active may be flushed from inside the acquire of another */
-       mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+       lockdep_assert_held(&ref->mutex);
 
        /* return the unused nodes to our slabcache -- flushing the allocator */
        if (atomic_dec_and_test(&ref->count)) {
@@ -62,6 +57,34 @@ active_retire(struct i915_active *ref)
        }
 }
 
+static void
+active_work(struct work_struct *wrk)
+{
+       struct i915_active *ref = container_of(wrk, typeof(*ref), work);
+
+       if (atomic_add_unless(&ref->count, -1, 1))
+               return;
+
+       mutex_lock(&ref->mutex);
+       __active_retire(ref);
+}
+
+static void
+active_retire(struct i915_active *ref)
+{
+       GEM_BUG_ON(!atomic_read(&ref->count));
+       if (atomic_add_unless(&ref->count, -1, 1))
+               return;
+
+       /* If we are inside interrupt context (fence signaling), defer */
+       if (!mutex_trylock(&ref->mutex)) {
+               queue_work(system_unbound_wq, &ref->work);
+               return;
+       }
+
+       __active_retire(ref);
+}
+
 static void
 node_retire(struct i915_active_request *base, struct i915_request *rq)
 {
@@ -138,6 +161,7 @@ void __i915_active_init(struct drm_i915_private *i915,
        init_llist_head(&ref->barriers);
        atomic_set(&ref->count, 0);
        __mutex_init(&ref->mutex, "i915_active", key);
+       INIT_WORK(&ref->work, active_work);
 }
 
 int i915_active_ref(struct i915_active *ref,
diff --git a/drivers/gpu/drm/i915/i915_active_types.h 
b/drivers/gpu/drm/i915/i915_active_types.h
index 5b0a3024ce24..06acdffe0f6d 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -12,6 +12,7 @@
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
 #include <linux/rcupdate.h>
+#include <linux/workqueue.h>
 
 struct drm_i915_private;
 struct i915_active_request;
@@ -39,6 +40,8 @@ struct i915_active {
        int (*active)(struct i915_active *ref);
        void (*retire)(struct i915_active *ref);
 
+       struct work_struct work;
+
        struct llist_head barriers;
 };
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index be15f0e0c6eb..393575bfb5ec 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -975,6 +975,7 @@ int i915_vma_unbind(struct i915_vma *vma)
                if (ret)
                        return ret;
        }
+       flush_work(&vma->active.work);
        GEM_BUG_ON(i915_vma_is_active(vma));
 
        if (i915_vma_is_pinned(vma)) {
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to