Forgo the struct_mutex serialisation for i915_active, and interpose its
own mutex handling for active/retire.

This is a multi-layered sleight-of-hand. First, we had to ensure that no
active/retire callbacks accidentally inverted the mutex ordering rules,
nor assumed that they were themselves serialised by struct_mutex. More
challenging, though, is the rule governing updates to elements of the
active rbtree. Instead of the whole i915_active being serialised by
struct_mutex, allocations/rotations of the tree are now serialised by the
i915_active.mutex, and individual nodes are serialised by the caller using
the i915_timeline.mutex (we need nested spinlocks to interact with the
dma_fence callback lists; see the condensed sketch below).
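
For illustration, replacing the fence tracked by one node distils to the
following (a condensed sketch of __i915_active_fence_set() from this
patch; the GEM_BUG_ON asserts are elided):

  struct dma_fence *
  __i915_active_fence_set(struct i915_active_fence *active,
                          struct dma_fence *fence)
  {
          struct dma_fence *old;
          unsigned long flags;

          /* The new fence's callback list is guarded by its own lock. */
          spin_lock_irqsave(fence->lock, flags);

          old = rcu_dereference_protected(active->fence, 1);
          if (old) {
                  /*
                   * Unhook from the previous fence: its cb_list is
                   * guarded by a different spinlock, hence the nested
                   * annotation.
                   */
                  spin_lock_nested(old->lock, SINGLE_DEPTH_NESTING);
                  __list_del_entry(&active->cb.node);
                  spin_unlock(old->lock);
          }

          rcu_assign_pointer(active->fence, fence);
          list_add_tail(&active->cb.node, &fence->cb_list);

          spin_unlock_irqrestore(fence->lock, flags);
          return old;
  }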

The pain point here is that, instead of a single mutex around execbuf, we
now have to take a mutex for each active tracker (one per vma, context,
etc.) and a couple of spinlocks for each fence update. The improvement in
fine-grained locking, allowing multiple concurrent clients (eventually!),
should be worth it in typical workloads; a sketch of the new per-tracker
path follows below.
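
The per-tracker path itself (condensed from i915_active_ref() in this
patch) is one mutex to find or allocate the tree node, then the fence
spinlocks above to swap fences:

  int i915_active_ref(struct i915_active *ref, u64 timeline,
                      struct dma_fence *fence)
  {
          struct i915_active_fence *active;
          int err;

          /* Prevent reaping in case we malloc/wait while building the tree */
          err = i915_active_acquire(ref);
          if (err)
                  return err;

          /* Takes ref->mutex to allocate/rotate the rbtree node. */
          active = active_instance(ref, timeline);
          if (!active) {
                  err = -ENOMEM;
                  goto out;
          }

          /* Swap fences under the fence spinlocks (see above). */
          if (!__i915_active_fence_set(active, fence))
                  atomic_inc(&ref->count); /* first fence in this slot */

  out:
          i915_active_release(ref);
          return err;
  }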

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   |  12 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |   2 +-
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   2 +
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |   9 +-
 drivers/gpu/drm/i915/gt/intel_context.c       |   2 +-
 drivers/gpu/drm/i915/gt/intel_reset.c         |  10 +-
 drivers/gpu/drm/i915/gt/selftest_lrc.c        |   5 +-
 drivers/gpu/drm/i915/gvt/scheduler.c          |   3 -
 drivers/gpu/drm/i915/i915_active.c            | 165 +++++++-------
 drivers/gpu/drm/i915/i915_active.h            | 204 +++++-------------
 drivers/gpu/drm/i915/i915_active_types.h      |  17 +-
 drivers/gpu/drm/i915/i915_gem.c               |  50 +++--
 drivers/gpu/drm/i915/i915_gem_gtt.c           |   2 +-
 drivers/gpu/drm/i915/i915_request.c           |  56 +----
 drivers/gpu/drm/i915/i915_request.h           |   1 -
 drivers/gpu/drm/i915/i915_timeline.c          |   9 +-
 drivers/gpu/drm/i915/i915_timeline_types.h    |   2 +-
 drivers/gpu/drm/i915/i915_vma.c               |  36 +---
 drivers/gpu/drm/i915/intel_frontbuffer.c      |   3 +-
 drivers/gpu/drm/i915/intel_overlay.c          |   6 +-
 drivers/gpu/drm/i915/selftests/i915_active.c  |   5 +-
 .../gpu/drm/i915/selftests/mock_timeline.c    |   2 +-
 22 files changed, 229 insertions(+), 374 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 837cad233cc6..2e684f8151d9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -909,20 +909,18 @@ static int context_barrier_task(struct i915_gem_context *ctx,
                                void (*task)(void *data),
                                void *data)
 {
-       struct drm_i915_private *i915 = ctx->i915;
        struct context_barrier_task *cb;
        struct i915_gem_engines_iter it;
        struct intel_context *ce;
        int err = 0;
 
-       lockdep_assert_held(&i915->drm.struct_mutex);
        GEM_BUG_ON(!task);
 
        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb)
                return -ENOMEM;
 
-       i915_active_init(i915, &cb->base, NULL, cb_retire);
+       i915_active_init(&cb->base, NULL, cb_retire);
        err = i915_active_acquire(&cb->base);
        if (err) {
                kfree(cb);
@@ -954,7 +952,9 @@ static int context_barrier_task(struct i915_gem_context *ctx,
                if (emit)
                        err = emit(rq, data);
                if (err == 0)
-                       err = i915_active_ref(&cb->base, rq->fence.context, rq);
+                       err = i915_active_ref(&cb->base,
+                                             rq->fence.context,
+                                             &rq->fence);
 
                i915_request_add(rq);
                if (err)
@@ -1188,7 +1188,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
                return PTR_ERR(rq);
 
        /* Queue this switch after all other activity by this context. */
-       ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
+       ret = i915_active_fence_set(&ce->ring->timeline->last_request, rq);
        if (ret)
                goto out_add;
 
@@ -1200,7 +1200,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
         * words transfer the pinned ce object to tracked active request.
         */
        GEM_BUG_ON(i915_active_is_idle(&ce->active));
-       ret = i915_active_ref(&ce->active, rq->fence.context, rq);
+       ret = i915_active_ref(&ce->active, rq->fence.context, &rq->fence);
        if (ret)
                goto out_add;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 2c4f3229361d..6193c81ebbed 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1316,7 +1316,7 @@ relocate_entry(struct i915_vma *vma,
 
        if (!eb->reloc_cache.vaddr &&
            (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
-            !reservation_object_test_signaled_rcu(vma->resv, true))) {
+            i915_vma_is_active(vma))) {
                const unsigned int gen = eb->reloc_cache.gen;
                unsigned int len;
                u32 *batch;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 21bfb7bd0f57..e87fca4d8194 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -11,6 +11,8 @@
 
 #include <drm/drm_gem.h>
 
+#include <uapi/drm/i915_drm.h>
+
 #include "i915_active.h"
 #include "i915_selftest.h"
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index b0f37621de9f..283092ac13fe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -15,14 +15,11 @@ static void call_idle_barriers(struct intel_engine_cs *engine)
        struct llist_node *node, *next;
 
        llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
-               struct i915_active_request *active =
+               struct i915_active_fence *active =
                        container_of((struct list_head *)node,
-                                    typeof(*active), link);
+                                    typeof(*active), cb.node);
 
-               INIT_LIST_HEAD(&active->link);
-               RCU_INIT_POINTER(active->request, NULL);
-
-               active->retire(active, NULL);
+               active->cb.func(NULL, &active->cb);
        }
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index b9fea31cf9ec..a32698f7645f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -182,7 +182,7 @@ intel_context_init(struct intel_context *ce,
 
        mutex_init(&ce->pin_mutex);
 
-       i915_active_init(ctx->i915, &ce->active,
+       i915_active_init(&ce->active,
                         __intel_context_active, __intel_context_retire);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 1f831fe759a5..c5a772889ea5 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -881,10 +881,10 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
         */
        mutex_lock(&i915->gt.timelines.mutex);
        list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
-               struct i915_request *rq;
+               struct dma_fence *fence;
 
-               rq = i915_active_request_get_unlocked(&tl->last_request);
-               if (!rq)
+               fence = i915_active_fence_get(&tl->last_request);
+               if (!fence)
                        continue;
 
                /*
@@ -894,8 +894,8 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
                 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
                 * in the worst case.
                 */
-               dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
-               i915_request_put(rq);
+               dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
+               dma_fence_put(fence);
        }
        mutex_unlock(&i915->gt.timelines.mutex);
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 338111d690ac..9fc03a400685 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -854,7 +854,6 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
        if (!rq)
                return NULL;
 
-       INIT_LIST_HEAD(&rq->active_list);
        rq->engine = engine;
 
        i915_sched_node_init(&rq->sched);
@@ -945,8 +944,8 @@ static int live_suppress_wait_preempt(void *arg)
                                }
 
                                /* Disable NEWCLIENT promotion */
-                               __i915_active_request_set(&rq[i]->timeline->last_request,
-                                                         dummy);
+                               __i915_active_fence_set(&rq[i]->timeline->last_request,
+                                                       &dummy->fence);
                                i915_request_add(rq[i]);
                        }
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6cd72dd96a4b..31752b7ebff5 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -391,11 +391,8 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        struct i915_request *rq;
 
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
        if (workload->req)
                return 0;
 
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index f7ffa6e7bd9a..65a9ad2fc9c7 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -8,8 +8,6 @@
 #include "i915_active.h"
 #include "i915_globals.h"
 
-#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
-
 /*
  * Active refs memory management
  *
@@ -23,7 +21,7 @@ static struct i915_global_active {
 } global;
 
 struct active_node {
-       struct i915_active_request base;
+       struct i915_active_fence base;
        struct i915_active *ref;
        struct rb_node node;
        u64 timeline;
@@ -32,11 +30,12 @@ struct active_node {
 static void
 __active_retire(struct i915_active *ref)
 {
-       struct rb_root root = RB_ROOT;
        struct active_node *it, *n;
+       struct rb_root root;
        bool retire = false;
 
        lockdep_assert_held(&ref->mutex);
+       GEM_BUG_ON(i915_active_is_idle(ref));
 
        /* return the unused nodes to our slabcache -- flushing the allocator */
        if (atomic_dec_and_test(&ref->count)) {
@@ -47,12 +46,13 @@ __active_retire(struct i915_active *ref)
        }
 
        mutex_unlock(&ref->mutex);
+       if (!retire)
+               return;
 
-       if (retire)
-               ref->retire(ref);
+       ref->retire(ref);
 
        rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
-               GEM_BUG_ON(i915_active_request_isset(&it->base));
+               GEM_BUG_ON(i915_active_fence_isset(&it->base));
                kmem_cache_free(global.slab_cache, it);
        }
 }
@@ -86,12 +86,13 @@ active_retire(struct i915_active *ref)
 }
 
 static void
-node_retire(struct i915_active_request *base, struct i915_request *rq)
+node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
-       active_retire(container_of(base, struct active_node, base)->ref);
+       i915_active_fence_cb(fence, cb);
+       active_retire(container_of(cb, struct active_node, base.cb)->ref);
 }
 
-static struct i915_active_request *
+static struct i915_active_fence *
 active_instance(struct i915_active *ref, u64 idx)
 {
        struct active_node *node, *prealloc;
@@ -114,6 +115,7 @@ active_instance(struct i915_active *ref, u64 idx)
                return NULL;
 
        mutex_lock(&ref->mutex);
+       GEM_BUG_ON(i915_active_is_idle(ref));
 
        parent = NULL;
        p = &ref->tree.rb_node;
@@ -121,7 +123,7 @@ active_instance(struct i915_active *ref, u64 idx)
                parent = *p;
 
                node = rb_entry(parent, struct active_node, node);
-               if (node->timeline == idx && !IS_ERR(node->base.request)) {
+               if (node->timeline == idx && !IS_ERR(node->base.fence)) {
                        kmem_cache_free(global.slab_cache, prealloc);
                        goto out;
                }
@@ -133,7 +135,8 @@ active_instance(struct i915_active *ref, u64 idx)
        }
 
        node = prealloc;
-       i915_active_request_init(&node->base, NULL, node_retire);
+       RCU_INIT_POINTER(node->base.fence, NULL);
+       node->base.cb.func = node_retire;
        node->ref = ref;
        node->timeline = idx;
 
@@ -147,13 +150,11 @@ active_instance(struct i915_active *ref, u64 idx)
        return &node->base;
 }
 
-void __i915_active_init(struct drm_i915_private *i915,
-                       struct i915_active *ref,
+void __i915_active_init(struct i915_active *ref,
                        int (*active)(struct i915_active *ref),
                        void (*retire)(struct i915_active *ref),
                        struct lock_class_key *key)
 {
-       ref->i915 = i915;
        ref->active = active;
        ref->retire = retire;
        ref->tree = RB_ROOT;
@@ -166,9 +167,9 @@ void __i915_active_init(struct drm_i915_private *i915,
 
 int i915_active_ref(struct i915_active *ref,
                    u64 timeline,
-                   struct i915_request *rq)
+                   struct dma_fence *fence)
 {
-       struct i915_active_request *active;
+       struct i915_active_fence *active;
        int err;
 
        /* Prevent reaping in case we malloc/wait while building the tree */
@@ -182,9 +183,9 @@ int i915_active_ref(struct i915_active *ref,
                goto out;
        }
 
-       if (!i915_active_request_isset(active))
+       GEM_BUG_ON(!atomic_read(&ref->count));
+       if (!__i915_active_fence_set(active, fence))
                atomic_inc(&ref->count);
-       __i915_active_request_set(active, rq);
 
 out:
        i915_active_release(ref);
@@ -222,60 +223,33 @@ int i915_active_wait(struct i915_active *ref)
        struct active_node *it, *n;
        int err;
 
+       might_sleep();
        if (RB_EMPTY_ROOT(&ref->tree))
                return 0;
 
-       err = i915_active_acquire(ref); /* Avoid retiring ourselves */
+       err = mutex_lock_interruptible(&ref->mutex);
        if (err)
                return err;
 
-       err = mutex_lock_interruptible(&ref->mutex);
-       if (err)
-               goto out;
+       if (!atomic_add_unless(&ref->count, 1, 0))
+               goto unlock;
 
        rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-               err = i915_active_request_retire(&it->base, BKL(ref));
-               if (err)
-                       break;
-       }
-       mutex_unlock(&ref->mutex);
+               struct dma_fence *fence;
 
-out:
-       i915_active_release(ref);
-       return err;
-}
-
-int i915_request_await_active_request(struct i915_request *rq,
-                                     struct i915_active_request *active)
-{
-       struct i915_request *barrier =
-               i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
-
-       return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
-}
+               fence = i915_active_fence_get(&it->base);
+               if (!fence)
+                       continue;
 
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
-{
-       struct active_node *it, *n;
-       int err;
-
-       if (RB_EMPTY_ROOT(&ref->tree))
-               return 0;
-
-       /* await allocates and so we need to avoid hitting the shrinker */
-       err = i915_active_acquire(ref);
-       if (err)
-               return err;
-
-       mutex_lock(&ref->mutex);
-       rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-               err = i915_request_await_active_request(rq, &it->base);
+               err = dma_fence_wait(fence, true);
+               dma_fence_put(fence);
                if (err)
                        break;
        }
-       mutex_unlock(&ref->mutex);
+       __active_retire(ref);
 
-       i915_active_release(ref);
+unlock:
+       mutex_unlock(&ref->mutex);
        return err;
 }
 
@@ -306,13 +280,13 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
                        break;
                }
 
-               i915_active_request_init(&node->base,
-                                        (void *)engine, node_retire);
+               RCU_INIT_POINTER(node->base.fence, (void *)engine);
+               node->base.cb.func = node_retire;
                node->timeline = kctx->ring->timeline->fence_context;
                node->ref = ref;
                atomic_inc(&ref->count);
 
-               llist_add((struct llist_node *)&node->base.link,
+               llist_add((struct llist_node *)&node->base.cb.node,
                          &ref->barriers);
        }
 
@@ -332,10 +306,10 @@ void i915_active_acquire_barrier(struct i915_active *ref)
                struct rb_node **p, *parent;
 
                node = container_of((struct list_head *)pos,
-                                   typeof(*node), base.link);
+                                   typeof(*node), base.cb.node);
 
-               engine = (void *)rcu_access_pointer(node->base.request);
-               RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
+               engine = (void *)rcu_access_pointer(node->base.fence);
+               RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
 
                parent = NULL;
                p = &ref->tree.rb_node;
@@ -351,7 +325,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
                rb_link_node(&node->node, parent, p);
                rb_insert_color(&node->node, &ref->tree);
 
-               llist_add((struct llist_node *)&node->base.link,
+               llist_add((struct llist_node *)&node->base.cb.node,
                          &engine->barrier_tasks);
        }
        mutex_unlock(&ref->mutex);
@@ -361,29 +335,66 @@ void i915_request_add_barriers(struct i915_request *rq)
 {
        struct intel_engine_cs *engine = rq->engine;
        struct llist_node *node, *next;
+       unsigned long flags;
+
+       GEM_BUG_ON(intel_engine_is_virtual(engine));
+       node = llist_del_all(&engine->barrier_tasks);
+       if (!node)
+               return;
 
-       llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks))
-               list_add_tail((struct list_head *)node, &rq->active_list);
+       spin_lock_irqsave(&rq->lock, flags);
+       llist_for_each_safe(node, next, node)
+               list_add_tail((struct list_head *)node, &rq->fence.cb_list);
+       spin_unlock_irqrestore(&rq->lock, flags);
 }
 
-int i915_active_request_set(struct i915_active_request *active,
-                           struct i915_request *rq)
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+                       struct dma_fence *fence)
 {
+       struct dma_fence *old;
+       unsigned long flags;
+
+       spin_lock_irqsave(fence->lock, flags);
+       GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+
+       old = rcu_dereference_protected(active->fence, 1);
+       if (old) {
+               spin_lock_nested(old->lock, SINGLE_DEPTH_NESTING);
+               __list_del_entry(&active->cb.node);
+               spin_unlock(old->lock);
+       }
+
+       rcu_assign_pointer(active->fence, fence);
+       list_add_tail(&active->cb.node, &fence->cb_list);
+
+       spin_unlock_irqrestore(fence->lock, flags);
+
+       return old;
+}
+
+int i915_active_fence_set(struct i915_active_fence *active,
+                         struct i915_request *rq)
+{
+       struct dma_fence *fence;
        int err;
 
        /* Must maintain ordering wrt previous active requests */
-       err = i915_request_await_active_request(rq, active);
-       if (err)
-               return err;
+       fence = i915_active_fence_get(active);
+       if (fence) {
+               err = i915_request_await_dma_fence(rq, fence);
+               dma_fence_put(fence);
+               if (err)
+                       return err;
+       }
 
-       __i915_active_request_set(active, rq);
+       __i915_active_fence_set(active, &rq->fence);
        return 0;
 }
 
-void i915_active_retire_noop(struct i915_active_request *active,
-                            struct i915_request *request)
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
-       /* Space left intentionally blank */
+       i915_active_fence_cb(fence, cb);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index bdec4f81b6e8..21a9ce6fa461 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -10,7 +10,9 @@
 #include <linux/lockdep.h>
 
 #include "i915_active_types.h"
-#include "i915_request.h"
+
+struct i915_request;
+struct intel_engine_cs;
 
 /*
  * We treat requests as fences. This is not be to confused with our
@@ -28,128 +30,62 @@
  * write access so that we can perform concurrent read operations between
  * the CPU and GPU engines, as well as waiting for all rendering to
  * complete, or waiting for the last GPU user of a "fence register". The
- * object then embeds a #i915_active_request to track the most recent (in
+ * object then embeds a #i915_active_fence to track the most recent (in
  * retirement order) request relevant for the desired mode of access.
- * The #i915_active_request is updated with i915_active_request_set() to
+ * The #i915_active_fence is updated with i915_active_fence_set() to
  * track the most recent fence request, typically this is done as part of
  * i915_vma_move_to_active().
  *
- * When the #i915_active_request completes (is retired), it will
+ * When the #i915_active_fence completes (is retired), it will
  * signal its completion to the owner through a callback as well as mark
- * itself as idle (i915_active_request.request == NULL). The owner
+ * itself as idle (i915_active_fence.request == NULL). The owner
  * can then perform any action, such as delayed freeing of an active
  * resource including itself.
  */
 
-void i915_active_retire_noop(struct i915_active_request *active,
-                            struct i915_request *request);
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
 
 /**
- * i915_active_request_init - prepares the activity tracker for use
+ * i915_active_fence_init - prepares the activity tracker for use
  * @active - the active tracker
  * @rq - initial request to track, can be NULL
  * @func - a callback when then the tracker is retired (becomes idle),
  *         can be NULL
  *
- * i915_active_request_init() prepares the embedded @active struct for use as
+ * i915_active_fence_init() prepares the embedded @active struct for use as
  * an activity tracker, that is for tracking the last known active request
  * associated with it. When the last request becomes idle, when it is retired
  * after completion, the optional callback @func is invoked.
  */
 static inline void
-i915_active_request_init(struct i915_active_request *active,
-                        struct i915_request *rq,
-                        i915_active_retire_fn retire)
+i915_active_fence_init(struct i915_active_fence *active,
+                      struct dma_fence *fence,
+                      dma_fence_func_t fn)
 {
-       RCU_INIT_POINTER(active->request, rq);
-       INIT_LIST_HEAD(&active->link);
-       active->retire = retire ?: i915_active_retire_noop;
+       RCU_INIT_POINTER(active->fence, fence);
+       active->cb.func = fn ?: i915_active_noop;
 }
 
-#define INIT_ACTIVE_REQUEST(name) i915_active_request_init((name), NULL, NULL)
+#define INIT_ACTIVE_FENCE(A) i915_active_fence_init(A, NULL, NULL)
 
-/**
- * i915_active_request_set - updates the tracker to watch the current request
- * @active - the active tracker
- * @request - the request to watch
- *
- * __i915_active_request_set() watches the given @request for completion. Whilst
- * that @request is busy, the @active reports busy. When that @request is
- * retired, the @active tracker is updated to report idle.
- */
-static inline void
-__i915_active_request_set(struct i915_active_request *active,
-                         struct i915_request *request)
-{
-       list_move(&active->link, &request->active_list);
-       rcu_assign_pointer(active->request, request);
-}
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+                       struct dma_fence *fence);
 
 int __must_check
-i915_active_request_set(struct i915_active_request *active,
-                       struct i915_request *rq);
-
-/**
- * i915_active_request_raw - return the active request
- * @active - the active tracker
- *
- * i915_active_request_raw() returns the current request being tracked, or NULL.
- * It does not obtain a reference on the request for the caller, so the caller
- * must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_raw(const struct i915_active_request *active,
-                       struct mutex *mutex)
-{
-       return rcu_dereference_protected(active->request,
-                                        lockdep_is_held(mutex));
-}
+i915_active_fence_set(struct i915_active_fence *active,
+                     struct i915_request *rq);
 
 /**
- * i915_active_request_peek - report the active request being monitored
+ * __i915_active_fence_get_rcu - return a reference to the active request
  * @active - the active tracker
  *
- * i915_active_request_peek() returns the current request being tracked if
- * still active, or NULL. It does not obtain a reference on the request
- * for the caller, so the caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_peek(const struct i915_active_request *active,
-                        struct mutex *mutex)
-{
-       struct i915_request *request;
-
-       request = i915_active_request_raw(active, mutex);
-       if (!request || i915_request_completed(request))
-               return NULL;
-
-       return request;
-}
-
-/**
- * i915_active_request_get - return a reference to the active request
- * @active - the active tracker
- *
- * i915_active_request_get() returns a reference to the active request, or NULL
- * if the active tracker is idle. The caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_get(const struct i915_active_request *active,
-                       struct mutex *mutex)
-{
-       return i915_request_get(i915_active_request_peek(active, mutex));
-}
-
-/**
- * __i915_active_request_get_rcu - return a reference to the active request
- * @active - the active tracker
- *
- * __i915_active_request_get() returns a reference to the active request,
+ * __i915_active_fence_get() returns a reference to the active request,
  * or NULL if the active tracker is idle. The caller must hold the RCU read
  * lock, but the returned pointer is safe to use outside of RCU.
  */
-static inline struct i915_request *
-__i915_active_request_get_rcu(const struct i915_active_request *active)
+static inline struct dma_fence *
+__i915_active_fence_get_rcu(const struct i915_active_fence *active)
 {
        /*
         * Performing a lockless retrieval of the active request is super
@@ -198,10 +134,11 @@ __i915_active_request_get_rcu(const struct i915_active_request *active)
         * See i915_request_alloc().
         */
        do {
-               struct i915_request *request;
+               struct dma_fence *fence;
 
-               request = rcu_dereference(active->request);
-               if (!request || i915_request_completed(request))
+               fence = rcu_dereference(active->fence);
+               if (!fence ||
+                   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                        return NULL;
 
                /*
@@ -218,7 +155,7 @@ __i915_active_request_get_rcu(const struct i915_active_request *active)
                 */
                barrier();
 
-               request = i915_request_get_rcu(request);
+               fence = dma_fence_get_rcu(fence);
 
                /*
                 * What stops the following rcu_access_pointer() from occurring
@@ -247,81 +184,58 @@ __i915_active_request_get_rcu(const struct i915_active_request *active)
                 * The corresponding write barrier is part of
                 * rcu_assign_pointer().
                 */
-               if (!request || request == rcu_access_pointer(active->request))
-                       return rcu_pointer_handoff(request);
+               if (!fence || fence == rcu_access_pointer(active->fence))
+                       return rcu_pointer_handoff(fence);
 
-               i915_request_put(request);
+               dma_fence_put(fence);
        } while (1);
 }
 
 /**
- * i915_active_request_get_unlocked - return a reference to the active request
+ * i915_active_fence_get - return a reference to the active request
  * @active - the active tracker
  *
- * i915_active_request_get_unlocked() returns a reference to the active request,
+ * i915_active_fence_get() returns a reference to the active request,
  * or NULL if the active tracker is idle. The reference is obtained under RCU,
  * so no locking is required by the caller.
  *
- * The reference should be freed with i915_request_put().
+ * The reference should be freed with dma_fence_put().
  */
-static inline struct i915_request *
-i915_active_request_get_unlocked(const struct i915_active_request *active)
+static inline struct dma_fence *
+i915_active_fence_get(const struct i915_active_fence *active)
 {
-       struct i915_request *request;
+       struct dma_fence *fence;
 
        rcu_read_lock();
-       request = __i915_active_request_get_rcu(active);
+       fence = __i915_active_fence_get_rcu(active);
        rcu_read_unlock();
 
-       return request;
+       return fence;
 }
 
 /**
- * i915_active_request_isset - report whether the active tracker is assigned
+ * i915_active_fence_isset - report whether the active tracker is assigned
  * @active - the active tracker
  *
- * i915_active_request_isset() returns true if the active tracker is currently
+ * i915_active_fence_isset() returns true if the active tracker is currently
  * assigned to a request. Due to the lazy retiring, that request may be idle
  * and this may report stale information.
  */
 static inline bool
-i915_active_request_isset(const struct i915_active_request *active)
+i915_active_fence_isset(const struct i915_active_fence *active)
 {
-       return rcu_access_pointer(active->request);
+       return rcu_access_pointer(active->fence);
 }
 
-/**
- * i915_active_request_retire - waits until the request is retired
- * @active - the active request on which to wait
- *
- * i915_active_request_retire() waits until the request is completed,
- * and then ensures that at least the retirement handler for this
- * @active tracker is called before returning. If the @active
- * tracker is idle, the function returns immediately.
- */
-static inline int __must_check
-i915_active_request_retire(struct i915_active_request *active,
-                          struct mutex *mutex)
+static inline void
+i915_active_fence_cb(struct dma_fence *fence,
+                    struct dma_fence_cb *cb)
 {
-       struct i915_request *request;
-       long ret;
-
-       request = i915_active_request_raw(active, mutex);
-       if (!request)
-               return 0;
-
-       ret = i915_request_wait(request,
-                               I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
-                               MAX_SCHEDULE_TIMEOUT);
-       if (ret < 0)
-               return ret;
-
-       list_del_init(&active->link);
-       RCU_INIT_POINTER(active->request, NULL);
-
-       active->retire(active, request);
+       struct i915_active_fence *active =
+               container_of(cb, typeof(*active), cb);
 
-       return 0;
+       //GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
+       RCU_INIT_POINTER(active->fence, NULL);
 }
 
 /*
@@ -350,31 +264,29 @@ i915_active_request_retire(struct i915_active_request *active,
  * synchronisation.
  */
 
-void __i915_active_init(struct drm_i915_private *i915,
-                       struct i915_active *ref,
+void __i915_active_init(struct i915_active *ref,
                        int (*active)(struct i915_active *ref),
                        void (*retire)(struct i915_active *ref),
                        struct lock_class_key *key);
-#define i915_active_init(i915, ref, active, retire) do {               \
+#define i915_active_init(ref, active, retire) do {             \
        static struct lock_class_key __key;                             \
                                                                        \
-       __i915_active_init(i915, ref, active, retire, &__key);          \
+       __i915_active_init(ref, active, retire, &__key);                \
 } while (0)
 
 int i915_active_ref(struct i915_active *ref,
                    u64 timeline,
-                   struct i915_request *rq);
+                   struct dma_fence *fence);
 
 int i915_active_wait(struct i915_active *ref);
 
 int i915_request_await_active(struct i915_request *rq,
                              struct i915_active *ref);
-int i915_request_await_active_request(struct i915_request *rq,
-                                     struct i915_active_request *active);
+int i915_request_await_active_fence(struct i915_request *rq,
+                                   struct i915_active_fence *active);
 
 int i915_active_acquire(struct i915_active *ref);
 void i915_active_release(struct i915_active *ref);
-void __i915_active_release_nested(struct i915_active *ref, int subclass);
 
 static inline bool
 i915_active_is_idle(const struct i915_active *ref)
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 06acdffe0f6d..9519b6523801 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -8,30 +8,21 @@
 #define _I915_ACTIVE_TYPES_H_
 
 #include <linux/atomic.h>
+#include <linux/dma-fence.h>
 #include <linux/llist.h>
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
 #include <linux/rcupdate.h>
 #include <linux/workqueue.h>
 
-struct drm_i915_private;
-struct i915_active_request;
-struct i915_request;
-
-typedef void (*i915_active_retire_fn)(struct i915_active_request *,
-                                     struct i915_request *);
-
-struct i915_active_request {
-       struct i915_request __rcu *request;
-       struct list_head link;
-       i915_active_retire_fn retire;
+struct i915_active_fence {
+       struct dma_fence __rcu *fence;
+       struct dma_fence_cb cb;
 };
 
 struct active_node;
 
 struct i915_active {
-       struct drm_i915_private *i915;
-
        struct active_node *cache;
        struct rb_root tree;
        struct mutex mutex;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e097f7fcce6f..d74fcddd863e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -951,34 +951,38 @@ wait_for_timelines(struct drm_i915_private *i915,
 
        mutex_lock(&gt->mutex);
        list_for_each_entry(tl, &gt->active_list, link) {
-               struct i915_request *rq;
+               struct dma_fence *fence;
 
-               rq = i915_active_request_get_unlocked(&tl->last_request);
-               if (!rq)
+               fence = i915_active_fence_get(&tl->last_request);
+               if (!fence)
                        continue;
 
-               mutex_unlock(&gt->mutex);
-
-               /*
-                * "Race-to-idle".
-                *
-                * Switching to the kernel context is often used a synchronous
-                * step prior to idling, e.g. in suspend for flushing all
-                * current operations to memory before sleeping. These we
-                * want to complete as quickly as possible to avoid prolonged
-                * stalls, so allow the gpu to boost to maximum clocks.
-                */
-               if (flags & I915_WAIT_FOR_IDLE_BOOST)
-                       gen6_rps_boost(rq);
+               if (!dma_fence_is_i915(fence)) {
+                       timeout = dma_fence_wait_timeout(fence,
+                                                        flags & I915_WAIT_INTERRUPTIBLE,
+                                                        timeout);
+               } else {
+                       struct i915_request *rq = to_request(fence);
+
+                       /*
+                        * "Race-to-idle".
+                        *
+                        * Switching to the kernel context is often used as
+                        * a synchronous step prior to idling, e.g. in suspend
+                        * for flushing all current operations to memory before
+                        * sleeping. These we want to complete as quickly as
+                        * possible to avoid prolonged stalls, so allow the gpu
+                        * to boost to maximum clocks.
+                        */
+                       if (flags & I915_WAIT_FOR_IDLE_BOOST)
+                               gen6_rps_boost(rq);
+
+                       timeout = i915_request_wait(rq, flags, timeout);
+               }
 
-               timeout = i915_request_wait(rq, flags, timeout);
-               i915_request_put(rq);
+               dma_fence_put(fence);
                if (timeout < 0)
-                       return timeout;
-
-               /* restart after reacquiring the lock */
-               mutex_lock(&gt->mutex);
-               tl = list_entry(&gt->active_list, typeof(*tl), link);
+                       break;
        }
        mutex_unlock(&gt->mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 05fef1d3579d..60f754292a64 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2062,7 +2062,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
        if (!vma)
                return ERR_PTR(-ENOMEM);
 
-       i915_active_init(i915, &vma->active, NULL, NULL);
+       i915_active_init(&vma->active, NULL, NULL);
 
        vma->vm = &ggtt->vm;
        vma->ops = &pd_vma_ops;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c71edd6ea873..cbdf71e80616 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -229,9 +229,8 @@ static void free_capture_list(struct i915_request *request)
 
 static bool i915_request_retire(struct i915_request *rq)
 {
-       struct i915_active_request *active, *next;
+       lockdep_assert_held(&rq->timeline->mutex);
 
-       lockdep_assert_held(&rq->i915->drm.struct_mutex);
        if (!i915_request_completed(rq))
                return false;
 
@@ -245,35 +244,6 @@ static bool i915_request_retire(struct i915_request *rq)
 
        advance_ring(rq);
 
-       /*
-        * Walk through the active list, calling retire on each. This allows
-        * objects to track their GPU activity and mark themselves as idle
-        * when their *last* active request is completed (updating state
-        * tracking lists for eviction, active references for GEM, etc).
-        *
-        * As the ->retire() may free the node, we decouple it first and
-        * pass along the auxiliary information (to avoid dereferencing
-        * the node after the callback).
-        */
-       list_for_each_entry_safe(active, next, &rq->active_list, link) {
-               /*
-                * In microbenchmarks or focusing upon time inside the kernel,
-                * we may spend an inordinate amount of time simply handling
-                * the retirement of requests and processing their callbacks.
-                * Of which, this loop itself is particularly hot due to the
-                * cache misses when jumping around the list of
-                * i915_active_request.  So we try to keep this loop as
-                * streamlined as possible and also prefetch the next
-                * i915_active_request to try and hide the likely cache miss.
-                */
-               prefetchw(next);
-
-               INIT_LIST_HEAD(&active->link);
-               RCU_INIT_POINTER(active->request, NULL);
-
-               active->retire(active, rq);
-       }
-
        local_irq_disable();
 
        /*
@@ -288,8 +258,7 @@ static bool i915_request_retire(struct i915_request *rq)
 
        spin_lock(&rq->lock);
        i915_request_mark_complete(rq);
-       if (!i915_request_signaled(rq))
-               dma_fence_signal_locked(&rq->fence);
+       dma_fence_signal_locked(&rq->fence);
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
                i915_request_cancel_breadcrumb(rq);
        if (rq->waitboost) {
@@ -328,11 +297,9 @@ void i915_request_retire_upto(struct i915_request *rq)
                  rq->fence.context, rq->fence.seqno,
                  hwsp_seqno(rq));
 
-       lockdep_assert_held(&rq->i915->drm.struct_mutex);
+       lockdep_assert_held(&rq->timeline->mutex);
        GEM_BUG_ON(!i915_request_completed(rq));
-
-       if (list_empty(&rq->ring_link))
-               return;
+       GEM_BUG_ON(list_empty(&rq->ring_link));
 
        do {
                tmp = list_first_entry(&ring->request_list,
@@ -567,6 +534,7 @@ static void ring_retire_requests(struct intel_ring *ring)
 {
        struct i915_request *rq, *rn;
 
+       lockdep_assert_held(&ring->timeline->mutex);
        list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
                if (!i915_request_retire(rq))
                        break;
@@ -687,7 +655,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
        rq->waitboost = false;
        rq->execution_mask = ALL_ENGINES;
 
-       INIT_LIST_HEAD(&rq->active_list);
        INIT_LIST_HEAD(&rq->execute_cb);
 
        /*
@@ -726,7 +693,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
        ce->ring->emit = rq->head;
 
        /* Make sure we didn't add ourselves to external state before freeing */
-       GEM_BUG_ON(!list_empty(&rq->active_list));
        GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
        GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
 
@@ -1113,7 +1079,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
         * precludes optimising to use semaphores serialisation of a single
         * timeline across engines.
         */
-       prev = rcu_dereference_protected(timeline->last_request.request, 1);
+       prev = to_request(__i915_active_fence_set(&timeline->last_request,
+                                                 &rq->fence));
        if (prev && !i915_request_completed(prev)) {
                if (is_power_of_2(prev->engine->mask | rq->engine->mask))
                        i915_sw_fence_await_sw_fence(&rq->submit,
@@ -1138,7 +1105,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
         * us, the timeline will hold its seqno which is later than ours.
         */
        GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
-       __i915_active_request_set(&timeline->last_request, rq);
 
        return prev;
 }
@@ -1369,10 +1335,6 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
  * unbounded wait).
  *
- * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
- * in via the flags, and vice versa if the struct_mutex is not held, the caller
- * must not specify that the wait is locked.
- *
  * Returns the remaining time (in jiffies) if the request completed, which may
  * be zero or -ETIME if the request is unfinished after the timeout expires.
  * May return -EINTR is called with I915_WAIT_INTERRUPTIBLE and a signal is
@@ -1482,7 +1444,11 @@ bool i915_retire_requests(struct drm_i915_private *i915)
        list_for_each_entry_safe(ring, tmp,
                                 &i915->gt.active_rings, active_link) {
                intel_ring_get(ring); /* last rq holds reference! */
+               mutex_lock(&ring->timeline->mutex);
+
                ring_retire_requests(ring);
+
+               mutex_unlock(&ring->timeline->mutex);
                intel_ring_put(ring);
        }
 
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index bebc1e9b4a5e..8277cff0df70 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -211,7 +211,6 @@ struct i915_request {
         * on the active_list (of their final request).
         */
        struct i915_capture_list *capture_list;
-       struct list_head active_list;
 
        /** Time at which this request was emitted, in jiffies. */
        unsigned long emitted_jiffies;
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index e2f9336bab83..4ffb61991768 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -179,8 +179,7 @@ cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
        cl->hwsp = hwsp;
        cl->vaddr = page_pack_bits(vaddr, cacheline);
 
-       i915_active_init(hwsp_to_i915(hwsp), &cl->active,
-                        __cacheline_active, __cacheline_retire);
+       i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
 
        return cl;
 }
@@ -263,7 +262,7 @@ int i915_timeline_init(struct drm_i915_private *i915,
 
        mutex_init(&timeline->mutex);
 
-       INIT_ACTIVE_REQUEST(&timeline->last_request);
+       INIT_ACTIVE_FENCE(&timeline->last_request);
        INIT_LIST_HEAD(&timeline->requests);
 
        i915_syncmap_init(&timeline->sync);
@@ -463,7 +462,7 @@ __i915_timeline_get_seqno(struct i915_timeline *tl,
         * all writes into the cacheline from previous requests are complete.
         */
        err = i915_active_ref(&tl->hwsp_cacheline->active,
-                             tl->fence_context, rq);
+                             tl->fence_context, &rq->fence);
        if (err)
                goto err_cacheline;
 
@@ -514,7 +513,7 @@ int i915_timeline_get_seqno(struct i915_timeline *tl,
 static int cacheline_ref(struct i915_timeline_cacheline *cl,
                         struct i915_request *rq)
 {
-       return i915_active_ref(&cl->active, rq->fence.context, rq);
+       return i915_active_ref(&cl->active, rq->fence.context, &rq->fence);
 }
 
 int i915_timeline_read_hwsp(struct i915_request *from,
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
index fce5cb4f1090..5af6d185d70c 100644
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -45,7 +45,7 @@ struct i915_timeline {
         * the request using i915_active_request_get_request_rcu(), or hold the
         * struct_mutex.
         */
-       struct i915_active_request last_request;
+       struct i915_active_fence last_request;
 
        /**
         * We track the most recent seqno that we wait on in every context so
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 5bc58d0baf73..b8b8a9ca1ac7 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -114,8 +114,7 @@ vma_create(struct drm_i915_gem_object *obj,
        vma->size = obj->base.size;
        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
-       i915_active_init(vm->i915, &vma->active,
-                        __i915_vma_active, __i915_vma_retire);
+       i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
 
        INIT_LIST_HEAD(&vma->closed_link);
 
@@ -909,7 +908,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
         * add the active reference first and queue for it to be dropped
         * *last*.
         */
-       err = i915_active_ref(&vma->active, rq->fence.context, rq);
+       err = i915_active_ref(&vma->active, rq->fence.context, &rq->fence);
        if (err)
                return err;
 
@@ -920,7 +919,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
                if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
                        i915_active_ref(&obj->frontbuffer->write,
                                        rq->fence.context,
-                                       rq);
+                                       &rq->fence);
 
                obj->read_domains = 0;
        }
@@ -939,31 +938,10 @@ int i915_vma_unbind(struct i915_vma *vma)
 
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-       /*
-        * First wait upon any activity as retiring the request may
-        * have side-effects such as unpinning or even unbinding this vma.
-        */
-       might_sleep();
-       if (i915_vma_is_active(vma)) {
-               /*
-                * When a closed VMA is retired, it is unbound - eek.
-                * In order to prevent it from being recursively closed,
-                * take a pin on the vma so that the second unbind is
-                * aborted.
-                *
-                * Even more scary is that the retire callback may free
-                * the object (last active vma). To prevent the explosion
-                * we defer the actual object free to a worker that can
-                * only proceed once it acquires the struct_mutex (which
-                * we currently hold, therefore it cannot free this object
-                * before we are finished).
-                */
-               __i915_vma_pin(vma);
-               ret = i915_active_wait(&vma->active);
-               __i915_vma_unpin(vma);
-               if (ret)
-                       return ret;
-       }
+       ret = i915_active_wait(&vma->active);
+       if (ret)
+               return ret;
+
        flush_work(&vma->active.work);
        GEM_BUG_ON(i915_vma_is_active(vma));
 
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 4fcec413f405..e8a1bb20f548 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -253,8 +253,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
        fb->obj = obj;
        kref_init(&fb->ref);
        atomic_set(&fb->bits, 0);
-       i915_active_init(i915, &fb->write,
-                        frontbuffer_active, frontbuffer_retire);
+       i915_active_init(&fb->write, frontbuffer_active, frontbuffer_retire);
 
        spin_lock(&i915->fb_tracking.lock);
        if (obj->frontbuffer) {
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 38495df60f7b..dc7b66c94f74 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -230,7 +230,8 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
        if (IS_ERR(rq))
                return rq;
 
-       err = i915_active_ref(&overlay->last_flip, rq->fence.context, rq);
+       err = i915_active_ref(&overlay->last_flip,
+                             rq->fence.context, &rq->fence);
        if (err) {
                i915_request_add(rq);
                return ERR_PTR(err);
@@ -1366,8 +1367,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
        overlay->contrast = 75;
        overlay->saturation = 146;
 
-       i915_active_init(dev_priv,
-                        &overlay->last_flip,
+       i915_active_init(&overlay->last_flip,
                         NULL, intel_overlay_last_flip_retire);
 
        ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 3b3ca5658122..376ea04350b1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -36,7 +36,7 @@ static int __live_active_setup(struct drm_i915_private *i915,
        if (!submit)
                return -ENOMEM;
 
-       i915_active_init(i915, &active->base, NULL, __live_active_retire);
+       i915_active_init(&active->base, NULL, __live_active_retire);
        active->retired = false;
 
        err = i915_active_acquire(&active->base);
@@ -57,7 +57,8 @@ static int __live_active_setup(struct drm_i915_private *i915,
                                                       GFP_KERNEL);
                if (err >= 0)
                        err = i915_active_ref(&active->base,
-                                             rq->fence.context, rq);
+                                             rq->fence.context,
+                                             &rq->fence);
                i915_request_add(rq);
                if (err) {
                        pr_err("Failed to track active ref!\n");
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index 65b52be23d42..024de718f66f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -15,7 +15,7 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
 
        mutex_init(&timeline->mutex);
 
-       INIT_ACTIVE_REQUEST(&timeline->last_request);
+       INIT_ACTIVE_FENCE(&timeline->last_request);
        INIT_LIST_HEAD(&timeline->requests);
 
        i915_syncmap_init(&timeline->sync);
-- 
2.20.1
