If we convert the tracing from direct use of ring->irq_get() over to
the breadcrumb infrastructure, we are left with only a single user of
ring->irq_get() and so will be able to simplify the driver routines
(eliminating the redundant validation and irq refcounting).

v2: Move to a signalling framework based upon the waiter.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
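For reviewers, a rough sketch of how the new entry point is intended to be
used (illustrative only: trace_irq_notify() is a made-up caller; the real
user in this patch is the i915_gem_ring_dispatch tracepoint):

static void trace_irq_notify(struct drm_i915_gem_request *req)
{
        /*
         * intel_engine_add_signal() takes its own reference on req,
         * inserts it (sorted by seqno) into the engine's signal tree
         * and registers an intel_wait with the signaller kthread as
         * the waiter, enabling the user interrupt if required.  The
         * kthread reaps the signal once the request completes (or a
         * GPU reset invalidates it).  It can fail with -ENOMEM (the
         * signal is allocated with GFP_ATOMIC) or if the signaller
         * kthread cannot be created.
         */
        if (intel_engine_add_signal(req))
                DRM_DEBUG_DRIVER("Failed to queue signal for tracing\n");
}

The old i915_trace_irq_get()/irq_put() pairing and ring->trace_irq_req go
away; the signaller now holds the irq reference through the breadcrumb
waiter.
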
 drivers/gpu/drm/i915/i915_drv.h          |   8 --
 drivers/gpu/drm/i915/i915_gem.c          |   6 --
 drivers/gpu/drm/i915/i915_trace.h        |   2 +-
 drivers/gpu/drm/i915/intel_breadcrumbs.c | 132 +++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h  |   6 +-
 5 files changed, 138 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 37f4ef59fb4a..dabfb043362f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3625,12 +3625,4 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
                            schedule_timeout_uninterruptible(remaining_jiffies);
        }
 }
-
-static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
-                                     struct drm_i915_gem_request *req)
-{
-       if (ring->trace_irq_req == NULL && ring->irq_get(ring))
-               i915_gem_request_assign(&ring->trace_irq_req, req);
-}
-
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1ef677f3aac8..550ccd7873ab 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2907,12 +2907,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                i915_gem_object_retire__read(obj, ring->id);
        }
 
-       if (unlikely(ring->trace_irq_req &&
-                    i915_gem_request_completed(ring->trace_irq_req))) {
-               ring->irq_put(ring);
-               i915_gem_request_assign(&ring->trace_irq_req, NULL);
-       }
-
        WARN_ON(i915_verify_lists(ring->dev));
 }
 
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index efca75bcace3..3446abb4cf59 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -503,7 +503,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
                           __entry->ring = ring->id;
                           __entry->seqno = i915_gem_request_get_seqno(req);
                           __entry->flags = flags;
-                          i915_trace_irq_get(ring, req);
+                          intel_engine_add_signal(req);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index aefea0bf8a32..faf082b43202 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -22,6 +22,8 @@
  *
  */
 
+#include <linux/kthread.h>
+
 #include "i915_drv.h"
 
 static void intel_breadcrumbs_fake_irq(unsigned long data)
@@ -266,9 +268,139 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
        setup_timer(&b->fake_irq, intel_breadcrumbs_fake_irq, (unsigned long)b);
 }
 
+struct signal {
+       struct rb_node node;
+       struct intel_wait wait;
+       struct drm_i915_gem_request *request;
+};
+
+static bool signal_complete(struct signal *signal)
+{
+       struct drm_i915_gem_request *rq;
+
+       if (signal == NULL)
+               return false;
+       rq = signal->request;
+       if (intel_wait_complete(&signal->wait))
+               return true;
+
+       if (i915_gem_request_completed(rq))
+               return true;
+
+       if (rq->ring->seqno_barrier) {
+               rq->ring->seqno_barrier(rq->ring);
+               if (i915_gem_request_completed(rq))
+                       return true;
+       }
+
+       if (rq->reset_counter != i915_reset_counter(&rq->i915->gpu_error))
+               return true;
+
+       return false;
+}
+
+static struct signal *to_signal(struct rb_node *rb)
+{
+       return container_of(rb, struct signal, node);
+}
+
+static int intel_breadcrumbs_signaller(void *arg)
+{
+       struct intel_engine_cs *engine = arg;
+
+       do {
+               struct signal *signal;
+
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               spin_lock(&engine->breadcrumbs.lock);
+               signal = to_signal(rb_first(&engine->breadcrumbs.signals));
+               spin_unlock(&engine->breadcrumbs.lock);
+
+               if (signal_complete(signal)) {
+                       intel_engine_remove_wait(engine, &signal->wait);
+
+                       i915_gem_request_unreference__unlocked(signal->request);
+
+                       spin_lock(&engine->breadcrumbs.lock);
+                       rb_erase(&signal->node, &engine->breadcrumbs.signals);
+                       spin_unlock(&engine->breadcrumbs.lock);
+
+                       kfree(signal);
+               } else {
+                       i915_queue_hangcheck(engine->i915);
+
+                       if (kthread_should_stop())
+                               break;
+
+                       schedule();
+               }
+       } while (1);
+
+       return 0;
+}
+
+int intel_engine_add_signal(struct drm_i915_gem_request *request)
+{
+       struct intel_engine_cs *engine = request->ring;
+       struct intel_breadcrumbs *b = &engine->breadcrumbs;
+       struct rb_node *parent, **p;
+       struct task_struct *task;
+       struct signal *signal;
+
+       signal = kmalloc(sizeof(*signal), GFP_ATOMIC);
+       if (signal == NULL)
+               return -ENOMEM;
+
+       task = READ_ONCE(b->signaller);
+       if (task == NULL) {
+               /* kthread_create() may sleep, so create outside b->lock */
+               task = kthread_create(intel_breadcrumbs_signaller,
+                                     engine, "irq/i915-%d", engine->id);
+               if (IS_ERR(task)) {
+                       kfree(signal);
+                       return PTR_ERR(task);
+               }
+
+               spin_lock(&b->lock);
+               b->signaller = task;
+               spin_unlock(&b->lock);
+       }
+
+       signal->wait.task = task;
+       signal->wait.seqno = request->seqno;
+
+       signal->request = i915_gem_request_reference(request);
+
+       spin_lock(&b->lock);
+       parent = NULL;
+       p = &b->signals.rb_node;
+       while (*p) {
+               parent = *p;
+               if (i915_seqno_passed(signal->wait.seqno,
+                                     to_signal(parent)->wait.seqno))
+                       p = &parent->rb_right;
+               else
+                       p = &parent->rb_left;
+       }
+       rb_link_node(&signal->node, parent, p);
+       rb_insert_color(&signal->node, &b->signals);
+       spin_unlock(&b->lock);
+
+       if (intel_engine_add_wait(engine, &signal->wait)) {
+               intel_engine_enable_irq(engine);
+               wake_up_process(task);
+       }
+
+       return 0;
+}
+
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 {
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
 
+       if (b->signaller)
+               kthread_stop(b->signaller);
+
        del_timer_sync(&b->fake_irq);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3c390ce26919..cff3f15f9859 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -144,6 +144,8 @@ struct  i915_ctx_workarounds {
        struct drm_i915_gem_object *obj;
 };
 
+struct drm_i915_gem_request;
+
 struct  intel_engine_cs {
        const char      *name;
        enum intel_ring_id {
@@ -180,7 +182,9 @@ struct  intel_engine_cs {
        struct intel_breadcrumbs {
                spinlock_t lock; /* protects the lists of requests */
                struct rb_root waiters; /* sorted by retirement */
+               struct rb_root signals; /* sorted by retirement */
                struct task_struct *first_waiter; /* bh for user interrupts */
+               struct task_struct *signaller; /* used for fence signalling */
                struct timer_list fake_irq; /* used after a missed interrupt */
                bool irq_enabled;
                bool rpm_wakelock;
@@ -198,7 +202,6 @@ struct  intel_engine_cs {
 
        unsigned irq_refcount; /* protected by dev_priv->irq_lock */
        u32             irq_enable_mask;        /* bitmask to enable ring interrupt */
-       struct drm_i915_gem_request *trace_irq_req;
        bool __must_check (*irq_get)(struct intel_engine_cs *ring);
        void            (*irq_put)(struct intel_engine_cs *ring);
 
@@ -554,6 +557,7 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait);
 void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait);
+int intel_engine_add_signal(struct drm_i915_gem_request *request);
 static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
 {
        return READ_ONCE(engine->breadcrumbs.first_waiter);
-- 
2.6.3
