From: Mika Kuoppala <mika.kuopp...@intel.com>

Instead of trusting that the first available port is at index 0, use
an accessor to hide this. This is preparation for following patches
where the head can be at an arbitrary location in the port array.

v2: improved commit message, elsp_ready readability (Chris)
v3: s/execlist_port_index/execlist_port (Chris)
v4: rebase to new naming
v5: fix port_next indexing

Cc: Michał Winiarski <michal.winiar...@intel.com>
Cc: Joonas Lahtinen <joonas.lahti...@linux.intel.com>
Cc: Chris Wilson <ch...@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuopp...@intel.com>
---
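Note for reviewers (not part of the commit message): the head-relative
indexing the new helpers introduce is plain modular arithmetic over a
power-of-two port count. A minimal standalone sketch of the same math,
assuming a hypothetical PORT_COUNT of 2 and a port_add() helper that
mirrors __port_add():

    #include <assert.h>

    #define PORT_COUNT 2                      /* must be a power of two */
    #define PORT_MASK  (PORT_COUNT - 1)

    /* wrap (start + n) into [0, PORT_COUNT) */
    static unsigned int port_add(unsigned int start, unsigned int n)
    {
            return (start + n) & PORT_MASK;
    }

    int main(void)
    {
            unsigned int head = 1;            /* arbitrary head position */

            assert(port_add(head, 0) == 1);   /* head itself */
            assert(port_add(head, 1) == 0);   /* next wraps past the end */
            assert(port_add(head, -1u) == 0); /* tail == head + mask */
            return 0;
    }
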
 drivers/gpu/drm/i915/i915_gpu_error.c      |  6 ++-
 drivers/gpu/drm/i915/i915_guc_submission.c | 53 +++++++++++++++----------
 drivers/gpu/drm/i915/intel_engine_cs.c     |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 63 ++++++++++++++++++------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    | 38 ++++++++++++++++++
 5 files changed, 114 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 653fb69e7ecb..6d0bdb03b3f0 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1333,11 +1333,13 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 static void error_record_engine_execlists(struct intel_engine_cs *engine,
                                          struct drm_i915_error_engine *ee)
 {
-       const struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
        unsigned int n;
 
        for (n = 0; n < execlists_num_ports(execlists); n++) {
-               struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
+               struct drm_i915_gem_request *rq;
+
+               rq = port_request(execlists_port(execlists, n));
 
                if (!rq)
                        break;
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index a2e8114b739d..5222004db039 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -496,17 +496,19 @@ static void i915_guc_submit(struct intel_engine_cs *engine)
        struct intel_guc *guc = &dev_priv->guc;
        struct i915_guc_client *client = guc->execbuf_client;
        struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct execlist_port *port = execlists->port;
        const unsigned int engine_id = engine->id;
        unsigned int n;
 
        for (n = 0; n < execlists_num_ports(execlists); n++) {
+               struct execlist_port *port;
                struct drm_i915_gem_request *rq;
                unsigned int count;
 
-               rq = port_unpack(&port[n], &count);
+               port = execlists_port(execlists, n);
+               rq = port_unpack(port, &count);
+
                if (rq && count == 0) {
-                       port_set(&port[n], port_pack(rq, ++count));
+                       port_set(port, port_pack(rq, ++count));
 
                        if (i915_vma_is_map_and_fenceable(rq->ring->vma))
                                POSTING_READ_FW(GUC_STATUS);
@@ -561,15 +563,20 @@ static void port_assign(struct execlist_port *port,
 static void i915_guc_dequeue(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct execlist_port *port = execlists->port;
+       struct execlist_port *port;
        struct drm_i915_gem_request *last = NULL;
-       const struct execlist_port * const last_port =
-               &execlists->port[execlists->port_mask];
        bool submit = false;
        struct rb_node *rb;
 
-       if (port_isset(port))
-               port++;
+       port = execlists_port_head(execlists);
+
+       /*
+        * We don't coalesce into the last submitted port with the GuC.
+        * Find the first free port; this is safe as we don't dequeue
+        * without at least the last port free.
+        */
+       while (port_isset(port))
+               port = execlists_port_next(execlists, port);
 
        spin_lock_irq(&engine->timeline->lock);
        rb = execlists->first;
@@ -580,7 +587,7 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
 
                list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
                        if (last && rq->ctx != last->ctx) {
-                               if (port == last_port) {
+                               if (port == execlists_port_tail(execlists)) {
                                        __list_del_many(&p->requests,
                                                        &rq->priotree.link);
                                        goto done;
@@ -588,7 +595,8 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
 
                                if (submit)
                                        port_assign(port, last);
-                               port++;
+
+                               port = execlists_port_next(execlists, port);
                        }
 
                        INIT_LIST_HEAD(&rq->priotree.link);
@@ -619,22 +627,27 @@ static void i915_guc_irq_handler(unsigned long data)
 {
        struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
        struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct execlist_port *port = execlists->port;
-       const struct execlist_port * const last_port =
-               &execlists->port[execlists->port_mask];
-       struct drm_i915_gem_request *rq;
 
-       rq = port_request(&port[0]);
-       while (rq && i915_gem_request_completed(rq)) {
+       do {
+               struct execlist_port *port;
+               struct drm_i915_gem_request *rq;
+
+               port = execlists_port_head(execlists);
+               rq = port_request(port);
+
+               if (!rq)
+                       break;
+
+               if (!i915_gem_request_completed(rq))
+                       break;
+
                trace_i915_gem_request_out(rq);
                i915_gem_request_put(rq);
 
                execlists_port_complete(execlists, port);
+       } while (1);
 
-               rq = port_request(&port[0]);
-       }
-
-       if (!port_isset(last_port))
+       if (!port_isset(execlists_port_tail(execlists)))
                i915_guc_dequeue(engine);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a47a9c6bea52..8ba62d4c010e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1549,7 +1549,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return false;
 
        /* Both ports drained, no more ELSP submission? */
-       if (port_request(&engine->execlists.port[0]))
+       if (port_request(execlists_port_head(&engine->execlists)))
                return false;
 
        /* ELSP is empty, but there are ready requests? */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7f45dd7dc3e5..2945aadc4b7e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -437,24 +437,26 @@ static inline void elsp_write(u64 desc, u32 __iomem *elsp)
 
 static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
-       struct execlist_port *port = engine->execlists.port;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
        u32 __iomem *elsp =
                engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
        unsigned int n;
 
-       for (n = execlists_num_ports(&engine->execlists); n--; ) {
+       for (n = execlists_num_ports(execlists); n--; ) {
+               struct execlist_port *port;
                struct drm_i915_gem_request *rq;
                unsigned int count;
                u64 desc;
 
-               rq = port_unpack(&port[n], &count);
+               port = execlists_port(execlists, n);
+               rq = port_unpack(port, &count);
                if (rq) {
                        GEM_BUG_ON(count > !n);
                        if (!count++)
                        execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
-                       port_set(&port[n], port_pack(rq, count));
+                       port_set(port, port_pack(rq, count));
                        desc = execlists_update_context(rq);
-                       GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
+                       GEM_DEBUG_EXEC(port->context_id = upper_32_bits(desc));
                } else {
                        GEM_BUG_ON(!n);
                        desc = 0;
@@ -523,10 +525,8 @@ static bool can_preempt(struct intel_engine_cs *engine)
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct execlist_port *port = execlists->port;
-       const struct execlist_port * const last_port =
-               &execlists->port[execlists->port_mask];
-       struct drm_i915_gem_request *last = port_request(port);
+       struct execlist_port *port;
+       struct drm_i915_gem_request *last;
        struct rb_node *rb;
        bool submit = false;
 
@@ -557,6 +557,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        if (!rb)
                goto unlock;
 
+       port = execlists_port_head(execlists);
+       last = port_request(port);
+
        if (last) {
                /*
                 * Don't resubmit or switch until all outstanding
@@ -564,7 +567,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                 * know the next preemption status we see corresponds
                 * to this ELSP update.
                 */
-               if (port_count(&port[0]) > 1)
+               if (port_count(port) > 1)
                        goto unlock;
 
                if (can_preempt(engine) &&
@@ -598,7 +601,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                         * the driver is unable to keep up the supply of new
                         * work).
                         */
-                       if (port_count(&port[1]))
+                       if (port_count(execlists_port_next(execlists, port)))
                                goto unlock;
 
                        /* WaIdleLiteRestore:bdw,skl
@@ -634,7 +637,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * combine this request with the last, then we
                                 * are done.
                                 */
-                               if (port == last_port) {
+                               if (port == execlists_port_tail(execlists)) {
                                        __list_del_many(&p->requests,
                                                        &rq->priotree.link);
                                        goto done;
@@ -658,7 +661,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                                if (submit)
                                        port_assign(port, last);
-                               port++;
+
+                               port = execlists_port_next(execlists, port);
 
                                GEM_BUG_ON(port_isset(port));
                        }
@@ -688,19 +692,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 }
 
 static void
-execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+execlists_cancel_port_requests(struct intel_engine_execlists *execlists)
 {
-       struct execlist_port *port = execlists->port;
        unsigned int num_ports = execlists_num_ports(execlists);
 
-       while (num_ports-- && port_isset(port)) {
-               struct drm_i915_gem_request *rq = port_request(port);
+       while (num_ports--) {
+               struct execlist_port *port;
+               struct drm_i915_gem_request *rq;
+
+               port = execlists_port_head(execlists);
+               if (!port_isset(port))
+                       break;
+
+               rq = port_request(port);
 
                execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
                i915_gem_request_put(rq);
 
-               memset(port, 0, sizeof(*port));
-               port++;
+               execlists_port_complete(execlists, port);
        }
 }
 
@@ -714,7 +723,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        spin_lock_irqsave(&engine->timeline->lock, flags);
 
        /* Cancel the requests on the HW and clear the ELSP tracker. */
-       execlist_cancel_port_requests(execlists);
+       execlists_cancel_port_requests(execlists);
 
        /* Mark all executing requests as skipped. */
        list_for_each_entry(rq, &engine->timeline->requests, link) {
@@ -769,7 +778,6 @@ static void intel_lrc_irq_handler(unsigned long data)
 {
        struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
        struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct execlist_port * const port = execlists->port;
        struct drm_i915_private *dev_priv = engine->i915;
 
        /* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -788,6 +796,8 @@ static void intel_lrc_irq_handler(unsigned long data)
         * new request (outside of the context-switch interrupt).
         */
        while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
+               struct execlist_port *port;
+
                /* The HWSP contains a (cacheable) mirror of the CSB */
                const u32 *buf =
                        &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
@@ -855,7 +865,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 
                        if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
                            buf[2*head + 1] == PREEMPT_ID) {
-                               execlist_cancel_port_requests(execlists);
+                               execlists_cancel_port_requests(execlists);
 
                                spin_lock_irq(&engine->timeline->lock);
                                unwind_incomplete_requests(engine);
@@ -870,6 +880,8 @@ static void intel_lrc_irq_handler(unsigned long data)
                            execlists->preempt)
                                continue;
 
+                       port = execlists_port_head(execlists);
+
                        /* Check the context/desc id for this event matches */
                        GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
 
@@ -890,7 +902,7 @@ static void intel_lrc_irq_handler(unsigned long data)
                        }
 
                        /* After the final element, the hw should be idle */
-                       GEM_BUG_ON(port_count(port) == 0 &&
+                       GEM_BUG_ON(port_count(execlists_port_head(execlists)) == 0 &&
                                   !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
                }
 
@@ -921,6 +933,7 @@ static void insert_request(struct intel_engine_cs *engine,
 static void execlists_submit_request(struct drm_i915_gem_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
        unsigned long flags;
 
        /* Will be called from irq-context when using foreign fences. */
@@ -928,7 +941,7 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 
        insert_request(engine, &request->priotree, request->priotree.priority);
 
-       GEM_BUG_ON(!engine->execlists.first);
+       GEM_BUG_ON(!execlists->first);
        GEM_BUG_ON(list_empty(&request->priotree.link));
 
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
@@ -1520,7 +1533,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
         * guessing the missed context-switch events by looking at what
         * requests were completed.
         */
-       execlist_cancel_port_requests(execlists);
+       execlists_cancel_port_requests(execlists);
 
        /* Push back any incomplete requests for replay after the reset. */
        unwind_incomplete_requests(engine);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 17186f067408..cfec73400d0f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -251,6 +251,11 @@ struct intel_engine_execlists {
        unsigned int port_mask;
 
        /**
+        * @port_head: first used execlist port
+        */
+       unsigned int port_head;
+
+       /**
         * @queue: queue of requests, in priority lists
         */
        struct rb_root queue;
@@ -531,6 +536,39 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
        return execlists->port_mask + 1;
 }
 
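+/*
+ * The ports form a ring: port_mask is the port count minus one (the
+ * count is a power of two), so the additions below wrap around naturally.
+ */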
+#define __port_add(start, n, mask) (((start) + (n)) & (mask))
+#define port_head_add(e, n) __port_add((e)->port_head, n, (e)->port_mask)
+
+/* Index starting from port_head */
+static inline struct execlist_port *
+execlists_port(struct intel_engine_execlists * const execlists,
+              const unsigned int n)
+{
+       return &execlists->port[port_head_add(execlists, n)];
+}
+
+static inline struct execlist_port *
+execlists_port_head(struct intel_engine_execlists * const execlists)
+{
+       return execlists_port(execlists, 0);
+}
+
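+/* The last port: n == -1 wraps to port_head + port_mask under the mask */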
+static inline struct execlist_port *
+execlists_port_tail(struct intel_engine_execlists * const execlists)
+{
+       return execlists_port(execlists, -1);
+}
+
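+/* The port following @port, wrapping past the end of the ring */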
+static inline struct execlist_port *
+execlists_port_next(struct intel_engine_execlists * const execlists,
+                   const struct execlist_port * const port)
+{
+       const unsigned int n = __port_add(port_index(port, execlists),
+                                         1,
+                                         execlists->port_mask);
+       return &execlists->port[n];
+}
+
 static inline void
 execlists_port_complete(struct intel_engine_execlists * const execlists,
                        struct execlist_port * const port)
-- 
2.11.0
