The engine's execlist-related items have grown to the point where a
separate struct is warranted. Carve the execlist-specific items out
into a dedicated struct for clarity.
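
For reference, the new container is, in outline (port accessor
macros and debug annotations elided; see the intel_ringbuffer.h
hunk below for the full definition):

	struct intel_engine_execlist {
		struct tasklet_struct irq_tasklet;
		struct i915_priolist default_priolist;
		bool no_priolist;
		struct execlist_port port[2];
		struct rb_root queue;
		struct rb_node *first;
		unsigned int fw_domains;
	};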

Suggested-by: Chris Wilson <ch...@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuopp...@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  6 +--
 drivers/gpu/drm/i915/i915_gem.c            | 16 ++++----
 drivers/gpu/drm/i915/i915_gpu_error.c      |  4 +-
 drivers/gpu/drm/i915/i915_guc_submission.c | 19 ++++-----
 drivers/gpu/drm/i915/i915_irq.c            |  5 ++-
 drivers/gpu/drm/i915/intel_engine_cs.c     | 12 +++---
 drivers/gpu/drm/i915/intel_lrc.c           | 64 +++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    | 45 ++++++++++++---------
 8 files changed, 91 insertions(+), 80 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 48572b157222..cd4a81166f4e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3341,10 +3341,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
                        }
 
                        rcu_read_lock();
-                       for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
+                       for (idx = 0; idx < ARRAY_SIZE(engine->execlist.port); idx++) {
                                unsigned int count;
 
-                               rq = port_unpack(&engine->execlist_port[idx],
+                               rq = port_unpack(&engine->execlist.port[idx],
                                                 &count);
                                if (rq) {
                                        seq_printf(m, "\t\tELSP[%d] count=%d, ",
@@ -3358,7 +3358,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
                        rcu_read_unlock();
 
                        spin_lock_irq(&engine->timeline->lock);
-                       for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
+                       for (rb = engine->execlist.first; rb; rb = rb_next(rb)) {
                                struct i915_priolist *p =
                                        rb_entry(rb, typeof(*p), node);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b9e8e0d6e97b..754ac5cb22ce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2810,8 +2810,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
         * Turning off the engine->irq_tasklet until the reset is over
         * prevents the race.
         */
-       tasklet_kill(&engine->irq_tasklet);
-       tasklet_disable(&engine->irq_tasklet);
+       tasklet_kill(&engine->execlist.irq_tasklet);
+       tasklet_disable(&engine->execlist.irq_tasklet);
 
        if (engine->irq_seqno_barrier)
                engine->irq_seqno_barrier(engine);
@@ -2990,7 +2990,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
-       tasklet_enable(&engine->irq_tasklet);
+       tasklet_enable(&engine->execlist.irq_tasklet);
        kthread_unpark(engine->breadcrumbs.signaler);
 }
 
@@ -3042,17 +3042,17 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
         */
 
        if (i915.enable_execlists) {
-               struct execlist_port *port = engine->execlist_port;
+               struct execlist_port *port = engine->execlist.port;
                unsigned long flags;
                unsigned int n;
 
                spin_lock_irqsave(&engine->timeline->lock, flags);
 
-               for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+               for (n = 0; n < ARRAY_SIZE(engine->execlist.port); n++)
                        i915_gem_request_put(port_request(&port[n]));
-               memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
-               engine->execlist_queue = RB_ROOT;
-               engine->execlist_first = NULL;
+               memset(engine->execlist.port, 0, sizeof(engine->execlist.port));
+               engine->execlist.queue = RB_ROOT;
+               engine->execlist.first = NULL;
 
                spin_unlock_irqrestore(&engine->timeline->lock, flags);
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index ed5a1eb839ad..6114bf79219d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1327,10 +1327,10 @@ static void engine_record_requests(struct intel_engine_cs *engine,
 static void error_record_engine_execlists(struct intel_engine_cs *engine,
                                          struct drm_i915_error_engine *ee)
 {
-       const struct execlist_port *port = engine->execlist_port;
+       const struct execlist_port *port = engine->execlist.port;
        unsigned int n;
 
-       for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
+       for (n = 0; n < ARRAY_SIZE(engine->execlist.port); n++) {
                struct drm_i915_gem_request *rq = port_request(&port[n]);
 
                if (!rq)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 48a1e9349a2c..f95defe18885 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -661,21 +661,22 @@ static void port_assign(struct execlist_port *port,
 
 static bool i915_guc_dequeue(struct intel_engine_cs *engine)
 {
-       struct execlist_port *port = engine->execlist_port;
+       struct intel_engine_execlist * const el = &engine->execlist;
+       struct execlist_port *port = el->port;
        struct drm_i915_gem_request *last = port_request(port);
        struct rb_node *rb;
        bool submit = false;
 
        spin_lock_irq(&engine->timeline->lock);
-       rb = engine->execlist_first;
-       GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
+       rb = el->first;
+       GEM_BUG_ON(rb_first(&el->queue) != rb);
        while (rb) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
                struct drm_i915_gem_request *rq, *rn;
 
                list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
                        if (last && rq->ctx != last->ctx) {
-                               if (port != engine->execlist_port) {
+                               if (port != el->port) {
                                        __list_del_many(&p->requests,
                                                        &rq->priotree.link);
                                        goto done;
@@ -696,13 +697,13 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
                }
 
                rb = rb_next(rb);
-               rb_erase(&p->node, &engine->execlist_queue);
+               rb_erase(&p->node, &el->queue);
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
        }
 done:
-       engine->execlist_first = rb;
+       el->first = rb;
        if (submit)
                port_assign(port, last);
        spin_unlock_irq(&engine->timeline->lock);
@@ -712,8 +713,8 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
 
 static void i915_guc_irq_handler(unsigned long data)
 {
-       struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-       struct execlist_port *port = engine->execlist_port;
+       struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+       struct execlist_port *port = engine->execlist.port;
        struct drm_i915_gem_request *rq;
        bool submit;
 
@@ -1256,7 +1257,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
                 * take over the callback without changing any other state
                 * in the tasklet.
                 */
-               engine->irq_tasklet.func = i915_guc_irq_handler;
+               engine->execlist.irq_tasklet.func = i915_guc_irq_handler;
                clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
 
                /* Replay the current set of previously submitted requests */
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e21ce9c18b6e..8347d3e94ed3 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1308,10 +1308,10 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
 static void
 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 {
+       struct intel_engine_execlist * const el = &engine->execlist;
        bool tasklet = false;
 
        if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
-               if (port_count(&engine->execlist_port[0])) {
+               if (port_count(&el->port[0])) {
                        __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
                        tasklet = true;
                }
@@ -1323,7 +1324,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
        }
 
        if (tasklet)
-               tasklet_hi_schedule(&engine->irq_tasklet);
+               tasklet_hi_schedule(&el->irq_tasklet);
 }
 
 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index d23f18874309..74cc4bb4c2df 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -391,8 +391,8 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine)
  */
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
-       engine->execlist_queue = RB_ROOT;
-       engine->execlist_first = NULL;
+       engine->execlist.queue = RB_ROOT;
+       engine->execlist.first = NULL;
 
        intel_engine_init_timeline(engine);
        intel_engine_init_hangcheck(engine);
@@ -1305,11 +1305,11 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return false;
 
        /* Both ports drained, no more ELSP submission? */
-       if (port_request(&engine->execlist_port[0]))
+       if (port_request(&engine->execlist.port[0]))
                return false;
 
        /* ELSP is empty, but there are ready requests? */
-       if (READ_ONCE(engine->execlist_first))
+       if (READ_ONCE(engine->execlist.first))
                return false;
 
        /* Ring stopped? */
@@ -1358,8 +1358,8 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
        for_each_engine(engine, i915, id) {
                intel_engine_disarm_breadcrumbs(engine);
                i915_gem_batch_pool_fini(&engine->batch_pool);
-               tasklet_kill(&engine->irq_tasklet);
-               engine->no_priolist = false;
+               tasklet_kill(&engine->execlist.irq_tasklet);
+               engine->execlist.no_priolist = false;
        }
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d89e1b8e1cc5..2964e7c0a873 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -338,12 +338,12 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
 
 static void execlists_submit_ports(struct intel_engine_cs *engine)
 {
-       struct execlist_port *port = engine->execlist_port;
+       struct execlist_port *port = engine->execlist.port;
        u32 __iomem *elsp =
                engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
        unsigned int n;
 
-       for (n = ARRAY_SIZE(engine->execlist_port); n--; ) {
+       for (n = ARRAY_SIZE(engine->execlist.port); n--; ) {
                struct drm_i915_gem_request *rq;
                unsigned int count;
                u64 desc;
@@ -398,7 +398,7 @@ static void port_assign(struct execlist_port *port,
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *last;
-       struct execlist_port *port = engine->execlist_port;
+       struct execlist_port *port = engine->execlist.port;
        struct rb_node *rb;
        bool submit = false;
 
@@ -436,8 +436,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         */
 
        spin_lock_irq(&engine->timeline->lock);
-       rb = engine->execlist_first;
-       GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
+       rb = engine->execlist.first;
+       GEM_BUG_ON(rb_first(&engine->execlist.queue) != rb);
        while (rb) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
                struct drm_i915_gem_request *rq, *rn;
@@ -460,7 +460,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * combine this request with the last, then we
                                 * are done.
                                 */
-                               if (port != engine->execlist_port) {
+                               if (port != engine->execlist.port) {
                                        __list_del_many(&p->requests,
                                                        &rq->priotree.link);
                                        goto done;
@@ -497,13 +497,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                }
 
                rb = rb_next(rb);
-               rb_erase(&p->node, &engine->execlist_queue);
+               rb_erase(&p->node, &engine->execlist.queue);
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
                        kmem_cache_free(engine->i915->priorities, p);
        }
 done:
-       engine->execlist_first = rb;
+       engine->execlist.first = rb;
        if (submit)
                port_assign(port, last);
        spin_unlock_irq(&engine->timeline->lock);
@@ -514,7 +514,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
 {
-       const struct execlist_port *port = engine->execlist_port;
+       const struct execlist_port *port = engine->execlist.port;
 
        return port_count(&port[0]) + port_count(&port[1]) < 2;
 }
@@ -525,8 +525,9 @@ static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
  */
 static void intel_lrc_irq_handler(unsigned long data)
 {
-       struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-       struct execlist_port *port = engine->execlist_port;
+       struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+       struct intel_engine_execlist * const el = &engine->execlist;
+       struct execlist_port *port = el->port;
        struct drm_i915_private *dev_priv = engine->i915;
 
        /* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -538,7 +539,7 @@ static void intel_lrc_irq_handler(unsigned long data)
         */
        GEM_BUG_ON(!dev_priv->gt.awake);
 
-       intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
+       intel_uncore_forcewake_get(dev_priv, el->fw_domains);
 
        /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
         * imposing the cost of a locked atomic transaction when submitting a
@@ -626,7 +627,7 @@ static void intel_lrc_irq_handler(unsigned long data)
        if (execlists_elsp_ready(engine))
                execlists_dequeue(engine);
 
-       intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
+       intel_uncore_forcewake_put(dev_priv, el->fw_domains);
 }
 
 static bool
@@ -634,17 +635,18 @@ insert_request(struct intel_engine_cs *engine,
               struct i915_priotree *pt,
               int prio)
 {
+       struct intel_engine_execlist *el = &engine->execlist;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
 
-       if (unlikely(engine->no_priolist))
+       if (unlikely(engine->execlist.no_priolist))
                prio = I915_PRIORITY_NORMAL;
 
 find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
-       parent = &engine->execlist_queue.rb_node;
+       parent = &el->queue.rb_node;
        while (*parent) {
                rb = *parent;
                p = rb_entry(rb, typeof(*p), node);
@@ -660,7 +662,7 @@ insert_request(struct intel_engine_cs *engine,
        }
 
        if (prio == I915_PRIORITY_NORMAL) {
-               p = &engine->default_priolist;
+               p = &el->default_priolist;
        } else {
                p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
@@ -675,20 +677,20 @@ insert_request(struct intel_engine_cs *engine,
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
-                       engine->no_priolist = true;
+                       el->no_priolist = true;
                        goto find_priolist;
                }
        }
 
        p->priority = prio;
        rb_link_node(&p->node, rb, parent);
-       rb_insert_color(&p->node, &engine->execlist_queue);
+       rb_insert_color(&p->node, &el->queue);
 
        INIT_LIST_HEAD(&p->requests);
        list_add_tail(&pt->link, &p->requests);
 
        if (first)
-               engine->execlist_first = &p->node;
+               el->first = &p->node;
 
        return first;
 }
@@ -705,10 +707,10 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
                           &request->priotree,
                           request->priotree.priority)) {
                if (execlists_elsp_ready(engine))
-                       tasklet_hi_schedule(&engine->irq_tasklet);
+                       tasklet_hi_schedule(&engine->execlist.irq_tasklet);
        }
 
-       GEM_BUG_ON(!engine->execlist_first);
+       GEM_BUG_ON(!engine->execlist.first);
        GEM_BUG_ON(list_empty(&request->priotree.link));
 
        spin_unlock_irqrestore(&engine->timeline->lock, flags);
@@ -1234,7 +1236,7 @@ static u8 gtiir[] = {
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       struct execlist_port *port = engine->execlist_port;
+       struct execlist_port *port = engine->execlist.port;
        unsigned int n;
        bool submit;
        int ret;
@@ -1272,7 +1274,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
        /* After a GPU reset, we may have requests to replay */
        submit = false;
-       for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
+       for (n = 0; n < ARRAY_SIZE(engine->execlist.port); n++) {
                if (!port_isset(&port[n]))
                        break;
 
@@ -1327,7 +1329,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static void reset_common_ring(struct intel_engine_cs *engine,
                              struct drm_i915_gem_request *request)
 {
-       struct execlist_port *port = engine->execlist_port;
+       struct execlist_port *port = engine->execlist.port;
        struct intel_context *ce;
        unsigned int n;
 
@@ -1341,9 +1343,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
         * requests were completed.
         */
        if (!request) {
-               for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+               for (n = 0; n < ARRAY_SIZE(engine->execlist.port); n++)
                        i915_gem_request_put(port_request(&port[n]));
-               memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
+               memset(engine->execlist.port, 0, sizeof(engine->execlist.port));
                return;
        }
 
@@ -1668,8 +1670,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
         * Tasklet cannot be active at this point due intel_mark_active/idle
         * so this is just for documentation.
         */
-       if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
-               tasklet_kill(&engine->irq_tasklet);
+       if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlist.irq_tasklet.state)))
+               tasklet_kill(&engine->execlist.irq_tasklet);
 
        dev_priv = engine->i915;
 
@@ -1697,7 +1699,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
 {
        engine->submit_request = execlists_submit_request;
        engine->schedule = execlists_schedule;
-       engine->irq_tasklet.func = intel_lrc_irq_handler;
+       engine->execlist.irq_tasklet.func = intel_lrc_irq_handler;
 }
 
 static void
@@ -1772,9 +1774,9 @@ logical_ring_setup(struct intel_engine_cs *engine)
                                                     RING_CONTEXT_STATUS_BUF_BASE(engine),
                                                     FW_REG_READ);
 
-       engine->fw_domains = fw_domains;
+       engine->execlist.fw_domains = fw_domains;
 
-       tasklet_init(&engine->irq_tasklet,
+       tasklet_init(&engine->execlist.irq_tasklet,
                     intel_lrc_irq_handler, (unsigned long)engine);
 
        logical_ring_default_vfuncs(engine);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 02d8974bf9ab..9773583ab844 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -184,6 +184,31 @@ struct i915_priolist {
        int priority;
 };
 
+/* Execlists */
+struct intel_engine_execlist {
+       struct tasklet_struct irq_tasklet;
+       struct i915_priolist default_priolist;
+       bool no_priolist;
+
+       struct execlist_port {
+               struct drm_i915_gem_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, e) ((p) - (e)->execlist.port)
+               GEM_DEBUG_DECL(u32 context_id);
+       } port[2];
+
+       struct rb_root queue;
+       struct rb_node *first;
+
+       unsigned int fw_domains;
+};
+
 #define INTEL_ENGINE_CS_MAX_NAME 8
 
 struct intel_engine_cs {
@@ -372,25 +397,7 @@ struct intel_engine_cs {
                u32     *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
        } semaphore;
 
-       /* Execlists */
-       struct tasklet_struct irq_tasklet;
-       struct i915_priolist default_priolist;
-       bool no_priolist;
-       struct execlist_port {
-               struct drm_i915_gem_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, e) ((p) - (e)->execlist_port)
-               GEM_DEBUG_DECL(u32 context_id);
-       } execlist_port[2];
-       struct rb_root execlist_queue;
-       struct rb_node *execlist_first;
-       unsigned int fw_domains;
+       struct intel_engine_execlist execlist;
 
        /* Contexts are pinned whilst they are active on the GPU. The last
         * context executed remains active whilst the GPU is idle - the
-- 
2.11.0
