The first pass implementation of execlists required a backpointer to the
context to be held in the intel_ringbuffer. However, the context pointer
is available higher in the call stack. Remove the backpointer from the
ring buffer structure and instead pass the context down through the call
stack.

v2: Integrate this changeset with the removal of duplicate request/execlist
queue item members.

Signed-off-by: Nick Hoath <nicholas.ho...@intel.com>
Issue: VIZ-4268
---
 drivers/gpu/drm/i915/i915_gem.c         |  7 ++--
 drivers/gpu/drm/i915/intel_lrc.c        | 67 +++++++++++++++++++++------------
 drivers/gpu/drm/i915/intel_lrc.h        |  8 +++-
 drivers/gpu/drm/i915/intel_ringbuffer.h | 12 +++---
 4 files changed, 56 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e5f521f..bd5a1e2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2451,8 +2451,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
                return -ENOMEM;
 
-       if (i915.enable_execlists) {
-               struct intel_context *ctx = request->ctx;
-               ringbuf = ctx->engine[ring->id].ringbuf;
-       } else
+       if (i915.enable_execlists)
+               ringbuf = request->ctx->engine[ring->id].ringbuf;
+       else
                ringbuf = ring->buffer;
 
@@ -2465,7 +2464,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
         * what.
         */
        if (i915.enable_execlists) {
-               ret = logical_ring_flush_all_caches(ringbuf);
+               ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
                if (ret)
                        return ret;
        } else {
@@ -2487,7 +2486,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
        request_ring_position = intel_ring_get_tail(ringbuf);
 
        if (i915.enable_execlists) {
-               ret = ring->emit_request(ringbuf, request);
+               ret = ring->emit_request(ringbuf, request->ctx, request);
                if (ret)
                        return ret;
        } else {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0e2e33b..4bd9572 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -555,6 +555,8 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
                request->ctx = to;
                request->ring = ring;
-       }
+       } else {
+               WARN_ON(to != request->ctx);
+       }
        req->request = request;
        i915_gem_request_reference(request);
        i915_gem_context_reference(req->request->ctx);
@@ -591,7 +595,8 @@ static int execlists_context_queue(struct intel_engine_cs 
*ring,
        return 0;
 }
 
-static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf)
+static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
+                                             struct intel_context *ctx)
 {
        struct intel_engine_cs *ring = ringbuf->ring;
        uint32_t flush_domains;
@@ -601,7 +606,8 @@ static int logical_ring_invalidate_all_caches(struct 
intel_ringbuffer *ringbuf)
        if (ring->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;
 
-       ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, flush_domains);
+       ret = ring->emit_flush(ringbuf, ctx,
+                              I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;
 
@@ -610,6 +616,7 @@ static int logical_ring_invalidate_all_caches(struct 
intel_ringbuffer *ringbuf)
 }
 
 static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
+                                struct intel_context *ctx,
                                 struct list_head *vmas)
 {
        struct intel_engine_cs *ring = ringbuf->ring;
@@ -637,7 +644,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer 
*ringbuf,
        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
-       return logical_ring_invalidate_all_caches(ringbuf);
+       return logical_ring_invalidate_all_caches(ringbuf, ctx);
 }
 
 /**
@@ -717,13 +724,13 @@ int intel_execlists_submission(struct drm_device *dev, 
struct drm_file *file,
                return -EINVAL;
        }
 
-       ret = execlists_move_to_gpu(ringbuf, vmas);
+       ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
        if (ret)
                return ret;
 
        if (ring == &dev_priv->ring[RCS] &&
            instp_mode != dev_priv->relative_constants_mode) {
-               ret = intel_logical_ring_begin(ringbuf, 4);
+               ret = intel_logical_ring_begin(ringbuf, ctx, 4);
                if (ret)
                        return ret;
 
@@ -736,7 +743,7 @@ int intel_execlists_submission(struct drm_device *dev, 
struct drm_file *file,
                dev_priv->relative_constants_mode = instp_mode;
        }
 
-       ret = ring->emit_bb_start(ringbuf, exec_start, flags);
+       ret = ring->emit_bb_start(ringbuf, ctx, exec_start, flags);
        if (ret)
                return ret;
 
@@ -768,7 +775,8 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
        I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf)
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+                                 struct intel_context *ctx)
 {
        struct intel_engine_cs *ring = ringbuf->ring;
        int ret;
@@ -776,7 +784,7 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer 
*ringbuf)
        if (!ring->gpu_caches_dirty)
                return 0;
 
-       ret = ring->emit_flush(ringbuf, 0, I915_GEM_GPU_DOMAINS);
+       ret = ring->emit_flush(ringbuf, ctx, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
@@ -794,10 +802,10 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer 
*ringbuf)
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+                                          struct intel_context *ctx,
                                           struct drm_i915_gem_request *request)
 {
        struct intel_engine_cs *ring = ringbuf->ring;
-       struct intel_context *ctx = ringbuf->FIXME_lrc_ctx;
 
        intel_logical_ring_advance(ringbuf);
 
@@ -879,6 +887,7 @@ static int logical_ring_wait_request(struct 
intel_ringbuffer *ringbuf,
 }
 
 static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
+                                      struct intel_context *ctx,
                                       int bytes)
 {
        struct intel_engine_cs *ring = ringbuf->ring;
@@ -892,7 +901,7 @@ static int logical_ring_wait_for_space(struct 
intel_ringbuffer *ringbuf,
                return ret;
 
        /* Force the context submission in case we have been skipping it */
-       intel_logical_ring_advance_and_submit(ringbuf, NULL);
+       intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
 
        /* With GEM the hangcheck timer should kick us out of the loop,
         * leaving it early runs the risk of corrupting GEM state (due
@@ -930,13 +939,14 @@ static int logical_ring_wait_for_space(struct 
intel_ringbuffer *ringbuf,
        return ret;
 }
 
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf)
+static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
+                                   struct intel_context *ctx)
 {
        uint32_t __iomem *virt;
        int rem = ringbuf->size - ringbuf->tail;
 
        if (ringbuf->space < rem) {
-               int ret = logical_ring_wait_for_space(ringbuf, rem);
+               int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
 
                if (ret)
                        return ret;
@@ -953,18 +963,19 @@ static int logical_ring_wrap_buffer(struct 
intel_ringbuffer *ringbuf)
        return 0;
 }
 
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf, int bytes)
+static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
+                               struct intel_context *ctx, int bytes)
 {
        int ret;
 
        if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-               ret = logical_ring_wrap_buffer(ringbuf);
+               ret = logical_ring_wrap_buffer(ringbuf, ctx);
                if (unlikely(ret))
                        return ret;
        }
 
        if (unlikely(ringbuf->space < bytes)) {
-               ret = logical_ring_wait_for_space(ringbuf, bytes);
+               ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
                if (unlikely(ret))
                        return ret;
        }
@@ -985,7 +996,8 @@ static int logical_ring_prepare(struct intel_ringbuffer 
*ringbuf, int bytes)
  *
  * Return: non-zero if the ringbuffer is not ready to be written to.
  */
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+                            struct intel_context *ctx, int num_dwords)
 {
        struct intel_engine_cs *ring = ringbuf->ring;
        struct drm_device *dev = ring->dev;
@@ -997,12 +1009,12 @@ int intel_logical_ring_begin(struct intel_ringbuffer 
*ringbuf, int num_dwords)
        if (ret)
                return ret;
 
-       ret = logical_ring_prepare(ringbuf, num_dwords * sizeof(uint32_t));
+       ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
        if (ret)
                return ret;
 
        /* Preallocate the olr before touching the ring */
-       ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx);
+       ret = logical_ring_alloc_request(ring, ctx);
        if (ret)
                return ret;
 
@@ -1095,12 +1107,13 @@ static int gen8_init_render_ring(struct intel_engine_cs 
*ring)
 }
 
 static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
+                             struct intel_context *ctx,
                              u64 offset, unsigned flags)
 {
        bool ppgtt = !(flags & I915_DISPATCH_SECURE);
        int ret;
 
-       ret = intel_logical_ring_begin(ringbuf, 4);
+       ret = intel_logical_ring_begin(ringbuf, ctx, 4);
        if (ret)
                return ret;
 
@@ -1148,6 +1161,7 @@ static void gen8_logical_ring_put_irq(struct 
intel_engine_cs *ring)
 }
 
 static int gen8_emit_flush(struct intel_ringbuffer *ringbuf,
+                          struct intel_context *ctx,
                           u32 invalidate_domains,
                           u32 unused)
 {
@@ -1157,7 +1171,7 @@ static int gen8_emit_flush(struct intel_ringbuffer 
*ringbuf,
        uint32_t cmd;
        int ret;
 
-       ret = intel_logical_ring_begin(ringbuf, 4);
+       ret = intel_logical_ring_begin(ringbuf, ctx, 4);
        if (ret)
                return ret;
 
@@ -1186,6 +1200,7 @@ static int gen8_emit_flush(struct intel_ringbuffer 
*ringbuf,
 }
 
 static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
+                                 struct intel_context *ctx,
                                  u32 invalidate_domains,
                                  u32 flush_domains)
 {
@@ -1212,7 +1227,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer 
*ringbuf,
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
        }
 
-       ret = intel_logical_ring_begin(ringbuf, 6);
+       ret = intel_logical_ring_begin(ringbuf, ctx, 6);
        if (ret)
                return ret;
 
@@ -1237,13 +1252,15 @@ static void gen8_set_seqno(struct intel_engine_cs 
*ring, u32 seqno)
        intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
 }
 
-static int gen8_emit_request(struct intel_ringbuffer *ringbuf, struct drm_i915_gem_request *request)
+static int gen8_emit_request(struct intel_ringbuffer *ringbuf,
+                            struct intel_context *ctx,
+                            struct drm_i915_gem_request *request)
 {
        struct intel_engine_cs *ring = ringbuf->ring;
        u32 cmd;
        int ret;
 
-       ret = intel_logical_ring_begin(ringbuf, 6);
+       ret = intel_logical_ring_begin(ringbuf, ctx, 6);
        if (ret)
                return ret;
 
@@ -1259,7 +1276,7 @@ static int gen8_emit_request(struct intel_ringbuffer 
*ringbuf, struct drm_i915_g
                i915_gem_request_get_seqno(ring->outstanding_lazy_request));
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance_and_submit(ringbuf, request);
+       intel_logical_ring_advance_and_submit(ringbuf, ctx, request);
 
        return 0;
 }
@@ -1536,6 +1553,7 @@ int intel_lr_context_render_state_init(struct 
intel_engine_cs *ring,
                return 0;
 
        ret = ring->emit_bb_start(ringbuf,
+                       ctx,
                        so.ggtt_offset,
                        I915_DISPATCH_SECURE);
        if (ret)
@@ -1785,7 +1803,6 @@ int intel_lr_context_deferred_create(struct intel_context 
*ctx,
        }
 
        ringbuf->ring = ring;
-       ringbuf->FIXME_lrc_ctx = ctx;
 
        ringbuf->size = 32 * PAGE_SIZE;
        ringbuf->effective_size = ringbuf->size;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 76141ce..bc6ff0d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -36,8 +36,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring);
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);
 
-int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
+int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
+                                 struct intel_context *ctx);
 void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
+                                          struct intel_context *ctx,
                                           struct drm_i915_gem_request *request);
 /**
  * intel_logical_ring_advance() - advance the ringbuffer tail
@@ -60,7 +62,9 @@ static inline void intel_logical_ring_emit(struct 
intel_ringbuffer *ringbuf,
        iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
        ringbuf->tail += 4;
 }
-int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);
+int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
+                            struct intel_context *ctx,
+                            int num_dwords);
 
 /* Logical Ring Contexts */
 int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b669f68..6acc254 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -99,13 +99,6 @@ struct intel_ringbuffer {
 
        struct intel_engine_cs *ring;
 
-       /*
-        * FIXME: This backpointer is an artifact of the history of how the
-        * execlist patches came into being. It will get removed once the basic
-        * code has landed.
-        */
-       struct intel_context *FIXME_lrc_ctx;
-
        u32 head;
        u32 tail;
        int space;
@@ -123,6 +116,8 @@ struct intel_ringbuffer {
        u32 last_retired_head;
 };
 
+struct intel_context;
+
 struct  intel_engine_cs {
        const char      *name;
        enum intel_ring_id {
@@ -239,11 +234,14 @@ struct  intel_engine_cs {
        u8 next_context_status_buffer;
        u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int             (*emit_request)(struct intel_ringbuffer *ringbuf,
+                                       struct intel_context *ctx,
                                        struct drm_i915_gem_request *request);
        int             (*emit_flush)(struct intel_ringbuffer *ringbuf,
+                                     struct intel_context *ctx,
                                      u32 invalidate_domains,
                                      u32 flush_domains);
        int             (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
+                                        struct intel_context *ctx,
                                         u64 offset, unsigned flags);
 
        /**
-- 
2.1.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to