To disambiguate between the pointer to the intel_engine_cs (called
"ring") and the pointer to the intel_ringbuffer (called "ringbuf"),
rename the former: s/ring/engine/.

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
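
For review context, a minimal sketch of the naming collision being fixed.
Only the two members in question are shown (the real struct carries many
more fields; see the i915_gem_request.h hunk below for the actual change):

/* Before: two unrelated objects were both colloquially "the ring". */
struct drm_i915_gem_request {
	struct intel_engine_cs *ring;      /* the hardware engine (RCS, BCS, ...) */
	struct intel_ringbuffer *ringbuf;  /* the buffer commands are emitted into */
	/* ... other members elided ... */
};

/* After: the engine pointer is named for what it points to. */
struct drm_i915_gem_request {
	struct intel_engine_cs *engine;    /* was "ring" */
	struct intel_ringbuffer *ringbuf;  /* unchanged */
	/* ... other members elided ... */
};
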
 drivers/gpu/drm/i915/i915_debugfs.c          |  11 +--
 drivers/gpu/drm/i915/i915_drv.h              |   2 +-
 drivers/gpu/drm/i915/i915_gem.c              |  32 +++----
 drivers/gpu/drm/i915/i915_gem_context.c      |  70 +++++++-------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   8 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c          |  47 +++++-----
 drivers/gpu/drm/i915/i915_gem_render_state.c |  18 ++--
 drivers/gpu/drm/i915/i915_gem_request.c      |  53 ++++-------
 drivers/gpu/drm/i915/i915_gem_request.h      |  10 +-
 drivers/gpu/drm/i915/i915_gpu_error.c        |   3 +-
 drivers/gpu/drm/i915/i915_guc_submission.c   |   8 +-
 drivers/gpu/drm/i915/i915_trace.h            |  32 +++----
 drivers/gpu/drm/i915/intel_breadcrumbs.c     |   2 +-
 drivers/gpu/drm/i915/intel_display.c         |  10 +-
 drivers/gpu/drm/i915/intel_lrc.c             | 134 +++++++++++++--------------
 drivers/gpu/drm/i915/intel_mocs.c            |  13 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.c      |  62 ++++++-------
 17 files changed, 240 insertions(+), 275 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 387ae77d3c29..018076c89247 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -185,8 +185,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                seq_printf(m, " (%s mappable)", s);
        }
        if (obj->last_write_req != NULL)
-               seq_printf(m, " (%s)",
-                          i915_gem_request_get_ring(obj->last_write_req)->name);
+               seq_printf(m, " (%s)", obj->last_write_req->engine->name);
        if (obj->frontbuffer_bits)
                seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
@@ -593,14 +592,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                                           pipe, plane);
                        }
                        if (work->flip_queued_req) {
-                               struct intel_engine_cs *ring =
-                                       i915_gem_request_get_ring(work->flip_queued_req);
+                               struct intel_engine_cs *engine =
+                                       work->flip_queued_req->engine;
 
                                seq_printf(m, "Flip queued on %s at seqno %x, 
next seqno %x [current breadcrumb %x], completed? %d\n",
-                                          ring->name,
+                                          engine->name,
                                           i915_gem_request_get_seqno(work->flip_queued_req),
                                           dev_priv->next_seqno,
-                                          intel_ring_get_seqno(ring),
+                                          intel_ring_get_seqno(engine),
                                           i915_gem_request_completed(work->flip_queued_req));
                        } else
                                seq_printf(m, "Flip not associated with any 
ring\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 58e9e5e50769..baede4517c70 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3410,7 +3410,7 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 }
 static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = req->ring;
+       struct intel_engine_cs *engine = req->engine;
 
        /* Before we do the heavier coherent read of the seqno,
         * check the value (hopefully) in the CPU cacheline.
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 247731672cb1..6622c9bb3af8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1122,7 +1122,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                        if (ret)
                                return ret;
 
-                       i = obj->last_write_req->ring->id;
+                       i = obj->last_write_req->engine->id;
                        if (obj->last_read_req[i] == obj->last_write_req)
                                i915_gem_object_retire__read(obj, i);
                        else
@@ -1149,7 +1149,7 @@ static void
 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
                               struct drm_i915_gem_request *req)
 {
-       int ring = req->ring->id;
+       int ring = req->engine->id;
 
        if (obj->last_read_req[ring] == req)
                i915_gem_object_retire__read(obj, ring);
@@ -2062,17 +2062,15 @@ void i915_vma_move_to_active(struct i915_vma *vma,
                             struct drm_i915_gem_request *req)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       struct intel_engine_cs *ring;
-
-       ring = i915_gem_request_get_ring(req);
+       struct intel_engine_cs *engine = req->engine;
 
        /* Add a reference if we're newly entering the active list. */
        if (obj->active == 0)
                drm_gem_object_reference(&obj->base);
-       obj->active |= intel_ring_flag(ring);
+       obj->active |= intel_ring_flag(engine);
 
-       list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
-       i915_gem_request_assign(&obj->last_read_req[ring->id], req);
+       list_move_tail(&obj->ring_list[engine->id], &engine->active_list);
+       i915_gem_request_assign(&obj->last_read_req[engine->id], req);
 
        list_move_tail(&vma->mm_list, &vma->vm->active_list);
 }
@@ -2081,7 +2079,7 @@ static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
        GEM_BUG_ON(obj->last_write_req == NULL);
-       GEM_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
+       GEM_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->engine)));
 
        i915_gem_request_assign(&obj->last_write_req, NULL);
        intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2098,7 +2096,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
        list_del_init(&obj->ring_list[ring]);
        i915_gem_request_assign(&obj->last_read_req[ring], NULL);
 
-       if (obj->last_write_req && obj->last_write_req->ring->id == ring)
+       if (obj->last_write_req && obj->last_write_req->engine->id == ring)
                i915_gem_object_retire__write(obj);
 
        obj->active &= ~(1 << ring);
@@ -2560,7 +2558,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
        struct intel_engine_cs *from;
        int ret;
 
-       from = i915_gem_request_get_ring(from_req);
+       from = from_req->engine;
        if (to == from)
                return 0;
 
@@ -3737,7 +3735,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        BUILD_BUG_ON(I915_NUM_RINGS > 16);
        args->busy = obj->active << 16;
        if (obj->last_write_req)
-               args->busy |= obj->last_write_req->ring->id;
+               args->busy |= obj->last_write_req->engine->id;
 
 unref:
        drm_gem_object_unreference(&obj->base);
@@ -4068,7 +4066,6 @@ err:
 
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
-       struct intel_ringbuffer *ring = req->ringbuf;
        struct drm_i915_private *dev_priv = req->i915;
        u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
        int i, ret;
@@ -4086,12 +4083,11 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
         * at initialization time.
         */
        for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-               intel_ring_emit(ring, remap_info[i]);
+               intel_ring_emit(req->ringbuf, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit_reg(req->ringbuf, GEN7_L3LOG(slice, i));
+               intel_ring_emit(req->ringbuf, remap_info[i]);
        }
-
-       intel_ring_advance(ring);
+       intel_ring_advance(req->ringbuf);
 
        return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d58de7e084dc..dece033cf604 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -450,14 +450,14 @@ void i915_gem_context_fini(struct drm_device *dev)
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
        if (i915.enable_execlists) {
-               if (ring->init_context == NULL)
+               if (engine->init_context == NULL)
                        return 0;
 
-               ret = ring->init_context(req);
+               ret = engine->init_context(req);
        } else
                ret = i915_switch_context(req);
 
@@ -534,7 +534,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
         * itlb_before_ctx_switch.
         */
        if (IS_GEN6(req->i915)) {
-               ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+               ret = req->engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
                if (ret)
                        return ret;
        }
@@ -562,7 +562,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
                        for_each_ring(signaller, req->i915, i) {
-                               if (signaller == req->ring)
+                               if (signaller == req->engine)
                                        continue;
 
                                intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
@@ -587,7 +587,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
                        for_each_ring(signaller, req->i915, i) {
-                               if (signaller == req->ring)
+                               if (signaller == req->engine)
                                        continue;
 
                                intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
@@ -657,24 +657,18 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
 static int do_switch(struct drm_i915_gem_request *req)
 {
        struct intel_context *to = req->ctx;
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct intel_context *from = ring->last_context;
+       struct intel_engine_cs *engine = req->engine;
+       struct intel_context *from = engine->last_context;
        u32 hw_flags = 0;
        int ret, i;
 
-       if (from != NULL && ring == &dev_priv->ring[RCS]) {
-               BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
-               BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
-       }
-
-       if (should_skip_switch(ring, from, to))
+       if (should_skip_switch(engine, from, to))
                return 0;
 
        /* Trying to pin first makes error handling easier. */
-       if (ring == &dev_priv->ring[RCS]) {
+       if (engine->id == RCS) {
                ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-                                           get_context_alignment(ring->dev), 0);
+                                           get_context_alignment(engine->dev), 0);
                if (ret)
                        return ret;
        }
@@ -684,23 +678,23 @@ static int do_switch(struct drm_i915_gem_request *req)
         * evict_everything - as a last ditch gtt defrag effort that also
         * switches to the default context. Hence we need to reload from here.
         */
-       from = ring->last_context;
+       from = engine->last_context;
 
-       if (needs_pd_load_pre(ring, to)) {
+       if (needs_pd_load_pre(engine, to)) {
                /* Older GENs and non render rings still want the load first,
                 * "PP_DCLV followed by PP_DIR_BASE register through Load
                 * Register Immediate commands in Ring Buffer before submitting
                 * a context."*/
-               trace_switch_mm(ring, to);
+               trace_switch_mm(engine, to);
                ret = to->ppgtt->switch_mm(to->ppgtt, req);
                if (ret)
                        goto unpin_out;
 
                /* Doing a PD load always reloads the page dirs */
-               to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+               to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
        }
 
-       if (ring != &dev_priv->ring[RCS]) {
+       if (engine->id != RCS) {
                if (from)
                        i915_gem_context_unreference(from);
                goto done;
@@ -725,14 +719,14 @@ static int do_switch(struct drm_i915_gem_request *req)
                 * space. This means we must enforce that a page table load
                 * occur when this occurs. */
        } else if (to->ppgtt &&
-                  (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) {
+                  (intel_ring_flag(engine) & to->ppgtt->pd_dirty_rings)) {
                hw_flags |= MI_FORCE_RESTORE;
-               to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring);
+               to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(engine);
        }
 
        /* We should never emit switch_mm more than once */
-       WARN_ON(needs_pd_load_pre(ring, to) &&
-               needs_pd_load_post(ring, to, hw_flags));
+       WARN_ON(needs_pd_load_pre(engine, to) &&
+               needs_pd_load_post(engine, to, hw_flags));
 
        ret = mi_set_context(req, hw_flags);
        if (ret)
@@ -741,8 +735,8 @@ static int do_switch(struct drm_i915_gem_request *req)
        /* GEN8 does *not* require an explicit reload if the PDPs have been
         * setup, and we do not wish to move them.
         */
-       if (needs_pd_load_post(ring, to, hw_flags)) {
-               trace_switch_mm(ring, to);
+       if (needs_pd_load_post(engine, to, hw_flags)) {
+               trace_switch_mm(engine, to);
                ret = to->ppgtt->switch_mm(to->ppgtt, req);
                /* The hardware context switch is emitted, but we haven't
                 * actually changed the state - so it's probably safe to bail
@@ -768,8 +762,8 @@ static int do_switch(struct drm_i915_gem_request *req)
        }
 
        if (!to->legacy_hw_ctx.initialized) {
-               if (ring->init_context) {
-                       ret = ring->init_context(req);
+               if (engine->init_context) {
+                       ret = engine->init_context(req);
                        if (ret)
                                goto unpin_out;
                }
@@ -801,12 +795,11 @@ static int do_switch(struct drm_i915_gem_request *req)
 
 done:
        i915_gem_context_reference(to);
-       ring->last_context = to;
-
+       engine->last_context = to;
        return 0;
 
 unpin_out:
-       if (ring->id == RCS)
+       if (engine->id == RCS)
                i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
        return ret;
 }
@@ -826,17 +819,18 @@ unpin_out:
  */
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
 
        WARN_ON(i915.enable_execlists);
        WARN_ON(!mutex_is_locked(&req->i915->dev->struct_mutex));
 
        if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-               if (req->ctx != ring->last_context) {
+               struct intel_engine_cs *engine = req->engine;
+
+               if (req->ctx != engine->last_context) {
                        i915_gem_context_reference(req->ctx);
-                       if (ring->last_context)
-                               i915_gem_context_unreference(ring->last_context);
-                       ring->last_context = req->ctx;
+                       if (engine->last_context)
+                               i915_gem_context_unreference(engine->last_context);
+                       engine->last_context = req->ctx;
                }
                return 0;
        }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 603a247ac333..e7df91f9a51f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -942,7 +942,7 @@ static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                                struct list_head *vmas)
 {
-       const unsigned other_rings = ~intel_ring_flag(req->ring);
+       const unsigned other_rings = ~intel_ring_flag(req->engine);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -952,7 +952,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (obj->active & other_rings) {
-                       ret = i915_gem_object_sync(obj, req->ring, &req);
+                       ret = i915_gem_object_sync(obj, req->engine, &req);
                        if (ret)
                                return ret;
                }
@@ -964,7 +964,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
        }
 
        if (flush_chipset)
-               i915_gem_chipset_flush(req->ring->dev);
+               i915_gem_chipset_flush(req->engine->dev);
 
        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();
@@ -1151,7 +1151,7 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
        struct intel_ringbuffer *ring = req->ringbuf;
        int ret, i;
 
-       if (!IS_GEN7(req->i915) || req->ring->id != RCS) {
+       if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
                DRM_DEBUG("sol reset is gen7/rcs only\n");
                return -EINVAL;
        }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 98841b05f764..cb7cb59d4c4a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -666,10 +666,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
                return ret;
 
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-       intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(req->ring, entry));
+       intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(req->engine, entry));
        intel_ring_emit(ring, upper_32_bits(addr));
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-       intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(req->ring, entry));
+       intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(req->engine, entry));
        intel_ring_emit(ring, lower_32_bits(addr));
        intel_ring_advance(ring);
 
@@ -1652,7 +1652,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
        int ret;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = req->engine->flush(req,
+                                I915_GEM_GPU_DOMAINS,
+                                I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
@@ -1661,9 +1663,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
                return ret;
 
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-       intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(req->ring));
+       intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(req->engine));
        intel_ring_emit(ring, PP_DIR_DCLV_2G);
-       intel_ring_emit_reg(ring, RING_PP_DIR_BASE(req->ring));
+       intel_ring_emit_reg(ring, RING_PP_DIR_BASE(req->engine));
        intel_ring_emit(ring, get_pd_offset(ppgtt));
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
@@ -1674,11 +1676,10 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
                          struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+       struct drm_i915_private *dev_priv = req->i915;
 
-       I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-       I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+       I915_WRITE(RING_PP_DIR_DCLV(req->engine), PP_DIR_DCLV_2G);
+       I915_WRITE(RING_PP_DIR_BASE(req->engine), get_pd_offset(ppgtt));
        return 0;
 }
 
@@ -1689,7 +1690,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
        int ret;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = req->engine->flush(req,
+                                I915_GEM_GPU_DOMAINS,
+                                I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
@@ -1698,16 +1701,18 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
                return ret;
 
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-       intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(req->ring));
+       intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(req->engine));
        intel_ring_emit(ring, PP_DIR_DCLV_2G);
-       intel_ring_emit_reg(ring, RING_PP_DIR_BASE(req->ring));
+       intel_ring_emit_reg(ring, RING_PP_DIR_BASE(req->engine));
        intel_ring_emit(ring, get_pd_offset(ppgtt));
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
 
        /* XXX: RCS is the only one to auto invalidate the TLBs? */
-       if (req->ring->id != RCS) {
-               ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       if (req->engine->id != RCS) {
+               ret = req->engine->flush(req,
+                                        I915_GEM_GPU_DOMAINS,
+                                        I915_GEM_GPU_DOMAINS);
                if (ret)
                        return ret;
        }
@@ -1718,15 +1723,12 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
                          struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
+       struct drm_i915_private *dev_priv = req->i915;
 
-       I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-       I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+       I915_WRITE(RING_PP_DIR_DCLV(req->engine), PP_DIR_DCLV_2G);
+       I915_WRITE(RING_PP_DIR_BASE(req->engine), get_pd_offset(ppgtt));
 
-       POSTING_READ(RING_PP_DIR_DCLV(ring));
+       POSTING_READ(RING_PP_DIR_DCLV(req->engine));
 
        return 0;
 }
@@ -2169,8 +2171,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 
 int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
 {
-       struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
-       struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+       struct i915_hw_ppgtt *ppgtt = req->i915->mm.aliasing_ppgtt;
 
        if (i915.enable_execlists)
                return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index fc7e6d5c6251..bee3f0ccd0cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -198,25 +198,25 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
        struct render_state so;
        int ret;
 
-       ret = i915_gem_render_state_prepare(req->ring, &so);
+       ret = i915_gem_render_state_prepare(req->engine, &so);
        if (ret)
                return ret;
 
        if (so.rodata == NULL)
                return 0;
 
-       ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
-                                            so.rodata->batch_items * 4,
-                                            I915_DISPATCH_SECURE);
+       ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
+                                              so.rodata->batch_items * 4,
+                                              I915_DISPATCH_SECURE);
        if (ret)
                goto out;
 
        if (so.aux_batch_size > 8) {
-               ret = req->ring->dispatch_execbuffer(req,
-                                                    (so.ggtt_offset +
-                                                     so.aux_batch_offset),
-                                                    so.aux_batch_size,
-                                                    I915_DISPATCH_SECURE);
+               ret = req->engine->dispatch_execbuffer(req,
+                                                      (so.ggtt_offset +
+                                                       so.aux_batch_offset),
+                                                      so.aux_batch_size,
+                                                      I915_DISPATCH_SECURE);
                if (ret)
                        goto out;
        }
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 85067069995e..8adf2c134048 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -37,7 +37,7 @@ static const char *i915_fence_get_driver_name(struct fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct fence *fence)
 {
-       return to_i915_request(fence)->ring->name;
+       return to_i915_request(fence)->engine->name;
 }
 
 static bool i915_fence_signaled(struct fence *fence)
@@ -90,7 +90,7 @@ static void i915_fence_timeline_value_str(struct fence *fence, char *str,
                                          int size)
 {
        snprintf(str, size, "%u",
-                intel_ring_get_seqno(to_i915_request(fence)->ring));
+                intel_ring_get_seqno(to_i915_request(fence)->engine));
 }
 
 static void i915_fence_release(struct fence *fence)
@@ -195,11 +195,11 @@ i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
        return 0;
 }
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
+int i915_gem_request_alloc(struct intel_engine_cs *engine,
                           struct intel_context *ctx,
                           struct drm_i915_gem_request **req_out)
 {
-       struct drm_i915_private *dev_priv = ring->i915;
+       struct drm_i915_private *dev_priv = engine->i915;
        unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
        u32 seqno;
@@ -230,11 +230,11 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
        fence_init(&req->fence,
                   &i915_fence_ops,
                   &req->lock,
-                  ring->fence_context,
+                  engine->fence_context,
                   seqno);
 
        req->i915 = dev_priv;
-       req->ring = ring;
+       req->engine = engine;
        req->reset_counter = reset_counter;
        req->ctx  = ctx;
        i915_gem_context_reference(req->ctx);
@@ -279,7 +279,6 @@ err:
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
 {
-       struct drm_i915_private *dev_private;
        struct drm_i915_file_private *file_priv;
 
        WARN_ON(!req || !file || req->file_priv);
@@ -290,7 +289,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
        if (req->file_priv)
                return -EINVAL;
 
-       dev_private = req->ring->dev->dev_private;
        file_priv = file->driver_priv;
 
        spin_lock(&file_priv->mm.lock);
@@ -332,7 +330,7 @@ void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
        intel_ring_reserved_space_cancel(req->ringbuf);
        if (i915.enable_execlists) {
-               if (req->ctx != req->ring->default_context)
+               if (req->ctx != req->engine->default_context)
                        intel_lr_context_unpin(req);
        }
        __i915_gem_request_release(req);
@@ -358,7 +356,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 void
 i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *engine = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;
 
        lockdep_assert_held(&engine->dev->struct_mutex);
@@ -403,8 +401,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
                        struct drm_i915_gem_object *obj,
                        bool flush_caches)
 {
-       struct intel_engine_cs *ring;
-       struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
        int ret;
@@ -412,8 +408,6 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        if (WARN_ON(request == NULL))
                return;
 
-       ring = request->ring;
-       dev_priv = ring->dev->dev_private;
        ringbuf = request->ringbuf;
 
        /*
@@ -448,9 +442,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        request->postfix = intel_ring_get_tail(ringbuf);
 
        if (i915.enable_execlists)
-               ret = ring->emit_request(request);
+               ret = request->engine->emit_request(request);
        else {
-               ret = ring->add_request(request);
+               ret = request->engine->add_request(request);
 
                request->tail = intel_ring_get_tail(ringbuf);
        }
@@ -468,13 +462,13 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        request->batch_obj = obj;
 
        request->emitted_jiffies = jiffies;
-       request->previous_seqno = ring->last_submitted_seqno;
-       ring->last_submitted_seqno = request->fence.seqno;
-       list_add_tail(&request->list, &ring->request_list);
+       request->previous_seqno = request->engine->last_submitted_seqno;
+       request->engine->last_submitted_seqno = request->fence.seqno;
+       list_add_tail(&request->list, &request->engine->request_list);
 
        trace_i915_gem_request_add(request);
 
-       i915_gem_mark_busy(dev_priv);
+       i915_gem_mark_busy(request->i915);
 
        /* Sanity check that the reserved size was large enough. */
        intel_ring_reserved_space_end(ringbuf);
@@ -627,7 +621,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
        set_task_state(wait.task, state);
 
        /* Optimistic spin for the next ~jiffie before touching IRQs */
-       if (intel_engine_add_wait(req->ring, &wait)) {
+       if (intel_engine_add_wait(req->engine, &wait)) {
                if (__i915_spin_request(req, &wait, state))
                        goto complete;
 
@@ -635,7 +629,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                 * as we enabled it, we need to kick ourselves to do a
                 * coherent check on the seqno before we sleep.
                 */
-               if (intel_engine_enable_wait_irq(req->ring, &wait))
+               if (intel_engine_enable_wait_irq(req->engine, &wait))
                        goto wakeup;
        }
 
@@ -670,7 +664,7 @@ wakeup:
        }
 
 complete:
-       intel_engine_remove_wait(req->ring, &wait);
+       intel_engine_remove_wait(req->engine, &wait);
        __set_task_state(wait.task, TASK_RUNNING);
        trace_i915_gem_request_wait_end(req);
 
@@ -691,7 +685,7 @@ complete:
        }
 
        if (ret == 0 && !IS_ERR_OR_NULL(rps) &&
-           req->fence.seqno == req->ring->last_submitted_seqno) {
+           req->fence.seqno == req->engine->last_submitted_seqno) {
                /* The GPU is now idle and this client has stalled.
                 * Since no other client has submitted a request in the
                 * meantime, assume that this client is the only one
@@ -717,20 +711,13 @@ complete:
 int
 i915_wait_request(struct drm_i915_gem_request *req)
 {
-       struct drm_device *dev;
-       struct drm_i915_private *dev_priv;
-       bool interruptible;
        int ret;
 
        BUG_ON(req == NULL);
 
-       dev = req->ring->dev;
-       dev_priv = dev->dev_private;
-       interruptible = dev_priv->mm.interruptible;
-
-       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+       BUG_ON(!mutex_is_locked(&req->i915->dev->struct_mutex));
 
-       ret = __i915_wait_request(req, interruptible, NULL, NULL);
+       ret = __i915_wait_request(req, req->i915->mm.interruptible, NULL, NULL);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 6b3de827929a..802862e5007d 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -46,7 +46,7 @@ struct drm_i915_gem_request {
 
        /** On Which ring this request was generated */
        struct drm_i915_private *i915;
-       struct intel_engine_cs *ring;
+       struct intel_engine_cs *engine;
        unsigned reset_counter;
 
         /** GEM sequence number associated with the previous request,
@@ -133,9 +133,9 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
 }
 
 static inline struct intel_engine_cs *
-i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+i915_gem_request_get_engine(struct drm_i915_gem_request *req)
 {
-       return req ? req->ring : NULL;
+       return req ? req->engine : NULL;
 }
 
 static inline struct drm_i915_gem_request *
@@ -198,13 +198,13 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 
 static inline bool i915_gem_request_started(struct drm_i915_gem_request *req)
 {
-       return i915_seqno_passed(intel_ring_get_seqno(req->ring),
+       return i915_seqno_passed(intel_ring_get_seqno(req->engine),
                                 req->previous_seqno);
 }
 
 static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
 {
-       return i915_seqno_passed(intel_ring_get_seqno(req->ring),
+       return i915_seqno_passed(intel_ring_get_seqno(req->engine),
                                 req->fence.seqno);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 84ce91275fdd..5bf208d8009e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -721,8 +721,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
        err->dirty = obj->dirty;
        err->purgeable = obj->madv != I915_MADV_WILLNEED;
        err->userptr = obj->userptr.mm != NULL;
-       err->ring = obj->last_write_req ?
-                       i915_gem_request_get_ring(obj->last_write_req)->id : -1;
+       err->ring = obj->last_write_req ?  obj->last_write_req->engine->id : -1;
        err->cache_level = obj->cache_level;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 56d3064d32ed..eaf680ce5c9c 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -510,7 +510,7 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
 static int guc_add_workqueue_item(struct i915_guc_client *gc,
                                  struct drm_i915_gem_request *rq)
 {
-       enum intel_ring_id ring_id = rq->ring->id;
+       enum intel_ring_id ring_id = rq->engine->id;
        struct guc_wq_item *wqi;
        void *base;
        u32 tail, wq_len, wq_off, space;
@@ -548,7 +548,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
                        WQ_NO_WCFLUSH_WAIT;
 
        /* The GuC wants only the low-order word of the context descriptor */
-       wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring);
+       wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->engine);
 
        /* The GuC firmware wants the tail index in QWords, not bytes */
        tail = rq->ringbuf->tail >> 3;
@@ -565,7 +565,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
 /* Update the ringbuffer pointer in a saved context image */
 static void lr_context_update(struct drm_i915_gem_request *rq)
 {
-       enum intel_ring_id ring_id = rq->ring->id;
+       enum intel_ring_id ring_id = rq->engine->id;
        struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
        struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
        struct page *page;
@@ -594,7 +594,7 @@ int i915_guc_submit(struct i915_guc_client *client,
                    struct drm_i915_gem_request *rq)
 {
        struct intel_guc *guc = client->guc;
-       enum intel_ring_id ring_id = rq->ring->id;
+       enum intel_ring_id ring_id = rq->engine->id;
        int q_ret, b_ret;
 
        /* Need this because of the deferred pin ctx and ring */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dc2ff5cac2f4..0204ff72b3e4 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -475,7 +475,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
            TP_fast_assign(
                           __entry->dev = from->dev->primary->index;
                           __entry->sync_from = from->id;
-                          __entry->sync_to = to_req->ring->id;
+                          __entry->sync_to = to_req->engine->id;
                           __entry->seqno = i915_gem_request_get_seqno(req);
                           ),
 
@@ -497,11 +497,9 @@ TRACE_EVENT(i915_gem_ring_dispatch,
                             ),
 
            TP_fast_assign(
-                          struct intel_engine_cs *ring =
-                                               i915_gem_request_get_ring(req);
-                          __entry->dev = ring->dev->primary->index;
-                          __entry->ring = ring->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->dev = req->i915->dev->primary->index;
+                          __entry->ring = req->engine->id;
+                          __entry->seqno = req->fence.seqno;
                           __entry->flags = flags;
                           fence_enable_sw_signaling(&req->fence);
                           ),
@@ -522,8 +520,8 @@ TRACE_EVENT(i915_gem_ring_flush,
                             ),
 
            TP_fast_assign(
-                          __entry->dev = req->ring->dev->primary->index;
-                          __entry->ring = req->ring->id;
+                          __entry->dev = req->engine->dev->primary->index;
+                          __entry->ring = req->engine->id;
                           __entry->invalidate = invalidate;
                           __entry->flush = flush;
                           ),
@@ -544,11 +542,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
                             ),
 
            TP_fast_assign(
-                          struct intel_engine_cs *ring =
-                                               i915_gem_request_get_ring(req);
-                          __entry->dev = ring->dev->primary->index;
-                          __entry->ring = ring->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->dev = req->i915->dev->primary->index;
+                          __entry->ring = req->engine->id;
+                          __entry->seqno = req->fence.seqno;
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -608,13 +604,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
             * less desirable.
             */
            TP_fast_assign(
-                          struct intel_engine_cs *ring =
-                                               i915_gem_request_get_ring(req);
-                          __entry->dev = ring->dev->primary->index;
-                          __entry->ring = ring->id;
-                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->dev = req->i915->dev->primary->index;
+                          __entry->ring = req->engine->id;
+                          __entry->seqno = req->fence.seqno;
                           __entry->blocking =
-                                    mutex_is_locked(&ring->dev->struct_mutex);
+                                    mutex_is_locked(&req->i915->dev->struct_mutex);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index aca1b72edcd8..5ba8b4cd8a18 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -419,7 +419,7 @@ static int intel_breadcrumbs_signaler(void *arg)
 
 int intel_engine_enable_signaling(struct drm_i915_gem_request *request)
 {
-       struct intel_engine_cs *engine = request->ring;
+       struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node *parent, **p;
        struct task_struct *task;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b28e783f6f04..323b0d905c89 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11215,7 +11215,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        }
 
        len = 4;
-       if (req->ring->id == RCS) {
+       if (req->engine->id == RCS) {
                len += 6;
                /*
                 * On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11253,7 +11253,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
         * for the RCS also doesn't appear to drop events. Setting the DERRMR
         * to zero does lead to lockups within MI_DISPLAY_FLIP.
         */
-       if (req->ring->id == RCS) {
+       if (req->engine->id == RCS) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit_reg(ring, DERRMR);
                intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
@@ -11266,7 +11266,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
                                              MI_SRM_LRM_GLOBAL_GTT);
                intel_ring_emit_reg(ring, DERRMR);
-               intel_ring_emit(ring, req->ring->scratch.gtt_offset + 256);
+               intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256);
                if (IS_GEN8(req->i915)) {
                        intel_ring_emit(ring, 0);
                        intel_ring_emit(ring, MI_NOOP);
@@ -11310,7 +11310,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring,
                                                       false))
                return true;
        else
-               return ring != i915_gem_request_get_ring(obj->last_write_req);
+               return ring != i915_gem_request_get_engine(obj->last_write_req);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11654,7 +11654,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                ring = &dev_priv->ring[BCS];
        } else if (INTEL_INFO(dev)->gen >= 7) {
-               ring = i915_gem_request_get_ring(obj->last_write_req);
+               ring = i915_gem_request_get_engine(obj->last_write_req);
                if (ring == NULL || ring->id != RCS)
                        ring = &dev_priv->ring[BCS];
        } else {
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4f1944929330..1b70a76df31d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -287,11 +287,9 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
 
 static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = ring->dev;
-
-       return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-               IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-              (ring->id == VCS || ring->id == VCS2);
+       return (IS_SKL_REVID(ring->dev, 0, SKL_REVID_B0) ||
+               IS_BXT_REVID(ring->dev, 0, BXT_REVID_A1)) &&
+               (ring->id == VCS || ring->id == VCS2);
 }
 
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
@@ -305,8 +303,8 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
        WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
 
        desc = GEN8_CTX_VALID;
-       desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-       if (IS_GEN8(ctx_obj->base.dev))
+       desc |= GEN8_CTX_ADDRESSING_MODE(ring->i915) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
+       if (IS_GEN8(ring->i915))
                desc |= GEN8_CTX_L3LLC_COHERENT;
        desc |= GEN8_CTX_PRIVILEGE;
        desc |= lrca;
@@ -328,41 +326,40 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
                                 struct drm_i915_gem_request *rq1)
 {
 
-       struct intel_engine_cs *ring = rq0->ring;
+       struct intel_engine_cs *engine = rq0->engine;
        struct drm_i915_private *dev_priv = rq0->i915;
        uint64_t desc[2];
 
        if (rq1) {
-               desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring);
+               desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine);
                rq1->elsp_submitted++;
        } else {
                desc[1] = 0;
        }
 
-       desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring);
+       desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine);
        rq0->elsp_submitted++;
 
        /* You must always write both descriptors in the order below. */
        spin_lock(&dev_priv->uncore.lock);
        intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
-       I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1]));
-       I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1]));
+       I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1]));
+       I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1]));
 
-       I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0]));
+       I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0]));
        /* The context is automatically loaded after the following */
-       I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
+       I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0]));
 
        /* ELSP is a wo register, use another nearby reg for posting */
-       POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
+       POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine));
        intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
        spin_unlock(&dev_priv->uncore.lock);
 }
 
 static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
-       struct intel_engine_cs *ring = rq->ring;
        struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[rq->engine->id].state;
        struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
        struct page *page;
        uint32_t *reg_state;
@@ -377,7 +374,7 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
        reg_state[CTX_RING_TAIL+1] = rq->tail;
        reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
-       if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+       if (ppgtt && !USES_FULL_48BIT_PPGTT(rq->i915)) {
                /* True 32b PPGTT with dynamic page allocation: update PDP
                 * registers and point the unallocated PDPs to scratch page.
                 * PML4 is allocated during ppgtt init, so this is not needed
@@ -582,22 +579,22 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
 {
-       struct intel_engine_cs *ring = request->ring;
+       struct intel_engine_cs *engine = request->engine;
        struct drm_i915_gem_request *cursor;
        int num_elements = 0;
 
        i915_gem_request_get(request);
 
-       spin_lock_irq(&ring->execlist_lock);
+       spin_lock_irq(&engine->execlist_lock);
 
-       list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+       list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
                if (++num_elements > 2)
                        break;
 
        if (num_elements > 2) {
                struct drm_i915_gem_request *tail_req;
 
-               tail_req = list_last_entry(&ring->execlist_queue,
+               tail_req = list_last_entry(&engine->execlist_queue,
                                           struct drm_i915_gem_request,
                                           execlist_link);
 
@@ -606,41 +603,41 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
                                "More than 2 already-submitted reqs queued\n");
                        list_del(&tail_req->execlist_link);
                        list_add_tail(&tail_req->execlist_link,
-                               &ring->execlist_retired_req_list);
+                               &engine->execlist_retired_req_list);
                }
        }
 
-       list_add_tail(&request->execlist_link, &ring->execlist_queue);
+       list_add_tail(&request->execlist_link, &engine->execlist_queue);
        if (num_elements == 0)
-               execlists_context_unqueue(ring);
+               execlists_context_unqueue(engine);
 
-       spin_unlock_irq(&ring->execlist_lock);
+       spin_unlock_irq(&engine->execlist_lock);
 
        return 0;
 }
 
 static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        uint32_t flush_domains;
        int ret;
 
        flush_domains = 0;
-       if (ring->gpu_caches_dirty)
+       if (engine->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;
 
-       ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+       ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;
 
-       ring->gpu_caches_dirty = false;
+       engine->gpu_caches_dirty = false;
        return 0;
 }
 
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
                                 struct list_head *vmas)
 {
-       const unsigned other_rings = ~intel_ring_flag(req->ring);
+       const unsigned other_rings = ~intel_ring_flag(req->engine);
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
@@ -650,7 +647,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (obj->active & other_rings) {
-                       ret = i915_gem_object_sync(obj, req->ring, &req);
+                       ret = i915_gem_object_sync(obj, req->engine, &req);
                        if (ret)
                                return ret;
                }
@@ -674,9 +671,9 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 {
        int ret;
 
-       request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
+       request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
 
-       if (request->ctx != request->ring->default_context) {
+       if (request->ctx != request->engine->default_context) {
                ret = intel_lr_context_pin(request);
                if (ret)
                        return ret;
@@ -865,17 +862,17 @@ void intel_logical_ring_stop(struct intel_engine_cs *ring)
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
-       if (!ring->gpu_caches_dirty)
+       if (!engine->gpu_caches_dirty)
                return 0;
 
-       ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
+       ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
-       ring->gpu_caches_dirty = false;
+       engine->gpu_caches_dirty = false;
        return 0;
 }
 
@@ -913,34 +910,33 @@ unpin_ctx_obj:
 
 static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
 {
-       int ret = 0;
-       struct intel_engine_cs *ring = rq->ring;
-       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-       struct intel_ringbuffer *ringbuf = rq->ringbuf;
+       int engine = rq->engine->id;
+       int ret;
 
-       if (rq->ctx->engine[ring->id].pin_count++ == 0) {
-               ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
-               if (ret)
-                       goto reset_pin_count;
+       if (rq->ctx->engine[engine].pin_count++)
+               return 0;
 
-               i915_gem_context_reference(rq->ctx);
+       ret = intel_lr_context_do_pin(rq->engine,
+                                     rq->ctx->engine[engine].state,
+                                     rq->ringbuf);
+       if (ret) {
+               rq->ctx->engine[engine].pin_count = 0;
+               return ret;
        }
-       return ret;
 
-reset_pin_count:
-       rq->ctx->engine[ring->id].pin_count = 0;
-       return ret;
+       i915_gem_context_reference(rq->ctx);
+       return 0;
 }
 
 void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
-       struct intel_engine_cs *ring = rq->ring;
-       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+       int engine = rq->engine->id;
+       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[engine].state;
        struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
        if (ctx_obj) {
-               WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-               if (--rq->ctx->engine[ring->id].pin_count == 0) {
+               WARN_ON(!mutex_is_locked(&rq->i915->dev->struct_mutex));
+               if (--rq->ctx->engine[engine].pin_count == 0) {
                        intel_unpin_ringbuffer_obj(ringbuf);
                        i915_gem_object_ggtt_unpin(ctx_obj);
                        i915_gem_context_unreference(rq->ctx);
@@ -951,7 +947,7 @@ void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
        int ret, i;
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        struct intel_ringbuffer *ringbuf = req->ringbuf;
        struct drm_i915_private *dev_priv = req->i915;
        struct i915_workarounds *w = &dev_priv->workarounds;
@@ -959,7 +955,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
        if (w->count == 0)
                return 0;
 
-       ring->gpu_caches_dirty = true;
+       engine->gpu_caches_dirty = true;
        ret = logical_ring_flush_all_caches(req);
        if (ret)
                return ret;
@@ -977,7 +973,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
        intel_ring_advance(ringbuf);
 
-       ring->gpu_caches_dirty = true;
+       engine->gpu_caches_dirty = true;
        ret = logical_ring_flush_all_caches(req);
        if (ret)
                return ret;
@@ -1421,7 +1417,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *ring)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
        struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        struct intel_ringbuffer *ringbuf = req->ringbuf;
        const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
        int i, ret;
@@ -1434,9 +1430,9 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
        for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
                const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-               intel_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+               intel_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(engine, i));
                intel_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-               intel_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+               intel_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(engine, i));
                intel_ring_emit(ringbuf, lower_32_bits(pd_daddr));
        }
 
@@ -1460,7 +1456,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
         * not idle). PML4 is allocated during ppgtt init so this is
         * not needed in 48-bit.*/
        if (req->ctx->ppgtt &&
-           (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) {
+           (intel_ring_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
                if (!USES_FULL_48BIT_PPGTT(req->i915) &&
                    !intel_vgpu_active(req->i915->dev)) {
                        ret = intel_logical_ring_emit_pdps(req);
@@ -1468,7 +1464,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
                                return ret;
                }
 
-               req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring);
+               req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->engine);
        }
 
        ret = intel_ring_begin(req, 4);
@@ -1672,21 +1668,21 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
        struct render_state so;
        int ret;
 
-       ret = i915_gem_render_state_prepare(req->ring, &so);
+       ret = i915_gem_render_state_prepare(req->engine, &so);
        if (ret)
                return ret;
 
        if (so.rodata == NULL)
                return 0;
 
-       ret = req->ring->emit_bb_start(req, so.ggtt_offset,
-                                      I915_DISPATCH_SECURE);
+       ret = req->engine->emit_bb_start(req, so.ggtt_offset,
+                                        I915_DISPATCH_SECURE);
        if (ret)
                goto out;
 
-       ret = req->ring->emit_bb_start(req,
-                                      (so.ggtt_offset + so.aux_batch_offset),
-                                      I915_DISPATCH_SECURE);
+       ret = req->engine->emit_bb_start(req,
+                                        (so.ggtt_offset + so.aux_batch_offset),
+                                        I915_DISPATCH_SECURE);
        if (ret)
                goto out;
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 5d4f6f3b67cd..40041bebc3dc 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -138,21 +138,21 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
  *
  * Return: true if there are applicable MOCS settings for the device.
  */
-static bool get_mocs_settings(struct drm_device *dev,
+static bool get_mocs_settings(struct drm_i915_private *dev_priv,
                              struct drm_i915_mocs_table *table)
 {
        bool result = false;
 
-       if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                table->size  = ARRAY_SIZE(skylake_mocs_table);
                table->table = skylake_mocs_table;
                result = true;
-       } else if (IS_BROXTON(dev)) {
+       } else if (IS_BROXTON(dev_priv)) {
                table->size  = ARRAY_SIZE(broxton_mocs_table);
                table->table = broxton_mocs_table;
                result = true;
        } else {
-               WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
+               WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
                          "Platform that should have a MOCS table does not.\n");
        }
 
@@ -316,13 +316,12 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
        struct drm_i915_mocs_table t;
        int ret;
 
-       if (get_mocs_settings(req->ring->dev, &t)) {
-               struct drm_i915_private *dev_priv = req->i915;
+       if (get_mocs_settings(req->i915, &t)) {
                struct intel_engine_cs *ring;
                enum intel_ring_id ring_id;
 
                /* Program the control registers */
-               for_each_ring(ring, dev_priv, ring_id) {
+               for_each_ring(ring, req->i915, ring_id) {
                        ret = emit_mocs_control_table(req, &t, ring_id);
                        if (ret)
                                return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index db5c407f7720..072fd0fc7748 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -192,7 +192,7 @@ static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
        struct intel_ringbuffer *ring = req->ringbuf;
-       u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        ret = intel_ring_begin(req, 6);
@@ -229,7 +229,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 {
        struct intel_ringbuffer *ring = req->ringbuf;
        u32 flags = 0;
-       u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -302,7 +302,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
 {
        struct intel_ringbuffer *ring = req->ringbuf;
        u32 flags = 0;
-       u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /*
@@ -386,7 +386,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
                       u32 invalidate_domains, u32 flush_domains)
 {
        u32 flags = 0;
-       u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        flags |= PIPE_CONTROL_CS_STALL;
@@ -696,7 +696,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
        if (w->count == 0)
                return 0;
 
-       req->ring->gpu_caches_dirty = true;
+       req->engine->gpu_caches_dirty = true;
        ret = intel_ring_flush_all_caches(req);
        if (ret)
                return ret;
@@ -714,7 +714,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
        intel_ring_advance(ring);
 
-       req->ring->gpu_caches_dirty = true;
+       req->engine->gpu_caches_dirty = true;
        ret = intel_ring_flush_all_caches(req);
        if (ret)
                return ret;
@@ -1205,7 +1205,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_ring(waiter, dev_priv, i) {
-               u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
+               u64 gtt_offset = signaller_req->engine->semaphore.signal_ggtt[i];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
@@ -1243,7 +1243,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_ring(waiter, dev_priv, i) {
-               u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
+               u64 gtt_offset = signaller_req->engine->semaphore.signal_ggtt[i];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
@@ -1279,7 +1279,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_ring(useless, dev_priv, i) {
-               i915_reg_t mbox_reg = signaller_req->ring->semaphore.mbox.signal[i];
+               i915_reg_t mbox_reg = signaller_req->engine->semaphore.mbox.signal[i];
 
                if (i915_mmio_reg_valid(mbox_reg)) {
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
@@ -1309,8 +1309,8 @@ gen6_add_request(struct drm_i915_gem_request *req)
        struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
-       if (req->ring->semaphore.signal)
-               ret = req->ring->semaphore.signal(req, 4);
+       if (req->engine->semaphore.signal)
+               ret = req->engine->semaphore.signal(req, 4);
        else
                ret = intel_ring_begin(req, 4);
 
@@ -1321,7 +1321,7 @@ gen6_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, req->fence.seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
-       __intel_ring_advance(req->ring);
+       __intel_ring_advance(req->engine);
 
        return 0;
 }
@@ -1359,10 +1359,10 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
                                MI_SEMAPHORE_SAD_GTE_SDD);
        intel_ring_emit(waiter, seqno);
        intel_ring_emit(waiter,
-                       lower_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
+                       lower_32_bits(GEN8_WAIT_OFFSET(waiter_req->engine,
                                                       signaller->id)));
        intel_ring_emit(waiter,
-                       upper_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
+                       upper_32_bits(GEN8_WAIT_OFFSET(waiter_req->engine,
                                                       signaller->id)));
        intel_ring_advance(waiter);
        return 0;
@@ -1377,7 +1377,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;
-       u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->ring->id];
+       u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
        int ret;
 
        /* Throughout all of the GEM code, seqno passed implies our current
@@ -1422,7 +1422,7 @@ static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
        struct intel_ringbuffer *ring = req->ringbuf;
-       u32 addr = req->ring->status_page.gfx_addr +
+       u32 addr = req->engine->status_page.gfx_addr +
                (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        u32 scratch_addr = addr;
        int ret;
@@ -1465,7 +1465,7 @@ pc_render_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(ring, addr | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, req->fence.seqno);
        intel_ring_emit(ring, 0);
-       __intel_ring_advance(req->ring);
+       __intel_ring_advance(req->engine);
 
        return 0;
 }
@@ -1575,7 +1575,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, req->fence.seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
-       __intel_ring_advance(req->ring);
+       __intel_ring_advance(req->engine);
 
        return 0;
 }
@@ -1686,7 +1686,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
                         unsigned dispatch_flags)
 {
        struct intel_ringbuffer *ring = req->ringbuf;
-       u32 cs_offset = req->ring->scratch.gtt_offset;
+       u32 cs_offset = req->engine->scratch.gtt_offset;
        int ret;
 
        ret = intel_ring_begin(req, 6);
@@ -2082,7 +2082,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-       request->ringbuf = request->ring->buffer;
+       request->ringbuf = request->engine->buffer;
        return 0;
 }
 
@@ -2136,7 +2136,7 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
        struct intel_ringbuffer *ringbuf = req->ringbuf;
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *target;
        unsigned space;
        int ret;
@@ -2147,7 +2147,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
        /* The whole point of reserving space is to not wait! */
        WARN_ON(ringbuf->reserved_in_use);
 
-       list_for_each_entry(target, &ring->request_list, list) {
+       list_for_each_entry(target, &engine->request_list, list) {
                /*
                 * The request queue is per-engine, so can contain requests
                 * from multiple ringbuffers. Here, we must ignore any that
@@ -2163,7 +2163,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
                        break;
        }
 
-       if (WARN_ON(&target->list == &ring->request_list))
+       if (WARN_ON(&target->list == &engine->request_list))
                return -ENOSPC;
 
        ret = i915_wait_request(target);
@@ -2836,40 +2836,40 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 int
 intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        int ret;
 
-       if (!ring->gpu_caches_dirty)
+       if (!engine->gpu_caches_dirty)
                return 0;
 
-       ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
+       ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
        trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
 
-       ring->gpu_caches_dirty = false;
+       engine->gpu_caches_dirty = false;
        return 0;
 }
 
 int
 intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_engine_cs *engine = req->engine;
        uint32_t flush_domains;
        int ret;
 
        flush_domains = 0;
-       if (ring->gpu_caches_dirty)
+       if (engine->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;
 
-       ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
+       ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;
 
        trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
 
-       ring->gpu_caches_dirty = false;
+       engine->gpu_caches_dirty = false;
        return 0;
 }
 
-- 
2.7.0.rc3
