intel_logical_ring_emit() and intel_ring_emit() perform the same actions
with more or less indirection, so just unify the code.
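
For illustration, this is the call pattern the patch converges on for both the
legacy and execlists paths: pull the request's ringbuffer out of the
drm_i915_gem_request and emit into it directly. A minimal sketch only; the
emit_noops() helper below is invented for the example, while the emitters
themselves are the ones defined in intel_ringbuffer.h by this patch:

  static int emit_noops(struct drm_i915_gem_request *req)
  {
          /* The unified emitters operate on the ringbuffer, not the engine. */
          struct intel_ringbuffer *ring = req->ringbuf;
          int ret;

          /* Reserve space for two dwords before writing them. */
          ret = intel_ring_begin(req, 2);
          if (ret)
                  return ret;

          intel_ring_emit(ring, MI_NOOP);
          intel_ring_emit(ring, MI_NOOP);
          intel_ring_advance(ring);

          return 0;
  }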

Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c            |   2 +-
 drivers/gpu/drm/i915/i915_gem_context.c    |   8 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  34 ++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  26 +++----
 drivers/gpu/drm/i915/intel_display.c       |  26 +++----
 drivers/gpu/drm/i915/intel_lrc.c           | 114 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_lrc.h           |  26 -------
 drivers/gpu/drm/i915/intel_mocs.c          |  30 ++++----
 drivers/gpu/drm/i915/intel_overlay.c       |  42 +++++------
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 101 ++++++++++++-------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  21 ++----
 11 files changed, 194 insertions(+), 236 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c2a1ec8abc11..247731672cb1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4068,7 +4068,7 @@ err:
 
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        struct drm_i915_private *dev_priv = req->i915;
        u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
        int i, ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3e3b4bf3fed1..d58de7e084dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -519,7 +519,7 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
 static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        u32 flags = hw_flags | MI_MM_SPACE_GTT;
        const int num_rings =
                /* Use an extended w/a on ivb+ if signalling from other rings */
@@ -534,7 +534,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
         * itlb_before_ctx_switch.
         */
        if (IS_GEN6(req->i915)) {
-               ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
+               ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
                if (ret)
                        return ret;
        }
@@ -562,7 +562,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
                        for_each_ring(signaller, req->i915, i) {
-                               if (signaller == ring)
+                               if (signaller == req->ring)
                                        continue;
 
                                intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
@@ -587,7 +587,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
                        for_each_ring(signaller, req->i915, i) {
-                               if (signaller == ring)
+                               if (signaller == req->ring)
                                        continue;
 
                                intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base));
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 78b462956c78..603a247ac333 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1146,14 +1146,12 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 }
 
 static int
-i915_reset_gen7_sol_offsets(struct drm_device *dev,
-                           struct drm_i915_gem_request *req)
+i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret, i;
 
-       if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+       if (!IS_GEN7(req->i915) || req->ring->id != RCS) {
                DRM_DEBUG("sol reset is gen7/rcs only\n");
                return -EINVAL;
        }
@@ -1231,9 +1229,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
                               struct list_head *vmas)
 {
-       struct drm_device *dev = params->dev;
-       struct intel_engine_cs *ring = params->ring;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ringbuffer *ring = params->request->ringbuf;
+       struct drm_i915_private *dev_priv = params->request->i915;
        u64 exec_start, exec_len;
        int instp_mode;
        u32 instp_mask;
@@ -1247,34 +1244,31 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
        if (ret)
                return ret;
 
-       WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
-            "%s didn't clear reload\n", ring->name);
-
        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        instp_mask = I915_EXEC_CONSTANTS_MASK;
        switch (instp_mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
-               if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+               if (instp_mode != 0 && params->ring->id != RCS) {
                        DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
                        return -EINVAL;
                }
 
                if (instp_mode != dev_priv->relative_constants_mode) {
-                       if (INTEL_INFO(dev)->gen < 4) {
+                       if (INTEL_INFO(dev_priv)->gen < 4) {
                                DRM_DEBUG("no rel constants on pre-gen4\n");
                                return -EINVAL;
                        }
 
-                       if (INTEL_INFO(dev)->gen > 5 &&
+                       if (INTEL_INFO(dev_priv)->gen > 5 &&
                            instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
                                DRM_DEBUG("rel surface constants mode invalid 
on gen5+\n");
                                return -EINVAL;
                        }
 
                        /* The HW changed the meaning on this bit on gen6 */
-                       if (INTEL_INFO(dev)->gen >= 6)
+                       if (INTEL_INFO(dev_priv)->gen >= 6)
                                instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
@@ -1283,7 +1277,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
                return -EINVAL;
        }
 
-       if (ring == &dev_priv->ring[RCS] &&
+       if (params->ring->id == RCS &&
            instp_mode != dev_priv->relative_constants_mode) {
                ret = intel_ring_begin(params->request, 4);
                if (ret)
@@ -1299,7 +1293,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
        }
 
        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-               ret = i915_reset_gen7_sol_offsets(dev, params->request);
+               ret = i915_reset_gen7_sol_offsets(params->request);
                if (ret)
                        return ret;
        }
@@ -1308,9 +1302,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
        exec_start = params->batch_obj_vm_offset +
                     params->args_batch_start_offset;
 
-       ret = ring->dispatch_execbuffer(params->request,
-                                       exec_start, exec_len,
-                                       params->dispatch_flags);
+       ret = params->ring->dispatch_execbuffer(params->request,
+                                               exec_start, exec_len,
+                                               params->dispatch_flags);
        if (ret)
                return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 224fe89baca3..98841b05f764 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -656,7 +656,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
                          unsigned entry,
                          dma_addr_t addr)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        BUG_ON(entry >= 4);
@@ -666,10 +666,10 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
                return ret;
 
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-       intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry));
+       intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(req->ring, entry));
        intel_ring_emit(ring, upper_32_bits(addr));
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-       intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry));
+       intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(req->ring, entry));
        intel_ring_emit(ring, lower_32_bits(addr));
        intel_ring_advance(ring);
 
@@ -1648,11 +1648,11 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
                         struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
@@ -1661,9 +1661,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
                return ret;
 
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-       intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
+       intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(req->ring));
        intel_ring_emit(ring, PP_DIR_DCLV_2G);
-       intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
+       intel_ring_emit_reg(ring, RING_PP_DIR_BASE(req->ring));
        intel_ring_emit(ring, get_pd_offset(ppgtt));
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
@@ -1685,11 +1685,11 @@ static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
                          struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        /* NB: TLBs must be flushed and invalidated before a switch */
-       ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;
 
@@ -1698,16 +1698,16 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
                return ret;
 
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-       intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring));
+       intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(req->ring));
        intel_ring_emit(ring, PP_DIR_DCLV_2G);
-       intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring));
+       intel_ring_emit_reg(ring, RING_PP_DIR_BASE(req->ring));
        intel_ring_emit(ring, get_pd_offset(ppgtt));
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
 
        /* XXX: RCS is the only one to auto invalidate the TLBs? */
-       if (ring->id != RCS) {
-               ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+       if (req->ring->id != RCS) {
+               ret = req->ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
                if (ret)
                        return ret;
        }
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e2822530af25..b28e783f6f04 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11052,7 +11052,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 flip_mask;
        int ret;
@@ -11087,7 +11087,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 flip_mask;
        int ret;
@@ -11119,8 +11119,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ringbuffer *ring = req->ringbuf;
+       struct drm_i915_private *dev_priv = req->i915;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;
@@ -11158,8 +11158,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
-       struct intel_engine_cs *ring = req->ring;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ringbuffer *ring = req->ringbuf;
+       struct drm_i915_private *dev_priv = req->i915;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pf, pipesrc;
        int ret;
@@ -11194,7 +11194,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
                                 struct drm_i915_gem_request *req,
                                 uint32_t flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t plane_bit = 0;
        int len, ret;
@@ -11215,14 +11215,14 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        }
 
        len = 4;
-       if (ring->id == RCS) {
+       if (req->ring->id == RCS) {
                len += 6;
                /*
                 * On Gen 8, SRM is now taking an extra dword to accommodate
                 * 48bits addresses, and we need a NOOP for the batch size to
                 * stay even.
                 */
-               if (IS_GEN8(dev))
+               if (IS_GEN8(req->i915))
                        len += 2;
        }
 
@@ -11253,21 +11253,21 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
         * for the RCS also doesn't appear to drop events. Setting the DERRMR
         * to zero does lead to lockups within MI_DISPLAY_FLIP.
         */
-       if (ring->id == RCS) {
+       if (req->ring->id == RCS) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit_reg(ring, DERRMR);
                intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
                                        DERRMR_PIPEB_PRI_FLIP_DONE |
                                        DERRMR_PIPEC_PRI_FLIP_DONE));
-               if (IS_GEN8(dev))
+               if (IS_GEN8(req->i915))
                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
                                              MI_SRM_LRM_GLOBAL_GTT);
                else
                        intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
                                              MI_SRM_LRM_GLOBAL_GTT);
                intel_ring_emit_reg(ring, DERRMR);
-               intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
-               if (IS_GEN8(dev)) {
+               intel_ring_emit(ring, req->ring->scratch.gtt_offset + 256);
+               if (IS_GEN8(req->i915)) {
                        intel_ring_emit(ring, 0);
                        intel_ring_emit(ring, MI_NOOP);
                }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index a369aa041522..dc4fc9d8612c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -754,7 +754,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
        struct drm_i915_private *dev_priv = request->i915;
 
-       intel_logical_ring_advance(request->ringbuf);
+       intel_ring_advance(request->ringbuf);
        request->tail = request->ringbuf->tail;
 
        if (dev_priv->guc.execbuf_client)
@@ -932,11 +932,11 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
                if (ret)
                        return ret;
 
-               intel_logical_ring_emit(ringbuf, MI_NOOP);
-               intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-               intel_logical_ring_emit_reg(ringbuf, INSTPM);
-               intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-               intel_logical_ring_advance(ringbuf);
+               intel_ring_emit(ringbuf, MI_NOOP);
+               intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit_reg(ringbuf, INSTPM);
+               intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
+               intel_ring_advance(ringbuf);
 
                dev_priv->relative_constants_mode = instp_mode;
        }
@@ -1108,14 +1108,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+       intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
        for (i = 0; i < w->count; i++) {
-               intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
-               intel_logical_ring_emit(ringbuf, w->reg[i].value);
+               intel_ring_emit_reg(ringbuf, w->reg[i].addr);
+               intel_ring_emit(ringbuf, w->reg[i].value);
        }
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
+       intel_ring_emit(ringbuf, MI_NOOP);
 
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_advance(ringbuf);
 
        ring->gpu_caches_dirty = true;
        ret = logical_ring_flush_all_caches(req);
@@ -1570,18 +1570,18 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+       intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
        for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
                const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-               intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
-               intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-               intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
-               intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+               intel_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i));
+               intel_ring_emit(ringbuf, upper_32_bits(pd_daddr));
+               intel_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i));
+               intel_ring_emit(ringbuf, lower_32_bits(pd_daddr));
        }
 
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ringbuf, MI_NOOP);
+       intel_ring_advance(ringbuf);
 
        return 0;
 }
@@ -1616,14 +1616,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
                return ret;
 
        /* FIXME(BDW): Address space and security selectors. */
-       intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
-                               (ppgtt<<8) |
-                               (dispatch_flags & I915_DISPATCH_RS ?
-                                MI_BATCH_RESOURCE_STREAMER : 0));
-       intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
-       intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+                       (ppgtt<<8) |
+                       (dispatch_flags & I915_DISPATCH_RS ?
+                        MI_BATCH_RESOURCE_STREAMER : 0));
+       intel_ring_emit(ringbuf, lower_32_bits(offset));
+       intel_ring_emit(ringbuf, upper_32_bits(offset));
+       intel_ring_emit(ringbuf, MI_NOOP);
+       intel_ring_advance(ringbuf);
 
        return 0;
 }
@@ -1674,13 +1674,13 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
                        cmd |= MI_INVALIDATE_BSD;
        }
 
-       intel_logical_ring_emit(ringbuf, cmd);
-       intel_logical_ring_emit(ringbuf,
-                               I915_GEM_HWS_SCRATCH_ADDR |
-                               MI_FLUSH_DW_USE_GTT);
-       intel_logical_ring_emit(ringbuf, 0); /* upper addr */
-       intel_logical_ring_emit(ringbuf, 0); /* value */
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ringbuf, cmd);
+       intel_ring_emit(ringbuf,
+                       I915_GEM_HWS_SCRATCH_ADDR |
+                       MI_FLUSH_DW_USE_GTT);
+       intel_ring_emit(ringbuf, 0); /* upper addr */
+       intel_ring_emit(ringbuf, 0); /* value */
+       intel_ring_advance(ringbuf);
 
        return 0;
 }
@@ -1727,21 +1727,21 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
                return ret;
 
        if (vf_flush_wa) {
-               intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
+               intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+               intel_ring_emit(ringbuf, 0);
+               intel_ring_emit(ringbuf, 0);
+               intel_ring_emit(ringbuf, 0);
+               intel_ring_emit(ringbuf, 0);
+               intel_ring_emit(ringbuf, 0);
        }
 
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-       intel_logical_ring_emit(ringbuf, flags);
-       intel_logical_ring_emit(ringbuf, scratch_addr);
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+       intel_ring_emit(ringbuf, flags);
+       intel_ring_emit(ringbuf, scratch_addr);
+       intel_ring_emit(ringbuf, 0);
+       intel_ring_emit(ringbuf, 0);
+       intel_ring_emit(ringbuf, 0);
+       intel_ring_advance(ringbuf);
 
        return 0;
 }
@@ -1786,23 +1786,23 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
        cmd = MI_STORE_DWORD_IMM_GEN4;
        cmd |= MI_GLOBAL_GTT;
 
-       intel_logical_ring_emit(ringbuf, cmd);
-       intel_logical_ring_emit(ringbuf,
-                               (ring->status_page.gfx_addr +
-                               (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, request->fence.seqno);
-       intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
+       intel_ring_emit(ringbuf, cmd);
+       intel_ring_emit(ringbuf,
+                       (ring->status_page.gfx_addr +
+                        (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
+       intel_ring_emit(ringbuf, 0);
+       intel_ring_emit(ringbuf, request->fence.seqno);
+       intel_ring_emit(ringbuf, MI_USER_INTERRUPT);
+       intel_ring_emit(ringbuf, MI_NOOP);
        intel_logical_ring_advance_and_submit(request);
 
        /*
         * Here we add two extra NOOPs as padding to avoid
         * lite restore of a context with HEAD==TAIL.
         */
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ringbuf, MI_NOOP);
+       intel_ring_emit(ringbuf, MI_NOOP);
+       intel_ring_advance(ringbuf);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 1e58f2550777..9d4aa699e593 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -63,32 +63,6 @@ int intel_logical_rings_init(struct drm_device *dev);
 int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-/**
- * intel_logical_ring_advance() - advance the ringbuffer tail
- * @ringbuf: Ringbuffer to advance.
- *
- * The tail is only updated in our logical ringbuffer struct.
- */
-static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
-{
-       intel_ringbuffer_advance(ringbuf);
-}
-
-/**
- * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
- * @ringbuf: Ringbuffer to write to.
- * @data: DWORD to write.
- */
-static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
-                                          u32 data)
-{
-       intel_ringbuffer_emit(ringbuf, data);
-}
-static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
-                                              i915_reg_t reg)
-{
-       intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
-}
 
 /* Logical Ring Contexts */
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index fed7bea19cc9..d8a7fdc7baeb 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -206,13 +206,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
                return ret;
        }
 
-       intel_logical_ring_emit(ringbuf,
-                               MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+       intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
        for (index = 0; index < table->size; index++) {
-               intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
-               intel_logical_ring_emit(ringbuf,
-                                       table->table[index].control_value);
+               intel_ring_emit_reg(ringbuf, mocs_register(ring, index));
+               intel_ring_emit(ringbuf, table->table[index].control_value);
        }
 
        /*
@@ -224,12 +222,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
         * that value to all the used entries.
         */
        for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-               intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
-               intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+               intel_ring_emit_reg(ringbuf, mocs_register(ring, index));
+               intel_ring_emit(ringbuf, table->table[0].control_value);
        }
 
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ringbuf, MI_NOOP);
+       intel_ring_advance(ringbuf);
 
        return 0;
 }
@@ -265,15 +263,15 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
                return ret;
        }
 
-       intel_logical_ring_emit(ringbuf,
+       intel_ring_emit(ringbuf,
                        MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
        for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
                value = (table->table[count].l3cc_value & 0xffff) |
                        ((table->table[count + 1].l3cc_value & 0xffff) << 16);
 
-               intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-               intel_logical_ring_emit(ringbuf, value);
+               intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+               intel_ring_emit(ringbuf, value);
        }
 
        if (table->size & 0x01) {
@@ -289,14 +287,14 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
         * they are reserved by the hardware.
         */
        for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-               intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-               intel_logical_ring_emit(ringbuf, value);
+               intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+               intel_ring_emit(ringbuf, value);
 
                value = filler;
        }
 
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_ring_emit(ringbuf, MI_NOOP);
+       intel_ring_advance(ringbuf);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 76f1980a7541..6dca0e470e61 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -252,11 +252,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
        overlay->active = true;
 
-       intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-       intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-       intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-       intel_ring_emit(ring, MI_NOOP);
-       intel_ring_advance(ring);
+       intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+       intel_ring_emit(req->ringbuf, overlay->flip_addr | OFC_UPDATE);
+       intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+       intel_ring_emit(req->ringbuf, MI_NOOP);
+       intel_ring_advance(req->ringbuf);
 
        return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -293,9 +293,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
                return ret;
        }
 
-       intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-       intel_ring_emit(ring, flip_addr);
-       intel_ring_advance(ring);
+       intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+       intel_ring_emit(req->ringbuf, flip_addr);
+       intel_ring_advance(req->ringbuf);
 
        WARN_ON(overlay->last_flip_req);
        i915_gem_request_assign(&overlay->last_flip_req, req);
@@ -360,22 +360,22 @@ static int intel_overlay_off(struct intel_overlay *overlay)
        }
 
        /* wait for overlay to go idle */
-       intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-       intel_ring_emit(ring, flip_addr);
-       intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+       intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+       intel_ring_emit(req->ringbuf, flip_addr);
+       intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        /* turn overlay off */
        if (IS_I830(dev)) {
                /* Workaround: Don't disable the overlay fully, since otherwise
                 * it dies on the next OVERLAY_ON cmd. */
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_emit(req->ringbuf, MI_NOOP);
+               intel_ring_emit(req->ringbuf, MI_NOOP);
+               intel_ring_emit(req->ringbuf, MI_NOOP);
        } else {
-               intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-               intel_ring_emit(ring, flip_addr);
-               intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+               intel_ring_emit(req->ringbuf, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+               intel_ring_emit(req->ringbuf, flip_addr);
+               intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
        }
-       intel_ring_advance(ring);
+       intel_ring_advance(req->ringbuf);
 
        return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
@@ -433,9 +433,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
                        return ret;
                }
 
-               intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-               intel_ring_emit(ring, MI_NOOP);
-               intel_ring_advance(ring);
+               intel_ring_emit(req->ringbuf, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+               intel_ring_emit(req->ringbuf, MI_NOOP);
+               intel_ring_advance(req->ringbuf);
 
                ret = intel_overlay_do_wait_request(overlay, req,
                                                    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d17dd33ee94c..86c54584f64a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -71,7 +71,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        u32 cmd;
        int ret;
 
@@ -98,7 +98,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
                       u32      invalidate_domains,
                       u32      flush_domains)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        u32 cmd;
        int ret;
 
@@ -191,8 +191,8 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
-       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       struct intel_ringbuffer *ring = req->ringbuf;
+       u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        ret = intel_ring_begin(req, 6);
@@ -227,9 +227,9 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
                       u32 invalidate_domains, u32 flush_domains)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        u32 flags = 0;
-       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -279,7 +279,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        ret = intel_ring_begin(req, 4);
@@ -300,9 +300,9 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
                       u32 invalidate_domains, u32 flush_domains)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        u32 flags = 0;
-       u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+       u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
        /*
@@ -363,7 +363,7 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
                       u32 flags, u32 scratch_addr)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        ret = intel_ring_begin(req, 6);
@@ -688,15 +688,15 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-       int ret, i;
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        struct drm_i915_private *dev_priv = req->i915;
        struct i915_workarounds *w = &dev_priv->workarounds;
+       int ret, i;
 
        if (w->count == 0)
                return 0;
 
-       ring->gpu_caches_dirty = true;
+       req->ring->gpu_caches_dirty = true;
        ret = intel_ring_flush_all_caches(req);
        if (ret)
                return ret;
@@ -714,7 +714,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
        intel_ring_advance(ring);
 
-       ring->gpu_caches_dirty = true;
+       req->ring->gpu_caches_dirty = true;
        ret = intel_ring_flush_all_caches(req);
        if (ret)
                return ret;
@@ -1191,7 +1191,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                           unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-       struct intel_engine_cs *signaller = signaller_req->ring;
+       struct intel_ringbuffer *signaller = signaller_req->ringbuf;
        struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *waiter;
        int i, ret, num_rings;
@@ -1205,7 +1205,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_ring(waiter, dev_priv, i) {
-               u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+               u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
@@ -1229,7 +1229,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                           unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-       struct intel_engine_cs *signaller = signaller_req->ring;
+       struct intel_ringbuffer *signaller = signaller_req->ringbuf;
        struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *waiter;
        int i, ret, num_rings;
@@ -1243,7 +1243,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_ring(waiter, dev_priv, i) {
-               u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+               u64 gtt_offset = signaller_req->ring->semaphore.signal_ggtt[i];
                if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
                        continue;
 
@@ -1264,7 +1264,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                       unsigned int num_dwords)
 {
-       struct intel_engine_cs *signaller = signaller_req->ring;
+       struct intel_ringbuffer *signaller = signaller_req->ringbuf;
        struct drm_i915_private *dev_priv = signaller_req->i915;
        struct intel_engine_cs *useless;
        int i, ret, num_rings;
@@ -1279,7 +1279,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
                return ret;
 
        for_each_ring(useless, dev_priv, i) {
-               i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];
+               i915_reg_t mbox_reg = signaller_req->ring->semaphore.mbox.signal[i];
 
                if (i915_mmio_reg_valid(mbox_reg)) {
                        intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
@@ -1306,11 +1306,11 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
-       if (ring->semaphore.signal)
-               ret = ring->semaphore.signal(req, 4);
+       if (req->ring->semaphore.signal)
+               ret = req->ring->semaphore.signal(req, 4);
        else
                ret = intel_ring_begin(req, 4);
 
@@ -1321,15 +1321,14 @@ gen6_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, req->fence.seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
-       __intel_ring_advance(ring);
+       __intel_ring_advance(req->ring);
 
        return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
+static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
                                              u32 seqno)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        return dev_priv->last_seqno < seqno;
 }
 
@@ -1346,7 +1345,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
               struct intel_engine_cs *signaller,
               u32 seqno)
 {
-       struct intel_engine_cs *waiter = waiter_req->ring;
+       struct intel_ringbuffer *waiter = waiter_req->ringbuf;
        struct drm_i915_private *dev_priv = waiter_req->i915;
        int ret;
 
@@ -1360,9 +1359,11 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
                                MI_SEMAPHORE_SAD_GTE_SDD);
        intel_ring_emit(waiter, seqno);
        intel_ring_emit(waiter,
-                       lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+                       lower_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
+                                                      signaller->id)));
        intel_ring_emit(waiter,
-                       upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+                       upper_32_bits(GEN8_WAIT_OFFSET(waiter_req->ring,
+                                                      signaller->id)));
        intel_ring_advance(waiter);
        return 0;
 }
@@ -1372,11 +1373,11 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
               struct intel_engine_cs *signaller,
               u32 seqno)
 {
-       struct intel_engine_cs *waiter = waiter_req->ring;
+       struct intel_ringbuffer *waiter = waiter_req->ringbuf;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;
-       u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+       u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->ring->id];
        int ret;
 
        /* Throughout all of the GEM code, seqno passed implies our current
@@ -1392,7 +1393,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
                return ret;
 
        /* If seqno wrap happened, omit the wait with no-ops */
-       if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
+       if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
                intel_ring_emit(waiter, dw1 | wait_mbox);
                intel_ring_emit(waiter, seqno);
                intel_ring_emit(waiter, 0);
@@ -1420,7 +1421,7 @@ do {                                                                    \
 static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        u32 addr = req->ring->status_page.gfx_addr +
                (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        u32 scratch_addr = addr;
@@ -1464,7 +1465,7 @@ pc_render_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(ring, addr | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, req->fence.seqno);
        intel_ring_emit(ring, 0);
-       __intel_ring_advance(ring);
+       __intel_ring_advance(req->ring);
 
        return 0;
 }
@@ -1547,7 +1548,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
               u32     invalidate_domains,
               u32     flush_domains)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        ret = intel_ring_begin(req, 2);
@@ -1563,7 +1564,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        ret = intel_ring_begin(req, 4);
@@ -1574,7 +1575,7 @@ i9xx_add_request(struct drm_i915_gem_request *req)
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, req->fence.seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
-       __intel_ring_advance(ring);
+       __intel_ring_advance(req->ring);
 
        return 0;
 }
@@ -1657,7 +1658,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
                         u64 offset, u32 length,
                         unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        ret = intel_ring_begin(req, 2);
@@ -1684,8 +1685,8 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
                         u64 offset, u32 len,
                         unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
-       u32 cs_offset = ring->scratch.gtt_offset;
+       struct intel_ringbuffer *ring = req->ringbuf;
+       u32 cs_offset = req->ring->scratch.gtt_offset;
        int ret;
 
        ret = intel_ring_begin(req, 6);
@@ -1747,7 +1748,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
                         u64 offset, u32 len,
                         unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        ret = intel_ring_begin(req, 2);
@@ -2256,8 +2257,8 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-       struct intel_engine_cs *ring = req->ring;
-       int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+       struct intel_ringbuffer *ring = req->ringbuf;
+       int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        int ret;
 
        if (num_dwords == 0)
@@ -2331,7 +2332,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
                               u32 invalidate, u32 flush)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        uint32_t cmd;
        int ret;
 
@@ -2340,7 +2341,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (INTEL_INFO(ring->dev)->gen >= 8)
+       if (INTEL_INFO(req->i915)->gen >= 8)
                cmd += 1;
 
        /* We always require a command barrier so that subsequent
@@ -2361,7 +2362,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-       if (INTEL_INFO(ring->dev)->gen >= 8) {
+       if (INTEL_INFO(req->i915)->gen >= 8) {
                intel_ring_emit(ring, 0); /* upper addr */
                intel_ring_emit(ring, 0); /* value */
        } else  {
@@ -2377,7 +2378,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                              u64 offset, u32 len,
                              unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        bool ppgtt = USES_PPGTT(req->i915) &&
                        !(dispatch_flags & I915_DISPATCH_SECURE);
        int ret;
@@ -2403,7 +2404,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                             u64 offset, u32 len,
                             unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        ret = intel_ring_begin(req, 2);
@@ -2428,7 +2429,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
                              u64 offset, u32 len,
                              unsigned dispatch_flags)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        int ret;
 
        ret = intel_ring_begin(req, 2);
@@ -2451,7 +2452,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
                           u32 invalidate, u32 flush)
 {
-       struct intel_engine_cs *ring = req->ring;
+       struct intel_ringbuffer *ring = req->ringbuf;
        uint32_t cmd;
        int ret;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 7669a8d30f27..9c19a6ca8e7d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -468,29 +468,20 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ringbuffer_emit(struct intel_ringbuffer *rb,
-                                        u32 data)
+static inline void intel_ring_emit(struct intel_ringbuffer *rb,
+                                  u32 data)
 {
        *(uint32_t *)(rb->virtual_start + rb->tail) = data;
        rb->tail += 4;
 }
-static inline void intel_ringbuffer_advance(struct intel_ringbuffer *rb)
-{
-       rb->tail &= rb->size - 1;
-}
-static inline void intel_ring_emit(struct intel_engine_cs *ring,
-                                  u32 data)
-{
-       intel_ringbuffer_emit(ring->buffer, data);
-}
-static inline void intel_ring_emit_reg(struct intel_engine_cs *ring,
+static inline void intel_ring_emit_reg(struct intel_ringbuffer *rb,
                                       i915_reg_t reg)
 {
-       intel_ring_emit(ring, i915_mmio_reg_offset(reg));
+       intel_ring_emit(rb, i915_mmio_reg_offset(reg));
 }
-static inline void intel_ring_advance(struct intel_engine_cs *ring)
+static inline void intel_ring_advance(struct intel_ringbuffer *rb)
 {
-       intel_ringbuffer_advance(ring->buffer);
+       rb->tail &= rb->size - 1;
 }
 int __intel_ring_space(int head, int tail, int size);
 void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-- 
2.7.0.rc3
