From: Tvrtko Ursulin <tvrtko.ursu...@intel.com>

A mix of Coccinelle, sed and manual editing.

v2: Rename engine->buffer as well.
v3: Rebased.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
Cc: Chris Wilson <ch...@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  14 +-
 drivers/gpu/drm/i915/i915_drv.h            |   4 +-
 drivers/gpu/drm/i915/i915_gem.c            |  26 ++--
 drivers/gpu/drm/i915/i915_gpu_error.c      |   6 +-
 drivers/gpu/drm/i915/i915_guc_submission.c |   4 +-
 drivers/gpu/drm/i915/i915_irq.c            |  12 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 239 +++++++++++++++--------------
 drivers/gpu/drm/i915/intel_lrc.h           |  14 +-
 drivers/gpu/drm/i915/intel_mocs.c          |  38 ++---
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 193 +++++++++++------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  33 ++--
 11 files changed, 295 insertions(+), 288 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ccdca2c7d799..9209250e50c5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1933,11 +1933,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 }
 
 static void describe_ctx_ringbuf(struct seq_file *m,
-                                struct intel_ringbuffer *ringbuf)
+                                struct intel_ringbuffer *ring)
 {
        seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
-                  ringbuf->space, ringbuf->head, ringbuf->tail,
-                  ringbuf->last_retired_head);
+                  ring->space, ring->head, ring->tail,
+                  ring->last_retired_head);
 }
 
 static int i915_context_status(struct seq_file *m, void *unused)
@@ -1968,14 +1968,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
                        for_each_engine(engine, dev_priv, i) {
                                struct drm_i915_gem_object *ctx_obj =
                                        ctx->engine[i].state;
-                               struct intel_ringbuffer *ringbuf =
-                                       ctx->engine[i].ringbuf;
+                               struct intel_ringbuffer *ring =
+                                       ctx->engine[i].ring;
 
                                seq_printf(m, "%s: ", engine->name);
                                if (ctx_obj)
                                        describe_obj(m, ctx_obj);
-                               if (ringbuf)
-                                       describe_ctx_ringbuf(m, ringbuf);
+                               if (ring)
+                                       describe_ctx_ringbuf(m, ring);
                                seq_putc(m, '\n');
                        }
                } else {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 00c41a4bde2a..1e8e207f72df 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -842,7 +842,7 @@ struct intel_context {
        /* Execlists */
        struct {
                struct drm_i915_gem_object *state;
-               struct intel_ringbuffer *ringbuf;
+               struct intel_ringbuffer *ring;
                int pin_count;
                struct i915_vma *lrc_vma;
                u64 lrc_desc;
@@ -2247,7 +2247,7 @@ struct drm_i915_gem_request {
         * context.
         */
        struct intel_context *ctx;
-       struct intel_ringbuffer *ringbuf;
+       struct intel_ringbuffer *ring;
 
        /** Batch buffer related to this request if any (used for
            error state dump only) */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a4e015530b0c..e0ec11075bd6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1423,7 +1423,7 @@ static void i915_gem_request_retire(struct 
drm_i915_gem_request *request)
         * Note this requires that we are always called in request
         * completion order.
         */
-       request->ringbuf->last_retired_head = request->postfix;
+       request->ring->last_retired_head = request->postfix;
 
        list_del_init(&request->list);
        i915_gem_request_remove_from_client(request);
@@ -2544,7 +2544,7 @@ void __i915_add_request(struct drm_i915_gem_request 
*request,
 {
        struct intel_engine_cs *engine;
        struct drm_i915_private *dev_priv;
-       struct intel_ringbuffer *ringbuf;
+       struct intel_ringbuffer *ring;
        u32 request_start;
        int ret;
 
@@ -2553,16 +2553,16 @@ void __i915_add_request(struct drm_i915_gem_request 
*request,
 
        engine = request->engine;
        dev_priv = engine->dev->dev_private;
-       ringbuf = request->ringbuf;
+       ring = request->ring;
 
        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
-       intel_ring_reserved_space_use(ringbuf);
+       intel_ring_reserved_space_use(ring);
 
-       request_start = intel_ring_get_tail(ringbuf);
+       request_start = intel_ring_get_tail(ring);
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
@@ -2584,14 +2584,14 @@ void __i915_add_request(struct drm_i915_gem_request 
*request,
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
-       request->postfix = intel_ring_get_tail(ringbuf);
+       request->postfix = intel_ring_get_tail(ring);
 
        if (i915.enable_execlists)
                ret = engine->emit_request(request);
        else {
                ret = engine->add_request(request);
 
-               request->tail = intel_ring_get_tail(ringbuf);
+               request->tail = intel_ring_get_tail(ring);
        }
        /* Not allowed to fail! */
        WARN(ret, "emit|add_request failed: %d!\n", ret);
@@ -2621,7 +2621,7 @@ void __i915_add_request(struct drm_i915_gem_request 
*request,
        intel_mark_busy(dev_priv->dev);
 
        /* Sanity check that the reserved size was large enough. */
-       intel_ring_reserved_space_end(ringbuf);
+       intel_ring_reserved_space_end(ring);
 }
 
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
@@ -2781,7 +2781,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
-       intel_ring_reserved_space_cancel(req->ringbuf);
+       intel_ring_reserved_space_cancel(req->ring);
 
        i915_gem_request_unreference(req);
 }
@@ -2823,7 +2823,7 @@ static void i915_gem_reset_engine_status(struct 
drm_i915_private *dev_priv,
 static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
                                        struct intel_engine_cs *engine)
 {
-       struct intel_ringbuffer *buffer;
+       struct intel_ringbuffer *ring;
 
        while (!list_empty(&engine->active_list)) {
                struct drm_i915_gem_object *obj;
@@ -2876,9 +2876,9 @@ static void i915_gem_reset_engine_cleanup(struct 
drm_i915_private *dev_priv,
         * upon reset is less than when we start. Do one more pass over
         * all the ringbuffers to reset last_retired_head.
         */
-       list_for_each_entry(buffer, &engine->buffers, link) {
-               buffer->last_retired_head = buffer->tail;
-               intel_ring_update_space(buffer);
+       list_for_each_entry(ring, &engine->buffers, link) {
+               ring->last_retired_head = ring->tail;
+               intel_ring_update_space(ring);
        }
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 34397a67b09e..e79f6739e7b0 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1074,11 +1074,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
                         * executed).
                         */
                        if (request)
-                               rbuf = request->ctx->engine[engine->id].ringbuf;
+                               rbuf = request->ctx->engine[engine->id].ring;
                        else
-                               rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
+                               rbuf = dev_priv->kernel_context->engine[engine->id].ring;
                } else
-                       rbuf = engine->buffer;
+                       rbuf = engine->ring;
 
                error->ring[i].cpu_ring_head = rbuf->head;
                error->ring[i].cpu_ring_tail = rbuf->tail;
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index ae1f58d073f2..34225b57bac2 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -415,7 +415,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
                                (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-               obj = ctx->engine[i].ringbuf->obj;
+               obj = ctx->engine[i].ring->obj;
 
                lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
                lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
@@ -550,7 +550,7 @@ static int guc_add_workqueue_item(struct i915_guc_client 
*gc,
                                                             rq->engine);
 
        /* The GuC firmware wants the tail index in QWords, not bytes */
-       tail = rq->ringbuf->tail >> 3;
+       tail = rq->ring->tail >> 3;
        wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
        wqi->fence_id = 0; /*XXX: what fence to be here */
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8f3e3309c3ab..46c03e9e60ee 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2882,7 +2882,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 
*seqno)
         * Therefore, this function does not support execlist mode in its
         * current form. Just return NULL and move on.
         */
-       if (engine->buffer == NULL)
+       if (engine->ring == NULL)
                return NULL;
 
        ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
@@ -2906,10 +2906,10 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 
*seqno)
                 * our ring is smaller than what the hardware (and hence
                 * HEAD_ADDR) allows. Also handles wrap-around.
                 */
-               head &= engine->buffer->size - 1;
+               head &= engine->ring->size - 1;
 
                /* This here seems to blow up */
-               cmd = ioread32(engine->buffer->virtual_start + head);
+               cmd = ioread32(engine->ring->virtual_start + head);
                if (cmd == ipehr)
                        break;
 
@@ -2919,11 +2919,11 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 
*seqno)
        if (!i)
                return NULL;
 
-       *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
+       *seqno = ioread32(engine->ring->virtual_start + head + 4) + 1;
        if (INTEL_INFO(engine->dev)->gen >= 8) {
-               offset = ioread32(engine->buffer->virtual_start + head + 12);
+               offset = ioread32(engine->ring->virtual_start + head + 12);
                offset <<= 32;
-               offset = ioread32(engine->buffer->virtual_start + head + 8);
+               offset = ioread32(engine->ring->virtual_start + head + 8);
        }
        return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index f72782200226..18e734d4c976 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -468,11 +468,11 @@ static void execlists_context_unqueue__locked(struct 
intel_engine_cs *engine)
                 * resubmit the request. See gen8_emit_request() for where we
                 * prepare the padding after the end of the request.
                 */
-               struct intel_ringbuffer *ringbuf;
+               struct intel_ringbuffer *ring;
 
-               ringbuf = req0->ctx->engine[engine->id].ringbuf;
+               ring = req0->ctx->engine[engine->id].ring;
                req0->tail += 8;
-               req0->tail &= ringbuf->size - 1;
+               req0->tail &= ring->size - 1;
        }
 
        execlists_submit_requests(req0, req1);
@@ -703,7 +703,7 @@ int intel_logical_ring_alloc_request_extras(struct 
drm_i915_gem_request *request
 {
        int ret = 0;
 
-       request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
+       request->ring = request->ctx->engine[request->engine->id].ring;
 
        if (i915.enable_guc_submission) {
                /*
@@ -727,17 +727,17 @@ int intel_logical_ring_alloc_request_extras(struct 
drm_i915_gem_request *request
 static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
                                       int bytes)
 {
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_ringbuffer *ring = req->ring;
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *target;
        unsigned space;
        int ret;
 
-       if (intel_ring_space(ringbuf) >= bytes)
+       if (intel_ring_space(ring) >= bytes)
                return 0;
 
        /* The whole point of reserving space is to not wait! */
-       WARN_ON(ringbuf->reserved_in_use);
+       WARN_ON(ring->reserved_in_use);
 
        list_for_each_entry(target, &engine->request_list, list) {
                /*
@@ -745,12 +745,12 @@ static int logical_ring_wait_for_space(struct 
drm_i915_gem_request *req,
                 * from multiple ringbuffers. Here, we must ignore any that
                 * aren't from the ringbuffer we're considering.
                 */
-               if (target->ringbuf != ringbuf)
+               if (target->ring != ring)
                        continue;
 
                /* Would completion of this request free enough space? */
-               space = __intel_ring_space(target->postfix, ringbuf->tail,
-                                          ringbuf->size);
+               space = __intel_ring_space(target->postfix, ring->tail,
+                                          ring->size);
                if (space >= bytes)
                        break;
        }
@@ -762,7 +762,7 @@ static int logical_ring_wait_for_space(struct 
drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       ringbuf->space = space;
+       ring->space = space;
        return 0;
 }
 
@@ -778,12 +778,12 @@ static int logical_ring_wait_for_space(struct 
drm_i915_gem_request *req,
 static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-       struct intel_ringbuffer *ringbuf = request->ringbuf;
+       struct intel_ringbuffer *ring = request->ring;
        struct drm_i915_private *dev_priv = request->i915;
        struct intel_engine_cs *engine = request->engine;
 
-       intel_logical_ring_advance(ringbuf);
-       request->tail = ringbuf->tail;
+       intel_logical_ring_advance(ring);
+       request->tail = ring->tail;
 
        /*
         * Here we add two extra NOOPs as padding to avoid
@@ -791,9 +791,9 @@ intel_logical_ring_advance_and_submit(struct 
drm_i915_gem_request *request)
         *
         * Caller must reserve WA_TAIL_DWORDS for us!
         */
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_logical_ring_emit(ring, MI_NOOP);
+       intel_logical_ring_emit(ring, MI_NOOP);
+       intel_logical_ring_advance(ring);
 
        if (intel_engine_stopped(engine))
                return 0;
@@ -817,32 +817,32 @@ intel_logical_ring_advance_and_submit(struct 
drm_i915_gem_request *request)
        return 0;
 }
 
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ring)
 {
        uint32_t __iomem *virt;
-       int rem = ringbuf->size - ringbuf->tail;
+       int rem = ring->size - ring->tail;
 
-       virt = ringbuf->virtual_start + ringbuf->tail;
+       virt = ring->virtual_start + ring->tail;
        rem /= 4;
        while (rem--)
                iowrite32(MI_NOOP, virt++);
 
-       ringbuf->tail = 0;
-       intel_ring_update_space(ringbuf);
+       ring->tail = 0;
+       intel_ring_update_space(ring);
 }
 
 static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 {
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
-       int remain_usable = ringbuf->effective_size - ringbuf->tail;
-       int remain_actual = ringbuf->size - ringbuf->tail;
+       struct intel_ringbuffer *ring = req->ring;
+       int remain_usable = ring->effective_size - ring->tail;
+       int remain_actual = ring->size - ring->tail;
        int ret, total_bytes, wait_bytes = 0;
        bool need_wrap = false;
 
-       if (ringbuf->reserved_in_use)
+       if (ring->reserved_in_use)
                total_bytes = bytes;
        else
-               total_bytes = bytes + ringbuf->reserved_size;
+               total_bytes = bytes + ring->reserved_size;
 
        if (unlikely(bytes > remain_usable)) {
                /*
@@ -858,9 +858,9 @@ static int logical_ring_prepare(struct drm_i915_gem_request 
*req, int bytes)
                         * falls off the end. So only need to to wait for the
                         * reserved size after flushing out the remainder.
                         */
-                       wait_bytes = remain_actual + ringbuf->reserved_size;
+                       wait_bytes = remain_actual + ring->reserved_size;
                        need_wrap = true;
-               } else if (total_bytes > ringbuf->space) {
+               } else if (total_bytes > ring->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
                }
@@ -872,7 +872,7 @@ static int logical_ring_prepare(struct drm_i915_gem_request 
*req, int bytes)
                        return ret;
 
                if (need_wrap)
-                       __wrap_ring_buffer(ringbuf);
+                       __wrap_ring_buffer(ring);
        }
 
        return 0;
@@ -908,7 +908,7 @@ int intel_logical_ring_begin(struct drm_i915_gem_request 
*req, int num_dwords)
        if (ret)
                return ret;
 
-       req->ringbuf->space -= num_dwords * sizeof(uint32_t);
+       req->ring->space -= num_dwords * sizeof(uint32_t);
        return 0;
 }
 
@@ -922,7 +922,8 @@ int intel_logical_ring_reserve_space(struct 
drm_i915_gem_request *request)
         * adding any commands to it then there might not actually be
         * sufficient room for the submission commands.
         */
-       intel_ring_reserved_space_reserve(request->ringbuf, 
MIN_SPACE_FOR_ADD_REQUEST);
+       intel_ring_reserved_space_reserve(request->ring,
+                                         MIN_SPACE_FOR_ADD_REQUEST);
 
        return intel_logical_ring_begin(request, 0);
 }
@@ -951,7 +952,7 @@ int intel_execlists_submission(struct 
i915_execbuffer_params *params,
        struct drm_device       *dev = params->dev;
        struct intel_engine_cs *engine = params->engine;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
+       struct intel_ringbuffer *ring = params->ctx->engine[engine->id].ring;
        u64 exec_start;
        int instp_mode;
        u32 instp_mask;
@@ -998,11 +999,11 @@ int intel_execlists_submission(struct 
i915_execbuffer_params *params,
                if (ret)
                        return ret;
 
-               intel_logical_ring_emit(ringbuf, MI_NOOP);
-               intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-               intel_logical_ring_emit_reg(ringbuf, INSTPM);
-               intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-               intel_logical_ring_advance(ringbuf);
+               intel_logical_ring_emit(ring, MI_NOOP);
+               intel_logical_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_logical_ring_emit_reg(ring, INSTPM);
+               intel_logical_ring_emit(ring, instp_mask << 16 | instp_mode);
+               intel_logical_ring_advance(ring);
 
                dev_priv->relative_constants_mode = instp_mode;
        }
@@ -1093,7 +1094,7 @@ static int intel_lr_context_do_pin(struct intel_context 
*ctx,
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
-       struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+       struct intel_ringbuffer *ring = ctx->engine[engine->id].ring;
        struct page *lrc_state_page;
        uint32_t *lrc_reg_state;
        int ret;
@@ -1111,14 +1112,14 @@ static int intel_lr_context_do_pin(struct intel_context 
*ctx,
                goto unpin_ctx_obj;
        }
 
-       ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
+       ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ring);
        if (ret)
                goto unpin_ctx_obj;
 
        ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
        intel_lr_context_descriptor_update(ctx, engine);
        lrc_reg_state = kmap(lrc_state_page);
-       lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
+       lrc_reg_state[CTX_RING_BUFFER_START+1] = ring->vma->node.start;
        ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
        ctx_obj->dirty = true;
 
@@ -1161,7 +1162,7 @@ void intel_lr_context_unpin(struct intel_context *ctx,
        WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
        if (--ctx->engine[engine->id].pin_count == 0) {
                kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
-               intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
+               intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ring);
                i915_gem_object_ggtt_unpin(ctx_obj);
                ctx->engine[engine->id].lrc_vma = NULL;
                ctx->engine[engine->id].lrc_desc = 0;
@@ -1175,7 +1176,7 @@ static int intel_logical_ring_workarounds_emit(struct 
drm_i915_gem_request *req)
 {
        int ret, i;
        struct intel_engine_cs *engine = req->engine;
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_ringbuffer *ring = req->ring;
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_workarounds *w = &dev_priv->workarounds;
@@ -1192,14 +1193,14 @@ static int intel_logical_ring_workarounds_emit(struct 
drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+       intel_logical_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
        for (i = 0; i < w->count; i++) {
-               intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
-               intel_logical_ring_emit(ringbuf, w->reg[i].value);
+               intel_logical_ring_emit_reg(ring, w->reg[i].addr);
+               intel_logical_ring_emit(ring, w->reg[i].value);
        }
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
+       intel_logical_ring_emit(ring, MI_NOOP);
 
-       intel_logical_ring_advance(ringbuf);
+       intel_logical_ring_advance(ring);
 
        engine->gpu_caches_dirty = true;
        ret = logical_ring_flush_all_caches(req);
@@ -1648,7 +1649,7 @@ static int intel_logical_ring_emit_pdps(struct 
drm_i915_gem_request *req)
 {
        struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
        struct intel_engine_cs *engine = req->engine;
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_ringbuffer *ring = req->ring;
        const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
        int i, ret;
 
@@ -1656,20 +1657,20 @@ static int intel_logical_ring_emit_pdps(struct 
drm_i915_gem_request *req)
        if (ret)
                return ret;
 
-       intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+       intel_logical_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
        for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
                const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-               intel_logical_ring_emit_reg(ringbuf,
+               intel_logical_ring_emit_reg(ring,
                                            GEN8_RING_PDP_UDW(engine, i));
-               intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-               intel_logical_ring_emit_reg(ringbuf,
+               intel_logical_ring_emit(ring, upper_32_bits(pd_daddr));
+               intel_logical_ring_emit_reg(ring,
                                            GEN8_RING_PDP_LDW(engine, i));
-               intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+               intel_logical_ring_emit(ring, lower_32_bits(pd_daddr));
        }
 
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_logical_ring_emit(ring, MI_NOOP);
+       intel_logical_ring_advance(ring);
 
        return 0;
 }
@@ -1677,7 +1678,7 @@ static int intel_logical_ring_emit_pdps(struct 
drm_i915_gem_request *req)
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
                              u64 offset, unsigned dispatch_flags)
 {
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_ringbuffer *ring = req->ring;
        bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
        int ret;
 
@@ -1704,14 +1705,14 @@ static int gen8_emit_bb_start(struct 
drm_i915_gem_request *req,
                return ret;
 
        /* FIXME(BDW): Address space and security selectors. */
-       intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
+       intel_logical_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
                                (ppgtt<<8) |
                                (dispatch_flags & I915_DISPATCH_RS ?
                                 MI_BATCH_RESOURCE_STREAMER : 0));
-       intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
-       intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_logical_ring_emit(ring, lower_32_bits(offset));
+       intel_logical_ring_emit(ring, upper_32_bits(offset));
+       intel_logical_ring_emit(ring, MI_NOOP);
+       intel_logical_ring_advance(ring);
 
        return 0;
 }
@@ -1754,8 +1755,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request 
*request,
                           u32 invalidate_domains,
                           u32 unused)
 {
-       struct intel_ringbuffer *ringbuf = request->ringbuf;
-       struct intel_engine_cs *engine = ringbuf->engine;
+       struct intel_ringbuffer *ring = request->ring;
+       struct intel_engine_cs *engine = ring->engine;
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t cmd;
@@ -1780,13 +1781,13 @@ static int gen8_emit_flush(struct drm_i915_gem_request 
*request,
                        cmd |= MI_INVALIDATE_BSD;
        }
 
-       intel_logical_ring_emit(ringbuf, cmd);
-       intel_logical_ring_emit(ringbuf,
+       intel_logical_ring_emit(ring, cmd);
+       intel_logical_ring_emit(ring,
                                I915_GEM_HWS_SCRATCH_ADDR |
                                MI_FLUSH_DW_USE_GTT);
-       intel_logical_ring_emit(ringbuf, 0); /* upper addr */
-       intel_logical_ring_emit(ringbuf, 0); /* value */
-       intel_logical_ring_advance(ringbuf);
+       intel_logical_ring_emit(ring, 0); /* upper addr */
+       intel_logical_ring_emit(ring, 0); /* value */
+       intel_logical_ring_advance(ring);
 
        return 0;
 }
@@ -1795,8 +1796,8 @@ static int gen8_emit_flush_render(struct 
drm_i915_gem_request *request,
                                  u32 invalidate_domains,
                                  u32 flush_domains)
 {
-       struct intel_ringbuffer *ringbuf = request->ringbuf;
-       struct intel_engine_cs *engine = ringbuf->engine;
+       struct intel_ringbuffer *ring = request->ring;
+       struct intel_engine_cs *engine = ring->engine;
        u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        bool vf_flush_wa = false;
        u32 flags = 0;
@@ -1834,21 +1835,21 @@ static int gen8_emit_flush_render(struct 
drm_i915_gem_request *request,
                return ret;
 
        if (vf_flush_wa) {
-               intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-               intel_logical_ring_emit(ringbuf, 0);
-       }
-
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-       intel_logical_ring_emit(ringbuf, flags);
-       intel_logical_ring_emit(ringbuf, scratch_addr);
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_advance(ringbuf);
+               intel_logical_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+               intel_logical_ring_emit(ring, 0);
+               intel_logical_ring_emit(ring, 0);
+               intel_logical_ring_emit(ring, 0);
+               intel_logical_ring_emit(ring, 0);
+               intel_logical_ring_emit(ring, 0);
+       }
+
+       intel_logical_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+       intel_logical_ring_emit(ring, flags);
+       intel_logical_ring_emit(ring, scratch_addr);
+       intel_logical_ring_emit(ring, 0);
+       intel_logical_ring_emit(ring, 0);
+       intel_logical_ring_emit(ring, 0);
+       intel_logical_ring_advance(ring);
 
        return 0;
 }
@@ -1906,7 +1907,7 @@ static inline u32 hws_seqno_address(struct 
intel_engine_cs *engine)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-       struct intel_ringbuffer *ringbuf = request->ringbuf;
+       struct intel_ringbuffer *ring = request->ring;
        int ret;
 
        ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1916,21 +1917,21 @@ static int gen8_emit_request(struct 
drm_i915_gem_request *request)
        /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
        BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-       intel_logical_ring_emit(ringbuf,
+       intel_logical_ring_emit(ring,
                                (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
-       intel_logical_ring_emit(ringbuf,
+       intel_logical_ring_emit(ring,
                                hws_seqno_address(request->engine) |
                                MI_FLUSH_DW_USE_GTT);
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
-       intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
+       intel_logical_ring_emit(ring, 0);
+       intel_logical_ring_emit(ring, i915_gem_request_get_seqno(request));
+       intel_logical_ring_emit(ring, MI_USER_INTERRUPT);
+       intel_logical_ring_emit(ring, MI_NOOP);
        return intel_logical_ring_advance_and_submit(request);
 }
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-       struct intel_ringbuffer *ringbuf = request->ringbuf;
+       struct intel_ringbuffer *ring = request->ring;
        int ret;
 
        ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1941,15 +1942,15 @@ static int gen8_emit_request_render(struct 
drm_i915_gem_request *request)
         * need a prior CS_STALL, which is emitted by the flush
         * following the batch.
         */
-       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
-       intel_logical_ring_emit(ringbuf,
+       intel_logical_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+       intel_logical_ring_emit(ring,
                                (PIPE_CONTROL_GLOBAL_GTT_IVB |
                                 PIPE_CONTROL_CS_STALL |
                                 PIPE_CONTROL_QW_WRITE));
-       intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
-       intel_logical_ring_emit(ringbuf, 0);
-       intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
-       intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+       intel_logical_ring_emit(ring, hws_seqno_address(request->engine));
+       intel_logical_ring_emit(ring, 0);
+       intel_logical_ring_emit(ring, i915_gem_request_get_seqno(request));
+       intel_logical_ring_emit(ring, MI_USER_INTERRUPT);
        return intel_logical_ring_advance_and_submit(request);
 }
 
@@ -2017,7 +2018,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs 
*engine)
 
        dev_priv = engine->dev->dev_private;
 
-       if (engine->buffer) {
+       if (engine->ring) {
                intel_logical_ring_stop(engine);
                WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
        }
@@ -2075,7 +2076,7 @@ logical_ring_init(struct drm_device *dev, struct 
intel_engine_cs *engine)
        int ret;
 
        /* Intentionally left blank. */
-       engine->buffer = NULL;
+       engine->ring = NULL;
 
        engine->dev = dev;
        INIT_LIST_HEAD(&engine->active_list);
@@ -2359,7 +2360,7 @@ static u32 intel_lr_indirect_ctx_offset(struct 
intel_engine_cs *engine)
 static int
 populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object 
*ctx_obj,
                    struct intel_engine_cs *engine,
-                   struct intel_ringbuffer *ringbuf)
+                   struct intel_ringbuffer *ring)
 {
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2414,7 +2415,7 @@ populate_lr_context(struct intel_context *ctx, struct 
drm_i915_gem_object *ctx_o
                       RING_START(engine->mmio_base), 0);
        ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
                       RING_CTL(engine->mmio_base),
-                      ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+                      ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
        ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
                       RING_BBADDR_UDW(engine->mmio_base), 0);
        ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2512,19 +2513,19 @@ void intel_lr_context_free(struct intel_context *ctx)
        int i;
 
        for (i = I915_NUM_ENGINES; --i >= 0; ) {
-               struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
+               struct intel_ringbuffer *ring = ctx->engine[i].ring;
                struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
                if (!ctx_obj)
                        continue;
 
                if (ctx == ctx->i915->kernel_context) {
-                       intel_unpin_ringbuffer_obj(ringbuf);
+                       intel_unpin_ringbuffer_obj(ring);
                        i915_gem_object_ggtt_unpin(ctx_obj);
                }
 
                WARN_ON(ctx->engine[i].pin_count);
-               intel_ringbuffer_free(ringbuf);
+               intel_ringbuffer_free(ring);
                drm_gem_object_unreference(&ctx_obj->base);
        }
 }
@@ -2605,7 +2606,7 @@ int intel_lr_context_deferred_alloc(struct intel_context 
*ctx,
        struct drm_device *dev = engine->dev;
        struct drm_i915_gem_object *ctx_obj;
        uint32_t context_size;
-       struct intel_ringbuffer *ringbuf;
+       struct intel_ringbuffer *ring;
        int ret;
 
        WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
@@ -2622,19 +2623,19 @@ int intel_lr_context_deferred_alloc(struct 
intel_context *ctx,
                return -ENOMEM;
        }
 
-       ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
-       if (IS_ERR(ringbuf)) {
-               ret = PTR_ERR(ringbuf);
+       ring = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
+       if (IS_ERR(ring)) {
+               ret = PTR_ERR(ring);
                goto error_deref_obj;
        }
 
-       ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
+       ret = populate_lr_context(ctx, ctx_obj, engine, ring);
        if (ret) {
                DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
                goto error_ringbuf;
        }
 
-       ctx->engine[engine->id].ringbuf = ringbuf;
+       ctx->engine[engine->id].ring = ring;
        ctx->engine[engine->id].state = ctx_obj;
 
        if (ctx != ctx->i915->kernel_context && engine->init_context) {
@@ -2659,10 +2660,10 @@ int intel_lr_context_deferred_alloc(struct 
intel_context *ctx,
        return 0;
 
 error_ringbuf:
-       intel_ringbuffer_free(ringbuf);
+       intel_ringbuffer_free(ring);
 error_deref_obj:
        drm_gem_object_unreference(&ctx_obj->base);
-       ctx->engine[engine->id].ringbuf = NULL;
+       ctx->engine[engine->id].ring = NULL;
        ctx->engine[engine->id].state = NULL;
        return ret;
 }
@@ -2677,8 +2678,8 @@ void intel_lr_context_reset(struct drm_device *dev,
        for_each_engine(engine, dev_priv, i) {
                struct drm_i915_gem_object *ctx_obj =
                                ctx->engine[engine->id].state;
-               struct intel_ringbuffer *ringbuf =
-                               ctx->engine[engine->id].ringbuf;
+               struct intel_ringbuffer *ring =
+                               ctx->engine[engine->id].ring;
                uint32_t *reg_state;
                struct page *page;
 
@@ -2697,7 +2698,7 @@ void intel_lr_context_reset(struct drm_device *dev,
 
                kunmap_atomic(reg_state);
 
-               ringbuf->head = 0;
-               ringbuf->tail = 0;
+               ring->head = 0;
+               ring->tail = 0;
        }
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index a17cb12221ba..cb1a2db16cd1 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -69,25 +69,25 @@ int logical_ring_flush_all_caches(struct 
drm_i915_gem_request *req);
  *
  * The tail is only updated in our logical ringbuffer struct.
  */
-static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
+static inline void intel_logical_ring_advance(struct intel_ringbuffer *ring)
 {
-       ringbuf->tail &= ringbuf->size - 1;
+       ring->tail &= ring->size - 1;
 }
 /**
  * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
  * @ringbuf: Ringbuffer to write to.
  * @data: DWORD to write.
  */
-static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
+static inline void intel_logical_ring_emit(struct intel_ringbuffer *ring,
                                           u32 data)
 {
-       iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
-       ringbuf->tail += 4;
+       iowrite32(data, ring->virtual_start + ring->tail);
+       ring->tail += 4;
 }
-static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
+static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ring,
                                               i915_reg_t reg)
 {
-       intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
+       intel_logical_ring_emit(ring, i915_mmio_reg_offset(reg));
 }
 
 /* Logical Ring Contexts */
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 3c725dde16ed..6e7e4ca0ec7f 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -191,9 +191,9 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, 
int index)
  */
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
                                   const struct drm_i915_mocs_table *table,
-                                  enum intel_engine_id ring)
+                                  enum intel_engine_id engine_id)
 {
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_ringbuffer *ring = req->ring;
        unsigned int index;
        int ret;
 
@@ -206,12 +206,13 @@ static int emit_mocs_control_table(struct 
drm_i915_gem_request *req,
                return ret;
        }
 
-       intel_logical_ring_emit(ringbuf,
+       intel_logical_ring_emit(ring,
                                MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
        for (index = 0; index < table->size; index++) {
-               intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
-               intel_logical_ring_emit(ringbuf,
+               intel_logical_ring_emit_reg(ring,
+                                           mocs_register(engine_id, index));
+               intel_logical_ring_emit(ring,
                                        table->table[index].control_value);
        }
 
@@ -224,12 +225,13 @@ static int emit_mocs_control_table(struct 
drm_i915_gem_request *req,
         * that value to all the used entries.
         */
        for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-               intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
-               intel_logical_ring_emit(ringbuf, table->table[0].control_value);
+               intel_logical_ring_emit_reg(ring,
+                                           mocs_register(engine_id, index));
+               intel_logical_ring_emit(ring, table->table[0].control_value);
        }
 
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_logical_ring_emit(ring, MI_NOOP);
+       intel_logical_ring_advance(ring);
 
        return 0;
 }
@@ -248,7 +250,7 @@ static int emit_mocs_control_table(struct 
drm_i915_gem_request *req,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
                                const struct drm_i915_mocs_table *table)
 {
-       struct intel_ringbuffer *ringbuf = req->ringbuf;
+       struct intel_ringbuffer *ring = req->ring;
        unsigned int count;
        unsigned int i;
        u32 value;
@@ -265,15 +267,15 @@ static int emit_mocs_l3cc_table(struct 
drm_i915_gem_request *req,
                return ret;
        }
 
-       intel_logical_ring_emit(ringbuf,
-                       MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+       intel_logical_ring_emit(ring,
+                               MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
        for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
                value = (table->table[count].l3cc_value & 0xffff) |
                        ((table->table[count + 1].l3cc_value & 0xffff) << 16);
 
-               intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-               intel_logical_ring_emit(ringbuf, value);
+               intel_logical_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+               intel_logical_ring_emit(ring, value);
        }
 
        if (table->size & 0x01) {
@@ -289,14 +291,14 @@ static int emit_mocs_l3cc_table(struct 
drm_i915_gem_request *req,
         * they are reserved by the hardware.
         */
        for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-               intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-               intel_logical_ring_emit(ringbuf, value);
+               intel_logical_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+               intel_logical_ring_emit(ring, value);
 
                value = filler;
        }
 
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+       intel_logical_ring_emit(ring, MI_NOOP);
+       intel_logical_ring_advance(ring);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 015dc7db32b7..770169b42196 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -42,21 +42,21 @@ int __intel_ring_space(int head, int tail, int size)
        return space - I915_RING_FREE_SPACE;
 }
 
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+void intel_ring_update_space(struct intel_ringbuffer *ring)
 {
-       if (ringbuf->last_retired_head != -1) {
-               ringbuf->head = ringbuf->last_retired_head;
-               ringbuf->last_retired_head = -1;
+       if (ring->last_retired_head != -1) {
+               ring->head = ring->last_retired_head;
+               ring->last_retired_head = -1;
        }
 
-       ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
-                                           ringbuf->tail, ringbuf->size);
+       ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
+                                           ring->tail, ring->size);
 }
 
-int intel_ring_space(struct intel_ringbuffer *ringbuf)
+int intel_ring_space(struct intel_ringbuffer *ring)
 {
-       intel_ring_update_space(ringbuf);
-       return ringbuf->space;
+       intel_ring_update_space(ring);
+       return ring->space;
 }
 
 bool intel_engine_stopped(struct intel_engine_cs *engine)
@@ -67,11 +67,11 @@ bool intel_engine_stopped(struct intel_engine_cs *engine)
 
 static void __intel_ring_advance(struct intel_engine_cs *engine)
 {
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       ringbuf->tail &= ringbuf->size - 1;
+       struct intel_ringbuffer *ring = engine->ring;
+       ring->tail &= ring->size - 1;
        if (intel_engine_stopped(engine))
                return;
-       engine->write_tail(engine, ringbuf->tail);
+       engine->write_tail(engine, ring->tail);
 }
 
 static int
@@ -559,8 +559,8 @@ static int init_ring_common(struct intel_engine_cs *engine)
 {
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       struct drm_i915_gem_object *obj = ringbuf->obj;
+       struct intel_ringbuffer *ring = engine->ring;
+       struct drm_i915_gem_object *obj = ring->obj;
        int ret = 0;
 
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -610,7 +610,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
        (void)I915_READ_HEAD(engine);
 
        I915_WRITE_CTL(engine,
-                       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
+                       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                        | RING_VALID);
 
        /* If the head is still not zero, the ring is dead */
@@ -629,10 +629,10 @@ static int init_ring_common(struct intel_engine_cs 
*engine)
                goto out;
        }
 
-       ringbuf->last_retired_head = -1;
-       ringbuf->head = I915_READ_HEAD(engine);
-       ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
-       intel_ring_update_space(ringbuf);
+       ring->last_retired_head = -1;
+       ring->head = I915_READ_HEAD(engine);
+       ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
+       intel_ring_update_space(ring);
 
        memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
 
@@ -2065,15 +2065,15 @@ static int init_phys_status_page(struct intel_engine_cs 
*engine)
        return 0;
 }
 
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ring)
 {
-       if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
-               vunmap(ringbuf->virtual_start);
+       if (HAS_LLC(ring->obj->base.dev) && !ring->obj->stolen)
+               vunmap(ring->virtual_start);
        else
-               iounmap(ringbuf->virtual_start);
-       ringbuf->virtual_start = NULL;
-       ringbuf->vma = NULL;
-       i915_gem_object_ggtt_unpin(ringbuf->obj);
+               iounmap(ring->virtual_start);
+       ring->virtual_start = NULL;
+       ring->vma = NULL;
+       i915_gem_object_ggtt_unpin(ring->obj);
 }
 
 static u32 *vmap_obj(struct drm_i915_gem_object *obj)
@@ -2098,10 +2098,10 @@ static u32 *vmap_obj(struct drm_i915_gem_object *obj)
 }
 
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
-                                    struct intel_ringbuffer *ringbuf)
+                                    struct intel_ringbuffer *ring)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_i915_gem_object *obj = ringbuf->obj;
+       struct drm_i915_gem_object *obj = ring->obj;
        int ret;
 
        if (HAS_LLC(dev_priv) && !obj->stolen) {
@@ -2115,8 +2115,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device 
*dev,
                        return ret;
                }
 
-               ringbuf->virtual_start = vmap_obj(obj);
-               if (ringbuf->virtual_start == NULL) {
+               ring->virtual_start = vmap_obj(obj);
+               if (ring->virtual_start == NULL) {
                        i915_gem_object_ggtt_unpin(obj);
                        return -ENOMEM;
                }
@@ -2134,42 +2134,43 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device 
*dev,
                /* Access through the GTT requires the device to be awake. */
                assert_rpm_wakelock_held(dev_priv);
 
-               ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
-                                                   i915_gem_obj_ggtt_offset(obj), ringbuf->size);
-               if (ringbuf->virtual_start == NULL) {
+               ring->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+                                                   i915_gem_obj_ggtt_offset(obj),
+                                                   ring->size);
+               if (ring->virtual_start == NULL) {
                        i915_gem_object_ggtt_unpin(obj);
                        return -EINVAL;
                }
        }
 
-       ringbuf->vma = i915_gem_obj_to_ggtt(obj);
+       ring->vma = i915_gem_obj_to_ggtt(obj);
 
        return 0;
 }
 
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ring)
 {
-       drm_gem_object_unreference(&ringbuf->obj->base);
-       ringbuf->obj = NULL;
+       drm_gem_object_unreference(&ring->obj->base);
+       ring->obj = NULL;
 }
 
 static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-                                     struct intel_ringbuffer *ringbuf)
+                                     struct intel_ringbuffer *ring)
 {
        struct drm_i915_gem_object *obj;
 
        obj = NULL;
        if (!HAS_LLC(dev))
-               obj = i915_gem_object_create_stolen(dev, ringbuf->size);
+               obj = i915_gem_object_create_stolen(dev, ring->size);
        if (obj == NULL)
-               obj = i915_gem_alloc_object(dev, ringbuf->size);
+               obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL)
                return -ENOMEM;
 
        /* mark ring buffers as read-only from GPU side by default */
        obj->gt_ro = 1;
 
-       ringbuf->obj = obj;
+       ring->obj = obj;
 
        return 0;
 }
@@ -2225,10 +2226,10 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
 static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_engine_cs *engine)
 {
-       struct intel_ringbuffer *ringbuf;
+       struct intel_ringbuffer *ring;
        int ret;
 
-       WARN_ON(engine->buffer);
+       WARN_ON(engine->ring);
 
        engine->dev = dev;
        INIT_LIST_HEAD(&engine->active_list);
@@ -2241,12 +2242,12 @@ static int intel_init_ring_buffer(struct drm_device 
*dev,
 
        init_waitqueue_head(&engine->irq_queue);
 
-       ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
-       if (IS_ERR(ringbuf)) {
-               ret = PTR_ERR(ringbuf);
+       ring = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
+       if (IS_ERR(ring)) {
+               ret = PTR_ERR(ring);
                goto error;
        }
-       engine->buffer = ringbuf;
+       engine->ring = ring;
 
        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(engine);
@@ -2259,11 +2260,11 @@ static int intel_init_ring_buffer(struct drm_device 
*dev,
                        goto error;
        }
 
-       ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
+       ret = intel_pin_and_map_ringbuffer_obj(dev, ring);
        if (ret) {
                DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
                                engine->name, ret);
-               intel_destroy_ringbuffer_obj(ringbuf);
+               intel_destroy_ringbuffer_obj(ring);
                goto error;
        }
 
@@ -2287,13 +2288,13 @@ void intel_cleanup_engine(struct intel_engine_cs 
*engine)
 
        dev_priv = to_i915(engine->dev);
 
-       if (engine->buffer) {
+       if (engine->ring) {
                intel_stop_engine(engine);
                WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-               intel_unpin_ringbuffer_obj(engine->buffer);
-               intel_ringbuffer_free(engine->buffer);
-               engine->buffer = NULL;
+               intel_unpin_ringbuffer_obj(engine->ring);
+               intel_ringbuffer_free(engine->ring);
+               engine->ring = NULL;
        }
 
        if (engine->cleanup)
@@ -2313,20 +2314,20 @@ void intel_cleanup_engine(struct intel_engine_cs 
*engine)
 
 static int ring_wait_for_space(struct intel_engine_cs *engine, int n)
 {
-       struct intel_ringbuffer *ringbuf = engine->buffer;
+       struct intel_ringbuffer *ring = engine->ring;
        struct drm_i915_gem_request *request;
        unsigned space;
        int ret;
 
-       if (intel_ring_space(ringbuf) >= n)
+       if (intel_ring_space(ring) >= n)
                return 0;
 
        /* The whole point of reserving space is to not wait! */
-       WARN_ON(ringbuf->reserved_in_use);
+       WARN_ON(ring->reserved_in_use);
 
        list_for_each_entry(request, &engine->request_list, list) {
-               space = __intel_ring_space(request->postfix, ringbuf->tail,
-                                          ringbuf->size);
+               space = __intel_ring_space(request->postfix, ring->tail,
+                                          ring->size);
                if (space >= n)
                        break;
        }
@@ -2338,22 +2339,22 @@ static int ring_wait_for_space(struct intel_engine_cs 
*engine, int n)
        if (ret)
                return ret;
 
-       ringbuf->space = space;
+       ring->space = space;
        return 0;
 }
 
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ring)
 {
        uint32_t __iomem *virt;
-       int rem = ringbuf->size - ringbuf->tail;
+       int rem = ring->size - ring->tail;
 
-       virt = ringbuf->virtual_start + ringbuf->tail;
+       virt = ring->virtual_start + ring->tail;
        rem /= 4;
        while (rem--)
                iowrite32(MI_NOOP, virt++);
 
-       ringbuf->tail = 0;
-       intel_ring_update_space(ringbuf);
+       ring->tail = 0;
+       intel_ring_update_space(ring);
 }
 
 int intel_engine_idle(struct intel_engine_cs *engine)
@@ -2377,7 +2378,7 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-       request->ringbuf = request->engine->buffer;
+       request->ring = request->engine->ring;
        return 0;
 }
 
@@ -2391,42 +2392,44 @@ int intel_ring_reserve_space(struct 
drm_i915_gem_request *request)
         * adding any commands to it then there might not actually be
         * sufficient room for the submission commands.
         */
-       intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+       intel_ring_reserved_space_reserve(request->ring,
+                                         MIN_SPACE_FOR_ADD_REQUEST);
 
        return intel_ring_begin(request, 0);
 }
 
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ring,
+                                      int size)
 {
-       WARN_ON(ringbuf->reserved_size);
-       WARN_ON(ringbuf->reserved_in_use);
+       WARN_ON(ring->reserved_size);
+       WARN_ON(ring->reserved_in_use);
 
-       ringbuf->reserved_size = size;
+       ring->reserved_size = size;
 }
 
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ring)
 {
-       WARN_ON(ringbuf->reserved_in_use);
+       WARN_ON(ring->reserved_in_use);
 
-       ringbuf->reserved_size   = 0;
-       ringbuf->reserved_in_use = false;
+       ring->reserved_size   = 0;
+       ring->reserved_in_use = false;
 }
 
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ring)
 {
-       WARN_ON(ringbuf->reserved_in_use);
+       WARN_ON(ring->reserved_in_use);
 
-       ringbuf->reserved_in_use = true;
-       ringbuf->reserved_tail   = ringbuf->tail;
+       ring->reserved_in_use = true;
+       ring->reserved_tail   = ring->tail;
 }
 
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ring)
 {
-       WARN_ON(!ringbuf->reserved_in_use);
-       if (ringbuf->tail > ringbuf->reserved_tail) {
-               WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+       WARN_ON(!ring->reserved_in_use);
+       if (ring->tail > ring->reserved_tail) {
+               WARN(ring->tail > ring->reserved_tail + ring->reserved_size,
                     "request reserved size too small: %d vs %d!\n",
-                    ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+                    ring->tail - ring->reserved_tail, ring->reserved_size);
        } else {
                /*
                 * The ring was wrapped while the reserved space was in use.
@@ -2439,22 +2442,22 @@ void intel_ring_reserved_space_end(struct 
intel_ringbuffer *ringbuf)
                 */
        }
 
-       ringbuf->reserved_size   = 0;
-       ringbuf->reserved_in_use = false;
+       ring->reserved_size   = 0;
+       ring->reserved_in_use = false;
 }
 
 static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
 {
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       int remain_usable = ringbuf->effective_size - ringbuf->tail;
-       int remain_actual = ringbuf->size - ringbuf->tail;
+       struct intel_ringbuffer *ring = engine->ring;
+       int remain_usable = ring->effective_size - ring->tail;
+       int remain_actual = ring->size - ring->tail;
        int ret, total_bytes, wait_bytes = 0;
        bool need_wrap = false;
 
-       if (ringbuf->reserved_in_use)
+       if (ring->reserved_in_use)
                total_bytes = bytes;
        else
-               total_bytes = bytes + ringbuf->reserved_size;
+               total_bytes = bytes + ring->reserved_size;
 
        if (unlikely(bytes > remain_usable)) {
                /*
@@ -2470,9 +2473,9 @@ static int __intel_ring_prepare(struct intel_engine_cs 
*engine, int bytes)
                         * falls off the end. So only need to to wait for the
                         * reserved size after flushing out the remainder.
                         */
-                       wait_bytes = remain_actual + ringbuf->reserved_size;
+                       wait_bytes = remain_actual + ring->reserved_size;
                        need_wrap = true;
-               } else if (total_bytes > ringbuf->space) {
+               } else if (total_bytes > ring->space) {
                        /* No wrapping required, just waiting. */
                        wait_bytes = total_bytes;
                }
@@ -2484,7 +2487,7 @@ static int __intel_ring_prepare(struct intel_engine_cs 
*engine, int bytes)
                        return ret;
 
                if (need_wrap)
-                       __wrap_ring_buffer(ringbuf);
+                       __wrap_ring_buffer(ring);
        }
 
        return 0;
@@ -2510,7 +2513,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
        if (ret)
                return ret;
 
-       engine->buffer->space -= num_dwords * sizeof(uint32_t);
+       engine->ring->space -= num_dwords * sizeof(uint32_t);
        return 0;
 }
 
@@ -2518,7 +2521,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req,
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
        struct intel_engine_cs *engine = req->engine;
-       int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+       int num_dwords = (engine->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
        int ret;
 
        if (num_dwords == 0)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3e40f7bf2147..ce7337b0aa14 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -159,7 +159,7 @@ struct  intel_engine_cs {
        unsigned int guc_id;
        u32             mmio_base;
        struct          drm_device *dev;
-       struct intel_ringbuffer *buffer;
+       struct intel_ringbuffer *ring;
        struct list_head buffers;
 
        /*
@@ -434,8 +434,8 @@ intel_write_status_page(struct intel_engine_cs *engine,
 struct intel_ringbuffer *
 intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
-                                    struct intel_ringbuffer *ringbuf);
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+                                    struct intel_ringbuffer *ring);
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ring);
 void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
 void intel_stop_engine(struct intel_engine_cs *engine);
@@ -448,9 +448,9 @@ int __must_check intel_ring_cacheline_align(struct 
drm_i915_gem_request *req);
 static inline void intel_ring_emit(struct intel_engine_cs *engine,
                                   u32 data)
 {
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
-       ringbuf->tail += 4;
+       struct intel_ringbuffer *ring = engine->ring;
+       iowrite32(data, ring->virtual_start + ring->tail);
+       ring->tail += 4;
 }
 static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
                                       i915_reg_t reg)
@@ -459,12 +459,12 @@ static inline void intel_ring_emit_reg(struct 
intel_engine_cs *engine,
 }
 static inline void intel_ring_advance(struct intel_engine_cs *engine)
 {
-       struct intel_ringbuffer *ringbuf = engine->buffer;
-       ringbuf->tail &= ringbuf->size - 1;
+       struct intel_ringbuffer *ring = engine->ring;
+       ring->tail &= ring->size - 1;
 }
 int __intel_ring_space(int head, int tail, int size);
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-int intel_ring_space(struct intel_ringbuffer *ringbuf);
+void intel_ring_update_space(struct intel_ringbuffer *ring);
+int intel_ring_space(struct intel_ringbuffer *ring);
 bool intel_engine_stopped(struct intel_engine_cs *engine);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
@@ -485,9 +485,9 @@ u64 intel_ring_get_active_head(struct intel_engine_cs 
*engine);
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
-static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
+static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ring)
 {
-       return ringbuf->tail;
+       return ring->tail;
 }
 
 /*
@@ -504,13 +504,14 @@ static inline u32 intel_ring_get_tail(struct 
intel_ringbuffer *ringbuf)
  * will always have sufficient room to do its stuff. The request creation
  * code calls this automatically.
  */
-void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
+void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ring,
+                                      int size);
 /* Cancel the reservation, e.g. because the request is being discarded. */
-void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
+void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ring);
 /* Use the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
+void intel_ring_reserved_space_use(struct intel_ringbuffer *ring);
 /* Finish with the reserved space - for use by i915_add_request() only. */
-void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
+void intel_ring_reserved_space_end(struct intel_ringbuffer *ring);
 
 /* Legacy ringbuffer specific portion of reservation code: */
 int intel_ring_reserve_space(struct drm_i915_gem_request *request);
-- 
1.9.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to