From: Dave Gordon <david.s.gor...@intel.com>

Keep a local copy of the request pointer in the _final() functions
rather than dereferencing the params block repeatedly.
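
As a simplified, self-contained sketch of the pattern being applied (the
struct and helper names below are made up for illustration and are not
the driver's API):

    struct request { int id; };
    struct params  { struct request *request; };

    /* Hypothetical stand-ins for the real flush/context-switch calls. */
    static int do_flush(struct request *req)  { return req ? 0 : -1; }
    static int do_switch(struct request *req) { return req ? 0 : -1; }

    static int submit_final(struct params *params)
    {
            /* Dereference params->request once; reuse the local copy. */
            struct request *req = params->request;
            int ret;

            ret = do_flush(req);
            if (ret)
                    return ret;

            return do_switch(req);
    }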

v3: New patch in series.

For: VIZ-1587
Signed-off-by: Dave Gordon <david.s.gor...@intel.com>
Signed-off-by: John Harrison <john.c.harri...@intel.com>
Reviewed-by: Jesse Barnes <jbar...@virtuousgeek.org>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 13 +++++++------
 drivers/gpu/drm/i915/intel_lrc.c           | 11 ++++++-----
 2 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3071a2b..7978dae 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1291,6 +1291,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
 {
        struct drm_i915_private *dev_priv = params->dev->dev_private;
+       struct drm_i915_gem_request *req = params->request;
        struct intel_engine_cs  *ring = params->ring;
        u64 exec_start, exec_len;
        int ret;
@@ -1302,12 +1303,12 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
         * Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
-       ret = intel_ring_invalidate_all_caches(params->request);
+       ret = intel_ring_invalidate_all_caches(req);
        if (ret)
                return ret;
 
        /* Switch to the correct context for the batch */
-       ret = i915_switch_context(params->request);
+       ret = i915_switch_context(req);
        if (ret)
                return ret;
 
@@ -1316,7 +1317,7 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
 
        if (ring == &dev_priv->ring[RCS] &&
            params->instp_mode != dev_priv->relative_constants_mode) {
-               ret = intel_ring_begin(params->request, 4);
+               ret = intel_ring_begin(req, 4);
                if (ret)
                        return ret;
 
@@ -1330,7 +1331,7 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
        }
 
        if (params->args_flags & I915_EXEC_GEN7_SOL_RESET) {
-               ret = i915_reset_gen7_sol_offsets(params->dev, params->request);
+               ret = i915_reset_gen7_sol_offsets(params->dev, req);
                if (ret)
                        return ret;
        }
@@ -1342,13 +1343,13 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
        if (exec_len == 0)
                exec_len = params->batch_obj->base.size;
 
-       ret = ring->dispatch_execbuffer(params->request,
+       ret = ring->dispatch_execbuffer(req,
                                        exec_start, exec_len,
                                        params->dispatch_flags);
        if (ret)
                return ret;
 
-       trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
+       trace_i915_gem_ring_dispatch(req, params->dispatch_flags);
 
        i915_gem_execbuffer_retire_commands(params);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2246a16..12e8949 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -973,7 +973,8 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 int intel_execlists_submission_final(struct i915_execbuffer_params *params)
 {
        struct drm_i915_private *dev_priv = params->dev->dev_private;
-       struct intel_ringbuffer *ringbuf = params->request->ringbuf;
+       struct drm_i915_gem_request *req = params->request;
+       struct intel_ringbuffer *ringbuf = req->ringbuf;
        struct intel_engine_cs *ring = params->ring;
        u64 exec_start;
        int ret;
@@ -985,13 +986,13 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
         * Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
-       ret = logical_ring_invalidate_all_caches(params->request);
+       ret = logical_ring_invalidate_all_caches(req);
        if (ret)
                return ret;
 
        if (ring == &dev_priv->ring[RCS] &&
            params->instp_mode != dev_priv->relative_constants_mode) {
-               ret = intel_logical_ring_begin(params->request, 4);
+               ret = intel_logical_ring_begin(req, 4);
                if (ret)
                        return ret;
 
@@ -1007,11 +1008,11 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
        exec_start = params->batch_obj_vm_offset +
                     params->args_batch_start_offset;
 
-       ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+       ret = ring->emit_bb_start(req, exec_start, params->dispatch_flags);
        if (ret)
                return ret;
 
-       trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
+       trace_i915_gem_ring_dispatch(req, params->dispatch_flags);
 
        i915_gem_execbuffer_retire_commands(params);
 
-- 
1.9.1
