From: Ville Syrjälä <ville.syrj...@linux.intel.com>

The FBC RT address is saved as part of the context, so track it
the same way in the driver to skip pointless LRIs. Also try to
make sure we re-emit the render/blitter tracking state on driver
load and GPU reset.

Signed-off-by: Ville Syrjälä <ville.syrj...@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            |  1 +
 drivers/gpu/drm/i915/i915_gem.c            |  3 +++
 drivers/gpu/drm/i915/i915_gem_context.c    | 21 ++++++++++++++-------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 27 +++++++++++++++++----------
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 17 ++++++++++-------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  7 +++++--
 6 files changed, 50 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a0e615e..c6401ba 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -587,6 +587,7 @@ struct i915_ctx_hang_stats {
 #define DEFAULT_CONTEXT_ID 0
 struct intel_context {
        struct kref ref;
+       unsigned long fbc_address;
        int id;
        bool is_initialized;
        uint8_t remap_slice;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a1753dc..3fac2c6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2563,6 +2563,9 @@ static void i915_gem_reset_ring_cleanup(struct 
drm_i915_private *dev_priv,
        ring->preallocated_lazy_request = NULL;
        ring->outstanding_lazy_seqno = 0;
 
+       /* current state unknown, so force an FBC RT address update */
+       ring->fbc_address = I915_FBC_RT_RESET;
+
        spin_lock_irq(&ring->lock);
        list_for_each_entry_safe(notify, next, &ring->notify_list, list) {
                intel_ring_notify_complete(notify);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c 
b/drivers/gpu/drm/i915/i915_gem_context.c
index 1aab053..c5e50f0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -273,6 +273,9 @@ __create_hw_context(struct drm_device *dev,
         * is no remap info, it will be a NOP. */
        ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
 
+       /* force a re-emit of the FBC RT address on first use */
+       ctx->fbc_address = I915_FBC_RT_RESET;
+
        return ctx;
 
 err_out:
@@ -359,6 +362,7 @@ err_destroy:
 void i915_gem_context_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_context *ctx;
        int i;
 
        /* Prevent the hardware from restoring the last context (which hung) on
@@ -386,6 +390,13 @@ void i915_gem_context_reset(struct drm_device *dev)
                i915_gem_context_reference(dctx);
                ring->last_context = dctx;
        }
+
+       /*
+        * FBC RT address update(s) may have been lost,
+        * so just have everyone re-emit them.
+        */
+       list_for_each_entry(ctx, &dev_priv->context_list, link)
+               ctx->fbc_address = I915_FBC_RT_RESET;
 }
 
 int i915_gem_context_init(struct drm_device *dev)
@@ -588,13 +599,6 @@ mi_set_context(struct intel_engine_cs *ring,
 
        intel_ring_advance(ring);
 
-       /*
-        * FBC RT address is stored in the context, so we may have just
-        * restored it to an old value. Make sure we emit a new LRI
-        * to update the address.
-        */
-       ring->fbc_address_dirty = true;
-
        return ret;
 }
 
@@ -667,6 +671,9 @@ static int do_switch(struct intel_engine_cs *ring,
        if (ret)
                goto unpin_out;
 
+       if ((hw_flags & MI_RESTORE_INHIBIT) == 0)
+               ring->fbc_address = to->fbc_address;
+
        for (i = 0; i < MAX_L3_SLICES; i++) {
                if (!(to->remap_slice & (1<<i)))
                        continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c 
b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7a328f6..046f62b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -956,29 +956,33 @@ i915_gem_validate_context(struct drm_device *dev, struct 
drm_file *file,
 
 static void
 i915_gem_execbuffer_mark_fbc_dirty(struct intel_engine_cs *ring,
+                                  struct intel_context *ctx,
                                   struct list_head *vmas)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct i915_vma *vma;
-       u32 fbc_address = -1;
+       unsigned long fbc_address = I915_FBC_RT_NONE;
 
        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
 
                if (obj->base.pending_write_domain &&
                    obj == dev_priv->fbc.obj) {
-                       WARN_ON(fbc_address != -1 &&
-                               fbc_address != i915_gem_obj_ggtt_offset(obj));
-                       fbc_address = i915_gem_obj_ggtt_offset(obj);
+                       WARN_ON(fbc_address != I915_FBC_RT_NONE &&
+                               fbc_address != vma->node.start);
+                       fbc_address = vma->node.start;
                }
        }
 
-       /* need to nuke/cache_clean on IVB+? */
-       ring->fbc_dirty |= fbc_address != -1;
-
        /* need to update FBC tracking? */
-       ring->fbc_address_dirty |= fbc_address != ring->fbc_address;
-       ring->fbc_address = fbc_address;
+
+       /* simple yes/no is sufficient for !RCS */
+       if (ring->id != RCS && fbc_address != I915_FBC_RT_NONE)
+               ring->pending_fbc_address = 0;
+       else
+               ring->pending_fbc_address = fbc_address;
+
+       ring->fbc_dirty |= fbc_address != I915_FBC_RT_NONE;
 }
 
 static void
@@ -1349,7 +1353,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        else
                exec_start += i915_gem_obj_offset(batch_obj, vm);
 
-       i915_gem_execbuffer_mark_fbc_dirty(ring, &eb->vmas);
+       i915_gem_execbuffer_mark_fbc_dirty(ring, ctx, &eb->vmas);
 
        ret = i915_switch_context(ring, ctx);
        if (ret)
@@ -1359,6 +1363,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                goto err;
 
+       if (HAS_HW_CONTEXTS(dev) && ring->id == RCS)
+               ctx->fbc_address = ring->fbc_address;
+
        if (ring == &dev_priv->ring[RCS] &&
            mode != dev_priv->relative_constants_mode) {
                ret = intel_ring_begin(ring, 4);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c 
b/drivers/gpu/drm/i915/intel_ringbuffer.c
index dbfa280..6655eb5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -73,7 +73,7 @@ static int gen5_render_fbc_tracking(struct intel_engine_cs 
*ring)
 {
        int ret;
 
-       if (!ring->fbc_address_dirty)
+       if (ring->fbc_address == ring->pending_fbc_address)
                return 0;
 
        ret = intel_ring_begin(ring, 4);
@@ -83,14 +83,14 @@ static int gen5_render_fbc_tracking(struct intel_engine_cs 
*ring)
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, ILK_FBC_RT_BASE);
-       if (ring->fbc_address != -1)
-               intel_ring_emit(ring, ring->fbc_address |
+       if (ring->pending_fbc_address != I915_FBC_RT_NONE)
+               intel_ring_emit(ring, ring->pending_fbc_address |
                                ILK_FBC_FRONT_BUFFER | ILK_FBC_RT_VALID);
        else
                intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
 
-       ring->fbc_address_dirty = false;
+       ring->fbc_address = ring->pending_fbc_address;
 
        return 0;
 }
@@ -99,7 +99,7 @@ static int gen6_blt_fbc_tracking(struct intel_engine_cs *ring)
 {
        int ret;
 
-       if (!ring->fbc_address_dirty)
+       if (ring->fbc_address == ring->pending_fbc_address)
                return 0;
 
        ret = intel_ring_begin(ring, 4);
@@ -109,13 +109,13 @@ static int gen6_blt_fbc_tracking(struct intel_engine_cs 
*ring)
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, GEN6_BLITTER_ECOSKPD);
-       if (ring->fbc_address != -1)
+       if (ring->pending_fbc_address != I915_FBC_RT_NONE)
                intel_ring_emit(ring, 
_MASKED_BIT_ENABLE(GEN6_BLITTER_FBC_NOTIFY));
        else
                intel_ring_emit(ring, 
_MASKED_BIT_DISABLE(GEN6_BLITTER_FBC_NOTIFY));
        intel_ring_advance(ring);
 
-       ring->fbc_address_dirty = false;
+       ring->fbc_address = ring->pending_fbc_address;
 
        return 0;
 }
@@ -1505,6 +1505,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
        init_waitqueue_head(&ring->irq_queue);
 
+       /* current state unknown, so force an FBC RT address update */
+       ring->fbc_address = I915_FBC_RT_RESET;
+
        INIT_LIST_HEAD(&ring->notify_list);
        spin_lock_init(&ring->lock);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 80406ab..b8f9fd2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -175,11 +175,14 @@ struct  intel_engine_cs {
         * Do we have some not yet emitted requests outstanding?
         */
        struct drm_i915_gem_request *preallocated_lazy_request;
+
+#define I915_FBC_RT_NONE 1 /* FBC RT disabled */
+#define I915_FBC_RT_RESET 2 /* force re-emit of FBC RT */
+       unsigned long fbc_address, pending_fbc_address;
+
        u32 outstanding_lazy_seqno;
-       u32 fbc_address;
        bool gpu_caches_dirty:1;
        bool fbc_dirty:1;
-       bool fbc_address_dirty:1;
 
        wait_queue_head_t irq_queue;
 
-- 
1.8.5.5

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to