---
 drivers/gpu/drm/i915/i915_drv.h            |   10 +-
 drivers/gpu/drm/i915/i915_gem.c            |  166 +++++++++++++++++-----------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   33 +-----
 drivers/gpu/drm/i915/intel_overlay.c       |    4 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   68 ++++--------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |    7 +-
 6 files changed, 138 insertions(+), 150 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ebf946..ed970bd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1134,18 +1134,21 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
        return (int32_t)(seq1 - seq2) >= 0;
 }
 
+u32 __i915_gem_get_seqno(struct drm_device *dev);
+
 static inline u32
 i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
 {
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
-       return ring->outstanding_lazy_request = dev_priv->next_seqno;
+       if (ring->outstanding_lazy_request == 0)
+               ring->outstanding_lazy_request = __i915_gem_get_seqno(ring->dev);
+       return ring->outstanding_lazy_request;
 }
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
                                           struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
@@ -1161,7 +1164,6 @@ void i915_gem_do_init(struct drm_device *dev,
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
-                                 struct drm_file *file,
                                  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
                                   uint32_t seqno);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aa8a1b1..d8a0f7b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1640,13 +1640,11 @@ i915_gem_object_move_to_ring(struct drm_i915_gem_object *obj,
                if (request == NULL)
                        return -ENOMEM;
 
-               ret = i915_add_request(from, NULL, request);
+               ret = i915_add_request(from, request);
                if (ret) {
                        kfree(request);
                        return ret;
                }
-
-               seqno = request->seqno;
        }
 
        from->sync_seqno[idx] = seqno;
@@ -1759,11 +1757,12 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
        return obj->madv == I915_MADV_DONTNEED;
 }
 
-static void
+static u32
 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
                               uint32_t flush_domains)
 {
        struct drm_i915_gem_object *obj, *next;
+       u32 seqno = 0;
 
        list_for_each_entry_safe(obj, next,
                                 &ring->gpu_write_list,
@@ -1771,21 +1770,37 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
                if (obj->base.write_domain & flush_domains) {
                        uint32_t old_write_domain = obj->base.write_domain;
 
+                       seqno = i915_gem_next_request_seqno(ring);
+
                        obj->base.write_domain = 0;
                        list_del_init(&obj->gpu_write_list);
-                       i915_gem_object_move_to_active(obj, ring,
-                                                      i915_gem_next_request_seqno(ring));
+                       i915_gem_object_move_to_active(obj, ring, seqno);
 
                        trace_i915_gem_object_change_domain(obj,
                                                            
obj->base.read_domains,
                                                            old_write_domain);
                }
        }
+
+       return seqno;
+}
+
+u32 __i915_gem_get_seqno(struct drm_device *dev)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       u32 seqno;
+
+       seqno = dev_priv->next_seqno;
+
+       /* reserve 0 for non-seqno */
+       if (++dev_priv->next_seqno == 0)
+               dev_priv->next_seqno = 1;
+
+       return seqno;
 }
 
 int
 i915_add_request(struct intel_ring_buffer *ring,
-                struct drm_file *file,
                 struct drm_i915_gem_request *request)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1793,11 +1808,10 @@ i915_add_request(struct intel_ring_buffer *ring,
        int was_empty;
        int ret;
 
-       BUG_ON(request == NULL);
-
-       ret = ring->add_request(ring, &seqno);
+       seqno = ring->outstanding_lazy_request;
+       ret = ring->add_request(ring, seqno);
        if (ret)
-           return ret;
+               return ret;
 
        trace_i915_gem_request_add(ring, seqno);
 
@@ -1807,17 +1821,18 @@ i915_add_request(struct intel_ring_buffer *ring,
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
 
-       if (file) {
-               struct drm_i915_file_private *file_priv = file->driver_priv;
+       if (ring->lazy_dispatch) {
+               struct drm_i915_file_private *file;
+
+               file = ring->lazy_dispatch->driver_priv;
 
-               spin_lock(&file_priv->mm.lock);
-               request->file_priv = file_priv;
-               list_add_tail(&request->client_list,
-                             &file_priv->mm.request_list);
-               spin_unlock(&file_priv->mm.lock);
+               spin_lock(&file->mm.lock);
+               request->file_priv = file;
+               list_add_tail(&request->client_list, &file->mm.request_list);
+               spin_unlock(&file->mm.lock);
        }
 
-       ring->outstanding_lazy_request = false;
+       ring->outstanding_lazy_request = 0;
 
        if (!dev_priv->mm.suspended) {
                mod_timer(&dev_priv->hangcheck_timer,
@@ -1932,17 +1947,50 @@ void i915_gem_reset(struct drm_device *dev)
        i915_gem_reset_fences(dev);
 }
 
+static bool
+i915_ring_outstanding_dispatch(struct intel_ring_buffer *ring)
+{
+       u32 last_request, last_dispatch;
+
+       if (!ring->outstanding_lazy_request)
+               return false;
+
+       if (list_empty(&ring->active_list))
+               return false;
+
+       if (list_empty(&ring->request_list))
+               return false;
+
+       last_request = list_entry(ring->request_list.prev,
+                                 struct drm_i915_gem_request,
+                                 list)->seqno;
+
+       last_dispatch = list_entry(ring->active_list.prev,
+                                  struct drm_i915_gem_object,
+                                  ring_list)->last_rendering_seqno;
+
+       return !i915_seqno_passed(last_request, last_dispatch);
+}
+
 /**
  * This function clears the request list as sequence numbers are passed.
  */
-static void
+static bool
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
        uint32_t seqno;
        int i;
 
+       if (i915_ring_outstanding_dispatch(ring)) {
+               struct drm_i915_gem_request *request;
+
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               if (request && i915_add_request(ring, request))
+                       kfree(request);
+       }
+
        if (list_empty(&ring->request_list))
-               return;
+               return false;
 
        WARN_ON(i915_verify_lists(ring->dev));
 
@@ -1995,12 +2043,15 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
        }
 
        WARN_ON(i915_verify_lists(ring->dev));
+
+       return !list_empty(&ring->active_list);
 }
 
-void
+bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
+       bool active = false;
        int i;
 
        if (!list_empty(&dev_priv->mm.deferred_free_list)) {
@@ -2018,7 +2069,9 @@ i915_gem_retire_requests(struct drm_device *dev)
        }
 
        for (i = 0; i < I915_NUM_RINGS; i++)
-               i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+               active |= i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+
+       return active;
 }
 
 static void
@@ -2026,7 +2079,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
 {
        drm_i915_private_t *dev_priv;
        struct drm_device *dev;
-       bool idle;
        int i;
 
        dev_priv = container_of(work, drm_i915_private_t,
@@ -2039,31 +2091,20 @@ i915_gem_retire_work_handler(struct work_struct *work)
                return;
        }
 
-       i915_gem_retire_requests(dev);
-
        /* Send a periodic flush down the ring so we don't hold onto GEM
         * objects indefinitely.
         */
-       idle = true;
        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct intel_ring_buffer *ring = &dev_priv->ring[i];
+               int ret;
 
-               if (!list_empty(&ring->gpu_write_list)) {
-                       struct drm_i915_gem_request *request;
-                       int ret;
-
+               if (!list_empty(&ring->gpu_write_list))
                        ret = i915_gem_flush_ring(ring,
                                                  0, I915_GEM_GPU_DOMAINS);
-                       request = kzalloc(sizeof(*request), GFP_KERNEL);
-                       if (ret || request == NULL ||
-                           i915_add_request(ring, NULL, request))
-                           kfree(request);
-               }
-
-               idle &= list_empty(&ring->request_list);
+               (void)ret;
        }
 
-       if (!dev_priv->mm.suspended && !idle)
+       if (i915_gem_retire_requests(dev))
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 
        mutex_unlock(&dev->struct_mutex);
@@ -2103,13 +2144,11 @@ i915_wait_request(struct intel_ring_buffer *ring,
                if (request == NULL)
                        return -ENOMEM;
 
-               ret = i915_add_request(ring, NULL, request);
+               ret = i915_add_request(ring, request);
                if (ret) {
                        kfree(request);
                        return ret;
                }
-
-               seqno = request->seqno;
        }
 
        if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
@@ -2254,23 +2293,32 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 }
 
 int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
-                   uint32_t invalidate_domains,
-                   uint32_t flush_domains)
+i915_gem_flush_ring(struct intel_ring_buffer *ring, u32 invalidate, u32 flush)
 {
        int ret;
 
-       if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
+       if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
 
-       trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
+       trace_i915_gem_ring_flush(ring, invalidate, flush);
 
-       ret = ring->flush(ring, invalidate_domains, flush_domains);
+       ret = ring->flush(ring, &invalidate, &flush);
        if (ret)
                return ret;
 
-       if (flush_domains & I915_GEM_GPU_DOMAINS)
-               i915_gem_process_flushing_list(ring, flush_domains);
+       if (flush & I915_GEM_GPU_DOMAINS &&
+           i915_gem_process_flushing_list(ring, flush)) {
+               struct drm_i915_gem_request *request;
+
+               request = kzalloc(sizeof(*request), GFP_KERNEL);
+               if (request) {
+                       ret = i915_add_request(ring, request);
+                       if (ret) {
+                               kfree(request);
+                               return ret;
+                       }
+               }
+       }
 
        return 0;
 }
@@ -2283,8 +2331,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
                return 0;
 
        if (!list_empty(&ring->gpu_write_list)) {
-               ret = i915_gem_flush_ring(ring,
-                                   I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+               ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
                if (ret)
                        return ret;
        }
@@ -3501,22 +3548,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                 * use this buffer rather sooner than later, so issuing the required
                 * flush earlier is beneficial.
                 */
-               if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+               if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
                        ret = i915_gem_flush_ring(obj->ring,
                                                  0, obj->base.write_domain);
-               } else if (obj->ring->outstanding_lazy_request ==
-                          obj->last_rendering_seqno) {
-                       struct drm_i915_gem_request *request;
-
-                       /* This ring is not being cleared by active usage,
-                        * so emit a request to do so.
-                        */
-                       request = kzalloc(sizeof(*request), GFP_KERNEL);
-                       if (request)
-                               ret = i915_add_request(obj->ring, NULL,request);
-                       else
-                               ret = -ENOMEM;
-               }
 
                /* Update the active list for the hardware's current position.
                 * Otherwise this only updates on a delayed timer or when irqs
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ae24eb9..60aaf99 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -873,36 +873,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
        }
 }
 
-static void
-i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-                                   struct drm_file *file,
-                                   struct intel_ring_buffer *ring)
-{
-       struct drm_i915_gem_request *request;
-       u32 invalidate;
-
-       /*
-        * Ensure that the commands in the batch buffer are
-        * finished before the interrupt fires.
-        *
-        * The sampler always gets flushed on i965 (sigh).
-        */
-       invalidate = I915_GEM_DOMAIN_COMMAND;
-       if (INTEL_INFO(dev)->gen >= 4)
-               invalidate |= I915_GEM_DOMAIN_SAMPLER;
-       if (ring->flush(ring, invalidate, 0)) {
-               i915_gem_next_request_seqno(ring);
-               return;
-       }
-
-       /* Add a breadcrumb for the completion of the batch buffer */
-       request = kzalloc(sizeof(*request), GFP_KERNEL);
-       if (request == NULL || i915_add_request(ring, file, request)) {
-               i915_gem_next_request_seqno(ring);
-               kfree(request);
-       }
-}
-
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
@@ -1089,6 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto err;
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+       batch_obj->base.pending_write_domain = I915_GEM_DOMAIN_COMMAND;
 
        ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
        if (ret)
@@ -1132,7 +1103,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
 
        i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
-       i915_gem_execbuffer_retire_commands(dev, file, ring);
+       ring->lazy_dispatch = file;
 
 err:
        eb_destroy(eb);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a670c00..25edf0e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -220,7 +220,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
        int ret;
 
        BUG_ON(overlay->last_flip_req);
-       ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+       ret = i915_add_request(LP_RING(dev_priv), request);
        if (ret) {
            kfree(request);
            return ret;
@@ -361,7 +361,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
        OUT_RING(flip_addr);
         ADVANCE_LP_RING();
 
-       ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+       ret = i915_add_request(LP_RING(dev_priv), request);
        if (ret) {
                kfree(request);
                return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e9e6f71..cd34e43 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -42,24 +42,9 @@ static inline int ring_space(struct intel_ring_buffer *ring)
        return space;
 }
 
-static u32 i915_gem_get_seqno(struct drm_device *dev)
-{
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 seqno;
-
-       seqno = dev_priv->next_seqno;
-
-       /* reserve 0 for non-seqno */
-       if (++dev_priv->next_seqno == 0)
-               dev_priv->next_seqno = 1;
-
-       return seqno;
-}
-
 static int
 render_ring_flush(struct intel_ring_buffer *ring,
-                 u32   invalidate_domains,
-                 u32   flush_domains)
+                 u32 *invalidate, u32 *flush)
 {
        struct drm_device *dev = ring->dev;
        u32 cmd;
@@ -94,21 +79,23 @@ render_ring_flush(struct intel_ring_buffer *ring,
         */
 
        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-       if ((invalidate_domains|flush_domains) &
-           I915_GEM_DOMAIN_RENDER)
+       if ((*invalidate | *flush) & I915_GEM_DOMAIN_RENDER) {
                cmd &= ~MI_NO_WRITE_FLUSH;
+               *flush |= I915_GEM_DOMAIN_RENDER;
+       }
+
        if (INTEL_INFO(dev)->gen < 4) {
                /*
                 * On the 965, the sampler cache always gets flushed
                 * and this bit is reserved.
                 */
-               if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+               if (*invalidate & I915_GEM_DOMAIN_SAMPLER)
                        cmd |= MI_READ_FLUSH;
        }
-       if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+       if (*invalidate & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;
 
-       if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+       if (*invalidate & I915_GEM_DOMAIN_COMMAND &&
            (IS_G4X(dev) || IS_GEN5(dev)))
                cmd |= MI_INVALIDATE_ISP;
 
@@ -120,6 +107,7 @@ render_ring_flush(struct intel_ring_buffer *ring,
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
 
+       *flush |= I915_GEM_DOMAIN_COMMAND;
        return 0;
 }
 
@@ -336,16 +324,14 @@ update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
 
 static int
 gen6_add_request(struct intel_ring_buffer *ring,
-                u32 *result)
+                u32 seqno)
 {
-       u32 seqno;
        int ret;
 
        ret = intel_ring_begin(ring, 10);
        if (ret)
                return ret;
 
-       seqno = i915_gem_get_seqno(ring->dev);
        update_semaphore(ring, 0, seqno);
        update_semaphore(ring, 1, seqno);
 
@@ -355,7 +341,6 @@ gen6_add_request(struct intel_ring_buffer *ring,
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);
 
-       *result = seqno;
        return 0;
 }
 
@@ -394,10 +379,8 @@ do {                                                                \
 
 static int
 pc_render_add_request(struct intel_ring_buffer *ring,
-                     u32 *result)
+                     u32 seqno)
 {
-       struct drm_device *dev = ring->dev;
-       u32 seqno = i915_gem_get_seqno(dev);
        struct pipe_control *pc = ring->private;
        u32 scratch_addr = pc->gtt_offset + 128;
        int ret;
@@ -438,16 +421,13 @@ pc_render_add_request(struct intel_ring_buffer *ring,
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
 
-       *result = seqno;
        return 0;
 }
 
 static int
 render_ring_add_request(struct intel_ring_buffer *ring,
-                       u32 *result)
+                       u32 seqno)
 {
-       struct drm_device *dev = ring->dev;
-       u32 seqno = i915_gem_get_seqno(dev);
        int ret;
 
        ret = intel_ring_begin(ring, 4);
@@ -460,7 +440,6 @@ render_ring_add_request(struct intel_ring_buffer *ring,
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);
 
-       *result = seqno;
        return 0;
 }
 
@@ -561,8 +540,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
 static int
 bsd_ring_flush(struct intel_ring_buffer *ring,
-              u32     invalidate_domains,
-              u32     flush_domains)
+              u32 *invalidate, u32 *flush)
 {
        int ret;
 
@@ -573,29 +551,27 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
        intel_ring_emit(ring, MI_FLUSH);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
+
+       *flush |= I915_GEM_GPU_DOMAINS;
        return 0;
 }
 
 static int
 ring_add_request(struct intel_ring_buffer *ring,
-                u32 *result)
+                u32 seqno)
 {
-       u32 seqno;
        int ret;
 
        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;
 
-       seqno = i915_gem_get_seqno(ring->dev);
-
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);
 
-       *result = seqno;
        return 0;
 }
 
@@ -1046,7 +1022,7 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 }
 
 static int gen6_ring_flush(struct intel_ring_buffer *ring,
-                          u32 invalidate, u32 flush)
+                          u32 *invalidate, u32 *flush)
 {
        uint32_t cmd;
        int ret;
@@ -1056,13 +1032,15 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (invalidate & I915_GEM_GPU_DOMAINS)
+       if (*invalidate & I915_GEM_GPU_DOMAINS)
                cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
+
+       *flush |= I915_GEM_GPU_DOMAINS;
        return 0;
 }
 
@@ -1217,7 +1195,7 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
 }
 
 static int blt_ring_flush(struct intel_ring_buffer *ring,
-                         u32 invalidate, u32 flush)
+                         u32 *invalidate, u32 *flush)
 {
        uint32_t cmd;
        int ret;
@@ -1227,13 +1205,15 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
                return ret;
 
        cmd = MI_FLUSH_DW;
-       if (invalidate & I915_GEM_DOMAIN_RENDER)
+       if (*invalidate & I915_GEM_DOMAIN_RENDER)
                cmd |= MI_INVALIDATE_TLB;
        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
+
+       *flush |= I915_GEM_GPU_DOMAINS;
        return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index f23cc5f..0241f7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -70,10 +70,10 @@ struct  intel_ring_buffer {
        void            (*write_tail)(struct intel_ring_buffer *ring,
                                      u32 value);
        int __must_check (*flush)(struct intel_ring_buffer *ring,
-                                 u32   invalidate_domains,
-                                 u32   flush_domains);
+                                 u32   *invalidate,
+                                 u32   *flush);
        int             (*add_request)(struct intel_ring_buffer *ring,
-                                      u32 *seqno);
+                                      u32 seqno);
        u32             (*get_seqno)(struct intel_ring_buffer *ring);
        int             (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
                                               u32 offset, u32 length);
@@ -110,6 +110,7 @@ struct  intel_ring_buffer {
         * Do we have some not yet emitted requests outstanding?
         */
        u32 outstanding_lazy_request;
+       struct drm_file *lazy_dispatch;
 
        wait_queue_head_t irq_queue;
        drm_local_map_t map;
-- 
1.7.4.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to