On Mon, Oct 06, 2014 at 03:15:10PM +0100, john.c.harri...@intel.com wrote:
> From: John Harrison <john.c.harri...@intel.com>
> 
> For: VIZ-4377
> Signed-off-by: john.c.harri...@intel.com
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c        |    6 +--
>  drivers/gpu/drm/i915/i915_drv.h            |    6 +--
>  drivers/gpu/drm/i915/i915_gem.c            |   66 ++++++++++++++++------------
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |    6 +--
>  drivers/gpu/drm/i915/i915_gem_gtt.h        |    4 +-
>  drivers/gpu/drm/i915/i915_gem_tiling.c     |    2 +-
>  drivers/gpu/drm/i915/i915_gpu_error.c      |    4 +-
>  drivers/gpu/drm/i915/intel_display.c       |   10 ++---
>  drivers/gpu/drm/i915/intel_ringbuffer.h    |    2 +-
>  9 files changed, 58 insertions(+), 48 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 063b448..726a8f0 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -133,9 +133,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>                  obj->base.size / 1024,
>                  obj->base.read_domains,
>                  obj->base.write_domain,
> -                obj->last_read_seqno,
> -                obj->last_write_seqno,
> -                obj->last_fenced_seqno,
> +                i915_gem_request_get_seqno(obj->last_read_req),
> +                i915_gem_request_get_seqno(obj->last_write_req),
> +                i915_gem_request_get_seqno(obj->last_fenced_req),
>                  i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
>                  obj->dirty ? " dirty" : "",
>                  obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 62c9f66..1401266 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1862,10 +1862,10 @@ struct drm_i915_gem_object {
>       struct intel_engine_cs *ring;
>  
>       /** Breadcrumb of last rendering to the buffer. */
> -     uint32_t last_read_seqno;
> -     uint32_t last_write_seqno;
> +     struct drm_i915_gem_request *last_read_req;
> +     struct drm_i915_gem_request *last_write_req;
>       /** Breadcrumb of last fenced GPU access to the buffer. */
> -     uint32_t last_fenced_seqno;
> +     struct drm_i915_gem_request *last_fenced_req;
>  
>       /** Current tiling stride for the object, if it's tiled. */
>       uint32_t stride;
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 2555cd8..2c33a83 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1289,11 +1289,11 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
>       /* Manually manage the write flush as we may have not yet
>        * retired the buffer.
>        *
> -      * Note that the last_write_seqno is always the earlier of
> -      * the two (read/write) seqno, so if we haved successfully waited,
> +      * Note that the last_write_req is always the earlier of
> +      * the two (read/write) requests, so if we have successfully waited,
>        * we know we have passed the last write.
>        */
> -     obj->last_write_seqno = 0;
> +     obj->last_write_req = NULL;
>  
>       return 0;
>  }
> @@ -1306,14 +1306,18 @@ static __must_check int
>  i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
>                              bool readonly)
>  {
> +     struct drm_i915_gem_request *req;
>       struct intel_engine_cs *ring = obj->ring;
>       u32 seqno;
>       int ret;
>  
> -     seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
> -     if (seqno == 0)
> +     req = readonly ? obj->last_write_req : obj->last_read_req;
> +     if (!req)
>               return 0;
>  
> +     seqno = i915_gem_request_get_seqno(req);
> +     BUG_ON(seqno == 0);

Again, you like BUG_ON a bit too much for my taste. If you want these
checks, then imo a WARN_ON in i915_gem_request_get_seqno (iff req != NULL,
of course) in the previous patch would be much better.
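Roughly what I have in mind, as an untested sketch (this assumes the helper
added in the previous patch looks more or less like the below; the exact
names and placement may differ there):

static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	/* A non-NULL request with a zero seqno would be a driver bug, but
	 * it is not worth killing the machine over - just complain loudly. */
	WARN_ON(req && req->seqno == 0);

	/* A NULL request simply means no outstanding rendering. */
	return req ? req->seqno : 0;
}

With something along those lines in place, the BUG_ON(seqno == 0) checks
sprinkled over the call sites in this patch can simply be dropped.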
-Daniel

> +
>       ret = i915_wait_seqno(ring, seqno);
>       if (ret)
>               return ret;
> @@ -1329,6 +1333,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
>                                           struct drm_i915_file_private *file_priv,
>                                           bool readonly)
>  {
> +     struct drm_i915_gem_request *req;
>       struct drm_device *dev = obj->base.dev;
>       struct drm_i915_private *dev_priv = dev->dev_private;
>       struct intel_engine_cs *ring = obj->ring;
> @@ -1339,10 +1344,13 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
>       BUG_ON(!mutex_is_locked(&dev->struct_mutex));
>       BUG_ON(!dev_priv->mm.interruptible);
>  
> -     seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
> -     if (seqno == 0)
> +     req = readonly ? obj->last_write_req : obj->last_read_req;
> +     if (!req)
>               return 0;
>  
> +     seqno = i915_gem_request_get_seqno(req);
> +     BUG_ON(seqno == 0);
> +
>       ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
>       if (ret)
>               return ret;
> @@ -2180,12 +2188,12 @@ static void
>  i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
>                              struct intel_engine_cs *ring)
>  {
> -     u32 seqno = intel_ring_get_seqno(ring);
> +     struct drm_i915_gem_request *req = intel_ring_get_request(ring);
>  
>       BUG_ON(ring == NULL);
> -     if (obj->ring != ring && obj->last_write_seqno) {
> -             /* Keep the seqno relative to the current ring */
> -             obj->last_write_seqno = seqno;
> +     if (obj->ring != ring && obj->last_write_req) {
> +             /* Keep the request relative to the current ring */
> +             obj->last_write_req = req;
>       }
>       obj->ring = ring;
>  
> @@ -2197,7 +2205,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
>  
>       list_move_tail(&obj->ring_list, &ring->active_list);
>  
> -     obj->last_read_seqno = seqno;
> +     obj->last_read_req = req;
>  }
>  
>  void i915_vma_move_to_active(struct i915_vma *vma,
> @@ -2228,11 +2236,11 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
>       list_del_init(&obj->ring_list);
>       obj->ring = NULL;
>  
> -     obj->last_read_seqno = 0;
> -     obj->last_write_seqno = 0;
> +     obj->last_read_req = NULL;
> +     obj->last_write_req = NULL;
>       obj->base.write_domain = 0;
>  
> -     obj->last_fenced_seqno = 0;
> +     obj->last_fenced_req = NULL;
>  
>       obj->active = 0;
>       drm_gem_object_unreference(&obj->base);
> @@ -2249,7 +2257,7 @@ i915_gem_object_retire(struct drm_i915_gem_object *obj)
>               return;
>  
>       if (i915_seqno_passed(ring->get_seqno(ring, true),
> -                           obj->last_read_seqno))
> +                           i915_gem_request_get_seqno(obj->last_read_req)))
>               i915_gem_object_move_to_inactive(obj);
>  }
>  
> @@ -2669,7 +2677,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
>                                     struct drm_i915_gem_object,
>                                     ring_list);
>  
> -             if (!i915_seqno_passed(seqno, obj->last_read_seqno))
> +             if (!i915_seqno_passed(seqno, i915_gem_request_get_seqno(obj->last_read_req)))
>                       break;
>  
>               i915_gem_object_move_to_inactive(obj);
> @@ -2779,7 +2787,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
>       int ret;
>  
>       if (obj->active) {
> -             ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
> +             ret = i915_gem_check_olr(obj->ring, i915_gem_request_get_seqno(obj->last_read_req));
>               if (ret)
>                       return ret;
>  
> @@ -2838,13 +2846,14 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>               goto out;
>  
>       if (obj->active) {
> -             seqno = obj->last_read_seqno;
> +             if (!obj->last_read_req)
> +                     goto out;
> +
> +             seqno = i915_gem_request_get_seqno(obj->last_read_req);
> +             BUG_ON(seqno == 0);
>               ring = obj->ring;
>       }
>  
> -     if (seqno == 0)
> -              goto out;
> -
>       /* Do this after OLR check to make sure we make forward progress polling
>        * on this IOCTL with a timeout <=0 (like busy ioctl)
>        */
> @@ -2894,7 +2903,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  
>       idx = intel_ring_sync_index(from, to);
>  
> -     seqno = obj->last_read_seqno;
> +     seqno = i915_gem_request_get_seqno(obj->last_read_req);
>       /* Optimization: Avoid semaphore sync when we are sure we already
>        * waited for an object with higher seqno */
>       if (seqno <= from->semaphore.sync_seqno[idx]) /* <--- broken?! needs to use i915_seqno_passed()??? */
> @@ -2907,11 +2916,12 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
>       trace_i915_gem_ring_sync_to(from, to, seqno);
>       ret = to->semaphore.sync_to(to, from, seqno);
>       if (!ret)
> -             /* We use last_read_seqno because sync_to()
> +             /* We use last_read_req because sync_to()
>                * might have just caused seqno wrap under
>                * the radar.
>                */
> -             from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
> +             from->semaphore.sync_seqno[idx] =
> +                             i915_gem_request_get_seqno(obj->last_read_req);
>  
>       return ret;
>  }
> @@ -3224,12 +3234,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
>  static int
>  i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
>  {
> -     if (obj->last_fenced_seqno) {
> -             int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
> +     if (obj->last_fenced_req) {
> +             int ret = i915_wait_seqno(obj->ring, i915_gem_request_get_seqno(obj->last_fenced_req));
>               if (ret)
>                       return ret;
>  
> -             obj->last_fenced_seqno = 0;
> +             obj->last_fenced_req = NULL;
>       }
>  
>       return 0;
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 1a0611b..4250211 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -946,7 +946,7 @@ void
>  i915_gem_execbuffer_move_to_active(struct list_head *vmas,
>                                  struct intel_engine_cs *ring)
>  {
> -     u32 seqno = intel_ring_get_seqno(ring);
> +     struct drm_i915_gem_request *req = intel_ring_get_request(ring);
>       struct i915_vma *vma;
>  
>       list_for_each_entry(vma, vmas, exec_list) {
> @@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
>               i915_vma_move_to_active(vma, ring);
>               if (obj->base.write_domain) {
>                       obj->dirty = 1;
> -                     obj->last_write_seqno = seqno;
> +                     obj->last_write_req = req;
>  
>                       intel_fb_obj_invalidate(obj, ring);
>  
> @@ -971,7 +971,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
>                       obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
>               }
>               if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
> -                     obj->last_fenced_seqno = seqno;
> +                     obj->last_fenced_req = req;
>                       if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
>                               struct drm_i915_private *dev_priv = to_i915(ring->dev);
>                               list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index d5c14af..8a220c0 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -178,7 +178,7 @@ struct i915_address_space {
>        * List of objects currently involved in rendering.
>        *
>        * Includes buffers having the contents of their GPU caches
> -      * flushed, not necessarily primitives.  last_rendering_seqno
> +      * flushed, not necessarily primitives.  last_rendering_req
>        * represents when the rendering involved will be completed.
>        *
>        * A reference is held on the buffer while on this list.
> @@ -189,7 +189,7 @@ struct i915_address_space {
>        * LRU list of objects which are not in the ringbuffer and
>        * are ready to unbind, but are still in the GTT.
>        *
> -      * last_rendering_seqno is 0 while an object is in this list.
> +      * last_rendering_req is NULL while an object is in this list.
>        *
>        * A reference is not held on the buffer while on this list,
>        * as merely being GTT-bound shouldn't prevent its being
> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> index 2cefb59..fa5fc8c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> @@ -383,7 +383,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
>  
>               if (ret == 0) {
>                       obj->fence_dirty =
> -                             obj->last_fenced_seqno ||
> +                             obj->last_fenced_req ||
>                               obj->fence_reg != I915_FENCE_REG_NONE;
>  
>                       obj->tiling_mode = args->tiling_mode;
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 2c87a79..1b58390 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -666,8 +666,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
>  
>       err->size = obj->base.size;
>       err->name = obj->base.name;
> -     err->rseqno = obj->last_read_seqno;
> -     err->wseqno = obj->last_write_seqno;
> +     err->rseqno = i915_gem_request_get_seqno(obj->last_read_req);
> +     err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
>       err->gtt_offset = vma->node.start;
>       err->read_domains = obj->base.read_domains;
>       err->write_domain = obj->base.write_domain;
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index b78f00a..5aae3d1e 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -9766,16 +9766,16 @@ static int intel_postpone_flip(struct drm_i915_gem_object *obj)
>  
>       lockdep_assert_held(&obj->base.dev->struct_mutex);
>  
> -     if (!obj->last_write_seqno)
> +     if (!obj->last_write_req)
>               return 0;
>  
>       ring = obj->ring;
>  
>       if (i915_seqno_passed(ring->get_seqno(ring, true),
> -                           obj->last_write_seqno))
> +                           i915_gem_request_get_seqno(obj->last_write_req)))
>               return 0;
>  
> -     ret = i915_gem_check_olr(ring, obj->last_write_seqno);
> +     ret = i915_gem_check_olr(ring, i915_gem_request_get_seqno(obj->last_write_req));
>       if (ret)
>               return ret;
>  
> @@ -9838,7 +9838,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
>       }
>  
>       spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
> -     intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
> +     intel_crtc->mmio_flip.seqno = i915_gem_request_get_seqno(obj->last_write_req);
>       intel_crtc->mmio_flip.ring_id = obj->ring->id;
>       spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
>  
> @@ -10045,7 +10045,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>               if (ret)
>                       goto cleanup_unpin;
>  
> -             work->flip_queued_seqno = obj->last_write_seqno;
> +             work->flip_queued_seqno = i915_gem_request_get_seqno(obj->last_write_req);
>               work->flip_queued_ring = obj->ring;
>       } else {
>               ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index cc1b62f..d98b964 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -249,7 +249,7 @@ struct  intel_engine_cs {
>        * ringbuffer.
>        *
>        * Includes buffers having the contents of their GPU caches
> -      * flushed, not necessarily primitives.  last_rendering_seqno
> +      * flushed, not necessarily primitives.  last_rendering_req
>        * represents when the rendering involved will be completed.
>        *
>        * A reference is held on the buffer while on this list.
> -- 
> 1.7.9.5
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
