On Thu, Jun 27, 2013 at 01:48:17PM +0200, Maarten Lankhorst wrote:
> This commit converts the source of the val_seq counter to
> the ww_mutex API. The reservation objects are converted later,
> because there is still a lockdep splat in nouveau that has to
> be resolved first.
> 
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst at canonical.com>

Reviewed-by: Jerome Glisse <jglisse at redhat.com>
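
A quick orientation for readers skimming the diff below: the per-device
val_seq counter goes away, and each reservation sequence is instead driven
by a ww_acquire_ctx ("ticket") whose stamp plays the old counter's role.
Condensed, the lifecycle every converted caller follows looks roughly like
this (a sketch of the pattern in this patch, not verbatim driver code):

        struct ww_acquire_ctx ticket;

        ww_acquire_init(&ticket, &reservation_ww_class);
        /* reserve each bo with ttm_bo_reserve(bo, ..., true, &ticket),
         * retrying via the slowpath on -EAGAIN */
        ww_acquire_done(&ticket);       /* no further reservations will follow */
        /* ... validate buffers, submit work, attach fences ... */
        /* drop each bo with ttm_bo_unreserve_ticket(bo, &ticket) */
        ww_acquire_fini(&ticket);       /* ticket is dead; lockdep checks this */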

> ---
>  drivers/gpu/drm/nouveau/nouveau_gem.c    | 38 ++++++++++++++-------
>  drivers/gpu/drm/radeon/radeon.h          |  1 +
>  drivers/gpu/drm/radeon/radeon_cs.c       | 18 +++++-----
>  drivers/gpu/drm/radeon/radeon_object.c   |  5 +--
>  drivers/gpu/drm/radeon/radeon_object.h   |  3 +-
>  drivers/gpu/drm/radeon/radeon_uvd.c      | 27 +++++++--------
>  drivers/gpu/drm/ttm/ttm_bo.c             | 50 +++++++++++++++++----------
>  drivers/gpu/drm/ttm/ttm_execbuf_util.c   | 58 +++++++++++++++++---------------
>  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  | 14 ++++----
>  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 23 ++++++++-----
>  include/drm/ttm/ttm_bo_api.h             |  2 +-
>  include/drm/ttm/ttm_bo_driver.h          | 32 +++++++++++++-----
>  include/drm/ttm/ttm_execbuf_util.h       | 13 +++++--
>  13 files changed, 172 insertions(+), 112 deletions(-)
> 
> diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
> index 7054706..e35d468 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
> @@ -277,10 +277,12 @@ struct validate_op {
>       struct list_head vram_list;
>       struct list_head gart_list;
>       struct list_head both_list;
> +     struct ww_acquire_ctx ticket;
>  };
>  
>  static void
> -validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
> +validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
> +                struct ww_acquire_ctx *ticket)
>  {
>       struct list_head *entry, *tmp;
>       struct nouveau_bo *nvbo;
> @@ -297,17 +299,24 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
>  
>               list_del(&nvbo->entry);
>               nvbo->reserved_by = NULL;
> -             ttm_bo_unreserve(&nvbo->bo);
> +             ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
>               drm_gem_object_unreference_unlocked(nvbo->gem);
>       }
>  }
>  
>  static void
> -validate_fini(struct validate_op *op, struct nouveau_fence* fence)
> +validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
>  {
> -     validate_fini_list(&op->vram_list, fence);
> -     validate_fini_list(&op->gart_list, fence);
> -     validate_fini_list(&op->both_list, fence);
> +     validate_fini_list(&op->vram_list, fence, &op->ticket);
> +     validate_fini_list(&op->gart_list, fence, &op->ticket);
> +     validate_fini_list(&op->both_list, fence, &op->ticket);
> +}
> +
> +static void
> +validate_fini(struct validate_op *op, struct nouveau_fence *fence)
> +{
> +     validate_fini_no_ticket(op, fence);
> +     ww_acquire_fini(&op->ticket);
>  }
>  
>  static int
> @@ -317,13 +326,11 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
>  {
>       struct nouveau_cli *cli = nouveau_cli(file_priv);
>       struct drm_device *dev = chan->drm->dev;
> -     struct nouveau_drm *drm = nouveau_drm(dev);
> -     uint32_t sequence;
>       int trycnt = 0;
>       int ret, i;
>       struct nouveau_bo *res_bo = NULL;
>  
> -     sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
> +     ww_acquire_init(&op->ticket, &reservation_ww_class);
>  retry:
>       if (++trycnt > 100000) {
>               NV_ERROR(cli, "%s failed and gave up.\n", __func__);
> @@ -338,6 +345,7 @@ retry:
>               gem = drm_gem_object_lookup(dev, file_priv, b->handle);
>               if (!gem) {
>                       NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
> +                     ww_acquire_done(&op->ticket);
>                       validate_fini(op, NULL);
>                       return -ENOENT;
>               }
> @@ -352,21 +360,23 @@ retry:
>                       NV_ERROR(cli, "multiple instances of buffer %d on "
>                                     "validation list\n", b->handle);
>                       drm_gem_object_unreference_unlocked(gem);
> +                     ww_acquire_done(&op->ticket);
>                       validate_fini(op, NULL);
>                       return -EINVAL;
>               }
>  
> -             ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
> +             ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
>               if (ret) {
> -                     validate_fini(op, NULL);
> +                     validate_fini_no_ticket(op, NULL);
>                       if (unlikely(ret == -EAGAIN)) {
> -                             sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
>                               ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
> -                                                           sequence);
> +                                                           &op->ticket);
>                               if (!ret)
>                                       res_bo = nvbo;
>                       }
>                       if (unlikely(ret)) {
> +                             ww_acquire_done(&op->ticket);
> +                             ww_acquire_fini(&op->ticket);
>                               drm_gem_object_unreference_unlocked(gem);
>                               if (ret != -ERESTARTSYS)
>                                       NV_ERROR(cli, "fail reserve\n");
> @@ -390,6 +400,7 @@ retry:
>                       NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
>                                b->valid_domains);
>                       list_add_tail(&nvbo->entry, &op->both_list);
> +                     ww_acquire_done(&op->ticket);
>                       validate_fini(op, NULL);
>                       return -EINVAL;
>               }
> @@ -397,6 +408,7 @@ retry:
>                       goto retry;
>       }
>  
> +     ww_acquire_done(&op->ticket);
>       return 0;
>  }
>  
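
The nouveau retry logic above is easier to see condensed: on contention the
thread drops every reservation it holds without finishing the ticket,
sleeps on the contended bo via the slowpath, and restarts the loop, with
res_bo remembering the bo that is already reserved. Roughly:

        ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
        if (ret) {
                validate_fini_no_ticket(op, NULL);      /* ticket stays live */
                if (ret == -EAGAIN) {
                        ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                                      &op->ticket);
                        if (!ret)
                                res_bo = nvbo;  /* don't re-reserve on retry */
                }
        }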
> diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
> index 142ce6c..eaf4ff5 100644
> --- a/drivers/gpu/drm/radeon/radeon.h
> +++ b/drivers/gpu/drm/radeon/radeon.h
> @@ -883,6 +883,7 @@ struct radeon_cs_parser {
>       u32                     cs_flags;
>       u32                     ring;
>       s32                     priority;
> +     struct ww_acquire_ctx   ticket;
>  };
>  
>  extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
> diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
> index 7e265a5..8a91901 100644
> --- a/drivers/gpu/drm/radeon/radeon_cs.c
> +++ b/drivers/gpu/drm/radeon/radeon_cs.c
> @@ -106,7 +106,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
>               radeon_bo_list_add_object(&p->relocs[i].lobj,
>                                         &p->validated);
>       }
> -     return radeon_bo_list_validate(&p->validated, p->ring);
> +     return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring);
>  }
>  
>  static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
> @@ -314,15 +314,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
>   * If error is set then unvalidate buffer, otherwise just free memory
>   * used by parsing context.
>   **/
> -static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
> +static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
>  {
>       unsigned i;
>  
>       if (!error) {
> -             ttm_eu_fence_buffer_objects(&parser->validated,
> +             ttm_eu_fence_buffer_objects(&parser->ticket,
> +                                         &parser->validated,
>                                           parser->ib.fence);
> -     } else {
> -             ttm_eu_backoff_reservation(&parser->validated);
> +     } else if (backoff) {
> +             ttm_eu_backoff_reservation(&parser->ticket,
> +                                        &parser->validated);
>       }
>  
>       if (parser->relocs != NULL) {
> @@ -535,7 +537,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>       r = radeon_cs_parser_init(&parser, data);
>       if (r) {
>               DRM_ERROR("Failed to initialize parser !\n");
> -             radeon_cs_parser_fini(&parser, r);
> +             radeon_cs_parser_fini(&parser, r, false);
>               up_read(&rdev->exclusive_lock);
>               r = radeon_cs_handle_lockup(rdev, r);
>               return r;
> @@ -544,7 +546,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>       if (r) {
>               if (r != -ERESTARTSYS)
>                       DRM_ERROR("Failed to parse relocation %d!\n", r);
> -             radeon_cs_parser_fini(&parser, r);
> +             radeon_cs_parser_fini(&parser, r, false);
>               up_read(&rdev->exclusive_lock);
>               r = radeon_cs_handle_lockup(rdev, r);
>               return r;
> @@ -562,7 +564,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>               goto out;
>       }
>  out:
> -     radeon_cs_parser_fini(&parser, r);
> +     radeon_cs_parser_fini(&parser, r, true);
>       up_read(&rdev->exclusive_lock);
>       r = radeon_cs_handle_lockup(rdev, r);
>       return r;
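
Note the asymmetry in the new backoff argument: the early failure paths
pass false because either ttm_eu_reserve_buffers() never ran or it failed
and already finished its own ticket (see the err_fini path in the
ttm_execbuf_util.c hunk below), so there is nothing to back off; only the
path reached after a successful reservation passes true. In caller terms
(sketch):

        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                /* no reservation was taken: do not touch the ticket */
                radeon_cs_parser_fini(&parser, r, false);
                return r;
        }
        ...
out:
        /* reservation may be held: back off through parser->ticket */
        radeon_cs_parser_fini(&parser, r, true);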
> diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
> index 07af5a9..71287bb 100644
> --- a/drivers/gpu/drm/radeon/radeon_object.c
> +++ b/drivers/gpu/drm/radeon/radeon_object.c
> @@ -349,14 +349,15 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
>       }
>  }
>  
> -int radeon_bo_list_validate(struct list_head *head, int ring)
> +int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
> +                         struct list_head *head, int ring)
>  {
>       struct radeon_bo_list *lobj;
>       struct radeon_bo *bo;
>       u32 domain;
>       int r;
>  
> -     r = ttm_eu_reserve_buffers(head);
> +     r = ttm_eu_reserve_buffers(ticket, head);
>       if (unlikely(r != 0)) {
>               return r;
>       }
> diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
> index e2cb80a..3e62a3a 100644
> --- a/drivers/gpu/drm/radeon/radeon_object.h
> +++ b/drivers/gpu/drm/radeon/radeon_object.h
> @@ -128,7 +128,8 @@ extern int radeon_bo_init(struct radeon_device *rdev);
>  extern void radeon_bo_fini(struct radeon_device *rdev);
>  extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
>                               struct list_head *head);
> -extern int radeon_bo_list_validate(struct list_head *head, int ring);
> +extern int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
> +                                struct list_head *head, int ring);
>  extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
>                               struct vm_area_struct *vma);
>  extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
> diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
> index cad735d..0078cdf 100644
> --- a/drivers/gpu/drm/radeon/radeon_uvd.c
> +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
> @@ -542,6 +542,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
>                              struct radeon_fence **fence)
>  {
>       struct ttm_validate_buffer tv;
> +     struct ww_acquire_ctx ticket;
>       struct list_head head;
>       struct radeon_ib ib;
>       uint64_t addr;
> @@ -553,7 +554,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
>       INIT_LIST_HEAD(&head);
>       list_add(&tv.head, &head);
>  
> -     r = ttm_eu_reserve_buffers(&head);
> +     r = ttm_eu_reserve_buffers(&ticket, &head);
>       if (r)
>               return r;
>  
> @@ -561,16 +562,12 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
>       radeon_uvd_force_into_uvd_segment(bo);
>  
>       r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
> -     if (r) {
> -             ttm_eu_backoff_reservation(&head);
> -             return r;
> -     }
> +     if (r)
> +             goto err;
>  
>       r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
> -     if (r) {
> -             ttm_eu_backoff_reservation(&head);
> -             return r;
> -     }
> +     if (r)
> +             goto err;
>  
>       addr = radeon_bo_gpu_offset(bo);
>       ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
> @@ -584,11 +581,9 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
>       ib.length_dw = 16;
>  
>       r = radeon_ib_schedule(rdev, &ib, NULL);
> -     if (r) {
> -             ttm_eu_backoff_reservation(&head);
> -             return r;
> -     }
> -     ttm_eu_fence_buffer_objects(&head, ib.fence);
> +     if (r)
> +             goto err;
> +     ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
>  
>       if (fence)
>               *fence = radeon_fence_ref(ib.fence);
> @@ -596,6 +591,10 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
>       radeon_ib_free(rdev, &ib);
>       radeon_bo_unref(&bo);
>       return 0;
> +
> +err:
> +     ttm_eu_backoff_reservation(&ticket, &head);
> +     return r;
>  }
>  
>  /* multiple fence commands without any stream commands in between can
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 9b07b7d..b912375 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -215,7 +215,8 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
>  
>  int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
>                         bool interruptible,
> -                       bool no_wait, bool use_sequence, uint32_t sequence)
> +                       bool no_wait, bool use_ticket,
> +                       struct ww_acquire_ctx *ticket)
>  {
>       int ret;
>  
> @@ -223,17 +224,17 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
>               /**
>                * Deadlock avoidance for multi-bo reserving.
>                */
> -             if (use_sequence && bo->seq_valid) {
> +             if (use_ticket && bo->seq_valid) {
>                       /**
>                        * We've already reserved this one.
>                        */
> -                     if (unlikely(sequence == bo->val_seq))
> +                     if (unlikely(ticket->stamp == bo->val_seq))
>                               return -EDEADLK;
>                       /**
>                        * Already reserved by a thread that will not back
>                        * off for us. We need to back off.
>                        */
> -                     if (unlikely(sequence - bo->val_seq < (1 << 31)))
> +                     if (unlikely(ticket->stamp - bo->val_seq <= LONG_MAX))
>                               return -EAGAIN;
>               }
>  
> @@ -246,13 +247,14 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
>                       return ret;
>       }
>  
> -     if (use_sequence) {
> +     if (use_ticket) {
>               bool wake_up = false;
> +
>               /**
>                * Wake up waiters that may need to recheck for deadlock,
>                * if we decreased the sequence number.
>                */
> -             if (unlikely((bo->val_seq - sequence < (1 << 31))
> +             if (unlikely((bo->val_seq - ticket->stamp <= LONG_MAX)
>                            || !bo->seq_valid))
>                       wake_up = true;
>  
> @@ -266,7 +268,7 @@ int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
>                * written before val_seq was, and just means some slightly
>                * increased cpu usage
>                */
> -             bo->val_seq = sequence;
> +             bo->val_seq = ticket->stamp;
>               bo->seq_valid = true;
>               if (wake_up)
>                       wake_up_all(&bo->event_queue);
> @@ -292,14 +294,15 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
>  
>  int ttm_bo_reserve(struct ttm_buffer_object *bo,
>                  bool interruptible,
> -                bool no_wait, bool use_sequence, uint32_t sequence)
> +                bool no_wait, bool use_ticket,
> +                struct ww_acquire_ctx *ticket)
>  {
>       struct ttm_bo_global *glob = bo->glob;
>       int put_count = 0;
>       int ret;
>  
> -     ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
> -                                sequence);
> +     ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket,
> +                                 ticket);
>       if (likely(ret == 0)) {
>               spin_lock(&glob->lru_lock);
>               put_count = ttm_bo_del_from_lru(bo);
> @@ -311,13 +314,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
>  }
>  
>  int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
> -                               bool interruptible, uint32_t sequence)
> +                               bool interruptible,
> +                               struct ww_acquire_ctx *ticket)
>  {
>       bool wake_up = false;
>       int ret;
>  
>       while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
> -             WARN_ON(bo->seq_valid && sequence == bo->val_seq);
> +             WARN_ON(bo->seq_valid && ticket->stamp == bo->val_seq);
>  
>               ret = ttm_bo_wait_unreserved(bo, interruptible);
>  
> @@ -325,14 +329,14 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
>                       return ret;
>       }
>  
> -     if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
> +     if (bo->val_seq - ticket->stamp < LONG_MAX || !bo->seq_valid)
>               wake_up = true;
>  
>       /**
>        * Wake up waiters that may need to recheck for deadlock,
>        * if we decreased the sequence number.
>        */
> -     bo->val_seq = sequence;
> +     bo->val_seq = ticket->stamp;
>       bo->seq_valid = true;
>       if (wake_up)
>               wake_up_all(&bo->event_queue);
> @@ -341,12 +345,12 @@ int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
>  }
>  
>  int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
> -                         bool interruptible, uint32_t sequence)
> +                         bool interruptible, struct ww_acquire_ctx *ticket)
>  {
>       struct ttm_bo_global *glob = bo->glob;
>       int put_count, ret;
>  
> -     ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
> +     ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, ticket);
>       if (likely(!ret)) {
>               spin_lock(&glob->lru_lock);
>               put_count = ttm_bo_del_from_lru(bo);
> @@ -357,7 +361,7 @@ int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
>  }
>  EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
>  
> -void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
> +void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
>  {
>       ttm_bo_add_to_lru(bo);
>       atomic_set(&bo->reserved, 0);
> @@ -369,11 +373,21 @@ void ttm_bo_unreserve(struct ttm_buffer_object *bo)
>       struct ttm_bo_global *glob = bo->glob;
>  
>       spin_lock(&glob->lru_lock);
> -     ttm_bo_unreserve_locked(bo);
> +     ttm_bo_unreserve_ticket_locked(bo, NULL);
>       spin_unlock(&glob->lru_lock);
>  }
>  EXPORT_SYMBOL(ttm_bo_unreserve);
>  
> +void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, struct ww_acquire_ctx *ticket)
> +{
> +     struct ttm_bo_global *glob = bo->glob;
> +
> +     spin_lock(&glob->lru_lock);
> +     ttm_bo_unreserve_ticket_locked(bo, ticket);
> +     spin_unlock(&glob->lru_lock);
> +}
> +EXPORT_SYMBOL(ttm_bo_unreserve_ticket);
> +
>  /*
>   * Call bo->mutex locked.
>   */
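
The comparison changes in this file are the subtle part of the conversion:
val_seq widens from uint32_t to unsigned long (see the ttm_bo_api.h hunk
below) to match the ticket stamp, so the old wraparound-safe u32 test
"a - b < (1 << 31)" becomes the unsigned long equivalent against LONG_MAX.
The idiom, written out (stamp_after_eq is a name used here purely for
illustration):

        /* true iff stamp a is at or after stamp b, for free-running
         * unsigned stamps that may wrap around; unsigned subtraction
         * makes the test wraparound-safe */
        static inline bool stamp_after_eq(unsigned long a, unsigned long b)
        {
                return (a - b) <= LONG_MAX;
        }

(Small nit: one hunk tests "<= LONG_MAX" and the slowpath tests
"< LONG_MAX" for what looks like the same ordering check.)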
> diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> index 7b90def..efcb734 100644
> --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> @@ -32,7 +32,8 @@
>  #include <linux/sched.h>
>  #include <linux/module.h>
>  
> -static void ttm_eu_backoff_reservation_locked(struct list_head *list)
> +static void ttm_eu_backoff_reservation_locked(struct list_head *list,
> +                                           struct ww_acquire_ctx *ticket)
>  {
>       struct ttm_validate_buffer *entry;
>  
> @@ -41,14 +42,15 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
>               if (!entry->reserved)
>                       continue;
>  
> +             entry->reserved = false;
>               if (entry->removed) {
> -                     ttm_bo_add_to_lru(bo);
> +                     ttm_bo_unreserve_ticket_locked(bo, ticket);
>                       entry->removed = false;
>  
> +             } else {
> +                     atomic_set(&bo->reserved, 0);
> +                     wake_up_all(&bo->event_queue);
>               }
> -             entry->reserved = false;
> -             atomic_set(&bo->reserved, 0);
> -             wake_up_all(&bo->event_queue);
>       }
>  }
>  
> @@ -82,7 +84,8 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
>       }
>  }
>  
> -void ttm_eu_backoff_reservation(struct list_head *list)
> +void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
> +                             struct list_head *list)
>  {
>       struct ttm_validate_buffer *entry;
>       struct ttm_bo_global *glob;
> @@ -93,7 +96,8 @@ void ttm_eu_backoff_reservation(struct list_head *list)
>       entry = list_first_entry(list, struct ttm_validate_buffer, head);
>       glob = entry->bo->glob;
>       spin_lock(&glob->lru_lock);
> -     ttm_eu_backoff_reservation_locked(list);
> +     ttm_eu_backoff_reservation_locked(list, ticket);
> +     ww_acquire_fini(ticket);
>       spin_unlock(&glob->lru_lock);
>  }
>  EXPORT_SYMBOL(ttm_eu_backoff_reservation);
> @@ -110,12 +114,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
>   * buffers in different orders.
>   */
>  
> -int ttm_eu_reserve_buffers(struct list_head *list)
> +int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
> +                        struct list_head *list)
>  {
>       struct ttm_bo_global *glob;
>       struct ttm_validate_buffer *entry;
>       int ret;
> -     uint32_t val_seq;
>  
>       if (list_empty(list))
>               return 0;
> @@ -129,8 +133,8 @@ int ttm_eu_reserve_buffers(struct list_head *list)
>       entry = list_first_entry(list, struct ttm_validate_buffer, head);
>       glob = entry->bo->glob;
>  
> +     ww_acquire_init(ticket, &reservation_ww_class);
>       spin_lock(&glob->lru_lock);
> -     val_seq = entry->bo->bdev->val_seq++;
>  
>  retry:
>       list_for_each_entry(entry, list, head) {
> @@ -140,7 +144,7 @@ retry:
>               if (entry->reserved)
>                       continue;
>  
> -             ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
> +             ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
>               switch (ret) {
>               case 0:
>                       break;
> @@ -148,8 +152,9 @@ retry:
>                       ttm_eu_del_from_lru_locked(list);
>                       spin_unlock(&glob->lru_lock);
>                       ret = ttm_bo_reserve_nolru(bo, true, false,
> -                                                true, val_seq);
> +                                                true, ticket);
>                       spin_lock(&glob->lru_lock);
> +
>                       if (!ret)
>                               break;
>  
> @@ -158,21 +163,13 @@ retry:
>  
>                       /* fallthrough */
>               case -EAGAIN:
> -                     ttm_eu_backoff_reservation_locked(list);
> -
> -                     /*
> -                      * temporarily increase sequence number every retry,
> -                      * to prevent us from seeing our old reservation
> -                      * sequence when someone else reserved the buffer,
> -                      * but hasn't updated the seq_valid/seqno members yet.
> -                      */
> -                     val_seq = entry->bo->bdev->val_seq++;
> -
> +                     ttm_eu_backoff_reservation_locked(list, ticket);
>                       spin_unlock(&glob->lru_lock);
>                       ttm_eu_list_ref_sub(list);
> -                     ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
> +                     ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
>                       if (unlikely(ret != 0))
> -                             return ret;
> +                             goto err_fini;
> +
>                       spin_lock(&glob->lru_lock);
>                       entry->reserved = true;
>                       if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
> @@ -191,21 +188,25 @@ retry:
>               }
>       }
>  
> +     ww_acquire_done(ticket);
>       ttm_eu_del_from_lru_locked(list);
>       spin_unlock(&glob->lru_lock);
>       ttm_eu_list_ref_sub(list);
> -
>       return 0;
>  
>  err:
> -     ttm_eu_backoff_reservation_locked(list);
> +     ttm_eu_backoff_reservation_locked(list, ticket);
>       spin_unlock(&glob->lru_lock);
>       ttm_eu_list_ref_sub(list);
> +err_fini:
> +     ww_acquire_done(ticket);
> +     ww_acquire_fini(ticket);
>       return ret;
>  }
>  EXPORT_SYMBOL(ttm_eu_reserve_buffers);
>  
> -void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
> +void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
> +                              struct list_head *list, void *sync_obj)
>  {
>       struct ttm_validate_buffer *entry;
>       struct ttm_buffer_object *bo;
> @@ -228,11 +229,12 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
>               bo = entry->bo;
>               entry->old_sync_obj = bo->sync_obj;
>               bo->sync_obj = driver->sync_obj_ref(sync_obj);
> -             ttm_bo_unreserve_locked(bo);
> +             ttm_bo_unreserve_ticket_locked(bo, ticket);
>               entry->reserved = false;
>       }
>       spin_unlock(&bdev->fence_lock);
>       spin_unlock(&glob->lru_lock);
> +     ww_acquire_fini(ticket);
>  
>       list_for_each_entry(entry, list, head) {
>               if (entry->old_sync_obj)
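
Worth calling out for API users: ttm_eu_reserve_buffers() now owns
ww_acquire_init()/ww_acquire_done(), and the ticket is finished by
whichever of ttm_eu_fence_buffer_objects() or ttm_eu_backoff_reservation()
ends the transaction (or internally on a reserve failure), so drivers never
call the ww_* primitives themselves. The driver-side shape, as
radeon_uvd_send_msg() above uses it (do_work() is a hypothetical stand-in
for validation and command-stream building):

        struct ww_acquire_ctx ticket;
        LIST_HEAD(head);

        r = ttm_eu_reserve_buffers(&ticket, &head);
        if (r)
                return r;       /* ticket already finished on failure */
        r = do_work();
        if (r) {
                ttm_eu_backoff_reservation(&ticket, &head);
                return r;
        }
        ttm_eu_fence_buffer_objects(&ticket, &head, fence);     /* finishes it */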
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> index 394e647..599f646 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> @@ -1432,6 +1432,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
>       struct vmw_fence_obj *fence = NULL;
>       struct vmw_resource *error_resource;
>       struct list_head resource_list;
> +     struct ww_acquire_ctx ticket;
>       uint32_t handle;
>       void *cmd;
>       int ret;
> @@ -1488,7 +1489,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
>       if (unlikely(ret != 0))
>               goto out_err;
>  
> -     ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
> +     ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
>       if (unlikely(ret != 0))
>               goto out_err;
>  
> @@ -1537,7 +1538,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
>               DRM_ERROR("Fence submission error. Syncing.\n");
>  
>       vmw_resource_list_unreserve(&sw_context->resource_list, false);
> -     ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
> +     ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
>                                   (void *) fence);
>  
>       if (unlikely(dev_priv->pinned_bo != NULL &&
> @@ -1570,7 +1571,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
>  out_err:
>       vmw_resource_relocations_free(&sw_context->res_relocations);
>       vmw_free_relocations(sw_context);
> -     ttm_eu_backoff_reservation(&sw_context->validate_nodes);
> +     ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
>       vmw_resource_list_unreserve(&sw_context->resource_list, true);
>       vmw_clear_validations(sw_context);
>       if (unlikely(dev_priv->pinned_bo != NULL &&
> @@ -1644,6 +1645,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
>       struct list_head validate_list;
>       struct ttm_validate_buffer pinned_val, query_val;
>       struct vmw_fence_obj *lfence = NULL;
> +     struct ww_acquire_ctx ticket;
>  
>       if (dev_priv->pinned_bo == NULL)
>               goto out_unlock;
> @@ -1657,7 +1659,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
>       list_add_tail(&query_val.head, &validate_list);
>  
>       do {
> -             ret = ttm_eu_reserve_buffers(&validate_list);
> +             ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
>       } while (ret == -ERESTARTSYS);
>  
>       if (unlikely(ret != 0)) {
> @@ -1684,7 +1686,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
>                                                 NULL);
>               fence = lfence;
>       }
> -     ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
> +     ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
>       if (lfence != NULL)
>               vmw_fence_obj_unreference(&lfence);
>  
> @@ -1696,7 +1698,7 @@ out_unlock:
>       return;
>  
>  out_no_emit:
> -     ttm_eu_backoff_reservation(&validate_list);
> +     ttm_eu_backoff_reservation(&ticket, &validate_list);
>  out_no_reserve:
>       ttm_bo_unref(&query_val.bo);
>       ttm_bo_unref(&pinned_val.bo);
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> index bc78425..ced7946 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> @@ -990,9 +990,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
>   * @val_buf:        On successful return contains data about the
>   *                  reserved and validated backup buffer.
>   */
> -int vmw_resource_check_buffer(struct vmw_resource *res,
> -                           bool interruptible,
> -                           struct ttm_validate_buffer *val_buf)
> +static int
> +vmw_resource_check_buffer(struct vmw_resource *res,
> +                       struct ww_acquire_ctx *ticket,
> +                       bool interruptible,
> +                       struct ttm_validate_buffer *val_buf)
>  {
>       struct list_head val_list;
>       bool backup_dirty = false;
> @@ -1007,7 +1009,7 @@ int vmw_resource_check_buffer(struct vmw_resource *res,
>       INIT_LIST_HEAD(&val_list);
>       val_buf->bo = ttm_bo_reference(&res->backup->base);
>       list_add_tail(&val_buf->head, &val_list);
> -     ret = ttm_eu_reserve_buffers(&val_list);
> +     ret = ttm_eu_reserve_buffers(ticket, &val_list);
>       if (unlikely(ret != 0))
>               goto out_no_reserve;
>  
> @@ -1025,7 +1027,7 @@ int vmw_resource_check_buffer(struct vmw_resource *res,
>       return 0;
>  
>  out_no_validate:
> -     ttm_eu_backoff_reservation(&val_list);
> +     ttm_eu_backoff_reservation(ticket, &val_list);
>  out_no_reserve:
>       ttm_bo_unref(&val_buf->bo);
>       if (backup_dirty)
> @@ -1069,7 +1071,9 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
>   *.
>   * @val_buf:        Backup buffer information.
>   */
> -void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
> +static void
> +vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
> +                              struct ttm_validate_buffer *val_buf)
>  {
>       struct list_head val_list;
>  
> @@ -1078,7 +1082,7 @@ void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
>  
>       INIT_LIST_HEAD(&val_list);
>       list_add_tail(&val_buf->head, &val_list);
> -     ttm_eu_backoff_reservation(&val_list);
> +     ttm_eu_backoff_reservation(ticket, &val_list);
>       ttm_bo_unref(&val_buf->bo);
>  }
>  
> @@ -1092,12 +1096,13 @@ int vmw_resource_do_evict(struct vmw_resource *res)
>  {
>       struct ttm_validate_buffer val_buf;
>       const struct vmw_res_func *func = res->func;
> +     struct ww_acquire_ctx ticket;
>       int ret;
>  
>       BUG_ON(!func->may_evict);
>  
>       val_buf.bo = NULL;
> -     ret = vmw_resource_check_buffer(res, true, &val_buf);
> +     ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
>       if (unlikely(ret != 0))
>               return ret;
>  
> @@ -1112,7 +1117,7 @@ int vmw_resource_do_evict(struct vmw_resource *res)
>       res->backup_dirty = true;
>       res->res_dirty = false;
>  out_no_unbind:
> -     vmw_resource_backoff_reservation(&val_buf);
> +     vmw_resource_backoff_reservation(&ticket, &val_buf);
>  
>       return ret;
>  }
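
The vmwgfx eviction path gets the same treatment with a stack-allocated
context: vmw_resource_check_buffer() reserves the backup bo through the
caller's ticket and vmw_resource_backoff_reservation() finishes it, which
is also what allows both helpers to become static here. Minimal shape
(sketch):

        struct ww_acquire_ctx ticket;
        struct ttm_validate_buffer val_buf;

        ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
        if (ret)
                return ret;
        /* ... unbind/evict the resource ... */
        vmw_resource_backoff_reservation(&ticket, &val_buf);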
> diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
> index 3cb5d84..0a992b0 100644
> --- a/include/drm/ttm/ttm_bo_api.h
> +++ b/include/drm/ttm/ttm_bo_api.h
> @@ -234,7 +234,7 @@ struct ttm_buffer_object {
>       struct list_head ddestroy;
>       struct list_head swap;
>       struct list_head io_reserve_lru;
> -     uint32_t val_seq;
> +     unsigned long val_seq;
>       bool seq_valid;
>  
>       /**
> diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
> index 9c8dca7..ec18c5f 100644
> --- a/include/drm/ttm/ttm_bo_driver.h
> +++ b/include/drm/ttm/ttm_bo_driver.h
> @@ -38,6 +38,7 @@
>  #include <linux/workqueue.h>
>  #include <linux/fs.h>
>  #include <linux/spinlock.h>
> +#include <linux/reservation.h>
>  
>  struct ttm_backend_func {
>       /**
> @@ -778,7 +779,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
>   * @bo: A pointer to a struct ttm_buffer_object.
>   * @interruptible: Sleep interruptible if waiting.
>   * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
> - * @use_sequence: If @bo is already reserved, Only sleep waiting for
> - * it to become unreserved if @sequence < (@bo)->sequence.
> + * @use_ticket: If @bo is already reserved, only sleep waiting for
> + * it to become unreserved if @ticket->stamp is older.
>   *
>   * Locks a buffer object for validation. (Or prevents other processes from
> @@ -819,7 +820,8 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
>   */
>  extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
>                         bool interruptible,
> -                       bool no_wait, bool use_sequence, uint32_t sequence);
> +                       bool no_wait, bool use_ticket,
> +                       struct ww_acquire_ctx *ticket);
>  
>  /**
>   * ttm_bo_reserve_slowpath_nolru:
> @@ -836,7 +838,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
>   */
>  extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
>                                        bool interruptible,
> -                                      uint32_t sequence);
> +                                      struct ww_acquire_ctx *ticket);
>  
>  
>  /**
> @@ -850,7 +852,8 @@ extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
>   * held by us, this function cannot deadlock any more.
>   */
>  extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
> -                                bool interruptible, uint32_t sequence);
> +                                bool interruptible,
> +                                struct ww_acquire_ctx *ticket);
>  
>  /**
>   * ttm_bo_reserve_nolru:
> @@ -876,8 +879,8 @@ extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
>   */
>  extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
>                                bool interruptible,
> -                              bool no_wait, bool use_sequence,
> -                              uint32_t sequence);
> +                              bool no_wait, bool use_ticket,
> +                              struct ww_acquire_ctx *ticket);
>  
>  /**
>   * ttm_bo_unreserve
> @@ -889,14 +892,25 @@ extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
>  extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
>  
>  /**
> - * ttm_bo_unreserve_locked
> + * ttm_bo_unreserve_ticket
> + * @bo: A pointer to a struct ttm_buffer_object.
> + * @ticket: ww_acquire_ctx used for reserving
>   *
> + * Unreserve a previous reservation of @bo made with @ticket.
> + */
> +extern void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
> +                                 struct ww_acquire_ctx *ticket);
> +
> +/**
> + * ttm_bo_unreserve_locked
>   * @bo: A pointer to a struct ttm_buffer_object.
> + * @ticket: ww_acquire_ctx used for reserving, or NULL
>   *
> - * Unreserve a previous reservation of @bo.
> + * Unreserve a previous reservation of @bo made with @ticket.
>   * Needs to be called with struct ttm_bo_global::lru_lock held.
>   */
> -extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
> +extern void ttm_bo_unreserve_ticket_locked(struct ttm_buffer_object *bo,
> +                                        struct ww_acquire_ctx *ticket);
>  
>  /*
>   * ttm_bo_util.c
> diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
> index 547e19f..ba71ef9 100644
> --- a/include/drm/ttm/ttm_execbuf_util.h
> +++ b/include/drm/ttm/ttm_execbuf_util.h
> @@ -33,6 +33,7 @@
>  
>  #include <ttm/ttm_bo_api.h>
>  #include <linux/list.h>
> +#include <linux/reservation.h>
>  
>  /**
>   * struct ttm_validate_buffer
> @@ -57,17 +58,20 @@ struct ttm_validate_buffer {
>  /**
>   * function ttm_eu_backoff_reservation
>   *
> + * @ticket:   ww_acquire_ctx from reserve call
>   * @list:     thread private list of ttm_validate_buffer structs.
>   *
>   * Undoes all buffer validation reservations for bos pointed to by
>   * the list entries.
>   */
>  
> -extern void ttm_eu_backoff_reservation(struct list_head *list);
> +extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
> +                                    struct list_head *list);
>  
>  /**
>   * function ttm_eu_reserve_buffers
>   *
> + * @ticket:  [out] ww_acquire_ctx initialized by the call.
>   * @list:    thread private list of ttm_validate_buffer structs.
>   *
>   * Tries to reserve bos pointed to by the list entries for validation.
> @@ -90,11 +94,13 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
>   * has failed.
>   */
>  
> -extern int ttm_eu_reserve_buffers(struct list_head *list);
> +extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
> +                               struct list_head *list);
>  
>  /**
>   * function ttm_eu_fence_buffer_objects.
>   *
> + * @ticket:      ww_acquire_ctx from reserve call
>   * @list:        thread private list of ttm_validate_buffer structs.
>   * @sync_obj:    The new sync object for the buffers.
>   *
> @@ -104,6 +110,7 @@ extern int ttm_eu_reserve_buffers(struct list_head *list);
>   *
>   */
>  
> -extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
> +extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
> +                                     struct list_head *list, void *sync_obj);
>  
>  #endif
> -- 
> 1.8.3.1
> 
> _______________________________________________
> dri-devel mailing list
> dri-devel at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
