Ah, crap. Drop the last two; they are still work in progress.

I was on the wrong branch when I sent them,
Christian.
On 27.11.2014 at 14:48, Christian König wrote:
> From: Christian König <christian.koenig@amd.com>
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/radeon/radeon.h     |  2 +-
>   drivers/gpu/drm/radeon/radeon_cs.c  |  9 +++++--
>   drivers/gpu/drm/radeon/radeon_gem.c | 17 ++++++++++----
>   drivers/gpu/drm/radeon/radeon_kms.c |  5 ++++
>   drivers/gpu/drm/radeon/radeon_vm.c  | 47 +++++++++----------------------------
>   5 files changed, 36 insertions(+), 44 deletions(-)
>
> diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
> index 3680cf0..699446a 100644
> --- a/drivers/gpu/drm/radeon/radeon.h
> +++ b/drivers/gpu/drm/radeon/radeon.h
> @@ -925,7 +925,7 @@ struct radeon_vm_id {
>   };
>   
>   struct radeon_vm {
> -     struct mutex            mutex;
> +     struct mutex            lock;
>   
>       struct rb_root          va;
>   
> diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
> index fb776cb..7a90378 100644
> --- a/drivers/gpu/drm/radeon/radeon_cs.c
> +++ b/drivers/gpu/drm/radeon/radeon_cs.c
> @@ -195,6 +195,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
>       if (p->cs_flags & RADEON_CS_USE_VM)
>               p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
>                                             &p->validated);
> +
>       if (need_mmap_lock)
>               down_read(&current->mm->mmap_sem);
>   
> @@ -571,7 +572,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
>       if (parser->ring == R600_RING_TYPE_UVD_INDEX)
>               radeon_uvd_note_usage(rdev);
>   
> -     mutex_lock(&vm->mutex);
>       r = radeon_bo_vm_update_pte(parser, vm);
>       if (r) {
>               goto out;
> @@ -592,7 +592,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
>       }
>   
>   out:
> -     mutex_unlock(&vm->mutex);
>       return r;
>   }
>   
> @@ -696,6 +695,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>       }
>   
>       r = radeon_cs_ib_fill(rdev, &parser);
> +     if (parser.cs_flags & RADEON_CS_USE_VM)
> +             mutex_lock(&parser.ib.vm->lock);
>       if (!r) {
>               r = radeon_cs_parser_relocs(&parser);
>               if (r && r != -ERESTARTSYS)
> @@ -704,6 +705,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>   
>       if (r) {
>               radeon_cs_parser_fini(&parser, r, false);
> +             if (parser.cs_flags & RADEON_CS_USE_VM)
> +                     mutex_unlock(&parser.ib.vm->lock);
>               up_read(&rdev->exclusive_lock);
>               r = radeon_cs_handle_lockup(rdev, r);
>               return r;
> @@ -721,6 +724,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
>       }
>   out:
>       radeon_cs_parser_fini(&parser, r, true);
> +     if (parser.cs_flags & RADEON_CS_USE_VM)
> +             mutex_unlock(&parser.ib.vm->lock);
>       up_read(&rdev->exclusive_lock);
>       r = radeon_cs_handle_lockup(rdev, r);
>       return r;
> diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
> index 6162bd2..4eafec6 100644
> --- a/drivers/gpu/drm/radeon/radeon_gem.c
> +++ b/drivers/gpu/drm/radeon/radeon_gem.c
> @@ -150,8 +150,10 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
>               return 0;
>       }
>   
> +     mutex_lock(&vm->lock);
>       r = radeon_bo_reserve(rbo, false);
>       if (r) {
> +             mutex_unlock(&vm->lock);
>               return r;
>       }
>   
> @@ -162,6 +164,7 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
>               ++bo_va->ref_count;
>       }
>       radeon_bo_unreserve(rbo);
> +     mutex_unlock(&vm->lock);
>   
>       return 0;
>   }
> @@ -180,8 +183,10 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
>               return;
>       }
>   
> +     mutex_lock(&vm->lock);
>       r = radeon_bo_reserve(rbo, true);
>       if (r) {
> +             mutex_unlock(&vm->lock);
>               dev_err(rdev->dev, "leaking bo va because "
>                       "we fail to reserve bo (%d)\n", r);
>               return;
> @@ -193,6 +198,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
>               }
>       }
>       radeon_bo_unreserve(rbo);
> +     mutex_unlock(&vm->lock);
>   }
>   
>   static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
> @@ -576,17 +582,13 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
>                       goto error_unreserve;
>       }
>   
> -     mutex_lock(&bo_va->vm->mutex);
>       r = radeon_vm_clear_freed(rdev, bo_va->vm);
>       if (r)
> -             goto error_unlock;
> +             goto error_unreserve;
>   
>       if (bo_va->it.start)
>               r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
>   
> -error_unlock:
> -     mutex_unlock(&bo_va->vm->mutex);
> -
>   error_unreserve:
>       ttm_eu_backoff_reservation(&ticket, &list);
>   
> @@ -662,14 +664,18 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
>               return -ENOENT;
>       }
>       rbo = gem_to_radeon_bo(gobj);
> +     mutex_lock(&fpriv->vm.lock);
>       r = radeon_bo_reserve(rbo, false);
>       if (r) {
> +             mutex_unlock(&fpriv->vm.lock);
>               args->operation = RADEON_VA_RESULT_ERROR;
>               drm_gem_object_unreference_unlocked(gobj);
>               return r;
>       }
>       bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
>       if (!bo_va) {
> +             radeon_bo_unreserve(rbo);
> +             mutex_unlock(&fpriv->vm.lock);
>               args->operation = RADEON_VA_RESULT_ERROR;
>               drm_gem_object_unreference_unlocked(gobj);
>               return -ENOENT;
> @@ -698,6 +704,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
>               args->operation = RADEON_VA_RESULT_ERROR;
>       }
>   out:
> +     mutex_unlock(&fpriv->vm.lock);
>       drm_gem_object_unreference_unlocked(gobj);
>       return r;
>   }
> diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
> index f4dd26a..d994006 100644
> --- a/drivers/gpu/drm/radeon/radeon_kms.c
> +++ b/drivers/gpu/drm/radeon/radeon_kms.c
> @@ -613,8 +613,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
>               }
>   
>               if (rdev->accel_working) {
> +                     mutex_lock(&vm->lock);
>                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
>                       if (r) {
> +                             mutex_unlock(&vm->lock);
>                               radeon_vm_fini(rdev, vm);
>                               kfree(fpriv);
>                               return r;
> @@ -628,6 +630,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
>                                                 RADEON_VA_IB_OFFSET,
>                                                 RADEON_VM_PAGE_READABLE |
>                                                 RADEON_VM_PAGE_SNOOPED);
> +                     mutex_unlock(&vm->lock);
>                       if (r) {
>                               radeon_vm_fini(rdev, vm);
>                               kfree(fpriv);
> @@ -662,12 +665,14 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
>               int r;
>   
>               if (rdev->accel_working) {
> +                     mutex_lock(&vm->lock);
>                       r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
>                       if (!r) {
>                               if (vm->ib_bo_va)
>                                       radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
>                               radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
>                       }
> +                     mutex_unlock(&vm->lock);
>               }
>   
>               radeon_vm_fini(rdev, vm);
> diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
> index cde48c4..658183f 100644
> --- a/drivers/gpu/drm/radeon/radeon_vm.c
> +++ b/drivers/gpu/drm/radeon/radeon_vm.c
> @@ -172,7 +172,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
>    * Allocate an id for the vm (cayman+).
>    * Returns the fence we need to sync to (if any).
>    *
> - * Global and local mutex must be locked!
> + * Mutex must be locked!
>    */
>   struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
>                                      struct radeon_vm *vm, int ring)
> @@ -231,7 +231,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
>    *
>    * Flush the vm (cayman+).
>    *
> - * Global and local mutex must be locked!
> + * Mutex must be locked!
>    */
>   void radeon_vm_flush(struct radeon_device *rdev,
>                    struct radeon_vm *vm,
> @@ -263,7 +263,7 @@ void radeon_vm_flush(struct radeon_device *rdev,
>    * Fence the vm (cayman+).
>    * Set the fence used to protect page table and id.
>    *
> - * Global and local mutex must be locked!
> + * Mutex must be locked!
>    */
>   void radeon_vm_fence(struct radeon_device *rdev,
>                    struct radeon_vm *vm,
> @@ -314,7 +314,7 @@ struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
>    * Add @bo to the list of bos associated with the vm
>    * Returns newly added bo_va or NULL for failure
>    *
> - * Object has to be reserved!
> + * Mutex must be locked and object has to be reserved!
>    */
>   struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
>                                     struct radeon_vm *vm,
> @@ -336,9 +336,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
>       INIT_LIST_HEAD(&bo_va->bo_list);
>       INIT_LIST_HEAD(&bo_va->vm_status);
>   
> -     mutex_lock(&vm->mutex);
>       list_add_tail(&bo_va->bo_list, &bo->va);
> -     mutex_unlock(&vm->mutex);
>   
>       return bo_va;
>   }
> @@ -441,7 +439,8 @@ error_unreserve:
>    * Validate and set the offset requested within the vm address space.
>    * Returns 0 for success, error for failure.
>    *
> - * Object has to be reserved and gets unreserved by this function!
> + * Mutex must be locked and object has to be reserved and gets
> + * unreserved by this function!
>    */
>   int radeon_vm_bo_set_addr(struct radeon_device *rdev,
>                         struct radeon_bo_va *bo_va,
> @@ -472,14 +471,12 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
>               eoffset = last_pfn = 0;
>       }
>   
> -     mutex_lock(&vm->mutex);
>       if (bo_va->it.start || bo_va->it.last) {
>               if (bo_va->addr) {
>                       /* add a clone of the bo_va to clear the old address */
>                       struct radeon_bo_va *tmp;
>                       tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
>                       if (!tmp) {
> -                             mutex_unlock(&vm->mutex);
>                               return -ENOMEM;
>                       }
>                       tmp->it.start = bo_va->it.start;
> @@ -509,7 +506,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
>                       dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
>                               "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
>                               soffset, tmp->bo, tmp->it.start, tmp->it.last);
> -                     mutex_unlock(&vm->mutex);
>                       return -EINVAL;
>               }
>               bo_va->it.start = soffset;
> @@ -537,9 +533,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
>               if (vm->page_tables[pt_idx].bo)
>                       continue;
>   
> -             /* drop mutex to allocate and clear page table */
> -             mutex_unlock(&vm->mutex);
> -
>               r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
>                                    RADEON_GPU_PAGE_SIZE, true,
>                                    RADEON_GEM_DOMAIN_VRAM, 0,
> @@ -554,21 +547,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
>                       return r;
>               }
>   
> -             /* aquire mutex again */
> -             mutex_lock(&vm->mutex);
> -             if (vm->page_tables[pt_idx].bo) {
> -                     /* someone else allocated the pt in the meantime */
> -                     mutex_unlock(&vm->mutex);
> -                     radeon_bo_unref(&pt);
> -                     mutex_lock(&vm->mutex);
> -                     continue;
> -             }
> -
>               vm->page_tables[pt_idx].addr = 0;
>               vm->page_tables[pt_idx].bo = pt;
>       }
>   
> -     mutex_unlock(&vm->mutex);
>       return 0;
>   }
>   
> @@ -627,7 +609,7 @@ static uint32_t radeon_vm_page_flags(uint32_t flags)
>    * and updates the page directory (cayman+).
>    * Returns 0 for success, error for failure.
>    *
> - * Global and local mutex must be locked!
> + * Mutex must be locked!
>    */
>   int radeon_vm_update_page_directory(struct radeon_device *rdev,
>                                   struct radeon_vm *vm)
> @@ -717,8 +699,6 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
>    * @pe_end: last PTE to handle
>    * @addr: addr those PTEs should point to
>    * @flags: hw mapping flags
> - *
> - * Global and local mutex must be locked!
>    */
>   static void radeon_vm_frag_ptes(struct radeon_device *rdev,
>                               struct radeon_ib *ib,
> @@ -798,7 +778,6 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
>    *
>    * Update the page tables in the range @start - @end (cayman+).
>    *
> - * Global and local mutex must be locked!
>    */
>   static int radeon_vm_update_ptes(struct radeon_device *rdev,
>                                struct radeon_vm *vm,
> @@ -870,7 +849,6 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
>    *
>    * Fence the page tables in the range @start - @end (cayman+).
>    *
> - * Global and local mutex must be locked!
>    */
>   static void radeon_vm_fence_pts(struct radeon_vm *vm,
>                               uint64_t start, uint64_t end,
> @@ -896,7 +874,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
>    * Fill in the page table entries for @bo (cayman+).
>    * Returns 0 for success, -EINVAL for failure.
>    *
> - * Object have to be reserved and mutex must be locked!
> + * Mutex must be locked and BOs have to be reserved!
>    */
>   int radeon_vm_bo_update(struct radeon_device *rdev,
>                       struct radeon_bo_va *bo_va,
> @@ -1097,7 +1075,7 @@ int radeon_vm_clear_invalids(struct radeon_device *rdev,
>    *
>    * Remove @bo_va->bo from the requested vm (cayman+).
>    *
> - * Object have to be reserved!
> + * Mutex must be locked and object has to be reserved!
>    */
>   void radeon_vm_bo_rmv(struct radeon_device *rdev,
>                     struct radeon_bo_va *bo_va)
> @@ -1106,7 +1084,6 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
>   
>       list_del(&bo_va->bo_list);
>   
> -     mutex_lock(&vm->mutex);
>       interval_tree_remove(&bo_va->it, &vm->va);
>       spin_lock(&vm->status_lock);
>       list_del(&bo_va->vm_status);
> @@ -1119,8 +1096,6 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
>               kfree(bo_va);
>       }
>       spin_unlock(&vm->status_lock);
> -
> -     mutex_unlock(&vm->mutex);
>   }
>   
>   /**
> @@ -1168,7 +1143,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
>               vm->ids[i].flushed_updates = NULL;
>               vm->ids[i].last_id_use = NULL;
>       }
> -     mutex_init(&vm->mutex);
> +     mutex_init(&vm->lock);
>       vm->va = RB_ROOT;
>       spin_lock_init(&vm->status_lock);
>       INIT_LIST_HEAD(&vm->invalidated);
> @@ -1245,5 +1220,5 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
>               radeon_fence_unref(&vm->ids[i].last_id_use);
>       }
>   
> -     mutex_destroy(&vm->mutex);
> +     mutex_destroy(&vm->lock);
>   }
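
For reference, the locking pattern the patch above establishes: the per-VM
mutex is renamed to vm->lock and hoisted out of the radeon_vm_* helpers into
their callers, which now take it around BO reservation. A minimal sketch of
the caller side, modeled on the radeon_gem_object_open() hunk, simplified:

	mutex_lock(&vm->lock);              /* per-VM lock is now taken by the caller */
	r = radeon_bo_reserve(rbo, false);  /* BO reservation happens under the lock */
	if (r) {
		mutex_unlock(&vm->lock);
		return r;
	}
	bo_va = radeon_vm_bo_find(vm, rbo); /* helpers expect lock held and BO reserved */
	if (!bo_va)
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	radeon_bo_unreserve(rbo);
	mutex_unlock(&vm->lock);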
