From: Rob Clark <robdcl...@chromium.org>

Split memory allocation from vma initialization.  Async vm-bind runs in
the fence signalling path, where memory allocation is not allowed, so it
will need to use pre-allocated memory.

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
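Note (illustrative, not part of the commit): a rough sketch of how an async
vm-bind job might consume this split.  The msm_vm_bind_job struct and the
bind_job_prepare()/bind_job_run() helpers below are hypothetical names used
only to show the intent; __vma_init(), vm_lock, and the msm_gem_vm /
msm_gem_vma types are from this patch, and the sketch assumes it lives in
msm_gem_vma.c.  Memory is allocated up front in process context, and only
the already-allocated vma is initialized later from the fence signalling
path, where allocation is not allowed.

struct msm_vm_bind_job {                        /* hypothetical */
        struct msm_gem_vma *prealloc_vma;       /* memory grabbed up front */
};

/* Hypothetical setup step, process context (e.g. at ioctl time): allocation is fine here. */
static int bind_job_prepare(struct msm_vm_bind_job *job)
{
        job->prealloc_vma = kzalloc(sizeof(*job->prealloc_vma), GFP_KERNEL);
        if (!job->prealloc_vma)
                return -ENOMEM;
        return 0;
}

/*
 * Hypothetical run step, fence signalling path: no allocation allowed, so
 * only initialize and insert the pre-allocated vma under vm_lock.  Error
 * handling / ownership of the pre-allocated memory on failure is elided.
 */
static struct drm_gpuva *
bind_job_run(struct msm_vm_bind_job *job, struct msm_gem_vm *vm,
             struct drm_gem_object *obj, u64 offset, u64 start, u64 end)
{
        struct drm_gpuva *vma;

        mutex_lock(&vm->vm_lock);
        vma = __vma_init(job->prealloc_vma, &vm->base, obj, offset, start, end);
        mutex_unlock(&vm->vm_lock);

        return vma;
}
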
 drivers/gpu/drm/msm/msm_gem_vma.c | 67 ++++++++++++++++++++++---------
 1 file changed, 49 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index baa5c6a0ff22..7d40b151aa95 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -71,40 +71,54 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
        return ret;
 }
 
-/* Close an iova.  Warn if it is still in use */
-void msm_gem_vma_close(struct drm_gpuva *vma)
+static void __vma_close(struct drm_gpuva *vma)
 {
        struct msm_gem_vm *vm = to_msm_vm(vma->vm);
        struct msm_gem_vma *msm_vma = to_msm_vma(vma);
 
        GEM_WARN_ON(msm_vma->mapped);
+       GEM_WARN_ON(!mutex_is_locked(&vm->vm_lock));
 
        spin_lock(&vm->mm_lock);
        if (vma->va.addr && vm->managed)
                drm_mm_remove_node(&msm_vma->node);
        spin_unlock(&vm->mm_lock);
 
-       dma_resv_lock(drm_gpuvm_resv(vma->vm), NULL);
-       mutex_lock(&vm->vm_lock);
        drm_gpuva_remove(vma);
        drm_gpuva_unlink(vma);
-       mutex_unlock(&vm->vm_lock);
-       dma_resv_unlock(drm_gpuvm_resv(vma->vm));
 
        kfree(vma);
 }
 
-/* Create a new vma and allocate an iova for it */
-struct drm_gpuva *
-msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
-               u64 offset, u64 range_start, u64 range_end)
+/* Close an iova.  Warn if it is still in use */
+void msm_gem_vma_close(struct drm_gpuva *vma)
+{
+       struct msm_gem_vm *vm = to_msm_vm(vma->vm);
+
+       /*
+        * Only used in legacy (kernel managed) VM, if userspace is managing
+        * the VM, the legacy paths should be disallowed:
+        */
+       GEM_WARN_ON(!vm->managed);
+
+       dma_resv_lock(drm_gpuvm_resv(vma->vm), NULL);
+       mutex_lock(&vm->vm_lock);
+       __vma_close(vma);
+       mutex_unlock(&vm->vm_lock);
+       dma_resv_unlock(drm_gpuvm_resv(vma->vm));
+}
+
+static struct drm_gpuva *
+__vma_init(struct msm_gem_vma *vma, struct drm_gpuvm *_vm,
+          struct drm_gem_object *obj, u64 offset,
+          u64 range_start, u64 range_end)
 {
        struct msm_gem_vm *vm = to_msm_vm(_vm);
        struct drm_gpuvm_bo *vm_bo;
-       struct msm_gem_vma *vma;
        int ret;
 
-       vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+       GEM_WARN_ON(!mutex_is_locked(&vm->vm_lock));
+
        if (!vma)
                return ERR_PTR(-ENOMEM);
 
@@ -128,9 +142,7 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
        drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
        vma->mapped = false;
 
-       mutex_lock(&vm->vm_lock);
        ret = drm_gpuva_insert(&vm->base, &vma->base);
-       mutex_unlock(&vm->vm_lock);
        if (ret)
                goto err_free_range;
 
@@ -140,17 +152,13 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
                goto err_va_remove;
        }
 
-       mutex_lock(&vm->vm_lock);
        drm_gpuva_link(&vma->base, vm_bo);
-       mutex_unlock(&vm->vm_lock);
        GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
 
        return &vma->base;
 
 err_va_remove:
-       mutex_lock(&vm->vm_lock);
        drm_gpuva_remove(&vma->base);
-       mutex_unlock(&vm->vm_lock);
 err_free_range:
        if (vm->managed)
                drm_mm_remove_node(&vma->node);
@@ -159,6 +167,29 @@ msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
        return ERR_PTR(ret);
 }
 
+/* Create a new vma and allocate an iova for it */
+struct drm_gpuva *
+msm_gem_vma_new(struct drm_gpuvm *_vm, struct drm_gem_object *obj,
+               u64 offset, u64 range_start, u64 range_end)
+{
+       struct msm_gem_vm *vm = to_msm_vm(_vm);
+       struct msm_gem_vma *vma;
+       struct drm_gpuva *va;
+
+       /*
+        * Only used in legacy (kernel managed) VM, if userspace is managing
+        * the VM, the legacy paths should be disallowed:
+        */
+       GEM_WARN_ON(!vm->managed);
+
+       vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+
+       mutex_lock(&vm->vm_lock);
+       va = __vma_init(vma, _vm, obj, offset, range_start, range_end);
+       mutex_unlock(&vm->vm_lock);
+
+       return va;
+}
+
 static const struct drm_gpuvm_ops msm_gpuvm_ops = {
        .vm_free = msm_gem_vm_free,
 };
-- 
2.48.1
