From: Rob Clark <robdcl...@chromium.org>

Pre-allocate the VMA objects that will be needed in the VM_BIND job.
Memory allocation cannot happen in the fence signalling path (ie. from
the job's run() callback), as it could recurse into the shrinker and
block waiting on the fence that is signalled when this job completes,
ie. deadlock.  So allocate the worst case number of VMAs up front, and
free whatever the job does not consume when it is retired.

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
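
Note to reviewers (not part of the applied change): the intent is that
the job's run() path pops entries off submit->preallocated_vmas instead
of calling kzalloc().  For example, a job with two MAP ops and one
UNMAP op pre-allocates 2 * 3 + 1 = 7 VMAs; anything the job does not
consume is freed in msm_vma_job_cleanup() when the submit is retired.
A hypothetical consumer, mirroring the list handling in
msm_vma_job_cleanup(), might look roughly like:

   static struct msm_gem_vma *
   vma_prealloc_get(struct msm_gem_submit *submit)
   {
           struct drm_gpuva *vma;

           /*
            * msm_vma_job_prepare() allocated for the worst case, so
            * the list cannot run dry while the job executes.
            */
           vma = list_first_entry(&submit->preallocated_vmas,
                                  struct drm_gpuva, rb.entry);
           list_del(&vma->rb.entry);

           return to_msm_vma(vma);
   }

The pre-allocated VMAs are chained through drm_gpuva.rb.entry, which
appears to be otherwise unused until the VMA is inserted into the VM,
so no extra list_head is needed in msm_gem_vma.
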
 drivers/gpu/drm/msm/msm_gem.h        | 10 ++++
 drivers/gpu/drm/msm/msm_gem_submit.c |  8 +++-
 drivers/gpu/drm/msm/msm_gem_vma.c    | 71 ++++++++++++++++++++++++++++
 3 files changed, 88 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 1622d557ea1f..cb76959fa8a8 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -115,6 +115,9 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
 
 void msm_gem_vm_close(struct drm_gpuvm *gpuvm);
 
+int msm_vma_job_prepare(struct msm_gem_submit *submit);
+void msm_vma_job_cleanup(struct msm_gem_submit *submit);
+
 struct msm_fence_context;
 
 /**
@@ -339,6 +342,13 @@ struct msm_gem_submit {
 
        int fence_id;       /* key into queue->fence_idr */
        struct msm_gpu_submitqueue *queue;
+
+       /*
+        * List of pre-allocated msm_gem_vma's, used to avoid memory
+        * allocation in the fence signalling path.
+        */
+       struct list_head preallocated_vmas;
+
        struct pid *pid;    /* submitting process */
        bool bos_pinned : 1;
        bool fault_dumped:1;/* Limit devcoredump dumping to one per submit */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 39a6e0418bdf..a9b3e6692db3 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -80,6 +80,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
        submit->ident = atomic_inc_return(&ident) - 1;
 
        INIT_LIST_HEAD(&submit->node);
+       INIT_LIST_HEAD(&submit->preallocated_vmas);
 
        return submit;
 }
@@ -584,6 +585,9 @@ void msm_submit_retire(struct msm_gem_submit *submit)
 {
        int i;
 
+       if (submit_is_vmbind(submit))
+               msm_vma_job_cleanup(submit);
+
        for (i = 0; i < submit->nr_bos; i++) {
                struct drm_gem_object *obj = submit->bos[i].obj;
 
@@ -912,6 +916,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        }
 
        if (submit_is_vmbind(submit)) {
-               ret = submit_get_pages(submit);
+               ret = msm_vma_job_prepare(submit);
+               if (!ret)
+                       ret = submit_get_pages(submit);
        } else {
                ret = submit_pin_vmas(submit);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 7d40b151aa95..5c7d44b004fb 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -258,6 +258,77 @@ static const struct drm_sched_backend_ops msm_vm_bind_ops = {
        .free_job = msm_vma_job_free
 };
 
+/**
+ * msm_vma_job_prepare() - VM_BIND job setup
+ * @submit: the VM_BIND job
+ *
+ * Prepare for a VM_BIND job by pre-allocating various memory that will
+ * be required once the job runs.  Memory allocations cannot happen in
+ * the fence signalling path (ie. from job->run()) as that could recurse
+ * into the shrinker and potentially block waiting on the fence that is
+ * signalled when this job completes (ie. deadlock).
+ *
+ * Called after BOs are locked.
+ *
+ * Returns: 0 on success, or -ENOMEM if pre-allocation fails.
+ */
+int
+msm_vma_job_prepare(struct msm_gem_submit *submit)
+{
+       unsigned int num_prealloc_vmas = 0;
+
+       for (int i = 0; i < submit->nr_bos; i++) {
+               unsigned int op = submit->bos[i].flags & MSM_SUBMIT_BO_OP_MASK;
+
+               if (submit->bos[i].obj)
+                       msm_gem_assert_locked(submit->bos[i].obj);
+
+               /*
+                * OP_MAP/OP_MAP_NULL has one new VMA for the new mapping,
+                * and potentially remaps with a prev and next VMA, for a
+                * total of 3 new VMAs.
+                *
+                * OP_UNMAP could trigger a remap with either a prev or
+                * next VMA, but not both.
+                */
+               num_prealloc_vmas += (op == MSM_SUBMIT_BO_OP_UNMAP) ? 1 : 3;
+       }
+
+       while (num_prealloc_vmas-- > 0) {
+               struct msm_gem_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+
+               if (!vma) {
+                       msm_vma_job_cleanup(submit);
+                       return -ENOMEM;
+               }
+
+               list_add_tail(&vma->base.rb.entry, &submit->preallocated_vmas);
+       }
+
+       return 0;
+}
+
+/**
+ * msm_vma_job_cleanup() - cleanup after a VM_BIND job
+ * @submit: the VM_BIND job
+ *
+ * The counterpart to msm_vma_job_prepare().  Frees any pre-allocated
+ * VMAs that were not consumed when the job ran.
+ */
+void
+msm_vma_job_cleanup(struct msm_gem_submit *submit)
+{
+       struct drm_gpuva *vma;
+
+       while (!list_empty(&submit->preallocated_vmas)) {
+               vma = list_first_entry(&submit->preallocated_vmas,
+                                      struct drm_gpuva,
+                                      rb.entry);
+               list_del(&vma->rb.entry);
+               kfree(to_msm_vma(vma));
+       }
+}
+
 /**
  * msm_gem_vm_create() - Create and initialize a &msm_gem_vm
  * @drm: the drm device
-- 
2.48.1
