struct nouveau_sched contains a waitqueue needed to prevent
drm_sched_fini() from being called while there are still jobs pending.
Until now, doing so would have caused memory leaks.
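
For reference, the mechanism being removed boils down to this (condensed
from the hunks below; not a verbatim quote of nouveau_sched.c):

        /* nouveau_job_done(): take the finished job off the pending list
         * and wake anyone waiting for that list to drain. */
        spin_lock(&sched->job.list.lock);
        list_del(&job->entry);
        spin_unlock(&sched->job.list.lock);
        wake_up(&sched->job.wq);

        /* nouveau_sched_fini(): block until no jobs are pending, and only
         * then tear down the entity and the scheduler. */
        rmb(); /* for list_empty() to work without the lock */
        wait_event(sched->job.wq, list_empty(&sched->job.list.head));
        drm_sched_entity_fini(entity);
        drm_sched_fini(drm_sched);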

With the new memleak-free mode of operation in drm_sched_fini(),
switched on by providing the callback nouveau_sched_fence_context_kill(),
the waitqueue is not necessary anymore.
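
A minimal sketch of how that could look when the scheduler is set up;
the existing entries match nouveau's current nouveau_sched_ops, but the
new field name (.kill_fence_context) is an assumption here, since this
patch only names the nouveau-side callback:

        static const struct drm_sched_backend_ops nouveau_sched_ops = {
                .run_job            = nouveau_sched_run_job,
                .timedout_job       = nouveau_sched_timedout_job,
                .free_job           = nouveau_sched_free_job,
                /* Assumed field name: callback drm_sched_fini() invokes to
                 * kill the fence context so pending jobs are reaped rather
                 * than leaked. */
                .kill_fence_context = nouveau_sched_fence_context_kill,
        };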

Remove the waitqueue.

Signed-off-by: Philipp Stanner <pha...@kernel.org>
---
 drivers/gpu/drm/nouveau/nouveau_sched.c | 20 +++++++-------------
 drivers/gpu/drm/nouveau/nouveau_sched.h |  9 +++------
 drivers/gpu/drm/nouveau/nouveau_uvmm.c  |  8 ++++----
 3 files changed, 14 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 3659ac78bb3e..d9ac76198616 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -121,11 +121,9 @@ nouveau_job_done(struct nouveau_job *job)
 {
        struct nouveau_sched *sched = job->sched;
 
-       spin_lock(&sched->job.list.lock);
+       spin_lock(&sched->job_list.lock);
        list_del(&job->entry);
-       spin_unlock(&sched->job.list.lock);
-
-       wake_up(&sched->job.wq);
+       spin_unlock(&sched->job_list.lock);
 }
 
 void
@@ -306,9 +304,9 @@ nouveau_job_submit(struct nouveau_job *job)
        }
 
        /* Submit was successful; add the job to the schedulers job list. */
-       spin_lock(&sched->job.list.lock);
-       list_add(&job->entry, &sched->job.list.head);
-       spin_unlock(&sched->job.list.lock);
+       spin_lock(&sched->job_list.lock);
+       list_add(&job->entry, &sched->job_list.head);
+       spin_unlock(&sched->job_list.lock);
 
        drm_sched_job_arm(&job->base);
        job->done_fence = dma_fence_get(&job->base.s_fence->finished);
@@ -458,9 +456,8 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm,
                goto fail_sched;
 
        mutex_init(&sched->mutex);
-       spin_lock_init(&sched->job.list.lock);
-       INIT_LIST_HEAD(&sched->job.list.head);
-       init_waitqueue_head(&sched->job.wq);
+       spin_lock_init(&sched->job_list.lock);
+       INIT_LIST_HEAD(&sched->job_list.head);
 
        return 0;
 
@@ -503,9 +500,6 @@ nouveau_sched_fini(struct nouveau_sched *sched)
        struct drm_gpu_scheduler *drm_sched = &sched->base;
        struct drm_sched_entity *entity = &sched->entity;
 
-       rmb(); /* for list_empty to work without lock */
-       wait_event(sched->job.wq, list_empty(&sched->job.list.head));
-
        drm_sched_entity_fini(entity);
        drm_sched_fini(drm_sched);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h
index e6e2016a3569..339a14563fbb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.h
@@ -105,12 +105,9 @@ struct nouveau_sched {
        struct nouveau_channel *chan;
 
        struct {
-               struct {
-                       struct list_head head;
-                       spinlock_t lock;
-               } list;
-               struct wait_queue_head wq;
-       } job;
+               struct list_head head;
+               spinlock_t lock;
+       } job_list;
 };
 
 int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 48f105239f42..ddfc46bc1b3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1019,8 +1019,8 @@ bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
        u64 end = addr + range;
 
 again:
-       spin_lock(&sched->job.list.lock);
-       list_for_each_entry(__job, &sched->job.list.head, entry) {
+       spin_lock(&sched->job_list.lock);
+       list_for_each_entry(__job, &sched->job_list.head, entry) {
                struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job);
 
                list_for_each_op(op, &bind_job->ops) {
@@ -1030,7 +1030,7 @@ bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
 
                                if (!(end <= op_addr || addr >= op_end)) {
                                        nouveau_uvmm_bind_job_get(bind_job);
-                                       spin_unlock(&sched->job.list.lock);
+                                       spin_unlock(&sched->job_list.lock);
                                        wait_for_completion(&bind_job->complete);
                                        nouveau_uvmm_bind_job_put(bind_job);
                                        goto again;
@@ -1038,7 +1038,7 @@ bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range)
                        }
                }
        }
-       spin_unlock(&sched->job.list.lock);
+       spin_unlock(&sched->job_list.lock);
 }
 
 static int
-- 
2.48.1
