The rq->lock only needs to protect the rb tree walk, so move the rest of
the entity selection logic outside of the locked section.
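
For reference, after this change the selection path looks roughly as
follows (a simplified sketch; the exact code is in the hunk below):

	spin_lock(&rq->lock);
	/* Walk the rb tree and pick the first ready entity, or NULL. */
	spin_unlock(&rq->lock);

	if (!entity)
		return NULL;

	/* The remaining steps no longer need rq->lock. */
	if (!drm_sched_can_queue(sched, entity))
		return ERR_PTR(-ENOSPC);

	reinit_completion(&entity->entity_idle);

	return entity;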

Signed-off-by: Tvrtko Ursulin <tvrtko.ursu...@igalia.com>
Cc: Christian König <christian.koe...@amd.com>
Cc: Danilo Krummrich <d...@redhat.com>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Philipp Stanner <pstan...@redhat.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 31 ++++++++++++++------------
 1 file changed, 17 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f07b19c97d0f..9a3d31c33fa9 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -325,29 +325,32 @@ static struct drm_sched_entity *
 drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
                                struct drm_sched_rq *rq)
 {
+       struct drm_sched_entity *entity = NULL;
        struct rb_node *rb;
 
        spin_lock(&rq->lock);
        for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
-               struct drm_sched_entity *entity;
-
                entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
-               if (drm_sched_entity_is_ready(entity)) {
-                       /* If we can't queue yet, preserve the current entity in
-                        * terms of fairness.
-                        */
-                       if (!drm_sched_can_queue(sched, entity)) {
-                               spin_unlock(&rq->lock);
-                               return ERR_PTR(-ENOSPC);
-                       }
-
-                       reinit_completion(&entity->entity_idle);
+               if (drm_sched_entity_is_ready(entity))
                        break;
-               }
+               else
+                       entity = NULL;
        }
        spin_unlock(&rq->lock);
 
-       return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
+       if (!entity)
+               return NULL;
+
+       /*
+        * If the scheduler cannot take more jobs, signal the caller not to
+        * consider lower priority queues.
+        */
+       if (!drm_sched_can_queue(sched, entity))
+               return ERR_PTR(-ENOSPC);
+
+       reinit_completion(&entity->entity_idle);
+
+       return entity;
 }
 
 /**
-- 
2.47.1
