Move the code dealing with entities entering and exiting run queues into helpers, logically separating it from the code dealing with jobs entering and exiting entities.
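With the new helpers, the first-job path in drm_sched_entity_push_job() reduces to roughly the following (sketch condensed from the diff below), while the re-queue logic on the pop side becomes a single drm_sched_rq_pop_entity() call:

	/* first job wakes up scheduler */
	if (first) {
		struct drm_gpu_scheduler *sched;

		sched = drm_sched_rq_add_entity(entity, submit_ts);
		if (sched)
			drm_sched_wakeup(sched);
	}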
Signed-off-by: Tvrtko Ursulin <tvrtko.ursu...@igalia.com>
Cc: Christian König <christian.koe...@amd.com>
Cc: Danilo Krummrich <d...@kernel.org>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Philipp Stanner <pha...@kernel.org>
---
 drivers/gpu/drm/scheduler/sched_entity.c   | 60 ++-------------
 drivers/gpu/drm/scheduler/sched_internal.h |  8 +-
 drivers/gpu/drm/scheduler/sched_main.c     | 87 +++++++++++++++++++---
 3 files changed, 83 insertions(+), 72 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index bbb7f3d3e3e8..8362184fe431 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -469,19 +469,9 @@ drm_sched_job_dependency(struct drm_sched_job *job,
 	return NULL;
 }
 
-static ktime_t
-drm_sched_rq_get_rr_deadline(struct drm_sched_rq *rq)
-{
-	lockdep_assert_held(&rq->lock);
-
-	rq->rr_deadline = ktime_add_ns(rq->rr_deadline, 1);
-
-	return rq->rr_deadline;
-}
-
 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
-	struct drm_sched_job *sched_job, *next_job;
+	struct drm_sched_job *sched_job;
 
 	sched_job = drm_sched_entity_queue_peek(entity);
 	if (!sched_job)
@@ -512,27 +502,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 
 	spsc_queue_pop(&entity->job_queue);
 
-	/*
-	 * Update the entity's location in the min heap according to
-	 * the timestamp of the next job, if any.
-	 */
-	next_job = drm_sched_entity_queue_peek(entity);
-	if (next_job) {
-		struct drm_sched_rq *rq;
-		ktime_t ts;
-
-		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
-			ts = next_job->submit_ts;
-		else
-			ts = drm_sched_rq_get_rr_deadline(rq);
-
-		spin_lock(&entity->lock);
-		rq = entity->rq;
-		spin_lock(&rq->lock);
-		drm_sched_rq_update_fifo_locked(entity, rq, ts);
-		spin_unlock(&rq->lock);
-		spin_unlock(&entity->lock);
-	}
+	drm_sched_rq_pop_entity(entity);
 
 	/* Jobs and entities might have different lifecycles. Since we're
 	 * removing the job from the entities queue, set the jobs entity pointer
@@ -614,30 +584,10 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 	/* first job wakes up scheduler */
 	if (first) {
 		struct drm_gpu_scheduler *sched;
-		struct drm_sched_rq *rq;
 
-		/* Add the entity to the run queue */
-		spin_lock(&entity->lock);
-		if (entity->stopped) {
-			spin_unlock(&entity->lock);
-
-			DRM_ERROR("Trying to push to a killed entity\n");
-			return;
-		}
-
-		rq = entity->rq;
-		sched = rq->sched;
-
-		spin_lock(&rq->lock);
-		drm_sched_rq_add_entity(rq, entity);
-		if (drm_sched_policy == DRM_SCHED_POLICY_RR)
-			submit_ts = drm_sched_rq_get_rr_deadline(rq);
-		drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);
-
-		spin_unlock(&rq->lock);
-		spin_unlock(&entity->lock);
-
-		drm_sched_wakeup(sched);
+		sched = drm_sched_rq_add_entity(entity, submit_ts);
+		if (sched)
+			drm_sched_wakeup(sched);
 	}
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
index 599cf6e1bb74..8e7e477bace3 100644
--- a/drivers/gpu/drm/scheduler/sched_internal.h
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -12,13 +12,11 @@ extern int drm_sched_policy;
 
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
-			     struct drm_sched_entity *entity);
+struct drm_gpu_scheduler *
+drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts);
 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 				struct drm_sched_entity *entity);
-
-void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
-				     struct drm_sched_rq *rq, ktime_t ts);
+void drm_sched_rq_pop_entity(struct drm_sched_entity *entity);
 
 void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
 struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index e931a9b91083..8736c7cd3ddd 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -150,15 +150,18 @@ static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
 static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
 					    struct drm_sched_rq *rq)
 {
+	lockdep_assert_held(&entity->lock);
+	lockdep_assert_held(&rq->lock);
+
 	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
 		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
 		RB_CLEAR_NODE(&entity->rb_tree_node);
 	}
 }
 
-void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
-				     struct drm_sched_rq *rq,
-				     ktime_t ts)
+static void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
+					    struct drm_sched_rq *rq,
+					    ktime_t ts)
 {
 	/*
 	 * Both locks need to be grabbed, one to protect from entity->rq change
@@ -193,25 +196,58 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
 	rq->sched = sched;
 }
 
+static ktime_t
+drm_sched_rq_get_rr_deadline(struct drm_sched_rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+
+	rq->rr_deadline = ktime_add_ns(rq->rr_deadline, 1);
+
+	return rq->rr_deadline;
+}
+
 /**
  * drm_sched_rq_add_entity - add an entity
  *
- * @rq: scheduler run queue
  * @entity: scheduler entity
+ * @ts: submission timestamp
  *
  * Adds a scheduler entity to the run queue.
+ *
+ * Returns a DRM scheduler pre-selected to handle this entity.
  */
-void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
-			     struct drm_sched_entity *entity)
+struct drm_gpu_scheduler *
+drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts)
 {
-	lockdep_assert_held(&entity->lock);
-	lockdep_assert_held(&rq->lock);
+	struct drm_gpu_scheduler *sched;
+	struct drm_sched_rq *rq;
 
-	if (!list_empty(&entity->list))
-		return;
+	/* Add the entity to the run queue */
+	spin_lock(&entity->lock);
+	if (entity->stopped) {
+		spin_unlock(&entity->lock);
 
-	atomic_inc(rq->sched->score);
-	list_add_tail(&entity->list, &rq->entities);
+		DRM_ERROR("Trying to push to a killed entity\n");
+		return NULL;
+	}
+
+	rq = entity->rq;
+	spin_lock(&rq->lock);
+	sched = rq->sched;
+
+	if (list_empty(&entity->list)) {
+		atomic_inc(sched->score);
+		list_add_tail(&entity->list, &rq->entities);
+	}
+
+	if (drm_sched_policy == DRM_SCHED_POLICY_RR)
+		ts = drm_sched_rq_get_rr_deadline(rq);
+	drm_sched_rq_update_fifo_locked(entity, rq, ts);
+
+	spin_unlock(&rq->lock);
+	spin_unlock(&entity->lock);
+
+	return sched;
 }
 
 /**
@@ -240,6 +276,33 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 	spin_unlock(&rq->lock);
 }
 
+void drm_sched_rq_pop_entity(struct drm_sched_entity *entity)
+{
+	struct drm_sched_job *next_job;
+	struct drm_sched_rq *rq;
+	ktime_t ts;
+
+	/*
+	 * Update the entity's location in the min heap according to
+	 * the timestamp of the next job, if any.
+	 */
+	next_job = drm_sched_entity_queue_peek(entity);
+	if (!next_job)
+		return;
+
+	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+		ts = next_job->submit_ts;
+	else
+		ts = drm_sched_rq_get_rr_deadline(rq);
+
+	spin_lock(&entity->lock);
+	rq = entity->rq;
+	spin_lock(&rq->lock);
+	drm_sched_rq_update_fifo_locked(entity, rq, ts);
+	spin_unlock(&rq->lock);
+	spin_unlock(&entity->lock);
+}
+
 /**
  * drm_sched_rq_select_entity - Select an entity which provides a job to run
  *
-- 
2.48.0