On Fri Dec 19, 2025 at 2:53 PM CET, Tvrtko Ursulin wrote:
> +/**
> + * drm_sched_entity_stats_alloc - Allocate a new struct drm_sched_entity_stats object
> + *
> + * Return: Pointer to newly allocated struct drm_sched_entity_stats object.
> + */
> +static struct drm_sched_entity_stats *drm_sched_entity_stats_alloc(void)

NIT: This function allocates and initializes the structure, so a name along
the lines of *_new() would be a better fit.
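
I.e. something like (name change only, body unchanged):

	static struct drm_sched_entity_stats *drm_sched_entity_stats_new(void)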

> +{
> +     struct drm_sched_entity_stats *stats;
> +
> +     stats = kzalloc(sizeof(*stats), GFP_KERNEL);
> +     if (!stats)
> +             return NULL;
> +
> +     kref_init(&stats->kref);
> +     spin_lock_init(&stats->lock);
> +
> +     return stats;
> +}

<snip>

> +/**
> + * struct drm_sched_entity_stats - execution stats for an entity.
> + * @kref: reference count for the object.
> + * @lock: lock guarding the @runtime updates.
> + * @runtime: time entity spent on the GPU.
> + *
> + * Because jobs and entities have decoupled lifetimes, ie. we cannot access the

The beginning of this sentence seems slightly broken.

> + * entity once the job is completed and we know how much time it took on the
> + * GPU, we need to track these stats in a separate object which is then
> + * reference counted by both entities and jobs.
> + */
> +struct drm_sched_entity_stats {
> +     struct kref     kref;
> +     spinlock_t      lock;
> +     ktime_t         runtime;

We can avoid the lock entirely by using an atomic64_t instead. ktime_t is just
a typedef for s64.
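
Rough sketch of what I mean (untested):

	struct drm_sched_entity_stats {
		struct kref	kref;
		atomic64_t	runtime;	/* accumulated GPU time in ns */
	};

and in the update path simply:

	atomic64_add(ktime_to_ns(ktime_sub(end, start)), &stats->runtime);

Readers can then convert back with ns_to_ktime(atomic64_read(&stats->runtime)).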

> +};

<snip>

> +/**
> + * drm_sched_entity_stats_job_add_gpu_time - Account job execution time to entity
> + * @job: Scheduler job to account.
> + *
> + * Accounts the execution time of @job to its respective entity stats object.
> + */
> +static inline void
> +drm_sched_entity_stats_job_add_gpu_time(struct drm_sched_job *job)
> +{
> +     struct drm_sched_entity_stats *stats = job->entity_stats;
> +     struct drm_sched_fence *s_fence = job->s_fence;
> +     ktime_t start, end;
> +
> +     start = dma_fence_timestamp(&s_fence->scheduled);
> +     end = dma_fence_timestamp(&s_fence->finished);
> +
> +     spin_lock(&stats->lock);
> +     stats->runtime = ktime_add(stats->runtime, ktime_sub(end, start));
> +     spin_unlock(&stats->lock);
> +}

This shouldn't be an inline function in the header; please move it to
sched_entity.c.
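
I.e. keep only the declaration visible to sched_main.c (exact header is your
call, this is just a sketch):

	void drm_sched_entity_stats_job_add_gpu_time(struct drm_sched_job *job);

and move the body verbatim into sched_entity.c, dropping the "static inline".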

> +
>  #endif
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index f825ad9e2260..4c10c7ba6704 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -660,6 +660,7 @@ void drm_sched_job_arm(struct drm_sched_job *job)
>  
>       job->sched = sched;
>       job->s_priority = entity->priority;
> +     job->entity_stats = drm_sched_entity_stats_get(entity->stats);
>  
>       drm_sched_fence_init(job->s_fence, job->entity);
>  }
> @@ -849,6 +850,7 @@ void drm_sched_job_cleanup(struct drm_sched_job *job)
>                * been called.
>                */
>               dma_fence_put(&job->s_fence->finished);
> +             drm_sched_entity_stats_put(job->entity_stats);
>       } else {
>               /* The job was aborted before it has been committed to be run;
>                * notably, drm_sched_job_arm() has not been called.
> @@ -1000,8 +1002,10 @@ static void drm_sched_free_job_work(struct work_struct *w)
>               container_of(w, struct drm_gpu_scheduler, work_free_job);
>       struct drm_sched_job *job;
>  
> -     while ((job = drm_sched_get_finished_job(sched)))
> +     while ((job = drm_sched_get_finished_job(sched))) {
> +             drm_sched_entity_stats_job_add_gpu_time(job);

Is it really always OK to update this value in the free job work? What if a
new job gets scheduled concurrently? Doesn't that hurt accuracy, since the
entity's stats have not been updated yet at that point?

>               sched->ops->free_job(job);
> +     }
