[PATCH] drm/scheduler: correct comments related to the scheduler
Function drm_sched_entity_push_job() doesn't have a return value, so remove
the return value description for it. Correct several other typos.

Signed-off-by: Shuicheng Lin
---
 drivers/gpu/drm/scheduler/sched_entity.c |  8 +++-----
 drivers/gpu/drm/scheduler/sched_main.c   |  4 ++--
 include/drm/gpu_scheduler.h              | 12 ++++++------
 include/linux/dma-resv.h                 |  4 ++--
 4 files changed, 13 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 58c8161289fe..4d6a05fc35ca 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -51,7 +51,7 @@
  * drm_sched_entity_set_priority(). For changing the set of schedulers
  * @sched_list at runtime see drm_sched_entity_modify_sched().
  *
- * An entity is cleaned up by callind drm_sched_entity_fini(). See also
+ * An entity is cleaned up by calling drm_sched_entity_fini(). See also
  * drm_sched_entity_destroy().
  *
  * Returns 0 on success or a negative error code on failure.
@@ -370,7 +370,7 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f,
 }
 
 /*
- * drm_sched_entity_clear_dep - callback to clear the entities dependency and
+ * drm_sched_entity_wakeup - callback to clear the entities dependency and
  * wake up scheduler
  */
 static void drm_sched_entity_wakeup(struct dma_fence *f,
@@ -389,7 +389,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
  * @entity: scheduler entity
  * @priority: scheduler priority
  *
- * Update the priority of runqueus used for the entity.
+ * Update the priority of runqueues used for the entity.
  */
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 				   enum drm_sched_priority priority)
@@ -574,8 +574,6 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
  * fence sequence number this function should be called with drm_sched_job_arm()
  * under common lock for the struct drm_sched_entity that was set up for
  * @sched_job in drm_sched_job_init().
- *
- * Returns 0 for success, negative error code otherwise.
  */
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 {
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f093616fe53c..6e8c7651bd95 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -41,7 +41,7 @@
  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
  *    the hardware.
  *
- * The jobs in a entity are always scheduled in the order that they were pushed.
+ * The jobs in an entity are always scheduled in the order that they were pushed.
  *
  * Note that once a job was taken from the entities queue and pushed to the
  * hardware, i.e. the pending queue, the entity must not be referenced anymore
@@ -1340,7 +1340,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 		list_for_each_entry(s_entity, &rq->entities, list)
 			/*
 			 * Prevents reinsertion and marks job_queue as idle,
-			 * it will removed from rq in drm_sched_entity_fini
+			 * it will be removed from rq in drm_sched_entity_fini
 			 * eventually
 			 */
 			s_entity->stopped = true;
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index a8d19b10f9b8..9e1b12ca84b9 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -33,11 +33,11 @@
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
 /**
- * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
  *
  * Setting this flag on a scheduler fence prevents pipelining of jobs depending
  * on this fence. In other words we always insert a full CPU round trip before
- * dependen jobs are pushed to the hw queue.
+ * dependent jobs are pushed to the hw queue.
  */
 #define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS
 
@@ -71,7 +71,7 @@ enum drm_sched_priority {
 	DRM_SCHED_PRIORITY_COUNT
 };
 
-/* Used to chose between FIFO and RR jobs scheduling */
+/* Used to choose between FIFO and RR jobs scheduling */
 extern int drm_sched_policy;
 
 #define DRM_SCHED_POLICY_RR	0
@@ -198,7 +198,7 @@ struct drm_sched_entity {
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
-	 * drm_sched_job_arm() iff the queue is empty.
+	 * drm_sched_job_arm() if the queue is empty.
	 */
	struct dma_fence __rcu		*last_scheduled;

@@ -247,7 +247,7 @@ struct drm_sched_entity {
  * @sched: the scheduler to which this rq belongs to.
  * @entities: list of the entities to be scheduled.
  * @current_entity: the
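For reference, the submission flow that the corrected drm_sched_entity_push_job()
comment describes looks roughly like this on the driver side. This is a minimal
sketch, not code from any real driver: "my_ctx" and "my_job" are hypothetical
wrappers, and it assumes the four-argument form of drm_sched_job_init() that
takes a credit count.

#include <linux/mutex.h>
#include <drm/gpu_scheduler.h>

struct my_ctx {				/* hypothetical driver context */
	struct drm_sched_entity entity;
	struct mutex lock;		/* the "common lock" for the entity */
};

struct my_job {				/* hypothetical driver job */
	struct drm_sched_job base;
};

static int my_submit(struct my_ctx *ctx, struct my_job *job)
{
	int ret;

	ret = drm_sched_job_init(&job->base, &ctx->entity, 1, ctx);
	if (ret)
		return ret;

	/* ... driver-specific setup and dependency handling ... */

	mutex_lock(&ctx->lock);
	drm_sched_job_arm(&job->base);		/* assigns the fence seqno */
	drm_sched_entity_push_job(&job->base);	/* void: cannot fail past arm */
	mutex_unlock(&ctx->lock);

	return 0;
}

Once drm_sched_job_arm() has run, the job owns a fence that others may already
be waiting on, so the push itself must not be able to fail; that is why the
"Returns 0 for success" line removed above was wrong in the first place.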
[PATCH v2] drm/scheduler: Improve documentation
Function drm_sched_entity_push_job() doesn't have a return value, so remove
the return value description for it. Correct several other typos.

v2 (Philipp):
- More corrections to related comments.

Signed-off-by: Shuicheng Lin
Cc: Philipp Stanner
---
 drivers/gpu/drm/scheduler/sched_entity.c | 10 ++++------
 drivers/gpu/drm/scheduler/sched_main.c   |  4 ++--
 include/drm/gpu_scheduler.h              | 12 ++++++------
 include/linux/dma-resv.h                 |  6 +++---
 4 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 58c8161289fe..ffa3e765f5db 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -51,7 +51,7 @@
  * drm_sched_entity_set_priority(). For changing the set of schedulers
  * @sched_list at runtime see drm_sched_entity_modify_sched().
  *
- * An entity is cleaned up by callind drm_sched_entity_fini(). See also
+ * An entity is cleaned up by calling drm_sched_entity_fini(). See also
  * drm_sched_entity_destroy().
  *
  * Returns 0 on success or a negative error code on failure.
@@ -370,8 +370,8 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f,
 }
 
 /*
- * drm_sched_entity_clear_dep - callback to clear the entities dependency and
- * wake up scheduler
+ * drm_sched_entity_wakeup - callback to clear the entity's dependency and
+ * wake up the scheduler
  */
 static void drm_sched_entity_wakeup(struct dma_fence *f,
 				    struct dma_fence_cb *cb)
@@ -389,7 +389,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
  * @entity: scheduler entity
  * @priority: scheduler priority
  *
- * Update the priority of runqueus used for the entity.
+ * Update the priority of runqueues used for the entity.
  */
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 				   enum drm_sched_priority priority)
@@ -574,8 +574,6 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
  * fence sequence number this function should be called with drm_sched_job_arm()
  * under common lock for the struct drm_sched_entity that was set up for
  * @sched_job in drm_sched_job_init().
- *
- * Returns 0 for success, negative error code otherwise.
  */
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 {
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index ab53ab486fe6..cadf1662bc01 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -41,7 +41,7 @@
  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
  *    the hardware.
  *
- * The jobs in a entity are always scheduled in the order that they were pushed.
+ * The jobs in an entity are always scheduled in the order in which they were pushed.
  *
  * Note that once a job was taken from the entities queue and pushed to the
  * hardware, i.e. the pending queue, the entity must not be referenced anymore
@@ -1339,7 +1339,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 		list_for_each_entry(s_entity, &rq->entities, list)
 			/*
 			 * Prevents reinsertion and marks job_queue as idle,
-			 * it will removed from rq in drm_sched_entity_fini
+			 * it will be removed from the rq in drm_sched_entity_fini()
 			 * eventually
 			 */
 			s_entity->stopped = true;
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index fe8edb917360..ef23113451e4 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -33,11 +33,11 @@
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
 /**
- * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
  *
  * Setting this flag on a scheduler fence prevents pipelining of jobs depending
  * on this fence. In other words we always insert a full CPU round trip before
- * dependen jobs are pushed to the hw queue.
+ * dependent jobs are pushed to the hw queue.
  */
 #define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS
 
@@ -71,7 +71,7 @@ enum drm_sched_priority {
 	DRM_SCHED_PRIORITY_COUNT
 };
 
-/* Used to chose between FIFO and RR jobs scheduling */
+/* Used to choose between FIFO and RR job-scheduling */
 extern int drm_sched_policy;
 
 #define DRM_SCHED_POLICY_RR	0
@@ -198,7 +198,7 @@ struct drm_sched_entity {
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
-	 * drm_sched_job_arm() iff the queue is empty.
+	 * drm_sched_job_arm() if the queue is empty.
	 */
	struct dma_fence __rcu		*last_scheduled
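As a side note on the DRM_SCHED_FENCE_DONT_PIPELINE hunk above: a driver opts
a fence out of pipelining by setting that flag bit on the fence's flags word,
roughly as below. A hedged sketch; my_disable_pipelining() is a made-up name,
but the flag and the set_bit() usage follow the kerneldoc being corrected.

#include <linux/bitops.h>
#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/* Hypothetical helper: force a CPU round trip before dependent jobs run. */
static void my_disable_pipelining(struct dma_fence *sched_fence)
{
	/*
	 * Jobs depending on sched_fence will now only be pushed to the
	 * hw queue after a full CPU round trip through the scheduler.
	 */
	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &sched_fence->flags);
}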
[PATCH] drm/xe: Increase the invalidation timeout from 150us to 500us
There are error messages like the one below during stress testing:

  "[ 31.004009] xe :03:00.0: [drm] ERROR GT0: Global invalidation timeout"

Changing the timeout value from 150us to 500us helps avoid this error in the
stress test. xe_mmio_wait32() is implemented to wait 10us at first and then
double the wait value for each subsequent wait until the timeout value is
reached. So for the normal case the real wait time is unchanged; the larger
timeout value only takes effect in the bad case.

Signed-off-by: Shuicheng Lin
---
 drivers/gpu/drm/xe/xe_device.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index cd241a8e1838..60aebf7561ec 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -925,7 +925,7 @@ void xe_device_l2_flush(struct xe_device *xe)
 	spin_lock(&gt->global_invl_lock);
 	xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
 
-	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
 		xe_gt_err_once(gt, "Global invalidation timeout\n");
 	spin_unlock(&gt->global_invl_lock);
-- 
2.25.1
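The back-off described above can be sketched as follows. This is an
illustration of the doubling-wait behaviour only, not the actual
xe_mmio_wait32() implementation; wait_for_clear_sketch() and read_reg are
made-up names.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/types.h>

static int wait_for_clear_sketch(u32 (*read_reg)(void), u32 mask,
				 u32 timeout_us)
{
	u32 waited_us = 0, wait_us = 10;	/* first wait is 10us */

	for (;;) {
		if (!(read_reg() & mask))
			return 0;		/* normal case: exits early */
		if (waited_us >= timeout_us)
			return -ETIMEDOUT;	/* bad case: report timeout */
		udelay(wait_us);
		waited_us += wait_us;
		/* double the wait, clamped to the remaining budget */
		wait_us = min(wait_us * 2, timeout_us - waited_us);
	}
}

With these semantics a register that clears quickly is still polled after
10us, 30us, 70us, ... of total waiting, so raising the budget from 150us to
500us only adds iterations on the already-failing path.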
[PATCH v3] drm/scheduler: Improve documentation
Function drm_sched_entity_push_job() doesn't have a return value, so remove
the return value description for it. Correct several other typos.

v2 (Philipp):
- More corrections to related comments.

Signed-off-by: Shuicheng Lin
Reviewed-by: Philipp Stanner
---
 drivers/gpu/drm/scheduler/sched_entity.c | 10 ++++------
 drivers/gpu/drm/scheduler/sched_main.c   |  4 ++--
 include/drm/gpu_scheduler.h              | 12 ++++++------
 include/linux/dma-resv.h                 |  6 +++---
 4 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 58c8161289fe..ffa3e765f5db 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -51,7 +51,7 @@
  * drm_sched_entity_set_priority(). For changing the set of schedulers
  * @sched_list at runtime see drm_sched_entity_modify_sched().
  *
- * An entity is cleaned up by callind drm_sched_entity_fini(). See also
+ * An entity is cleaned up by calling drm_sched_entity_fini(). See also
  * drm_sched_entity_destroy().
  *
  * Returns 0 on success or a negative error code on failure.
@@ -370,8 +370,8 @@ static void drm_sched_entity_clear_dep(struct dma_fence *f,
 }
 
 /*
- * drm_sched_entity_clear_dep - callback to clear the entities dependency and
- * wake up scheduler
+ * drm_sched_entity_wakeup - callback to clear the entity's dependency and
+ * wake up the scheduler
  */
 static void drm_sched_entity_wakeup(struct dma_fence *f,
 				    struct dma_fence_cb *cb)
@@ -389,7 +389,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
  * @entity: scheduler entity
  * @priority: scheduler priority
  *
- * Update the priority of runqueus used for the entity.
+ * Update the priority of runqueues used for the entity.
  */
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 				   enum drm_sched_priority priority)
@@ -574,8 +574,6 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
  * fence sequence number this function should be called with drm_sched_job_arm()
  * under common lock for the struct drm_sched_entity that was set up for
  * @sched_job in drm_sched_job_init().
- *
- * Returns 0 for success, negative error code otherwise.
  */
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 {
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index ab53ab486fe6..cadf1662bc01 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -41,7 +41,7 @@
  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
  *    the hardware.
  *
- * The jobs in a entity are always scheduled in the order that they were pushed.
+ * The jobs in an entity are always scheduled in the order in which they were pushed.
  *
  * Note that once a job was taken from the entities queue and pushed to the
  * hardware, i.e. the pending queue, the entity must not be referenced anymore
@@ -1339,7 +1339,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
 		list_for_each_entry(s_entity, &rq->entities, list)
 			/*
 			 * Prevents reinsertion and marks job_queue as idle,
-			 * it will removed from rq in drm_sched_entity_fini
+			 * it will be removed from the rq in drm_sched_entity_fini()
 			 * eventually
 			 */
 			s_entity->stopped = true;
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index fe8edb917360..ef23113451e4 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -33,11 +33,11 @@
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
 /**
- * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining
+ * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
  *
  * Setting this flag on a scheduler fence prevents pipelining of jobs depending
  * on this fence. In other words we always insert a full CPU round trip before
- * dependen jobs are pushed to the hw queue.
+ * dependent jobs are pushed to the hw queue.
  */
 #define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS
 
@@ -71,7 +71,7 @@ enum drm_sched_priority {
 	DRM_SCHED_PRIORITY_COUNT
 };
 
-/* Used to chose between FIFO and RR jobs scheduling */
+/* Used to choose between FIFO and RR job-scheduling */
 extern int drm_sched_policy;
 
 #define DRM_SCHED_POLICY_RR	0
@@ -198,7 +198,7 @@ struct drm_sched_entity {
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
-	 * drm_sched_job_arm() iff the queue is empty.
+	 * drm_sched_job_arm() if the queue is empty.
	 */
	struct dma_fence __rcu
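The other function touched here, drm_sched_entity_set_priority(), is the
runtime knob whose kerneldoc the patch corrects. A minimal, hypothetical
usage sketch (my_boost_entity() is a made-up helper):

#include <drm/gpu_scheduler.h>

/*
 * Raise an entity's priority at runtime; the runqueue used for the
 * entity is updated, affecting jobs pushed after this call.
 */
static void my_boost_entity(struct drm_sched_entity *entity)
{
	drm_sched_entity_set_priority(entity, DRM_SCHED_PRIORITY_HIGH);
}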