On 02-10-2024 18:46, Matthew Brost wrote:
Now that drm sched uses a single lockdep map for all submit_wq, drop the
GuC submit_wq pool hack.

Signed-off-by: Matthew Brost <matthew.br...@intel.com>
---
  drivers/gpu/drm/xe/xe_guc_submit.c | 60 +-----------------------------
  drivers/gpu/drm/xe/xe_guc_types.h  |  7 ----
  2 files changed, 1 insertion(+), 66 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 80062e1d3f66..ce251845d59a 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -224,58 +224,6 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
                 EXEC_QUEUE_STATE_BANNED));
  }
-#ifdef CONFIG_PROVE_LOCKING
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-       int i;
-
-       for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
-               guc->submission_state.submit_wq_pool[i] =
-                       alloc_ordered_workqueue("submit_wq", 0);
-               if (!guc->submission_state.submit_wq_pool[i])
-                       goto err_free;
-       }
-
-       return 0;
-
-err_free:
-       while (i)
-               destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
-
-       return -ENOMEM;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-       int i;
-
-       for (i = 0; i < NUM_SUBMIT_WQ; ++i)
-               destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-       int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
-
-       return guc->submission_state.submit_wq_pool[idx];
-}
-#else
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-       return 0;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-       return NULL;
-}
-#endif
-
  static void xe_guc_submit_fini(struct xe_guc *guc)
  {
        struct xe_device *xe = guc_to_xe(guc);
@@ -297,7 +245,6 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
xe_guc_submit_fini(guc);
        xa_destroy(&guc->submission_state.exec_queue_lookup);
-       free_submit_wq(guc);
  }
static void guc_submit_wedged_fini(void *arg)
@@ -359,10 +306,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
        if (err)
                return err;
-       err = alloc_submit_wq(guc);
-       if (err)
-               return err;
-
        gt->exec_queue_ops = &guc_exec_queue_ops;
xa_init(&guc->submission_state.exec_queue_lookup);
@@ -1482,8 +1425,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
        timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
                  msecs_to_jiffies(q->sched_props.job_timeout_ms);
        err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
-                           get_submit_wq(guc),
-                           q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+                           NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
                            timeout, guc_to_gt(guc)->ordered_wq, NULL,
                            q->name, gt_to_xe(q->gt)->drm.dev);
        if (err)
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 69046f698271..ed150fc09ad0 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -72,13 +72,6 @@ struct xe_guc {
                atomic_t stopped;
                /** @submission_state.lock: protects submission state */
                struct mutex lock;
-#ifdef CONFIG_PROVE_LOCKING
-#define NUM_SUBMIT_WQ  256
-               /** @submission_state.submit_wq_pool: submission ordered workqueues pool */
-               struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
-               /** @submission_state.submit_wq_idx: submission ordered workqueue index */
-               int submit_wq_idx;
-#endif

LGTM
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimi...@intel.com>

                /** @submission_state.enabled: submission is enabled */
                bool enabled;
                /** @submission_state.fini_wq: submit fini wait queue */

Reply via email to