Now that drm sched uses a single lockdep map for all submit_wq, drop the
GuC submit_wq pool hack.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_guc_submit.c | 60 +-----------------------------
 drivers/gpu/drm/xe/xe_guc_types.h  |  7 ----
 2 files changed, 1 insertion(+), 66 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 460808507947..882cef3a10dc 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -224,64 +224,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
                 EXEC_QUEUE_STATE_BANNED));
 }
 
-#ifdef CONFIG_PROVE_LOCKING
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-       int i;
-
-       for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
-               guc->submission_state.submit_wq_pool[i] =
-                       alloc_ordered_workqueue("submit_wq", 0);
-               if (!guc->submission_state.submit_wq_pool[i])
-                       goto err_free;
-       }
-
-       return 0;
-
-err_free:
-       while (i)
-               destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
-
-       return -ENOMEM;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-       int i;
-
-       for (i = 0; i < NUM_SUBMIT_WQ; ++i)
-               destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-       int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
-
-       return guc->submission_state.submit_wq_pool[idx];
-}
-#else
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-       return 0;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-       return NULL;
-}
-#endif
-
 static void guc_submit_fini(struct drm_device *drm, void *arg)
 {
        struct xe_guc *guc = arg;
 
        xa_destroy(&guc->submission_state.exec_queue_lookup);
-       free_submit_wq(guc);
 }
 
 static void guc_submit_wedged_fini(struct drm_device *drm, void *arg)
@@ -337,10 +284,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
        if (err)
                return err;
 
-       err = alloc_submit_wq(guc);
-       if (err)
-               return err;
-
        gt->exec_queue_ops = &guc_exec_queue_ops;
 
        xa_init(&guc->submission_state.exec_queue_lookup);
@@ -1445,8 +1388,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
        timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
                  msecs_to_jiffies(q->sched_props.job_timeout_ms);
        err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
-                           get_submit_wq(guc),
-                           q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+                           NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
                            timeout, guc_to_gt(guc)->ordered_wq, NULL,
                            q->name, gt_to_xe(q->gt)->drm.dev);
        if (err)
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 546ac6350a31..585f5c274f09 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -72,13 +72,6 @@ struct xe_guc {
                atomic_t stopped;
                /** @submission_state.lock: protects submission state */
                struct mutex lock;
-#ifdef CONFIG_PROVE_LOCKING
-#define NUM_SUBMIT_WQ  256
-               /** @submission_state.submit_wq_pool: submission ordered 
workqueues pool */
-               struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
-               /** @submission_state.submit_wq_idx: submission ordered workqueue index */
-               int submit_wq_idx;
-#endif
                /** @submission_state.enabled: submission is enabled */
                bool enabled;
        } submission_state;
-- 
2.34.1

Reply via email to