If the new fair policy is at least as good as FIFO, and we can afford to
remove round-robin, we can simplify the scheduler code by making the
scheduler-to-run-queue relationship always 1:1 and removing some code.

Also, now that the FIFO policy is gone, the tree of entities is no longer
a FIFO tree, so rename it to just the tree.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursu...@igalia.com>
Cc: Christian König <christian.koe...@amd.com>
Cc: Danilo Krummrich <d...@kernel.org>
Cc: Matthew Brost <matthew.br...@intel.com>
Cc: Philipp Stanner <pha...@kernel.org>
---
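For reviewers, a minimal sketch of the before/after shape of the
scheduler to run queue relationship. The *_sketch names are invented for
this note and unrelated members are elided; the real structures live in
include/drm/gpu_scheduler.h and are updated in the diff below.

/* Before: an array of per-priority run queues per scheduler. */
struct drm_sched_rq;

struct drm_gpu_scheduler_before_sketch {
        struct drm_sched_rq **sched_rq;         /* num_rqs entries */
        unsigned int num_rqs;                   /* u32 in the real struct */
};

/*
 * After: always exactly one run queue per scheduler (1:1), so the
 * per-priority indexing and iteration can go away.
 */
struct drm_gpu_scheduler_after_sketch {
        struct drm_sched_rq *rq;
};
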
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c    |  23 ++--
 drivers/gpu/drm/scheduler/sched_entity.c   |  29 +----
 drivers/gpu/drm/scheduler/sched_internal.h |   9 +-
 drivers/gpu/drm/scheduler/sched_main.c     | 133 +++++----------------
 drivers/gpu/drm/scheduler/sched_rq.c       |  53 +++-----
 include/drm/gpu_scheduler.h                |  13 +-
 6 files changed, 62 insertions(+), 198 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index acb21fc8b3ce..9440af58073b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -459,25 +459,22 @@ drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
 
 void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 {
+       struct drm_sched_rq *rq = sched->rq;
+       struct drm_sched_entity *s_entity;
        struct drm_sched_job *s_job;
-       struct drm_sched_entity *s_entity = NULL;
-       int i;
 
        /* Signal all jobs not yet scheduled */
-       for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-               struct drm_sched_rq *rq = sched->sched_rq[i];
-               spin_lock(&rq->lock);
-               list_for_each_entry(s_entity, &rq->entities, list) {
-                       while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
-                               struct drm_sched_fence *s_fence = s_job->s_fence;
+       spin_lock(&rq->lock);
+       list_for_each_entry(s_entity, &rq->entities, list) {
+               while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
+                       struct drm_sched_fence *s_fence = s_job->s_fence;
 
-                               dma_fence_signal(&s_fence->scheduled);
-                               dma_fence_set_error(&s_fence->finished, -EHWPOISON);
-                               dma_fence_signal(&s_fence->finished);
-                       }
+                       dma_fence_signal(&s_fence->scheduled);
+                       dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+                       dma_fence_signal(&s_fence->finished);
                }
-               spin_unlock(&rq->lock);
        }
+       spin_unlock(&rq->lock);
 
        /* Signal all jobs already scheduled to HW */
        list_for_each_entry(s_job, &sched->pending_list, list) {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 4481d5645138..d149df2a2050 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -98,8 +98,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
        entity->guilty = guilty;
        entity->num_sched_list = num_sched_list;
        entity->priority = priority;
-       entity->rq_priority = drm_sched_policy == DRM_SCHED_POLICY_FAIR ?
-                             DRM_SCHED_PRIORITY_KERNEL : priority;
        /*
         * It's perfectly valid to initialize an entity without having a valid
         * scheduler attached. It's just not valid to use the scheduler before it
@@ -109,30 +107,14 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
        RCU_INIT_POINTER(entity->last_scheduled, NULL);
        RB_CLEAR_NODE(&entity->rb_tree_node);
 
-       if (num_sched_list && !sched_list[0]->sched_rq) {
+       if (num_sched_list && !sched_list[0]->rq) {
                /* Since every entry covered by num_sched_list
                 * should be non-NULL and therefore we warn drivers
                 * not to do this and to fix their DRM calling order.
                 */
                pr_warn("%s: called with uninitialized scheduler\n", __func__);
        } else if (num_sched_list) {
-               enum drm_sched_priority p = entity->priority;
-
-               /*
-                * The "priority" of an entity cannot exceed the number of
-                * run-queues of a scheduler. Protect against num_rqs being 0,
-                * by converting to signed. Choose the lowest priority
-                * available.
-                */
-               if (p >= sched_list[0]->num_user_rqs) {
-                       dev_err(sched_list[0]->dev, "entity with out-of-bounds priority:%u num_user_rqs:%u\n",
-                               p, sched_list[0]->num_user_rqs);
-                       p = max_t(s32,
-                                (s32)sched_list[0]->num_user_rqs - 1,
-                                (s32)DRM_SCHED_PRIORITY_KERNEL);
-                       entity->priority = p;
-               }
-               entity->rq = sched_list[0]->sched_rq[entity->rq_priority];
+               entity->rq = sched_list[0]->rq;
        }
 
        init_completion(&entity->entity_idle);
@@ -580,7 +562,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 
        spin_lock(&entity->lock);
        sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
-       rq = sched ? sched->sched_rq[entity->rq_priority] : NULL;
+       rq = sched ? sched->rq : NULL;
        if (rq != entity->rq) {
                drm_sched_rq_remove_entity(entity->rq, entity);
                entity->rq = rq;
@@ -604,7 +586,6 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
 {
        struct drm_sched_entity *entity = sched_job->entity;
        bool first;
-       ktime_t submit_ts;
 
        trace_drm_sched_job(sched_job, entity);
        atomic_inc(entity->rq->sched->score);
@@ -613,16 +594,14 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
        /*
         * After the sched_job is pushed into the entity queue, it may be
         * completed and freed up at any time. We can no longer access it.
-        * Make sure to set the submit_ts first, to avoid a race.
         */
-       sched_job->submit_ts = submit_ts = ktime_get();
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
        /* first job wakes up scheduler */
        if (first) {
                struct drm_gpu_scheduler *sched;
 
-               sched = drm_sched_rq_add_entity(entity, submit_ts);
+               sched = drm_sched_rq_add_entity(entity);
                if (sched)
                        drm_sched_wakeup(sched);
        }
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
index b675941b58a0..2d55f265a092 100644
--- a/drivers/gpu/drm/scheduler/sched_internal.h
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -15,13 +15,6 @@ struct drm_sched_entity_stats {
        u64             vruntime;
 };
 
-/* Used to choose between FIFO and RR job-scheduling */
-extern int drm_sched_policy;
-
-#define DRM_SCHED_POLICY_RR    0
-#define DRM_SCHED_POLICY_FIFO  1
-#define DRM_SCHED_POLICY_FAIR  2
-
 bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
                         struct drm_sched_entity *entity);
 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
@@ -32,7 +25,7 @@ struct drm_sched_entity *
 drm_sched_rq_select_entity(struct drm_gpu_scheduler *sched,
                           struct drm_sched_rq *rq);
 struct drm_gpu_scheduler *
-drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts);
+drm_sched_rq_add_entity(struct drm_sched_entity *entity);
 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                struct drm_sched_entity *entity);
 void drm_sched_rq_pop_entity(struct drm_sched_entity *entity);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index d63c10c19f21..a2be0a097e75 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -83,15 +83,6 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_scheduler_trace.h"
 
-int drm_sched_policy = DRM_SCHED_POLICY_FAIR;
-
-/**
- * DOC: sched_policy (int)
- * Used to override default entities scheduling policy in a run queue.
- */
-MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO, " __stringify(DRM_SCHED_POLICY_FAIR) " = Fair (default).");
-module_param_named(sched_policy, drm_sched_policy, int, 0444);
-
 static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
 {
        u32 credits;
@@ -835,34 +826,6 @@ void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
        drm_sched_run_job_queue(sched);
 }
 
-/**
- * drm_sched_select_entity - Select next entity to process
- *
- * @sched: scheduler instance
- *
- * Return an entity to process or NULL if none are found.
- *
- * Note, that we break out of the for-loop when "entity" is non-null, which can
- * also be an error-pointer--this assures we don't process lower priority
- * run-queues. See comments in the respectively called functions.
- */
-static struct drm_sched_entity *
-drm_sched_select_entity(struct drm_gpu_scheduler *sched)
-{
-       struct drm_sched_entity *entity = NULL;
-       int i;
-
-       /* Start with the highest priority.
-        */
-       for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-               entity = drm_sched_rq_select_entity(sched, sched->sched_rq[i]);
-               if (entity)
-                       break;
-       }
-
-       return IS_ERR(entity) ? NULL : entity;
-}
-
 /**
  * drm_sched_get_finished_job - fetch the next finished job to be destroyed
  *
@@ -979,8 +942,8 @@ static void drm_sched_run_job_work(struct work_struct *w)
        int r;
 
        /* Find entity with a ready job */
-       entity = drm_sched_select_entity(sched);
-       if (!entity)
+       entity = drm_sched_rq_select_entity(sched, sched->rq);
+       if (IS_ERR_OR_NULL(entity))
                return; /* No more work */
 
        sched_job = drm_sched_entity_pop_job(entity);
@@ -1045,8 +1008,6 @@ static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
  */
 int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_args *args)
 {
-       int i;
-
        sched->ops = args->ops;
        sched->credit_limit = args->credit_limit;
        sched->name = args->name;
@@ -1056,13 +1017,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
        sched->score = args->score ? args->score : &sched->_score;
        sched->dev = args->dev;
 
-       if (args->num_rqs > DRM_SCHED_PRIORITY_COUNT) {
-               /* This is a gross violation--tell drivers what the  problem is.
-                */
-               dev_err(sched->dev, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
-                       __func__);
-               return -EINVAL;
-       } else if (sched->sched_rq) {
+       if (sched->rq) {
                /* Not an error, but warn anyway so drivers can
                 * fine-tune their DRM calling order, and return all
                 * is good.
@@ -1082,21 +1037,11 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
                sched->own_submit_wq = true;
        }
 
-       sched->num_user_rqs = args->num_rqs;
-       sched->num_rqs = drm_sched_policy != DRM_SCHED_POLICY_FAIR ?
-                        args->num_rqs : 1;
-       sched->sched_rq = kmalloc_array(sched->num_rqs,
-                                       sizeof(*sched->sched_rq),
-                                       GFP_KERNEL | __GFP_ZERO);
-       if (!sched->sched_rq)
+       sched->rq = kmalloc(sizeof(*sched->rq), GFP_KERNEL | __GFP_ZERO);
+       if (!sched->rq)
                goto Out_check_own;
 
-       for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-               sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
-               if (!sched->sched_rq[i])
-                       goto Out_unroll;
-               drm_sched_rq_init(sched, sched->sched_rq[i]);
-       }
+       drm_sched_rq_init(sched, sched->rq);
 
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->pending_list);
@@ -1111,12 +1056,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
 
        sched->ready = true;
        return 0;
-Out_unroll:
-       for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
-               kfree(sched->sched_rq[i]);
 
-       kfree(sched->sched_rq);
-       sched->sched_rq = NULL;
 Out_check_own:
        if (sched->own_submit_wq)
                destroy_workqueue(sched->submit_wq);
@@ -1148,25 +1088,21 @@ EXPORT_SYMBOL(drm_sched_init);
  */
 void drm_sched_fini(struct drm_gpu_scheduler *sched)
 {
+
+       struct drm_sched_rq *rq = sched->rq;
        struct drm_sched_entity *s_entity;
-       int i;
 
        drm_sched_wqueue_stop(sched);
 
-       for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-               struct drm_sched_rq *rq = sched->sched_rq[i];
-
-               spin_lock(&rq->lock);
-               list_for_each_entry(s_entity, &rq->entities, list)
-                       /*
-                        * Prevents reinsertion and marks job_queue as idle,
-                        * it will be removed from the rq in drm_sched_entity_fini()
-                        * eventually
-                        */
-                       s_entity->stopped = true;
-               spin_unlock(&rq->lock);
-               kfree(sched->sched_rq[i]);
-       }
+       spin_lock(&rq->lock);
+       list_for_each_entry(s_entity, &rq->entities, list)
+               /*
+                * Prevents reinsertion and marks job_queue as idle,
+                * it will be removed from the rq in drm_sched_entity_fini()
+                * eventually
+                */
+               s_entity->stopped = true;
+       spin_unlock(&rq->lock);
 
        /* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
        wake_up_all(&sched->job_scheduled);
@@ -1177,8 +1113,8 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
        if (sched->own_submit_wq)
                destroy_workqueue(sched->submit_wq);
        sched->ready = false;
-       kfree(sched->sched_rq);
-       sched->sched_rq = NULL;
+       kfree(sched->rq);
+       sched->rq = NULL;
 }
 EXPORT_SYMBOL(drm_sched_fini);
 
@@ -1193,35 +1129,28 @@ EXPORT_SYMBOL(drm_sched_fini);
  */
 void drm_sched_increase_karma(struct drm_sched_job *bad)
 {
-       int i;
-       struct drm_sched_entity *tmp;
-       struct drm_sched_entity *entity;
        struct drm_gpu_scheduler *sched = bad->sched;
+       struct drm_sched_entity *entity, *tmp;
+       struct drm_sched_rq *rq = sched->rq;
 
        /* don't change @bad's karma if it's from KERNEL RQ,
         * because sometimes GPU hang would cause kernel jobs (like VM updating jobs)
         * corrupt but keep in mind that kernel jobs always considered good.
         */
-       if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
-               atomic_inc(&bad->karma);
+       if (bad->s_priority == DRM_SCHED_PRIORITY_KERNEL)
+               return;
 
-               for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-                       struct drm_sched_rq *rq = sched->sched_rq[i];
+       atomic_inc(&bad->karma);
 
-                       spin_lock(&rq->lock);
-                       list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
-                               if (bad->s_fence->scheduled.context ==
-                                   entity->fence_context) {
-                                       if (entity->guilty)
-                                               atomic_set(entity->guilty, 1);
-                                       break;
-                               }
-                       }
-                       spin_unlock(&rq->lock);
-                       if (&entity->list != &rq->entities)
-                               break;
+       spin_lock(&rq->lock);
+       list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+               if (bad->s_fence->scheduled.context == entity->fence_context) {
+                       if (entity->guilty)
+                               atomic_set(entity->guilty, 1);
+                       break;
                }
        }
+       spin_unlock(&rq->lock);
 }
 EXPORT_SYMBOL(drm_sched_increase_karma);
 
diff --git a/drivers/gpu/drm/scheduler/sched_rq.c b/drivers/gpu/drm/scheduler/sched_rq.c
index fd1cf89911e6..69ccdbd5af6c 100644
--- a/drivers/gpu/drm/scheduler/sched_rq.c
+++ b/drivers/gpu/drm/scheduler/sched_rq.c
@@ -16,7 +16,7 @@ drm_sched_entity_compare_before(struct rb_node *a, const struct rb_node *b)
        return ktime_before(ea->oldest_job_waiting, eb->oldest_job_waiting);
 }
 
-static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
+static void drm_sched_rq_remove_tree_locked(struct drm_sched_entity *entity,
                                            struct drm_sched_rq *rq)
 {
        lockdep_assert_held(&entity->lock);
@@ -28,7 +28,7 @@ static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
        }
 }
 
-static void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
+static void drm_sched_rq_update_tree_locked(struct drm_sched_entity *entity,
                                            struct drm_sched_rq *rq,
                                            ktime_t ts)
 {
@@ -40,7 +40,7 @@ static void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
        lockdep_assert_held(&entity->lock);
        lockdep_assert_held(&rq->lock);
 
-       drm_sched_rq_remove_fifo_locked(entity, rq);
+       drm_sched_rq_remove_tree_locked(entity, rq);
 
        entity->oldest_job_waiting = ts;
 
@@ -65,16 +65,6 @@ void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
        rq->sched = sched;
 }
 
-static ktime_t
-drm_sched_rq_get_rr_deadline(struct drm_sched_rq *rq)
-{
-       lockdep_assert_held(&rq->lock);
-
-       rq->rr_deadline = ktime_add_ns(rq->rr_deadline, 1);
-
-       return rq->rr_deadline;
-}
-
 static ktime_t
 drm_sched_rq_get_min_vruntime(struct drm_sched_rq *rq)
 {
@@ -103,10 +93,11 @@ drm_sched_rq_get_min_vruntime(struct drm_sched_rq *rq)
  * Returns a DRM scheduler pre-selected to handle this entity.
  */
 struct drm_gpu_scheduler *
-drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts)
+drm_sched_rq_add_entity(struct drm_sched_entity *entity)
 {
        struct drm_gpu_scheduler *sched;
        struct drm_sched_rq *rq;
+       ktime_t ts;
 
        /* Add the entity to the run queue */
        spin_lock(&entity->lock);
@@ -126,14 +117,9 @@ drm_sched_rq_add_entity(struct drm_sched_entity *entity, ktime_t ts)
                list_add_tail(&entity->list, &rq->entities);
        }
 
-       if (drm_sched_policy == DRM_SCHED_POLICY_FAIR) {
-               ts = drm_sched_rq_get_min_vruntime(rq);
-               ts = drm_sched_entity_restore_vruntime(entity, ts);
-       } else if (drm_sched_policy == DRM_SCHED_POLICY_RR) {
-               ts = drm_sched_rq_get_rr_deadline(rq);
-       }
-
-       drm_sched_rq_update_fifo_locked(entity, rq, ts);
+       ts = drm_sched_rq_get_min_vruntime(rq);
+       ts = drm_sched_entity_restore_vruntime(entity, ts);
+       drm_sched_rq_update_tree_locked(entity, rq, ts);
 
        spin_unlock(&rq->lock);
        spin_unlock(&entity->lock);
@@ -162,7 +148,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
        atomic_dec(rq->sched->score);
        list_del_init(&entity->list);
 
-       drm_sched_rq_remove_fifo_locked(entity, rq);
+       drm_sched_rq_remove_tree_locked(entity, rq);
 
        spin_unlock(&rq->lock);
 }
@@ -183,23 +169,14 @@ void drm_sched_rq_pop_entity(struct drm_sched_entity *entity)
        if (next_job) {
                ktime_t ts;
 
-               if (drm_sched_policy == DRM_SCHED_POLICY_FAIR)
-                       ts = drm_sched_entity_get_job_ts(entity);
-               else if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
-                       ts = next_job->submit_ts;
-               else
-                       ts = drm_sched_rq_get_rr_deadline(rq);
-
-               drm_sched_rq_update_fifo_locked(entity, rq, ts);
+               ts = drm_sched_entity_get_job_ts(entity);
+               drm_sched_rq_update_tree_locked(entity, rq, ts);
        } else {
-               drm_sched_rq_remove_fifo_locked(entity, rq);
+               ktime_t min_vruntime;
 
-               if (drm_sched_policy == DRM_SCHED_POLICY_FAIR) {
-                       ktime_t min_vruntime;
-
-                       min_vruntime = drm_sched_rq_get_min_vruntime(rq);
-                       drm_sched_entity_save_vruntime(entity, min_vruntime);
-               }
+               drm_sched_rq_remove_tree_locked(entity, rq);
+               min_vruntime = drm_sched_rq_get_min_vruntime(rq);
+               drm_sched_entity_save_vruntime(entity, min_vruntime);
        }
        spin_unlock(&rq->lock);
        spin_unlock(&entity->lock);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index c6169cbf909b..e9ff24c076aa 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -151,8 +151,6 @@ struct drm_sched_entity {
         */
        struct spsc_queue               job_queue;
 
-       enum drm_sched_priority         rq_priority;
-
        /**
         * @fence_seq:
         *
@@ -339,13 +337,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
 struct drm_sched_job {
        u64                             id;
 
-       /**
-        * @submit_ts:
-        *
-        * When the job was pushed into the entity queue.
-        */
-       ktime_t                         submit_ts;
-
        /**
         * @sched:
         *
@@ -557,9 +548,7 @@ struct drm_gpu_scheduler {
        atomic_t                        credit_count;
        long                            timeout;
        const char                      *name;
-       u32                             num_rqs;
-       u32                             num_user_rqs;
-       struct drm_sched_rq             **sched_rq;
+       struct drm_sched_rq             *rq;
        wait_queue_head_t               job_scheduled;
        atomic64_t                      job_id_count;
        struct workqueue_struct         *submit_wq;
-- 
2.48.0
