From: Dave Gordon <david.s.gor...@intel.com>

This patch adds the scheduler logic for managing potentially preemptive
requests, including validating dependencies and working out when a
request can be downgraded to non-preemptive (e.g. when there's nothing
ahead for it to preempt).

Actually-preemptive requests are still disabled via a module parameter
at this early stage, as the rest of the logic to deal with the
consequences of preemption isn't in place yet.

For: VIZ-2021
Signed-off-by: Dave Gordon <david.s.gor...@intel.com>
---
 drivers/gpu/drm/i915/i915_scheduler.c | 76 ++++++++++++++++++++++++++++-------
 1 file changed, 61 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 50ff8b7..61eabc6 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -498,6 +498,9 @@ static inline bool i915_scheduler_is_dependency_valid(
        if (I915_SQS_IS_FLYING(dep)) {
                if (node->params.ring != dep->params.ring)
                        return true;
+
+               if (node->params.request->scheduler_flags & i915_req_sf_preempt)
+                       return true;
        }
 
        return false;
@@ -1317,10 +1320,11 @@ static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
        struct i915_scheduler_queue_entry  *best_wait, *fence_wait = NULL;
        struct i915_scheduler_queue_entry  *best;
        struct i915_scheduler_queue_entry  *node;
+       struct drm_i915_gem_request        *req;
        int     ret;
        int     i;
        bool    signalled, any_queued;
-       bool    has_local, has_remote, only_remote;
+       bool    has_local, has_remote, only_remote, local_preempt_only;
 
        *pop_node = NULL;
        ret = -ENODATA;
@@ -1346,18 +1350,40 @@ static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
                        scheduler->stats[node->params.ring->id].fence_ignore++;
                }
 
+               /* Attempt to re-enable pre-emption if a node wants to pre-empt
+                * but previously got downgraded. */
+               req = node->params.request;
+               if (req->scheduler_flags & i915_req_sf_was_preempt)
+                       req->scheduler_flags |= i915_req_sf_preempt;
+
                has_local  = false;
                has_remote = false;
+               local_preempt_only = (req->scheduler_flags & i915_req_sf_preempt) != 0;
                for (i = 0; i < node->num_deps; i++) {
                        if (!i915_scheduler_is_dependency_valid(node, i))
                                continue;
 
-                       if (node->dep_list[i]->params.ring == node->params.ring)
+                       if (node->dep_list[i]->params.ring == node->params.ring) {
                                has_local = true;
-                       else
+
+                               if (local_preempt_only) {
+                                       req->scheduler_flags &= ~i915_req_sf_preempt;
+                                       if (i915_scheduler_is_dependency_valid(node, i))
+                                               local_preempt_only = false;
+                                       req->scheduler_flags |= i915_req_sf_preempt;
+                               }
+                       } else
                                has_remote = true;
                }
 
+               if (has_local && local_preempt_only) {
+                       /* If a preemptive node's local dependencies are all
+                        * flying, then they can be ignored by un-preempting
+                        * the node. */
+                       req->scheduler_flags &= ~i915_req_sf_preempt;
+                       has_local = false;
+               }
+
                if (has_remote && !has_local)
                        only_remote = true;
 
@@ -1447,6 +1473,7 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_scheduler   *scheduler = dev_priv->scheduler;
        struct i915_scheduler_queue_entry  *node;
+       struct drm_i915_gem_request *req;
        unsigned long       flags;
        int                 ret = 0, count = 0;
 
@@ -1462,14 +1489,8 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
 
        /* First time around, complain if anything unexpected occurs: */
        ret = i915_scheduler_pop_from_queue_locked(ring, &node, &flags);
-       if (ret) {
-               spin_unlock_irqrestore(&scheduler->lock, flags);
-
-               if (!was_locked)
-                       mutex_unlock(&dev->struct_mutex);
-
-               return ret;
-       }
+       if (ret)
+               goto exit;
 
        do {
                BUG_ON(!node);
@@ -1477,6 +1498,30 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
                BUG_ON(node->status != i915_sqs_popped);
                count++;
 
+               req = node->params.request;
+               if (req->scheduler_flags & i915_req_sf_preempt) {
+                       struct i915_scheduler_queue_entry  *fly;
+                       bool    got_flying = false;
+
+                       list_for_each_entry(fly, &scheduler->node_queue[ring->id], link) {
+                               if (!I915_SQS_IS_FLYING(fly))
+                                       continue;
+
+                               got_flying = true;
+                               if (fly->priority >= node->priority) {
+                                       /* Already working on something at least
+                                        * as important, so don't interrupt it. */
+                                       req->scheduler_flags &= ~i915_req_sf_preempt;
+                                       break;
+                               }
+                       }
+
+                       if (!got_flying) {
+                               /* Nothing to preempt so don't bother. */
+                               req->scheduler_flags &= ~i915_req_sf_preempt;
+                       }
+               }
+
                /* The call to pop above will have removed the node from the
                 * list. So add it back in and mark it as in flight. */
                i915_scheduler_fly_node(node);
@@ -1545,6 +1590,10 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
                ret = i915_scheduler_pop_from_queue_locked(ring, &node, &flags);
        } while (ret == 0);
 
+       /* Don't complain about not being able to submit extra entries */
+       if (ret == -ENODATA)
+               ret = 0;
+
        /*
         * Bump the priority of everything that was not submitted to prevent
         * starvation of low priority tasks by a spamming high priority task.
@@ -1558,15 +1607,12 @@ static int i915_scheduler_submit(struct intel_engine_cs *ring, bool was_locked)
                                             scheduler->priority_level_bump);
        }
 
+exit:
        spin_unlock_irqrestore(&scheduler->lock, flags);
 
        if (!was_locked)
                mutex_unlock(&dev->struct_mutex);
 
-       /* Don't complain about not being able to submit extra entries */
-       if (ret == -ENODATA)
-               ret = 0;
-
        return (ret < 0) ? ret : count;
 }
 
-- 
1.9.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to