In GuC parent-child contexts, the parent context controls the scheduling;
ensure that only the parent performs scheduling operations.

Signed-off-by: Matthew Brost <matthew.br...@intel.com>
---
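For illustration only (not part of the patch): the redirection rule this
change enforces can be modeled in standalone C. Here struct ctx,
ctx_is_child() and main() are simplified stand-ins for the kernel's
struct intel_context and intel_context_is_child(); to_parent() mirrors
the helper added in the diff below.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ctx {
	struct ctx *parent;	/* NULL for a parent or standalone context */
};

static bool ctx_is_child(const struct ctx *ce)
{
	return ce->parent != NULL;
}

/*
 * Scheduling state lives only in the parent, so every scheduling
 * operation is applied to the parent regardless of which context a
 * request was submitted against.
 */
static struct ctx *to_parent(struct ctx *ce)
{
	return ctx_is_child(ce) ? ce->parent : ce;
}

int main(void)
{
	struct ctx parent = { .parent = NULL };
	struct ctx child = { .parent = &parent };

	printf("child -> parent: %d\n", to_parent(&child) == &parent);
	printf("parent -> parent: %d\n", to_parent(&parent) == &parent);
	return 0;
}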
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 48 +++++++++++++++----
 1 file changed, 38 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 0045895e0fa0..f60a46704ac5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -402,6 +402,18 @@ static inline void clr_context_banned(struct intel_context *ce)
        ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
 }
 
+static inline struct intel_context *
+to_parent(struct intel_context *ce)
+{
+       return intel_context_is_child(ce) ? ce->parent : ce;
+}
+
+static inline struct intel_context *
+request_to_scheduling_context(struct i915_request *rq)
+{
+       return to_parent(rq->context);
+}
+
 static inline bool context_guc_id_invalid(struct intel_context *ce)
 {
        return (ce->guc_id == GUC_INVALID_LRC_ID);
@@ -2341,6 +2353,7 @@ static void __guc_context_sched_disable(struct intel_guc *guc,
        GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
 #endif
 
+       GEM_BUG_ON(intel_context_is_child(ce));
        trace_intel_context_sched_disable(ce);
 
        guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
@@ -2546,6 +2559,8 @@ static void guc_context_sched_disable(struct intel_context *ce)
        u16 guc_id;
        bool enabled;
 
+       GEM_BUG_ON(intel_context_is_child(ce));
+
        if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
            !lrc_desc_registered(guc, ce->guc_id)) {
                clr_context_enabled(ce);
@@ -2941,6 +2956,8 @@ static void guc_signal_context_fence(struct intel_context *ce)
 {
        unsigned long flags;
 
+       GEM_BUG_ON(intel_context_is_child(ce));
+
        spin_lock_irqsave(&ce->guc_state.lock, flags);
        clr_context_wait_for_deregister_to_register(ce);
        __guc_signal_context_fence(ce);
@@ -3026,10 +3043,21 @@ static bool context_needs_lrc_desc_pin(struct intel_context *ce, bool new_guc_id
                !submission_disabled(ce_to_guc(ce));
 }
 
+static void clear_lrca_dirty(struct intel_context *ce)
+{
+       struct intel_context *child;
+
+       GEM_BUG_ON(intel_context_is_child(ce));
+
+       clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
+       for_each_child(ce, child)
+               clear_bit(CONTEXT_LRCA_DIRTY, &child->flags);
+}
+
 static int tasklet_pin_guc_id(struct guc_submit_engine *gse,
                              struct i915_request *rq)
 {
-       struct intel_context *ce = rq->context;
+       struct intel_context *ce = request_to_scheduling_context(rq);
        int ret = 0;
 
        lockdep_assert_held(&gse->sched_engine.lock);
@@ -3061,7 +3089,7 @@ static int tasklet_pin_guc_id(struct guc_submit_engine *gse,
                gse->submission_stall_reason = STALL_SCHED_DISABLE;
        }
 
-       clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
+       clear_lrca_dirty(ce);
 out:
        gse->total_num_rq_with_no_guc_id -= ce->guc_num_rq_submit_no_id;
        GEM_BUG_ON(gse->total_num_rq_with_no_guc_id < 0);
@@ -3092,7 +3120,7 @@ static int tasklet_pin_guc_id(struct guc_submit_engine *gse,
 
 static int guc_request_alloc(struct i915_request *rq)
 {
-       struct intel_context *ce = rq->context;
+       struct intel_context *ce = request_to_scheduling_context(rq);
        struct intel_guc *guc = ce_to_guc(ce);
        struct guc_submit_engine *gse = ce_to_gse(ce);
        unsigned long flags;
@@ -3143,11 +3171,12 @@ static int guc_request_alloc(struct i915_request *rq)
         * persistent until the generated request is retired. Thus, sealing these
         * race conditions.
         *
-        * There is no need for a lock here as the timeline mutex ensures at
-        * most one context can be executing this code path at once. The
-        * guc_id_ref is incremented once for every request in flight and
-        * decremented on each retire. When it is zero, a lock around the
-        * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
+        * There is no need for a lock here as the timeline mutex (or
+        * parallel_submit mutex in the case of multi-lrc) ensures at most one
+        * context can be executing this code path at once. The guc_id_ref is
+        * incremented once for every request in flight and decremented on each
+        * retire. When it is zero, a lock around the increment (in pin_guc_id)
+        * is needed to seal a race with unpin_guc_id.
         */
        if (atomic_add_unless(&ce->guc_id_ref, 1, 0))
                goto out;
@@ -3185,8 +3214,7 @@ static int guc_request_alloc(struct i915_request *rq)
                }
        }
 
-       clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
-
+       clear_lrca_dirty(ce);
 out:
        incr_num_rq_not_ready(ce);
 
-- 
2.28.0
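
Aside, for readers unfamiliar with the guc_id_ref fast path described in
the comment updated above: atomic_add_unless(&ce->guc_id_ref, 1, 0) takes
a reference only when the count is already nonzero, so the lock is needed
just for the zero -> nonzero transition. A userspace sketch of that
pattern (pthread/stdatomic stand-ins, not the kernel primitives):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int guc_id_ref;
static pthread_mutex_t guc_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Analogue of the kernel's atomic_add_unless(): add @a unless the
 * current value is @u; returns true if the add happened.
 */
static bool add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load(v);

	while (old != u) {
		/* on failure, old is reloaded with the current value */
		if (atomic_compare_exchange_weak(v, &old, old + a))
			return true;
	}
	return false;
}

static void get_guc_id(void)
{
	/* fast path: a reference is already held by in-flight requests */
	if (add_unless(&guc_id_ref, 1, 0))
		return;

	/* slow path: only zero -> nonzero needs the lock to close the
	 * race with the release side */
	pthread_mutex_lock(&guc_id_lock);
	atomic_fetch_add(&guc_id_ref, 1);
	pthread_mutex_unlock(&guc_id_lock);
}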
