Drop the pin count check trick between a sched_disable and re-pin; instead
rely on the guc_state lock and a count of the number of committed requests
to determine if scheduling should be disabled on the context.
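
As a rough illustration of the pattern being moved to (not code from the
driver; 'ctx' and the helpers below are made-up names): producers bump a
committed-request count under the same lock the disable path takes, so
sched_disable can bail out when a request was committed concurrently
instead of inferring that from the pin count:

	/* Illustrative sketch only; struct and helpers are hypothetical. */
	struct ctx {
		spinlock_t lock;	/* plays the role of ce->guc_state.lock */
		int committed_requests;	/* plays the role of number_committed_requests */
	};

	static void commit_request(struct ctx *c)
	{
		unsigned long flags;

		spin_lock_irqsave(&c->lock, flags);
		c->committed_requests++;	/* now visible to the disable path */
		spin_unlock_irqrestore(&c->lock, flags);
	}

	static bool can_sched_disable(struct ctx *c)
	{
		unsigned long flags;
		bool idle;

		spin_lock_irqsave(&c->lock, flags);
		idle = !c->committed_requests;	/* only disable if nothing slipped in */
		spin_unlock_irqrestore(&c->lock, flags);

		return idle;
	}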

Signed-off-by: Matthew Brost <matthew.br...@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_context_types.h |  2 +
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 49 ++++++++++++-------
 2 files changed, 34 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index d5d643b04d54..524a35a78bf4 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -169,6 +169,8 @@ struct intel_context {
                struct list_head fences;
                /* GuC context blocked fence */
                struct i915_sw_fence blocked_fence;
+               /* GuC committed requests */
+               int number_committed_requests;
        } guc_state;
 
        struct {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 5f77f25322ca..3e90985b0c1b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -248,6 +248,25 @@ static inline void decr_context_blocked(struct intel_context *ce)
        ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
 }
 
+static inline bool context_has_committed_requests(struct intel_context *ce)
+{
+       return !!ce->guc_state.number_committed_requests;
+}
+
+static inline void incr_context_committed_requests(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_state.lock);
+       ++ce->guc_state.number_committed_requests;
+       GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static inline void decr_context_committed_requests(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_state.lock);
+       --ce->guc_state.number_committed_requests;
+       GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
 static inline bool context_guc_id_invalid(struct intel_context *ce)
 {
        return ce->guc_id == GUC_INVALID_LRC_ID;
@@ -1751,14 +1770,11 @@ static void guc_context_sched_disable(struct intel_context *ce)
        spin_lock_irqsave(&ce->guc_state.lock, flags);
 
        /*
-        * We have to check if the context has been disabled by another thread.
-        * We also have to check if the context has been pinned again as another
-        * pin operation is allowed to pass this function. Checking the pin
-        * count, within ce->guc_state.lock, synchronizes this function with
-        * guc_request_alloc ensuring a request doesn't slip through the
-        * 'context_pending_disable' fence. Checking within the spin lock (can't
-        * sleep) ensures another process doesn't pin this context and generate
-        * a request before we set the 'context_pending_disable' flag here.
+        * We have to check if the context has been disabled by another thread,
+        * check if submission has been disabled to seal a race with reset and
+        * finally check if any more requests have been committed to the
+        * context, ensuring that a request doesn't slip through the
+        * 'context_pending_disable' fence.
         */
        enabled = context_enabled(ce);
        if (unlikely(!enabled || submission_disabled(guc))) {
@@ -1767,7 +1783,8 @@ static void guc_context_sched_disable(struct intel_context *ce)
                spin_unlock_irqrestore(&ce->guc_state.lock, flags);
                goto unpin;
        }
-       if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
+       if (unlikely(context_has_committed_requests(ce))) {
+               intel_context_sched_disable_unpin(ce);
                spin_unlock_irqrestore(&ce->guc_state.lock, flags);
                return;
        }
@@ -1800,6 +1817,7 @@ static void __guc_context_destroy(struct intel_context *ce)
                   ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
                   ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
                   ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+       GEM_BUG_ON(ce->guc_state.number_committed_requests);
 
        lrc_fini(ce);
        intel_context_fini(ce);
@@ -2030,6 +2048,10 @@ static void remove_from_context(struct i915_request *rq)
 
        spin_unlock_irq(&ce->guc_active.lock);
 
+       spin_lock_irq(&ce->guc_state.lock);
+       decr_context_committed_requests(ce);
+       spin_unlock_irq(&ce->guc_state.lock);
+
        atomic_dec(&ce->guc_id_ref);
        i915_request_notify_execute_cb_imm(rq);
 }
@@ -2177,15 +2199,7 @@ static int guc_request_alloc(struct i915_request *rq)
         * schedule enable or context registration if either G2H is pending
         * respectfully. Once a G2H returns, the fence is released that is
         * blocking these requests (see guc_signal_context_fence).
-        *
-        * We can safely check the below fields outside of the lock as it isn't
-        * possible for these fields to transition from being clear to set but
-        * converse is possible, hence the need for the check within the lock.
         */
-       if (likely(!context_wait_for_deregister_to_register(ce) &&
-                  !context_pending_disable(ce)))
-               return 0;
-
        spin_lock_irqsave(&ce->guc_state.lock, flags);
        if (context_wait_for_deregister_to_register(ce) ||
            context_pending_disable(ce)) {
@@ -2194,6 +2208,7 @@ static int guc_request_alloc(struct i915_request *rq)
 
                list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
        }
+       incr_context_committed_requests(ce);
        spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
        return 0;
-- 
2.32.0
