On 10/6/2022 15:51, Alan Previn wrote:
With the introduction of the delayed disable-sched behavior,
we use the GuC's xarray of valid guc-id's as a way to
identify whether new requests have been added to a context
while that context is being checked for closure.

Additionally that prior change also closes the race for when
a new incoming request fails to cancel the pending
delayed disable-sched worker.

With these two complementary checks, we no longer need
intel_context:guc_state:number_committed_requests.

Signed-off-by: Alan Previn <alan.previn.teres.ale...@intel.com>
Reviewed-by: John Harrison <john.c.harri...@intel.com>

---
  drivers/gpu/drm/i915/gt/intel_context_types.h |  2 --
  .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 23 -------------------
  2 files changed, 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 6a49fa7e119f..e36670f2e626 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -199,8 +199,6 @@ struct intel_context {
                 * context's submissions is complete.
                 */
                struct i915_sw_fence blocked;
-               /** @number_committed_requests: number of committed requests */
-               int number_committed_requests;
                /** @requests: list of active requests on this context */
                struct list_head requests;
                /** @prio: the context's current guc priority */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 3cfdb0a5e5bb..b91c3085501d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -370,25 +370,6 @@ static inline void decr_context_blocked(struct intel_context *ce)
 	ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
 }
 
-static inline bool context_has_committed_requests(struct intel_context *ce)
-{
-	return !!ce->guc_state.number_committed_requests;
-}
-
-static inline void incr_context_committed_requests(struct intel_context *ce)
-{
-	lockdep_assert_held(&ce->guc_state.lock);
-	++ce->guc_state.number_committed_requests;
-	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
-}
-
-static inline void decr_context_committed_requests(struct intel_context *ce)
-{
-	lockdep_assert_held(&ce->guc_state.lock);
-	--ce->guc_state.number_committed_requests;
-	GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
-}
-
 static struct intel_context *
 request_to_scheduling_context(struct i915_request *rq)
 {
@@ -3172,7 +3153,6 @@ static void __guc_context_destroy(struct intel_context *ce)
 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
 		   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
-	GEM_BUG_ON(ce->guc_state.number_committed_requests);
 
 	lrc_fini(ce);
 	intel_context_fini(ce);
@@ -3441,8 +3421,6 @@ static void remove_from_context(struct i915_request *rq)
 
 	guc_prio_fini(rq, ce);
 
-	decr_context_committed_requests(ce);
-
 	spin_unlock_irq(&ce->guc_state.lock);
 
 	atomic_dec(&ce->guc_id.ref);
@@ -3651,7 +3629,6 @@ static int guc_request_alloc(struct i915_request *rq)
 		list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
 	}
 
-	incr_context_committed_requests(ce);
 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
 	return 0;

Reply via email to