Change the preemption timeout to the smallest possible value (1 us) when
disabling scheduling to cancel a request and restore it after
cancellation. This not only cancels the request as fast as possible, but
also fixes a bug where the preemption timeout is 0, which results in the
schedule disable hanging forever.

Reported-by: Jani Saarinen <jani.saarinen@intel.com>
Fixes: 62eaf0ae217d4 ("drm/i915/guc: Support request cancellation")
Link: https://gitlab.freedesktop.org/drm/intel/-/issues/4960
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_context_types.h |  5 ++
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 46 +++++++++++--------
 2 files changed, 31 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 30cd81ad8911a..730998823dbea 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -198,6 +198,11 @@ struct intel_context {
                 * each priority bucket
                 */
                u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
+               /**
+                * @preemption_timeout: preemption timeout of the context, used
+                * to restore this value after request cancellation
+                */
+               u32 preemption_timeout;
        } guc_state;
 
        struct {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 3918f1be114fa..966947c450253 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2147,7 +2147,8 @@ static inline u32 get_children_join_value(struct intel_context *ce,
        return __get_parent_scratch(ce)->join[child_index].semaphore;
 }
 
-static void guc_context_policy_init(struct intel_engine_cs *engine,
+static void guc_context_policy_init(struct intel_context *ce,
+                                   struct intel_engine_cs *engine,
                                    struct guc_lrc_desc *desc)
 {
        desc->policy_flags = 0;
@@ -2157,7 +2158,8 @@ static void guc_context_policy_init(struct intel_engine_cs *engine,
 
        /* NB: For both of these, zero means disabled. */
        desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
-       desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+       ce->guc_state.preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+       desc->preemption_timeout = ce->guc_state.preemption_timeout;
 }
 
 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
@@ -2193,7 +2195,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
        desc->hw_context_desc = ce->lrc.lrca;
        desc->priority = ce->guc_state.prio;
        desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
-       guc_context_policy_init(engine, desc);
+       guc_context_policy_init(ce, engine, desc);
 
        /*
         * If context is a parent, we need to register a process descriptor
@@ -2226,7 +2228,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
                        desc->hw_context_desc = child->lrc.lrca;
                        desc->priority = ce->guc_state.prio;
                        desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
-                       guc_context_policy_init(engine, desc);
+                       guc_context_policy_init(child, engine, desc);
                }
 
                clear_children_join_go_memory(ce);
@@ -2409,6 +2411,19 @@ static u16 prep_context_pending_disable(struct intel_context *ce)
        return ce->guc_id.id;
 }
 
+static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
+                                                u16 guc_id,
+                                                u32 preemption_timeout)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
+               guc_id,
+               preemption_timeout
+       };
+
+       intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+}
+
 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
 {
        struct intel_guc *guc = ce_to_guc(ce);
@@ -2442,8 +2457,10 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
 
        spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
-       with_intel_runtime_pm(runtime_pm, wakeref)
+       with_intel_runtime_pm(runtime_pm, wakeref) {
+               __guc_context_set_preemption_timeout(guc, guc_id, 1);
                __guc_context_sched_disable(guc, ce, guc_id);
+       }
 
        return &ce->guc_state.blocked;
 }
@@ -2492,8 +2509,10 @@ static void guc_context_unblock(struct intel_context *ce)
 
        spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
-       if (enable) {
-               with_intel_runtime_pm(runtime_pm, wakeref)
+       with_intel_runtime_pm(runtime_pm, wakeref) {
+               __guc_context_set_preemption_timeout(guc, ce->guc_id.id,
+                                                    ce->guc_state.preemption_timeout);
+               if (enable)
                        __guc_context_sched_enable(guc, ce);
        }
 }
@@ -2521,19 +2540,6 @@ static void guc_context_cancel_request(struct intel_context *ce,
        }
 }
 
-static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
-                                                u16 guc_id,
-                                                u32 preemption_timeout)
-{
-       u32 action[] = {
-               INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
-               guc_id,
-               preemption_timeout
-       };
-
-       intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
-}
-
 static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
 {
        struct intel_guc *guc = ce_to_guc(ce);
-- 
2.34.1

Reply via email to