Don't take a runtime_pm reference for each execlist request.  Instead,
take a single reference when the execlist queue becomes non-empty and
release it when the queue becomes empty again.
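
The discipline this implements is an edge-triggered get/put on the queue's
empty/non-empty transitions.  A minimal, compilable userspace sketch of that
pattern follows; rpm_get()/rpm_put(), queue_depth and the mutex are
hypothetical stand-ins, not the i915 runtime PM or execlist code.

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static int queue_depth;   /* queued execlist requests (model only) */
  static int rpm_refs;      /* stand-in for the runtime PM refcount  */

  static void rpm_get(void) { rpm_refs++; printf("rpm_get -> %d\n", rpm_refs); }
  static void rpm_put(void) { rpm_refs--; printf("rpm_put -> %d\n", rpm_refs); }

  /* Queue side: take one reference only on the empty -> non-empty edge. */
  static void queue_request(void)
  {
          pthread_mutex_lock(&lock);
          if (queue_depth++ == 0)
                  rpm_get();
          pthread_mutex_unlock(&lock);
  }

  /* Retire side: drop the reference only on the non-empty -> empty edge. */
  static void retire_request(void)
  {
          pthread_mutex_lock(&lock);
          if (--queue_depth == 0)
                  rpm_put();
          pthread_mutex_unlock(&lock);
  }

  int main(void)
  {
          queue_request();   /* first request: rpm_get           */
          queue_request();   /* queue already non-empty: no get  */
          retire_request();  /* queue still non-empty: no put    */
          retire_request();  /* queue now empty: rpm_put         */
          return 0;
  }

The reset path in i915_gem_reset_ring_cleanup() follows the same rule: one
put if (and only if) the queue was non-empty when it was torn down.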

Signed-off-by: Thomas Daniel <thomas.dan...@intel.com>
Signed-off-by: Nick Hoath <nicholas.ho...@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c  | 10 +++++++---
 drivers/gpu/drm/i915/intel_lrc.c | 15 +++++++++++++--
 2 files changed, 20 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3681a33..d9f5e4d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2758,6 +2758,13 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
        }
 
        /*
+        * If the execlist queue was not empty when we got here, a runtime
+        * PM reference was taken for it, so release that reference now.
+        */
+       if (!list_empty(&ring->execlist_queue))
+               intel_runtime_pm_put(dev_priv);
+
+       /*
         * Clear the execlists queue up before freeing the requests, as those
         * are the ones that keep the context and ringbuffer backing objects
         * pinned in place.
@@ -2866,8 +2873,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
                if (i915.enable_execlists) {
                        struct intel_context *ctx = request->ctx;
-                       struct drm_i915_private *dev_priv =
-                               ring->dev->dev_private;
                        unsigned long flags;
                        struct drm_i915_gem_object *ctx_obj =
                                ctx->engine[ring->id].state;
@@ -2877,7 +2882,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                        if (ctx_obj && (ctx != ring->default_context))
                                intel_lr_context_unpin(ring, ctx);
 
-                       intel_runtime_pm_put(dev_priv);
                        spin_unlock_irqrestore(&ring->execlist_lock, flags);
                }
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8373900..adc4942 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -399,8 +399,16 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
         */
        WARN_ON(!intel_irqs_enabled(ring->dev->dev_private));
 
-       if (list_empty(&ring->execlist_queue))
+       if (list_empty(&ring->execlist_queue)) {
+               /*
+                * We can only reach this function if a request was
+                * queued at some point, so if the queue is now empty
+                * it is time to release the runtime PM reference that
+                * was taken when the queue became non-empty.
+                */
+               intel_runtime_pm_put(ring->dev->dev_private);
                return;
+       }
 
        /* Try to read in pairs */
        list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue,
@@ -546,6 +554,7 @@ bool intel_lrc_irq_handler(struct intel_engine_cs *ring)
 static int execlists_context_queue(struct drm_i915_gem_request *request)
 {
        struct intel_engine_cs *ring = request->ring;
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *cursor;
        int num_elements = 0;
 
@@ -577,8 +586,10 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
        }
 
        list_add_tail(&request->execlist_link, &ring->execlist_queue);
-       if (num_elements == 0)
+       if (num_elements == 0) {
+               intel_runtime_pm_get(dev_priv);
                execlists_context_unqueue(ring);
+       }
 
        spin_unlock_irq(&ring->execlist_lock);
 
-- 
2.1.1
