It's useful to track runtime PM references that don't guarantee a
powered-on device state to the rest of the driver. One such case is holding a
reference that will be put asynchronously, during which normal users
without their own reference shouldn't access the HW. A follow-up patch
will add support for disabling display power domains asynchronously
which needs this.

For this we can track all references with a separate wakeref_track_count
and references guaranteeing a power-on state with the current
wakeref_count.

Follow-up patches will make use of the API added here; until then, mark
the new static functions with the __used__ attribute so that
unused-function compiler warnings (fatal on warnings-as-errors builds)
don't break git bisect at this point in the series.

No functional changes.

Cc: Chris Wilson <ch...@chris-wilson.co.uk>
Signed-off-by: Imre Deak <imre.d...@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |   1 +
 drivers/gpu/drm/i915/intel_runtime_pm.c | 121 ++++++++++++++++++++----
 2 files changed, 102 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9a634ba57ff9..9fb26634a6be 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1177,6 +1177,7 @@ struct skl_wm_params {
  */
 struct i915_runtime_pm {
        atomic_t wakeref_count;
+       atomic_t wakeref_track_count;
        bool suspended;
        bool irqs_enabled;
 
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c 
b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 30e7cb9d5801..4a7bfc945322 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -59,6 +59,12 @@
  * present for a given platform.
  */
 
+static void
+assert_raw_rpm_wakelock_held(struct drm_i915_private *i915)
+{
+       WARN_ON(!atomic_read(&i915->runtime_pm.wakeref_track_count));
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 
 #include <linux/sort.h>
@@ -100,17 +106,18 @@ static void init_intel_runtime_pm_wakeref(struct 
drm_i915_private *i915)
        struct i915_runtime_pm *rpm = &i915->runtime_pm;
 
        spin_lock_init(&rpm->debug.lock);
+       atomic_set(&rpm->wakeref_track_count, 0);
 }
 
 static noinline depot_stack_handle_t
-track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+track_intel_runtime_pm_wakeref_raw(struct drm_i915_private *i915)
 {
        struct i915_runtime_pm *rpm = &i915->runtime_pm;
        depot_stack_handle_t stack, *stacks;
        unsigned long flags;
 
-       atomic_inc(&rpm->wakeref_count);
-       assert_rpm_wakelock_held(i915);
+       atomic_inc(&rpm->wakeref_track_count);
+       assert_raw_rpm_wakelock_held(i915);
 
        if (!HAS_RUNTIME_PM(i915))
                return -1;
@@ -139,6 +146,15 @@ track_intel_runtime_pm_wakeref(struct drm_i915_private 
*i915)
        return stack;
 }
 
+static noinline depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+       atomic_inc(&i915->runtime_pm.wakeref_count);
+       assert_rpm_wakelock_held(i915);
+
+       return track_intel_runtime_pm_wakeref_raw(i915);
+}
+
 static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
                                            depot_stack_handle_t stack)
 {
@@ -163,7 +179,7 @@ static void cancel_intel_runtime_pm_wakeref(struct 
drm_i915_private *i915,
 
        if (WARN(!found,
                 "Unmatched wakeref (tracking %lu), count %u\n",
-                rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
+                rpm->debug.count, atomic_read(&rpm->wakeref_track_count))) {
                char *buf;
 
                buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
@@ -235,15 +251,15 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p,
 }
 
 static noinline void
-untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+untrack_intel_runtime_pm_wakeref_raw(struct drm_i915_private *i915)
 {
        struct i915_runtime_pm *rpm = &i915->runtime_pm;
        struct intel_runtime_pm_debug dbg = {};
        struct drm_printer p;
        unsigned long flags;
 
-       assert_rpm_wakelock_held(i915);
-       if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
+       assert_raw_rpm_wakelock_held(i915);
+       if (atomic_dec_and_lock_irqsave(&rpm->wakeref_track_count,
                                        &rpm->debug.lock,
                                        flags)) {
                dbg = rpm->debug;
@@ -263,6 +279,15 @@ untrack_intel_runtime_pm_wakeref(struct drm_i915_private 
*i915)
        kfree(dbg.owners);
 }
 
+static noinline void
+untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+       untrack_intel_runtime_pm_wakeref_raw(i915);
+
+       assert_rpm_wakelock_held(i915);
+       atomic_dec(&i915->runtime_pm.wakeref_count);
+}
+
 void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
                                    struct drm_printer *p)
 {
@@ -308,15 +333,33 @@ static void init_intel_runtime_pm_wakeref(struct 
drm_i915_private *i915)
 }
 
 static depot_stack_handle_t
-track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+track_intel_runtime_pm_wakeref_raw(struct drm_i915_private *i915)
 {
-       atomic_inc(&i915->runtime_pm.wakeref_count);
-       assert_rpm_wakelock_held(i915);
+       atomic_inc(&i915->runtime_pm.wakeref_track_count);
+       assert_raw_rpm_wakelock_held(i915);
+
        return -1;
 }
 
+static depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+       atomic_inc(&i915->runtime_pm.wakeref_count);
+       assert_rpm_wakelock_held(i915);
+
+       return track_intel_runtime_pm_wakeref_raw(i915);
+}
+
+static void untrack_intel_runtime_pm_wakeref_raw(struct drm_i915_private *i915)
+{
+       assert_raw_rpm_wakelock_held(i915);
+       atomic_dec(&i915->runtime_pm.wakeref_track_count);
+}
+
 static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
 {
+       untrack_intel_runtime_pm_wakeref_raw(i915);
+
        assert_rpm_wakelock_held(i915);
        atomic_dec(&i915->runtime_pm.wakeref_count);
 }
@@ -4347,7 +4390,7 @@ static void intel_power_domains_verify_state(struct 
drm_i915_private *i915)
  *
  * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
  */
-intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
+static void __intel_runtime_pm_get(struct drm_i915_private *i915)
 {
        struct pci_dev *pdev = i915->drm.pdev;
        struct device *kdev = &pdev->dev;
@@ -4355,6 +4398,19 @@ intel_wakeref_t intel_runtime_pm_get(struct 
drm_i915_private *i915)
 
        ret = pm_runtime_get_sync(kdev);
        WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
+}
+
+__attribute__((__used__))
+static intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915)
+{
+       __intel_runtime_pm_get(i915);
+
+       return track_intel_runtime_pm_wakeref_raw(i915);
+}
+
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
+{
+       __intel_runtime_pm_get(i915);
 
        return track_intel_runtime_pm_wakeref(i915);
 }
@@ -4430,23 +4486,48 @@ intel_wakeref_t intel_runtime_pm_get_noresume(struct 
drm_i915_private *i915)
  * intel_runtime_pm_get() and might power down the corresponding
  * hardware block right away if this is the last reference.
  */
+static void __intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
+{
+       struct pci_dev *pdev = i915->drm.pdev;
+       struct device *kdev = &pdev->dev;
+
+       pm_runtime_mark_last_busy(kdev);
+       pm_runtime_put_autosuspend(kdev);
+}
+
+static void intel_runtime_pm_put_unchecked_raw(struct drm_i915_private *i915)
+{
+       untrack_intel_runtime_pm_wakeref_raw(i915);
+       __intel_runtime_pm_put_unchecked(i915);
+}
+
 void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
 {
-       struct pci_dev *pdev = i915->drm.pdev;
-       struct device *kdev = &pdev->dev;
-
        untrack_intel_runtime_pm_wakeref(i915);
-
-       pm_runtime_mark_last_busy(kdev);
-       pm_runtime_put_autosuspend(kdev);
+       __intel_runtime_pm_put_unchecked(i915);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+__attribute__((__used__))
+static void intel_runtime_pm_put_raw(struct drm_i915_private *i915,
+                                    intel_wakeref_t wref)
+{
+       cancel_intel_runtime_pm_wakeref(i915, wref);
+       intel_runtime_pm_put_unchecked_raw(i915);
+}
+
 void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
 {
        cancel_intel_runtime_pm_wakeref(i915, wref);
        intel_runtime_pm_put_unchecked(i915);
 }
+#else
+__attribute__((__used__))
+static void intel_runtime_pm_put_raw(struct drm_i915_private *i915,
+                                    intel_wakeref_t wref)
+{
+       intel_runtime_pm_put_unchecked_raw(i915);
+}
 #endif
 
 /**
@@ -4521,12 +4602,12 @@ void intel_runtime_pm_cleanup(struct drm_i915_private 
*i915)
        struct i915_runtime_pm *rpm = &i915->runtime_pm;
        int count;
 
-       count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
+       count = atomic_fetch_inc(&rpm->wakeref_track_count); /* balance untrack 
*/
        WARN(count,
-            "i915->runtime_pm.wakeref_count=%d on cleanup\n",
+            "i915->runtime_pm.wakeref_track_count=%d on cleanup\n",
             count);
 
-       untrack_intel_runtime_pm_wakeref(i915);
+       untrack_intel_runtime_pm_wakeref_raw(i915);
 }
 
 void intel_runtime_pm_init_early(struct drm_i915_private *i915)
-- 
2.17.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Reply via email to