From: Tilak Tangudu <tilak.tang...@intel.com>

Add lmem deep suspend/resume, which covers lmem
eviction, and add GT/GuC deep suspend/resume
using i915_gem_backup_suspend, i915_gem_suspend_late
and i915_gem_resume.

Signed-off-by: Tilak Tangudu <tilak.tang...@intel.com>
---
 drivers/gpu/drm/i915/i915_driver.c | 74 ++++++++++++++++++++++++------
 1 file changed, 61 insertions(+), 13 deletions(-)
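
To make review easier, here is a minimal user-space C sketch of the control
flow this patch adds to intel_runtime_suspend(), assuming the runtime-PM
target state has already been resolved to either D3hot or D3cold. The helper
functions are hypothetical stand-ins for the i915/PCI calls used in the diff,
not real kernel API; this is only an illustration of the branch, not kernel
code.

#include <stdio.h>

enum d3_state { INTEL_D3HOT, INTEL_D3COLD_OFF };

/* Hypothetical stand-ins for the i915/PCI helpers referenced in the diff. */
static void gem_backup_suspend(void)   { puts("i915_gem_backup_suspend"); }
static void ggtt_suspend(void)         { puts("i915_ggtt_suspend"); }
static void gem_suspend_late(void)     { puts("i915_gem_suspend_late"); }
static void save_pci_and_power_off(void) { puts("save PCI state, enter D3cold"); }
static void gem_runtime_suspend(void)  { puts("i915_gem_runtime_suspend"); }
static void gt_runtime_suspend(void)   { puts("intel_gt_runtime_suspend"); }

static void runtime_suspend(enum d3_state state)
{
	if (state == INTEL_D3COLD_OFF) {
		/* Deep suspend: back up/evict lmem, suspend GGTT and GT/GuC,
		 * then save PCI state and drop the device to D3cold. */
		gem_backup_suspend();
		ggtt_suspend();
		gem_suspend_late();
		save_pci_and_power_off();
	} else {
		/* Existing shallow runtime suspend path is unchanged. */
		gem_runtime_suspend();
		gt_runtime_suspend();
	}
}

int main(void)
{
	runtime_suspend(INTEL_D3COLD_OFF);
	runtime_suspend(INTEL_D3HOT);
	return 0;
}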

diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 3697ecb2c138..608287bb27ea 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -1630,6 +1630,7 @@ static int intel_runtime_idle(struct device *kdev)
 static int intel_runtime_suspend(struct device *kdev)
 {
        struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+       struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
        int ret;
 
@@ -1644,9 +1645,14 @@ static int intel_runtime_suspend(struct device *kdev)
         * We are safe here against re-faults, since the fault handler takes
         * an RPM reference.
         */
-       i915_gem_runtime_suspend(dev_priv);
-
-       intel_gt_runtime_suspend(to_gt(dev_priv));
+       if (rpm->d3_state == INTEL_D3COLD_OFF) {
+               i915_gem_backup_suspend(dev_priv);
+               i915_ggtt_suspend(to_gt(dev_priv)->ggtt);
+               i915_gem_suspend_late(dev_priv);
+       } else {
+               i915_gem_runtime_suspend(dev_priv);
+               intel_gt_runtime_suspend(to_gt(dev_priv));
+       }
 
        intel_runtime_pm_disable_interrupts(dev_priv);
 
@@ -1691,14 +1697,18 @@ static int intel_runtime_suspend(struct device *kdev)
                 */
                intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
        } else {
-               /*
-                * current versions of firmware which depend on this opregion
-                * notification have repurposed the D1 definition to mean
-                * "runtime suspended" vs. what you would normally expect (D3)
-                * to distinguish it from notifications that might be sent via
-                * the suspend path.
-                */
-               intel_opregion_notify_adapter(dev_priv, PCI_D1);
+               if (rpm->d3_state == INTEL_D3COLD_OFF) {
+                       intel_opregion_suspend(dev_priv, PCI_D3cold);
+               } else {
+                       /*
+                        * current versions of firmware which depend on this opregion
+                        * notification have repurposed the D1 definition to mean
+                        * "runtime suspended" vs. what you would normally expect (D3)
+                        * to distinguish it from notifications that might be sent via
+                        * the suspend path.
+                        */
+                       intel_opregion_notify_adapter(dev_priv, PCI_D1);
+               }
        }
 
        assert_forcewakes_inactive(&dev_priv->uncore);
@@ -1706,6 +1716,12 @@ static int intel_runtime_suspend(struct device *kdev)
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_poll_enable(dev_priv);
 
+       if (rpm->d3_state == INTEL_D3COLD_OFF) {
+               i915_save_pci_state(pdev);
+               pci_disable_device(pdev);
+               pci_set_power_state(pdev, PCI_D3cold);
+       }
+
        drm_dbg(&dev_priv->drm, "Device suspended\n");
        return 0;
 }
@@ -1713,6 +1729,7 @@ static int intel_runtime_suspend(struct device *kdev)
 static int intel_runtime_resume(struct device *kdev)
 {
        struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+       struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
        int ret;
 
@@ -1724,7 +1741,25 @@ static int intel_runtime_resume(struct device *kdev)
        drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
        disable_rpm_wakeref_asserts(rpm);
 
-       intel_opregion_notify_adapter(dev_priv, PCI_D0);
+       if (rpm->d3_state == INTEL_D3COLD_OFF) {
+               ret = pci_set_power_state(pdev, PCI_D0);
+               if (ret) {
+                       drm_err(&dev_priv->drm,
+                               "failed to set PCI D0 power state (%d)\n", ret);
+                       goto out;
+               }
+
+               i915_load_pci_state(pdev);
+
+               ret = pci_enable_device(pdev);
+               if (ret)
+                       goto out;
+               pci_set_master(pdev);
+               intel_opregion_resume(dev_priv);
+       } else {
+               intel_opregion_notify_adapter(dev_priv, PCI_D0);
+       }
+
        rpm->suspended = false;
        if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
                drm_dbg(&dev_priv->drm,
@@ -1742,8 +1777,20 @@ static int intel_runtime_resume(struct device *kdev)
         * No point of rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
         */
-       intel_gt_runtime_resume(to_gt(dev_priv));
+       if (rpm->d3_state == INTEL_D3COLD_OFF) {
+               ret = i915_pcode_init(dev_priv);
+               if (ret)
+                       goto out;
 
+               sanitize_gpu(dev_priv);
+               ret = i915_ggtt_enable_hw(dev_priv);
+               if (ret)
+                       drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
+               i915_ggtt_resume(to_gt(dev_priv)->ggtt);
+               i915_gem_resume(dev_priv);
+       } else {
+               intel_gt_runtime_resume(to_gt(dev_priv));
+       }
        /*
         * On VLV/CHV display interrupts are part of the display
         * power well, so hpd is reinitialized from there. For
@@ -1756,6 +1803,7 @@ static int intel_runtime_resume(struct device *kdev)
 
        intel_enable_ipc(dev_priv);
 
+out:
        enable_rpm_wakeref_asserts(rpm);
 
        if (ret)
-- 
2.25.1
