Chris Wilson <ch...@chris-wilson.co.uk> writes:

> Some hardware requires a valid render context before it can initiate
> rc6 power gating of the GPU; the default state of the GPU is not
> sufficient and may lead to undefined behaviour. The first execution of
> any batch will load the "golden render state", at which point it is safe
> to enable rc6. As we do not forcibly load the kernel context at resume,
> we have to hook into the batch submission to be sure that the render
> state is set up before enabling rc6.
>
> However, since we don't enable powersaving until that first batch, we
> also queue a delayed task to guarantee that a batch is indeed
> submitted.
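
So if I understand the resulting flow correctly: rc6/rps is now enabled
lazily from the submission path, once the first batch (and therefore the
golden render state) is actually in flight, and the delayed work is only a
fallback that submits a kernel-context request if userspace never sends a
batch of its own. Roughly like this (names shortened, untested sketch; the
real code is in the intel_pm.c hunks below):

	/* On every submission: intel_enable_gt_powersave() bails out
	 * early via READ_ONCE(rps.enabled), so this stays cheap on the
	 * hot path and only does real work for the first request. */
	static void mark_busy(struct drm_i915_private *dev_priv)
	{
		intel_enable_gt_powersave(dev_priv);
	}

	/* Fallback, scheduled ~1s after resume/reset: if no batch has
	 * shown up by then, submit a kernel-context request ourselves so
	 * the golden render state (and hence rc6) still gets loaded. */
	static void autoenable_worker(struct work_struct *work)
	{
		struct drm_i915_private *dev_priv =
			container_of(work, typeof(*dev_priv),
				     rps.autoenable_work.work);

		if (READ_ONCE(dev_priv->rps.enabled))
			return; /* a real batch beat us to it */

		/* ... allocate and submit a request on RCS with the
		 * kernel context, see __intel_autoenable_gt_powersave() */
	}
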
>
> v2: Rearrange intel_disable_gt_powersave() to match.
> v3: Apply user specified cur_freq (or idle_freq if not set).
> v4: Give in, and supply a delayed work to autoenable rc6
> v5: Mika suggested a couple of better names for delayed_resume_work
>
> Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuopp...@intel.com>
> Cc: Ville Syrjälä <ville.syrj...@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_drv.c      |   7 +-
>  drivers/gpu/drm/i915/i915_drv.h      |   2 +-
>  drivers/gpu/drm/i915/i915_gem.c      |   1 +
>  drivers/gpu/drm/i915/intel_display.c |   2 +-
>  drivers/gpu/drm/i915/intel_drv.h     |   2 +
>  drivers/gpu/drm/i915/intel_pm.c      | 122 +++++++++++++++++++++++------------
>  drivers/gpu/drm/i915/intel_uncore.c  |   2 +-
>  7 files changed, 89 insertions(+), 49 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> index b9a811750ca8..ff3342b78a74 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -1343,7 +1343,7 @@ void i915_driver_unload(struct drm_device *dev)
>       i915_destroy_error_state(dev);
>  
>       /* Flush any outstanding unpin_work. */
> -     flush_workqueue(dev_priv->wq);
> +     drain_workqueue(dev_priv->wq);
>

For this to make sense, should we also use the dev_priv workqueue
(dev_priv->wq) for the gt autoenabling?
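
E.g. something like this in intel_autoenable_gt_powersave() (untested
sketch, just swapping schedule_delayed_work() for queue_delayed_work() on
dev_priv->wq):

		/* Queue on the driver workqueue so that
		 * drain_workqueue(dev_priv->wq) on unload also waits for,
		 * and prevents requeuing of, the autoenable work. */
		if (queue_delayed_work(dev_priv->wq,
				       &dev_priv->rps.autoenable_work,
				       round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);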

-Mika


>       intel_guc_fini(dev);
>       i915_gem_fini(dev);
> @@ -1652,6 +1652,7 @@ static int i915_drm_resume(struct drm_device *dev)
>  
>       intel_opregion_notify_adapter(dev_priv, PCI_D0);
>  
> +     intel_autoenable_gt_powersave(dev_priv);
>       drm_kms_helper_poll_enable(dev);
>  
>       enable_rpm_wakeref_asserts(dev_priv);
> @@ -1835,8 +1836,7 @@ int i915_reset(struct drm_i915_private *dev_priv)
>        * previous concerns that it doesn't respond well to some forms
>        * of re-init after reset.
>        */
> -     if (INTEL_INFO(dev)->gen > 5)
> -             intel_enable_gt_powersave(dev_priv);
> +     intel_autoenable_gt_powersave(dev_priv);
>  
>       return 0;
>  
> @@ -2459,7 +2459,6 @@ static int intel_runtime_resume(struct device *device)
>        * we can do is to hope that things will still work (and disable RPM).
>        */
>       i915_gem_init_swizzling(dev);
> -     gen6_update_ring_freq(dev_priv);
>  
>       intel_runtime_pm_enable_interrupts(dev_priv);
>  
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index bb59bc637f7b..8d74db096189 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1188,7 +1188,7 @@ struct intel_gen6_power_mgmt {
>       bool client_boost;
>  
>       bool enabled;
> -     struct delayed_work delayed_resume_work;
> +     struct delayed_work autoenable_work;
>       unsigned boosts;
>  
>       struct intel_rps_client semaphores, mmioflips;
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index adeca0ec4cfb..6f2227b24711 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2842,6 +2842,7 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
>       intel_runtime_pm_get_noresume(dev_priv);
>       dev_priv->gt.awake = true;
>  
> +     intel_enable_gt_powersave(dev_priv);
>       i915_update_gfx_val(dev_priv);
>       if (INTEL_GEN(dev_priv) >= 6)
>               gen6_rps_busy(dev_priv);
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index be3b2cab2640..92cd0a70f8c8 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -15456,7 +15456,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
>       dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
>  
>       intel_init_clock_gating(dev);
> -     intel_enable_gt_powersave(dev_priv);
>  }
>  
>  /*
> @@ -16252,6 +16251,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
>  {
>       struct drm_i915_private *dev_priv = to_i915(dev);
>  
> +     intel_suspend_gt_powersave(dev_priv);
>       intel_disable_gt_powersave(dev_priv);
>  
>       /*
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index 55aeaf041749..c68dd6166c52 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -1697,7 +1697,9 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
>  void intel_gpu_ips_teardown(void);
>  void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
>  void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
>  void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
> +void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
>  void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
>  void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
>  void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index aab1e0b5d2eb..c6ba923425ce 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -6549,13 +6549,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
>               intel_runtime_pm_put(dev_priv);
>  }
>  
> -static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
> -{
> -     flush_delayed_work(&dev_priv->rps.delayed_resume_work);
> -
> -     gen6_disable_rps_interrupts(dev_priv);
> -}
> -
>  /**
>   * intel_suspend_gt_powersave - suspend PM work and helper threads
>   * @dev_priv: i915 device
> @@ -6569,50 +6562,65 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
>       if (INTEL_GEN(dev_priv) < 6)
>               return;
>  
> -     gen6_suspend_rps(dev_priv);
> +     cancel_delayed_work_sync(&dev_priv->rps.autoenable_work);
> +
> +     gen6_disable_rps_interrupts(dev_priv);
>  
>       /* Force GPU to min freq during suspend */
>       gen6_rps_idle(dev_priv);
>  }
>  
> +void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
> +{
> +     dev_priv->rps.enabled = true; /* force disabling */
> +     intel_disable_gt_powersave(dev_priv);
> +
> +     gen6_reset_rps_interrupts(dev_priv);
> +}
> +
>  void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
>  {
> -     if (IS_IRONLAKE_M(dev_priv)) {
> -             ironlake_disable_drps(dev_priv);
> -     } else if (INTEL_INFO(dev_priv)->gen >= 6) {
> -             intel_suspend_gt_powersave(dev_priv);
> +     if (!READ_ONCE(dev_priv->rps.enabled))
> +             return;
>  
> -             mutex_lock(&dev_priv->rps.hw_lock);
> -             if (INTEL_INFO(dev_priv)->gen >= 9) {
> -                     gen9_disable_rc6(dev_priv);
> -                     gen9_disable_rps(dev_priv);
> -             } else if (IS_CHERRYVIEW(dev_priv))
> -                     cherryview_disable_rps(dev_priv);
> -             else if (IS_VALLEYVIEW(dev_priv))
> -                     valleyview_disable_rps(dev_priv);
> -             else
> -                     gen6_disable_rps(dev_priv);
> +     mutex_lock(&dev_priv->rps.hw_lock);
>  
> -             dev_priv->rps.enabled = false;
> -             mutex_unlock(&dev_priv->rps.hw_lock);
> +     if (INTEL_GEN(dev_priv) >= 9) {
> +             gen9_disable_rc6(dev_priv);
> +             gen9_disable_rps(dev_priv);
> +     } else if (IS_CHERRYVIEW(dev_priv)) {
> +             cherryview_disable_rps(dev_priv);
> +     } else if (IS_VALLEYVIEW(dev_priv)) {
> +             valleyview_disable_rps(dev_priv);
> +     } else if (INTEL_GEN(dev_priv) >= 6) {
> +             gen6_disable_rps(dev_priv);
> +     }  else if (IS_IRONLAKE_M(dev_priv)) {
> +             ironlake_disable_drps(dev_priv);
>       }
> +
> +     dev_priv->rps.enabled = false;
> +     mutex_unlock(&dev_priv->rps.hw_lock);
>  }
>  
> -static void intel_gen6_powersave_work(struct work_struct *work)
> +void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
>  {
> -     struct drm_i915_private *dev_priv =
> -             container_of(work, struct drm_i915_private,
> -                          rps.delayed_resume_work.work);
> +     /* We shouldn't be disabling as we submit, so this should be less
> +      * racy than it appears!
> +      */
> +     if (READ_ONCE(dev_priv->rps.enabled))
> +             return;
>  
> -     mutex_lock(&dev_priv->rps.hw_lock);
> +     /* Powersaving is controlled by the host when inside a VM */
> +     if (intel_vgpu_active(dev_priv))
> +             return;
>  
> -     gen6_reset_rps_interrupts(dev_priv);
> +     mutex_lock(&dev_priv->rps.hw_lock);
>  
>       if (IS_CHERRYVIEW(dev_priv)) {
>               cherryview_enable_rps(dev_priv);
>       } else if (IS_VALLEYVIEW(dev_priv)) {
>               valleyview_enable_rps(dev_priv);
> -     } else if (INTEL_INFO(dev_priv)->gen >= 9) {
> +     } else if (INTEL_GEN(dev_priv) >= 9) {
>               gen9_enable_rc6(dev_priv);
>               gen9_enable_rps(dev_priv);
>               if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
> @@ -6620,9 +6628,12 @@ static void intel_gen6_powersave_work(struct work_struct *work)
>       } else if (IS_BROADWELL(dev_priv)) {
>               gen8_enable_rps(dev_priv);
>               __gen6_update_ring_freq(dev_priv);
> -     } else {
> +     } else if (INTEL_GEN(dev_priv) >= 6) {
>               gen6_enable_rps(dev_priv);
>               __gen6_update_ring_freq(dev_priv);
> +     } else if (IS_IRONLAKE_M(dev_priv)) {
> +             ironlake_enable_drps(dev_priv);
> +             intel_init_emon(dev_priv);
>       }
>  
>       WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
> @@ -6632,18 +6643,45 @@ static void intel_gen6_powersave_work(struct work_struct *work)
>       WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
>  
>       dev_priv->rps.enabled = true;
> +     mutex_unlock(&dev_priv->rps.hw_lock);
> +}
>  
> -     gen6_enable_rps_interrupts(dev_priv);
> +static void __intel_autoenable_gt_powersave(struct work_struct *work)
> +{
> +     struct drm_i915_private *dev_priv =
> +             container_of(work, typeof(*dev_priv), rps.autoenable_work.work);
> +     struct intel_engine_cs *rcs;
> +     struct drm_i915_gem_request *req;
>  
> -     mutex_unlock(&dev_priv->rps.hw_lock);
> +     if (READ_ONCE(dev_priv->rps.enabled))
> +             return;
> +
> +     rcs = &dev_priv->engine[RCS];
> +     if (rcs->last_context)
> +             return;
> +
> +     if (!rcs->init_context)
> +             return;
> +
> +     intel_runtime_pm_get(dev_priv);
> +     mutex_lock(&dev_priv->drm.struct_mutex);
>  
> +     req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
> +     if (IS_ERR(req))
> +             goto unlock;
> +
> +     if (!i915.enable_execlists && i915_switch_context(req) == 0)
> +             rcs->init_context(req);
> +     i915_add_request_no_flush(req);
> +
> +unlock:
> +     mutex_unlock(&dev_priv->drm.struct_mutex);
>       intel_runtime_pm_put(dev_priv);
>  }
>  
> -void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
> +void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
>  {
> -     /* Powersaving is controlled by the host when inside a VM */
> -     if (intel_vgpu_active(dev_priv))
> +     if (READ_ONCE(dev_priv->rps.enabled))
>               return;
>  
>       if (IS_IRONLAKE_M(dev_priv)) {
> @@ -6664,8 +6702,8 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
>                * paths, so the _noresume version is enough (and in case of
>                * runtime resume it's necessary).
>                */
> -             if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
> -                                        round_jiffies_up_relative(HZ)))
> +             if (schedule_delayed_work(&dev_priv->rps.autoenable_work,
> +                                       round_jiffies_up_relative(HZ)))
>                       intel_runtime_pm_get_noresume(dev_priv);
>       }
>  }
> @@ -6675,7 +6713,7 @@ void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
>       if (INTEL_INFO(dev_priv)->gen < 6)
>               return;
>  
> -     gen6_suspend_rps(dev_priv);
> +     gen6_disable_rps_interrupts(dev_priv);
>       dev_priv->rps.enabled = false;
>  }
>  
> @@ -7785,8 +7823,8 @@ void intel_pm_setup(struct drm_device *dev)
>       mutex_init(&dev_priv->rps.hw_lock);
>       spin_lock_init(&dev_priv->rps.client_lock);
>  
> -     INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
> -                       intel_gen6_powersave_work);
> +     INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work,
> +                       __intel_autoenable_gt_powersave);
>       INIT_LIST_HEAD(&dev_priv->rps.clients);
>       INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
>       INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);
> diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
> index ff80a81b1a84..eeb4cbce19ff 100644
> --- a/drivers/gpu/drm/i915/intel_uncore.c
> +++ b/drivers/gpu/drm/i915/intel_uncore.c
> @@ -435,7 +435,7 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
>       i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
>  
>       /* BIOS often leaves RC6 enabled, but disable it for hw init */
> -     intel_disable_gt_powersave(dev_priv);
> +     intel_sanitize_gt_powersave(dev_priv);
>  }
>  
>  static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
> -- 
> 2.8.1