The global workqueue is only used for vblank work inside the KMS code. Move allocation, flushing and deallocation of it to msm_kms.c.
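
For reference, after this change the workqueue is owned by struct msm_kms and follows the KMS init/teardown path. A condensed sketch of the hunks below (not the literal code):

    /* msm_kms.h: the workqueue now lives in the KMS state */
    struct msm_kms {
            ...
            struct workqueue_struct *wq;
    };

    /* msm_kms_init(): allocate it together with the pending timers */
    kms->wq = alloc_ordered_workqueue("msm", 0);
    if (!kms->wq)
            return -ENOMEM;

    /* msm_drm_kms_uninit(): flush before msm_irq_uninstall() so pending
     * vblank work cannot re-enable an irq after it has been disabled
     */
    flush_workqueue(kms->wq);

    /* msm_kms_destroy(): tear it down with the rest of the KMS state */
    destroy_workqueue(kms->wq);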
Signed-off-by: Dmitry Baryshkov <dmitry.barysh...@oss.qualcomm.com>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |  2 +-
 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c   |  2 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c   |  2 +-
 drivers/gpu/drm/msm/msm_drv.c               | 21 ++-------------------
 drivers/gpu/drm/msm/msm_drv.h               |  2 --
 drivers/gpu/drm/msm/msm_kms.c               | 11 +++++++++--
 drivers/gpu/drm/msm/msm_kms.h               |  8 ++++++++
 7 files changed, 22 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f9c46180b8f7ace9122e74015244334c1f13ef2b..1aaed1cd9ec58fed3230acda4c283f0eedf3a9f0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -980,7 +980,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
                         return 0;
                 }
 
-                queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
+                queue_delayed_work(priv->kms->wq, &dpu_enc->delayed_off_work,
                                 msecs_to_jiffies(dpu_enc->idle_timeout));
 
                 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index b8610aa806eaeb540e76a6a17283faea6f482a99..5e1e62256c382426f70d21a5312fb40dda68d695 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -511,7 +511,7 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
 
         if (pending & PENDING_CURSOR) {
                 update_cursor(crtc);
-                drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+                drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->kms->wq);
         }
 }
 
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 0f653e62b4a008e3bafe09ee7fb4399e1fccb722..fce2365753e22850e56521e82b9d9dca29c09280 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1196,7 +1196,7 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
         }
 
         if (pending & PENDING_CURSOR)
-                drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
+                drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->kms->wq);
 }
 
 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index f316e6776f672e7a97268f716040d0cf73256c4b..78cea9d4999488648b4131a2da425fb349d1b664 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -82,13 +82,6 @@ static int msm_drm_uninit(struct device *dev)
                 drm_atomic_helper_shutdown(ddev);
         }
 
-        /* We must cancel and cleanup any pending vblank enable/disable
-         * work before msm_irq_uninstall() to avoid work re-enabling an
-         * irq after uninstall has disabled it.
-         */
-
-        flush_workqueue(priv->wq);
-
         msm_gem_shrinker_cleanup(ddev);
 
         msm_perf_debugfs_cleanup(priv);
@@ -104,8 +97,6 @@ static int msm_drm_uninit(struct device *dev)
         ddev->dev_private = NULL;
         drm_dev_put(ddev);
 
-        destroy_workqueue(priv->wq);
-
         return 0;
 }
 
@@ -227,12 +218,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
         ddev->dev_private = priv;
         priv->dev = ddev;
 
-        priv->wq = alloc_ordered_workqueue("msm", 0);
-        if (!priv->wq) {
-                ret = -ENOMEM;
-                goto err_put_dev;
-        }
-
         INIT_LIST_HEAD(&priv->objects);
         mutex_init(&priv->obj_lock);
 
@@ -253,12 +238,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
         if (priv->kms_init) {
                 ret = drmm_mode_config_init(ddev);
                 if (ret)
-                        goto err_destroy_wq;
+                        goto err_put_dev;
         }
 
         ret = msm_init_vram(ddev);
         if (ret)
-                goto err_destroy_wq;
+                goto err_put_dev;
 
         dma_set_max_seg_size(dev, UINT_MAX);
 
@@ -304,8 +289,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 
 err_deinit_vram:
         msm_deinit_vram(ddev);
-err_destroy_wq:
-        destroy_workqueue(priv->wq);
 err_put_dev:
         drm_dev_put(ddev);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a65077855201746c37ee742364b61116565f3794..cc603bd4729e909e9381a3c277db262b13361de6 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -177,8 +177,6 @@ struct msm_drm_private {
                 struct mutex lock;
         } lru;
 
-        struct workqueue_struct *wq;
-
         unsigned int num_crtcs;
         struct msm_drm_thread event_thread[MAX_CRTCS];
 
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 35d5397e73b4c5cb90b1770e8570277e782be7ec..184a4503fef0deff7234a3ce332e0bf564fbce46 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -137,7 +137,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
         vbl_work->enable = enable;
         vbl_work->priv = priv;
 
-        queue_work(priv->wq, &vbl_work->work);
+        queue_work(priv->kms->wq, &vbl_work->work);
 
         return 0;
 }
@@ -227,6 +227,13 @@ void msm_drm_kms_uninit(struct device *dev)
 
         BUG_ON(!kms);
 
+        /* We must cancel and cleanup any pending vblank enable/disable
+         * work before msm_irq_uninstall() to avoid work re-enabling an
+         * irq after uninstall has disabled it.
+         */
+
+        flush_workqueue(kms->wq);
+
         /* clean up event worker threads */
         for (i = 0; i < priv->num_crtcs; i++) {
                 if (priv->event_thread[i].worker)
@@ -261,7 +268,7 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
         ret = priv->kms_init(ddev);
         if (ret) {
                 DRM_DEV_ERROR(dev, "failed to load kms\n");
-                return ret;
+                goto err_msm_uninit;
         }
 
         /* Enable normalization of plane zpos */
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 43b58d052ee6aae0ce34d09c88e1e1c34f9c52ef..e52649bbee7dc6a80abfecf7f8d5bcfad3d8f60b 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -153,6 +153,8 @@ struct msm_kms {
         struct mutex commit_lock[MAX_CRTCS];
         unsigned pending_crtc_mask;
         struct msm_pending_timer pending_timers[MAX_CRTCS];
+
+        struct workqueue_struct *wq;
 };
 
 static inline int msm_kms_init(struct msm_kms *kms,
@@ -165,6 +167,10 @@ static inline int msm_kms_init(struct msm_kms *kms,
 
         kms->funcs = funcs;
 
+        kms->wq = alloc_ordered_workqueue("msm", 0);
+        if (!kms->wq)
+                return -ENOMEM;
+
         for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++) {
                 ret = msm_atomic_init_pending_timer(&kms->pending_timers[i], kms, i);
                 if (ret) {
@@ -181,6 +187,8 @@ static inline void msm_kms_destroy(struct msm_kms *kms)
 
         for (i = 0; i < ARRAY_SIZE(kms->pending_timers); i++)
                 msm_atomic_destroy_pending_timer(&kms->pending_timers[i]);
+
+        destroy_workqueue(kms->wq);
 }
 
 #define for_each_crtc_mask(dev, crtc, crtc_mask) \
--
2.39.5