On Fri, 2017-12-01 at 11:36 +0100, Lucas Stach wrote:
> While the etnaviv workqueue needs to be ordered, as we rely on work items
> being executed in queuing order, this is only true for a single GPU.
> Having a shared workqueue for all GPUs in the system limits concurrency
> artificially.
> 
> Giving each GPU its own ordered workqueue still meets our ordering
> expectations and enables retire workers to run concurrently.
> 
> Signed-off-by: Lucas Stach <l.st...@pengutronix.de>
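
For reference, the scheme boils down to the following minimal sketch
(illustrative only, not the exact driver code; gpu->wq and
gpu->retire_work are the fields this patch adds, the error handling
around them is assumed):

	/* One ordered workqueue per GPU: work items for this GPU still
	 * run in queuing order, but different GPUs no longer serialize
	 * against each other. */
	gpu->wq = alloc_ordered_workqueue(dev_name(gpu->dev), 0);
	if (!gpu->wq)
		return -ENOMEM;

	/* e.g. from the IRQ handler or the recover worker: */
	queue_work(gpu->wq, &gpu->retire_work);

	/* and on teardown: */
	flush_workqueue(gpu->wq);
	destroy_workqueue(gpu->wq);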

> ---
>  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 12 ------------
>  drivers/gpu/drm/etnaviv/etnaviv_drv.h | 10 ----------
>  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 18 ++++++++++++++----
>  drivers/gpu/drm/etnaviv/etnaviv_gpu.h |  1 +
>  4 files changed, 15 insertions(+), 26 deletions(-)
> 
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> index 491eddf9b150..ca03b5e4789b 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> @@ -580,12 +580,6 @@ static int etnaviv_bind(struct device *dev)
>       }
>       drm->dev_private = priv;
>  
> -     priv->wq = alloc_ordered_workqueue("etnaviv", 0);
> -     if (!priv->wq) {
> -             ret = -ENOMEM;
> -             goto out_wq;
> -     }
> -
>       mutex_init(&priv->gem_lock);
>       INIT_LIST_HEAD(&priv->gem_list);
>       priv->num_gpus = 0;
> @@ -607,9 +601,6 @@ static int etnaviv_bind(struct device *dev)
>  out_register:
>       component_unbind_all(dev, drm);
>  out_bind:
> -     flush_workqueue(priv->wq);
> -     destroy_workqueue(priv->wq);
> -out_wq:
>       kfree(priv);
>  out_unref:
>       drm_dev_unref(drm);
> @@ -624,9 +615,6 @@ static void etnaviv_unbind(struct device *dev)
>  
>       drm_dev_unregister(drm);
>  
> -     flush_workqueue(priv->wq);
> -     destroy_workqueue(priv->wq);
> -
>       component_unbind_all(dev, drm);
>  
>       drm->dev_private = NULL;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> index d249acb6da08..8668bfd4abd5 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
> @@ -56,18 +56,8 @@ struct etnaviv_drm_private {
>       /* list of GEM objects: */
>       struct mutex gem_lock;
>       struct list_head gem_list;
> -
> -     struct workqueue_struct *wq;
>  };
>  
> -static inline void etnaviv_queue_work(struct drm_device *dev,
> -     struct work_struct *w)
> -{
> -     struct etnaviv_drm_private *priv = dev->dev_private;
> -
> -     queue_work(priv->wq, w);
> -}
> -
>  int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
>               struct drm_file *file);
>  
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> index 65eaa8c10ba2..1e1e34adb07f 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
> @@ -958,7 +958,7 @@ static void recover_worker(struct work_struct *work)
>       pm_runtime_put_autosuspend(gpu->dev);
>  
>       /* Retire the buffer objects in a work */
> -     etnaviv_queue_work(gpu->drm, &gpu->retire_work);
> +     queue_work(gpu->wq, &gpu->retire_work);
>  }
>  
>  static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
> @@ -994,7 +994,7 @@ static void hangcheck_handler(struct timer_list *t)
>               dev_err(gpu->dev, "     completed fence: %u\n", fence);
>               dev_err(gpu->dev, "     active fence: %u\n",
>                       gpu->active_fence);
> -             etnaviv_queue_work(gpu->drm, &gpu->recover_work);
> +             queue_work(gpu->wq, &gpu->recover_work);
>       }
>  
>       /* if still more pending work, reset the hangcheck timer: */
> @@ -1526,7 +1526,7 @@ static irqreturn_t irq_handler(int irq, void *data)
>  
>                       if (gpu->event[event].sync_point) {
>                               gpu->sync_point_event = event;
> -                             etnaviv_queue_work(gpu->drm, &gpu->sync_point_work);
> +                             queue_work(gpu->wq, &gpu->sync_point_work);
>                       }
>  
>                       fence = gpu->event[event].fence;
> @@ -1552,7 +1552,7 @@ static irqreturn_t irq_handler(int irq, void *data)
>               }
>  
>               /* Retire the buffer objects in a work */
> -             etnaviv_queue_work(gpu->drm, &gpu->retire_work);
> +             queue_work(gpu->wq, &gpu->retire_work);
>  
>               ret = IRQ_HANDLED;
>       }
> @@ -1721,12 +1721,19 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
>                       return PTR_ERR(gpu->cooling);
>       }
>  
> +     gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
> +     if (!gpu->wq) {
> +             thermal_cooling_device_unregister(gpu->cooling);

After the THERMAL selection patch (the one introducing CONFIG_DRM_ETNAVIV_THERMAL), this should change to:

                if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
                        thermal_cooling_device_unregister(gpu->cooling);
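
For context, the whole error path might then look roughly like this
(sketch only; the exact surrounding code and the -ENOMEM return are
assumed, not taken from the patch):

	gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
	if (!gpu->wq) {
		/* assumed error handling; the IS_ENABLED() guard is the
		 * part suggested above */
		if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
			thermal_cooling_device_unregister(gpu->cooling);
		return -ENOMEM;
	}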

Reviewed-by: Philipp Zabel <p.za...@pengutronix.de>

regards
Philipp