On Thu, Aug 15, 2024 at 08:26:12PM +0200, Antonino Maniscalco wrote:
> This patch adds a bit of infrastructure to give the different Adreno
> targets the flexibility to set up their submitqueues per their needs.
> 
> Signed-off-by: Sharat Masetty <smase...@codeaurora.org>

Reviewed-by: Akhil P Oommen <quic_akhi...@quicinc.com>

-Akhil

> ---
>  drivers/gpu/drm/msm/msm_gpu.h         |  7 +++++++
>  drivers/gpu/drm/msm/msm_submitqueue.c | 10 ++++++++++
>  2 files changed, 17 insertions(+)
> 
> diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
> index 1f02bb9956be..70f5c18e5aee 100644
> --- a/drivers/gpu/drm/msm/msm_gpu.h
> +++ b/drivers/gpu/drm/msm/msm_gpu.h
> @@ -92,6 +92,10 @@ struct msm_gpu_funcs {
>        * for cmdstream that is buffered in this FIFO upstream of the CP fw.
>        */
>       bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
> +     int (*submitqueue_setup)(struct msm_gpu *gpu,
> +                     struct msm_gpu_submitqueue *queue);
> +     void (*submitqueue_close)(struct msm_gpu *gpu,
> +                     struct msm_gpu_submitqueue *queue);
>  };
>  
>  /* Additional state for iommu faults: */
> @@ -522,6 +526,9 @@ struct msm_gpu_submitqueue {
>       struct mutex lock;
>       struct kref ref;
>       struct drm_sched_entity *entity;
> +     struct msm_gpu *gpu;
> +     struct drm_gem_object *bo;
> +     uint64_t bo_iova;
>  };
>  
>  struct msm_gpu_state_bo {
> diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
> index 0e803125a325..4ffb336d9a60 100644
> --- a/drivers/gpu/drm/msm/msm_submitqueue.c
> +++ b/drivers/gpu/drm/msm/msm_submitqueue.c
> @@ -71,6 +71,11 @@ void msm_submitqueue_destroy(struct kref *kref)
>       struct msm_gpu_submitqueue *queue = container_of(kref,
>               struct msm_gpu_submitqueue, ref);
>  
> +     struct msm_gpu *gpu = queue->gpu;
> +
> +     if (gpu && gpu->funcs->submitqueue_close)
> +             gpu->funcs->submitqueue_close(gpu, queue);
> +
>       idr_destroy(&queue->fence_idr);
>  
>       msm_file_private_put(queue->ctx);
> @@ -160,6 +165,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
>  {
>       struct msm_drm_private *priv = drm->dev_private;
>       struct msm_gpu_submitqueue *queue;
> +     struct msm_gpu *gpu = priv->gpu;
>       enum drm_sched_priority sched_prio;
>       unsigned ring_nr;
>       int ret;
> @@ -195,6 +201,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
>  
>       queue->ctx = msm_file_private_get(ctx);
>       queue->id = ctx->queueid++;
> +     queue->gpu = gpu;
>  
>       if (id)
>               *id = queue->id;
> @@ -207,6 +214,9 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
>  
>       write_unlock(&ctx->queuelock);
>  
> +     if (gpu && gpu->funcs->submitqueue_setup)
> +             gpu->funcs->submitqueue_setup(gpu, queue);
> +
>       return 0;
>  }
>  
> 
> -- 
> 2.46.0
> 
> 
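For anyone following the series, here is a minimal sketch of how a target could
wire up the two new hooks together with the new bo/bo_iova fields. This is only
an illustration on my side, not something the patch mandates: the function names,
the PAGE_SIZE allocation and the choice of msm_gem_kernel_new()/msm_gem_kernel_put()
are assumptions, and the snippet presumes it lives in an adreno target file with
the usual msm driver headers pulled in.

/*
 * Hypothetical per-target callbacks, only to illustrate the intended use
 * of submitqueue_setup()/submitqueue_close() and the per-queue bo/bo_iova
 * fields added above. Assumes the usual msm includes (msm_gpu.h, msm_gem.h).
 */
static int example_submitqueue_setup(struct msm_gpu *gpu,
				     struct msm_gpu_submitqueue *queue)
{
	void *ptr;

	/* Back each queue with a small kernel BO, e.g. for per-queue state. */
	ptr = msm_gem_kernel_new(gpu->dev, PAGE_SIZE, MSM_BO_WC,
				 gpu->aspace, &queue->bo, &queue->bo_iova);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	return 0;
}

static void example_submitqueue_close(struct msm_gpu *gpu,
				      struct msm_gpu_submitqueue *queue)
{
	if (!queue->bo)
		return;

	/* Drop the iova mapping and the BO allocated in setup. */
	msm_gem_kernel_put(queue->bo, gpu->aspace);
}

Stashing the gpu pointer on the queue is what lets msm_submitqueue_destroy()
reach the close callback without going back through the file private, which
looks like the right trade-off here.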
