Hi, Hsin-yi:

On Thu, 2021-04-22 at 19:10 +0800, Hsin-Yi Wang wrote:
> From: CK Hu <ck...@mediatek.com>
> 
> In cmdq mode, packet may be flushed before it is executed, so
> the pending flag should be cleared after cmdq packet is done.
> 
> Signed-off-by: CK Hu <ck...@mediatek.com>
> Signed-off-by: Hsin-Yi Wang <hsi...@chromium.org>
> ---
>  drivers/gpu/drm/mediatek/mtk_drm_crtc.c  | 56 +++++++++++++++++++++---
>  include/linux/mailbox/mtk-cmdq-mailbox.h |  1 +
>  2 files changed, 51 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c 
> b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> index 40df2c823187..051bf0eb00d3 100644
> --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> @@ -224,6 +224,45 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct 
> drm_crtc *crtc,
>  #if IS_REACHABLE(CONFIG_MTK_CMDQ)
>  static void ddp_cmdq_cb(struct cmdq_cb_data data)
>  {
> +     struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
> +     struct mtk_drm_crtc *mtk_crtc = (struct mtk_drm_crtc *)pkt->crtc;
> +     struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
> +     unsigned int i;
> +
> +     if (data.sta == CMDQ_CB_ERROR)

I prefer to use a standard error status instead of a proprietary one, so I sent
a patch [1]. I would like this patch to depend on [1].

[1]
https://patchwork.kernel.org/project/linux-mediatek/patch/20210314233323.23377-2-chunkuang...@kernel.org/

> +             goto destroy_pkt;
> +
> +     if (state->pending_config) {
> +             state->pending_config = false;
> +     }
> +
> +     if (mtk_crtc->pending_planes) {
> +             for (i = 0; i < mtk_crtc->layer_nr; i++) {
> +                     struct drm_plane *plane = &mtk_crtc->planes[i];
> +                     struct mtk_plane_state *plane_state;
> +
> +                     plane_state = to_mtk_plane_state(plane->state);
> +
> +                     if (plane_state->pending.config)
> +                             plane_state->pending.config = false;
> +             }
> +             mtk_crtc->pending_planes = false;
> +     }
> +
> +     if (mtk_crtc->pending_async_planes) {
> +             for (i = 0; i < mtk_crtc->layer_nr; i++) {
> +                     struct drm_plane *plane = &mtk_crtc->planes[i];
> +                     struct mtk_plane_state *plane_state;
> +
> +                     plane_state = to_mtk_plane_state(plane->state);
> +
> +                     if (plane_state->pending.async_config)
> +                             plane_state->pending.async_config = false;
> +             }
> +             mtk_crtc->pending_async_planes = false;
> +     }
> +
> +destroy_pkt:
>       cmdq_pkt_destroy(data.data);
>  }
>  #endif
> @@ -377,8 +416,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
>                                   state->pending_height,
>                                   state->pending_vrefresh, 0,
>                                   cmdq_handle);
> -
> -             state->pending_config = false;
> +             if (!cmdq_handle)
> +                     state->pending_config = false;
>       }
>  
>       if (mtk_crtc->pending_planes) {
> @@ -398,9 +437,11 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
>                               mtk_ddp_comp_layer_config(comp, local_layer,
>                                                         plane_state,
>                                                         cmdq_handle);
> -                     plane_state->pending.config = false;
> +                     if (!cmdq_handle)
> +                             plane_state->pending.config = false;
>               }
> -             mtk_crtc->pending_planes = false;
> +             if (!cmdq_handle)
> +                     mtk_crtc->pending_planes = false;
>       }
>  
>       if (mtk_crtc->pending_async_planes) {
> @@ -420,9 +461,11 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
>                               mtk_ddp_comp_layer_config(comp, local_layer,
>                                                         plane_state,
>                                                         cmdq_handle);
> -                     plane_state->pending.async_config = false;
> +                     if (!cmdq_handle)
> +                             plane_state->pending.async_config = false;
>               }
> -             mtk_crtc->pending_async_planes = false;
> +             if (!cmdq_handle)
> +                     mtk_crtc->pending_async_planes = false;
>       }
>  }
>  
> @@ -475,6 +518,7 @@ static void mtk_drm_crtc_update_config(struct 
> mtk_drm_crtc *mtk_crtc,
>               cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
>               mtk_crtc_ddp_config(crtc, cmdq_handle);
>               cmdq_pkt_finalize(cmdq_handle);
> +             cmdq_handle->crtc = mtk_crtc;
>               cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
>       }
>  #endif
> diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h 
> b/include/linux/mailbox/mtk-cmdq-mailbox.h
> index d5a983d65f05..c06b14ec03e5 100644
> --- a/include/linux/mailbox/mtk-cmdq-mailbox.h
> +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
> @@ -90,6 +90,7 @@ struct cmdq_pkt {
>       struct cmdq_task_cb     cb;
>       struct cmdq_task_cb     async_cb;
>       void                    *cl;
> +     void                    *crtc;

Not all clients need crtc, so I would like to use another structure that
includes both cmdq_pkt and crtc, and cast that structure to cmdq_pkt. I have a
plan to use the mailbox rx_callback instead of cmdq_task_cb [2] (not
upstreamed yet), and it is an example in which the crtc structure includes the
cmdq_pkt. Upstreaming [2] would take a long time, so you could choose whether
or not to depend on [2].

[2]
https://git.kernel.org/pub/scm/linux/kernel/git/chunkuang.hu/linux.git/commit/?h=mediatek-cmdq&id=2c3289b1550ea1105a02750928df738753ddf8e1

Regards,
CK

>  };
>  
>  u8 cmdq_get_shift_pa(struct mbox_chan *chan);

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to