On Wed, Apr 28, 2021 at 12:36:49PM -0700, Rob Clark wrote:
> From: Rob Clark <robdcl...@chromium.org>
> 
> On a5xx and a6xx devices that use CP_WHERE_AM_I to update a
> ringbuffer read-ptr shadow value, emit an extra CP_WHERE_AM_I every
> 32 commands within a submit, so that a later submit waiting for
> ringbuffer space to become available sees partial progress, rather
> than seeing no rptr advance at all until the GPU reaches the end of
> the submit it is currently chewing on.
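
Side note for context (not part of the patch): the stall being fixed
comes from ring-space accounting driven by the rptr shadow.  A
minimal sketch of the idea, with a hypothetical helper name and the
usual keep-one-slot-empty ring convention:

  /* Free dwords in a ring whose cached rptr only advances when the
   * CP writes it back (e.g. via CP_WHERE_AM_I).
   */
  static u32 ring_words_free(u32 wptr, u32 rptr, u32 size_dwords)
  {
          return (rptr + (size_dwords - 1) - wptr) % size_dwords;
  }

If the shadow is written back only once per submit, this stays pinned
near zero for the whole duration of a large submit, and a waiter can
block even though the CP has already consumed most of the ring.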

Acked-by: Jordan Crouse <jor...@cosmicpenguin.net>

> Signed-off-by: Rob Clark <robdcl...@chromium.org>
> ---
>  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 29 ++++++++++++++++++++++-----
>  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 27 +++++++++++++++++++------
>  2 files changed, 45 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
> index 0c8faad3b328..5202f1498a48 100644
> --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
> @@ -18,6 +18,18 @@ static void a5xx_dump(struct msm_gpu *gpu);
>  
>  #define GPU_PAS_ID 13
>  
> +static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
> +{
> +     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
> +     struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
> +
> +     if (a5xx_gpu->has_whereami) {
> +             OUT_PKT7(ring, CP_WHERE_AM_I, 2);
> +             OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
> +             OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
> +     }
> +}
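
The value CP_WHERE_AM_I writes here is what the CPU side consumes
when it checks how far the CP has gotten.  From memory (not part of
this patch), the a5xx reader looks roughly like:

  static u32 a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
  {
          struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
          struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);

          if (a5xx_gpu->has_whereami)
                  return a5xx_gpu->shadow[ring->id];

          return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
  }

so more frequent write-backs translate directly into fresher values
for anyone polling for ring space.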
> +
>  void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
>               bool sync)
>  {
> @@ -30,11 +42,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
>        * Most flush operations need to issue a WHERE_AM_I opcode to sync up
>        * the rptr shadow
>        */
> -     if (a5xx_gpu->has_whereami && sync) {
> -             OUT_PKT7(ring, CP_WHERE_AM_I, 2);
> -             OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
> -             OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
> -     }
> +     if (sync)
> +             update_shadow_rptr(gpu, ring);
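
(No behavior change in this hunk: the has_whereami check just moved
into the helper, and the sync=false path still skips the shadow
update, as it did before.)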
>  
>       if (unlikely(ring->overflow))
>               return;
> @@ -171,6 +180,16 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
>                       ibs++;
>                       break;
>               }
> +
> +             /*
> +              * Periodically update the shadow rptr if needed, so that we
> +              * can see partial progress of submits with a large # of
> +              * cmds; otherwise we could needlessly stall waiting for
> +              * ringbuffer state, simply due to looking at a shadow
> +              * rptr value that has not been updated
> +              */
> +             if ((ibs % 32) == 0)
> +                     update_shadow_rptr(gpu, ring);
>       }
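
Since ibs is only incremented for commands that actually emit an IB,
this fires after every 32nd IB: a submit with, say, 100 IBs gets
extra CP_WHERE_AM_I packets after IBs 32, 64 and 96, and the flush at
the end syncs up the tail as before.  A concurrent waiter therefore
sees rptr advance in bounded chunks instead of one big jump at the
very end.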
>  
>       /*
> diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> index 4a4728a774c0..2986e36ffd8d 100644
> --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
> @@ -52,21 +52,25 @@ static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
>       return true;
>  }
>  
> -static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
> +static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
>  {
>       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
>       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> -     uint32_t wptr;
> -     unsigned long flags;
>  
>       /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
>       if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
> -             struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
> -
>               OUT_PKT7(ring, CP_WHERE_AM_I, 2);
>               OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
>               OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
>       }
> +}
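
And on a6xx parts with expanded APRIV the helper is a no-op (per the
comment above), so the periodic calls added to a6xx_submit() below
are harmless there; IIUC on those parts the CP can keep the
privileged rptr writeback current on its own, without CP_WHERE_AM_I.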
> +
> +static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
> +{
> +     uint32_t wptr;
> +     unsigned long flags;
> +
> +     update_shadow_rptr(gpu, ring);
>  
>       if (unlikely(ring->overflow))
>               return;
> @@ -148,7 +152,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
>       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
>       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
>       struct msm_ringbuffer *ring = submit->ring;
> -     unsigned int i;
> +     unsigned int i, ibs = 0;
>  
>       a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
>  
> @@ -184,8 +188,19 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
>                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
>                       OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
>                       OUT_RING(ring, submit->cmd[i].size);
> +                     ibs++;
>                       break;
>               }
> +
> +             /*
> +              * Periodically update the shadow rptr if needed, so that we
> +              * can see partial progress of submits with a large # of
> +              * cmds; otherwise we could needlessly stall waiting for
> +              * ringbuffer state, simply due to looking at a shadow
> +              * rptr value that has not been updated
> +              */
> +             if ((ibs % 32) == 0)
> +                     update_shadow_rptr(gpu, ring);
>       }
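
(Same cadence as on the a5xx side: an extra sync point after every
32nd IB emitted.)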
>  
>       get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
> -- 
> 2.30.2
> 