On Wed, Apr 06, 2022 at 09:51:21AM +0200, Christian König wrote:
> Add a usage for kernel submissions. Waiting for those is mandatory for
> dynamic DMA-bufs.
> 
> As a precaution this patch also changes all occurrences where fences are
> added as part of memory management in TTM, VMWGFX and i915 to use the
> new value because it now becomes possible for drivers to ignore fences
> with the WRITE usage.
> 
> v2: use "must" in documentation, fix whitespaces
> v3: separate out some driver changes and better document why some
>     changes should still be part of this patch.
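
If I'm reading the rules right (my own sketch below, not part of the patch,
with dmabuf standing in for the imported struct dma_buf): an importer of a
dynamic dma-buf must always wait for the KERNEL fences before touching the
backing storage, while explicitly synchronizing drivers may now skip the
implicit WRITE/READ fences. Assuming the usage-aware wait helper from earlier
in this series, roughly:

	/* Kernel memory management fences (moves, clears, copies) are
	 * mandatory; implicit sync fences can be ignored by drivers which
	 * do their own explicit synchronization.
	 */
	dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_KERNEL, false,
			      MAX_SCHEDULE_TIMEOUT);
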
> 
> Signed-off-by: Christian König <christian.koe...@amd.com>
> ---
>  drivers/dma-buf/dma-resv.c                  |  2 +-
>  drivers/dma-buf/st-dma-resv.c               |  2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_clflush.c |  2 +-
>  drivers/gpu/drm/ttm/ttm_bo.c                |  2 +-
>  drivers/gpu/drm/ttm/ttm_bo_util.c           |  4 ++--
>  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c          |  2 +-
>  include/linux/dma-resv.h                    | 24 ++++++++++++++++++---
>  7 files changed, 28 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
> index 378d47e1cfea..f4860e5f2d8b 100644
> --- a/drivers/dma-buf/dma-resv.c
> +++ b/drivers/dma-buf/dma-resv.c
> @@ -726,7 +726,7 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
>   */
>  void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
>  {
> -     static const char *usage[] = { "write", "read" };
> +     static const char *usage[] = { "kernel", "write", "read" };
>       struct dma_resv_iter cursor;
>       struct dma_fence *fence;
>  
> diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
> index d0f7c2bfd4f0..062b57d63fa6 100644
> --- a/drivers/dma-buf/st-dma-resv.c
> +++ b/drivers/dma-buf/st-dma-resv.c
> @@ -296,7 +296,7 @@ int dma_resv(void)
>       int r;
>  
>       spin_lock_init(&fence_lock);
> -     for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ;
> +     for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_READ;
>            ++usage) {
>               r = subtests(tests, (void *)(unsigned long)usage);
>               if (r)
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
> index f5f2b8b115ea..0512afdd20d8 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
> @@ -117,7 +117,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
>                                               i915_fence_timeout(i915),
>                                               I915_FENCE_GFP);
>               dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
> -                                DMA_RESV_USAGE_WRITE);
> +                                DMA_RESV_USAGE_KERNEL);
>               dma_fence_work_commit(&clflush->base);
>               /*
>                * We must have successfully populated the pages(since we are
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index d74f9eea855e..6bf3fb1c8045 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -739,7 +739,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
>               return ret;
>       }
>  
> -     dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
> +     dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
>  
>       ret = dma_resv_reserve_fences(bo->base.resv, 1);
>       if (unlikely(ret)) {
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
> index 7a96a1db13a7..99deb45894f4 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> @@ -508,7 +508,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
>               return ret;
>  
>       dma_resv_add_fence(&ghost_obj->base._resv, fence,
> -                        DMA_RESV_USAGE_WRITE);
> +                        DMA_RESV_USAGE_KERNEL);
>  
>       /**
>        * If we're not moving to fixed memory, the TTM object
> @@ -562,7 +562,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
>       struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
>       int ret = 0;
>  
> -     dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
> +     dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
>       if (!evict)
>               ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
>       else if (!from->use_tt && pipeline)
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> index bec50223efe5..408ede1f967f 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> @@ -759,7 +759,7 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
>       ret = dma_resv_reserve_fences(bo->base.resv, 1);
>       if (!ret)
>               dma_resv_add_fence(bo->base.resv, &fence->base,
> -                                DMA_RESV_USAGE_WRITE);
> +                                DMA_RESV_USAGE_KERNEL);
>       else
>               /* Last resort fallback when we are OOM */
>               dma_fence_wait(&fence->base, false);
> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
> index 7bb7e7edbb6f..a749f229ae91 100644
> --- a/include/linux/dma-resv.h
> +++ b/include/linux/dma-resv.h
> @@ -55,11 +55,29 @@ struct dma_resv_list;
>   * This enum describes the different use cases for a dma_resv object and
>   * controls which fences are returned when queried.
>   *
> - * An important fact is that there is the order WRITE<READ and when the
> - * dma_resv object is asked for fences for one use case the fences for the
> - * lower use case are returned as well.
> + * An important fact is that there is the order KERNEL<WRITE<READ and
> + * when the dma_resv object is asked for fences for one use case the fences
> + * for the lower use case are returned as well.
> + *
> + * For example when asking for WRITE fences then the KERNEL fences are returned
> + * as well. Similar when asked for READ fences then both WRITE and KERNEL
> + * fences are returned as well.
>   */
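
So a query at one usage level also returns everything before it in that
order. A rough illustration of mine (not from the patch), assuming the
usage-aware iterator with the reservation lock held, and resv being the
dma_resv object in question:

	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int busy = 0;

	/* READ is the last level in the KERNEL<WRITE<READ order, so this
	 * walks the KERNEL and WRITE fences as well; DMA_RESV_USAGE_KERNEL
	 * instead would return only the kernel memory management fences.
	 */
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence)
		if (!dma_fence_is_signaled(fence))
			++busy;
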
>  enum dma_resv_usage {
> +     /**
> +      * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
> +      *
> +      * This should only be used for things like copying or clearing memory
> +      * with a DMA hardware engine for the purpose of kernel memory
> +      * management.
> +      *
> +      * Drivers *always* must wait for those fences before accessing the
> +      * resource protected by the dma_resv object. The only exception for
> +      * that is when the resource is known to be locked down in place by
> +      * pinning it previously.
> +      */
> +     DMA_RESV_USAGE_KERNEL,
> +
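
The pinning exception would then look something like this I guess
(TTM-flavoured sketch of mine, not part of the patch):

	/* A pinned BO is locked down in place, so the kernel move/clear
	 * fences can be skipped; otherwise waiting for them is mandatory.
	 */
	if (!bo->pin_count)
		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL,
				      false, MAX_SCHEDULE_TIMEOUT);
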
>       /**
>        * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
>        *
> -- 
> 2.25.1

All the functional changes in drivers make sense to me now.

Reviewed-by: Daniel Vetter <daniel.vet...@ffwll.ch>

> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
