It would be good to add some explanation of why we use DMA_RESV_USAGE_BOOKKEEP for the buffer copy but DMA_RESV_USAGE_KERNEL for the fill. Having that either as a comment or in the commit message would help new folks get a handle on it. Other than that, it's a good catch.

Acked-by: Sunil Khatri <[email protected]>


For my understanding: a buffer copy could also involve moving the buffer to a different domain, so it might need to depend on all fences — read/write as well as internal kernel fences. A buffer fill, on the other hand, only writes to the memory and therefore only needs to depend on the kernel's implicit-sync fences?

Regards
Sunil Khatri

On 11-03-2026 12:43 am, Christian König wrote:
That's not even remotely correct, but should unblock testing for now.

Signed-off-by: Christian König <[email protected]>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 +++++++-----
  1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 714fd8d12ca5..69f52a078022 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2428,12 +2428,14 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device 
*adev,
                                  struct amdgpu_ttm_buffer_entity *entity,
                                  unsigned int num_dw,
                                  struct dma_resv *resv,
+                                 enum dma_resv_usage usage,
                                  bool vm_needs_flush,
                                  struct amdgpu_job **job,
                                  u64 k_job_id)
  {
        enum amdgpu_ib_pool_type pool = AMDGPU_IB_POOL_DELAYED;
        int r;
+
        r = amdgpu_job_alloc_with_ib(adev, &entity->base,
                                     AMDGPU_FENCE_OWNER_UNDEFINED,
                                     num_dw * 4, pool, job, k_job_id);
@@ -2449,8 +2451,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device 
*adev,
        if (!resv)
                return 0;
-       return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
-                                                  DMA_RESV_USAGE_BOOKKEEP);
+       return drm_sched_job_add_resv_dependencies(&(*job)->base, resv, usage);
  }
int amdgpu_copy_buffer(struct amdgpu_device *adev,
@@ -2479,9 +2480,9 @@ int amdgpu_copy_buffer(struct amdgpu_device *adev,
        max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
-       r = amdgpu_ttm_prepare_job(adev, entity, num_dw,
-                                  resv, vm_needs_flush, &job,
-                                  AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
+       r = amdgpu_ttm_prepare_job(adev, entity, num_dw, resv,
+                                  DMA_RESV_USAGE_BOOKKEEP, vm_needs_flush,
+                                  &job, AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
        if (r)
                goto error_free;
@@ -2524,6 +2525,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_device *adev,
        num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
        r = amdgpu_ttm_prepare_job(adev, entity, num_dw, resv,
+                                  DMA_RESV_USAGE_KERNEL,
                                   vm_needs_flush, &job, k_job_id);
        if (r)
                return r;

Reply via email to