On 04.03.22 at 10:15, Lang Yu wrote:
Due to a hardware issue, VCN on Raven can't handle a TMZ buffer
backed by GTT memory.

Move such a TMZ buffer to the VRAM domain before command
submission.

Please don't touch the CS code with that. What you can do is to implement a Raven specific patch_cs_in_place callback in the vcn_v*.c file.

See amdgpu_uvd_force_into_uvd_segment() for how we handle the 256MiB window on really old UVD blocks. You just don't need to implement the full parsing of the command stream (yet).
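For illustration only, the callback could start out about as simple as the sketch below. This is completely untested, and the vcn_v1_0_ring_patch_cs_in_place name, the (parser, ib_idx) signature and the bo_list walk are just assumptions here, to be checked against the tree the patch is based on:

/* Illustrative sketch, not a tested implementation: force every TMZ BO
 * of the submission into VRAM on Raven without parsing the command
 * stream, in the same spirit as amdgpu_uvd_force_into_uvd_segment().
 */
static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					   uint32_t ib_idx)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_list_entry *e;
	int r;

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
			continue;

		/* Re-validate the encrypted BO with a VRAM-only placement. */
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return r;
	}

	return 0;
}

Hooked up through .patch_cs_in_place in the VCN 1.0 ring funcs this only ever runs for Raven's VCN rings, and the common CS code stays untouched.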

Regards,
Christian.


Signed-off-by: Lang Yu <lang...@amd.com>
---
  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 27 ++++++++++++++++++++++++++
  1 file changed, 27 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index af12256e1bd3..66345f2ce6ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -393,6 +393,24 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
        spin_unlock(&adev->mm_stats.lock);
  }
 
+static int raven_vcn_tmz_quirks(struct amdgpu_cs_parser *p, struct amdgpu_bo *bo, uint32_t *domain)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
+
+       if ((adev->asic_type == CHIP_RAVEN) &&
+           (bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED) &&
+           (ring->funcs->type == AMDGPU_HW_IP_VCN_DEC ||
+           ring->funcs->type == AMDGPU_HW_IP_VCN_ENC ||
+           ring->funcs->type == AMDGPU_HW_IP_VCN_JPEG)) {
+               if (domain)
+                       *domain = AMDGPU_GEM_DOMAIN_VRAM;
+               return 1;
+       }
+
+       return 0;
+}
+
  static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
  {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -403,6 +421,7 @@ static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
                .resv = bo->tbo.base.resv
        };
        uint32_t domain;
+       bool need_retry = true;
        int r;
 
        if (bo->tbo.pin_count)
@@ -431,6 +450,8 @@ static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
                domain = bo->allowed_domains;
        }
 
+       raven_vcn_tmz_quirks(p, bo, &domain);
+
  retry:
        amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -442,6 +463,12 @@ static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
 
        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
+               if (raven_vcn_tmz_quirks(p, bo, &domain)) {
+                       if (need_retry)
+                               need_retry = 0;
+                       else
+                               return r;
+               }
                goto retry;
        }
