From: Lijo Lazar <lijo.la...@amd.com>

[ Upstream commit 0b9647d40ef82837d5025de6daad64db775ea1c5 ]

VCN v5.0.1 also will need register offset normalization. Reuse the
logic from VCN v4.0.3. Also, avoid HDP flush similar to VCN v4.0.3

Signed-off-by: Lijo Lazar <lijo.la...@amd.com>
Reviewed-by: Hawking Zhang <hawking.zh...@amd.com>
Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
Signed-off-by: Sasha Levin <sas...@kernel.org>
---
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c | 14 ++++++++------
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h |  9 +++++++++
 drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 20 +++++++++++---------
 3 files changed, 28 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index ecdc027f82203..e6cf21ed8afb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -31,6 +31,7 @@
 #include "soc15d.h"
 #include "soc15_hw_ip.h"
 #include "vcn_v2_0.h"
+#include "vcn_v4_0_3.h"
 #include "mmsch_v4_0_3.h"
 
 #include "vcn/vcn_4_0_3_offset.h"
@@ -1461,8 +1462,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
 			regUVD_RB_WPTR);
 }
 
-static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
-				uint32_t val, uint32_t mask)
+void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+				       uint32_t val, uint32_t mask)
 {
 	/* Use normalized offsets when required */
 	if (vcn_v4_0_3_normalizn_reqd(ring->adev))
@@ -1474,7 +1475,8 @@ static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t
 	amdgpu_ring_write(ring, val);
 }
 
-static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+				   uint32_t val)
 {
 	/* Use normalized offsets when required */
 	if (vcn_v4_0_3_normalizn_reqd(ring->adev))
@@ -1485,8 +1487,8 @@ static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg
 	amdgpu_ring_write(ring, val);
 }
 
-static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
-				unsigned int vmid, uint64_t pd_addr)
+void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+				       unsigned int vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
 
@@ -1498,7 +1500,7 @@ static void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 				lower_32_bits(pd_addr), 0xffffffff);
 }
 
-static void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
+void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	/* VCN engine access for HDP flush doesn't work when RRMT is enabled.
 	 * This is a workaround to avoid any HDP flush through VCN ring.
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
index 0b046114373ae..03572a1d0c9cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h
@@ -26,4 +26,13 @@
 
 extern const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block;
 
+void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+				       uint32_t val, uint32_t mask);
+
+void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+				   uint32_t val);
+void vcn_v4_0_3_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
+				       unsigned int vmid, uint64_t pd_addr);
+void vcn_v4_0_3_ring_emit_hdp_flush(struct amdgpu_ring *ring);
+
 #endif /* __VCN_V4_0_3_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index cdbc10d7c9fb7..f893a84282832 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -29,6 +29,7 @@
 #include "soc15d.h"
 #include "soc15_hw_ip.h"
 #include "vcn_v2_0.h"
+#include "vcn_v4_0_3.h"
 
 #include "vcn/vcn_5_0_0_offset.h"
 #include "vcn/vcn_5_0_0_sh_mask.h"
@@ -905,16 +906,17 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
 	.get_rptr = vcn_v5_0_1_unified_ring_get_rptr,
 	.get_wptr = vcn_v5_0_1_unified_ring_get_wptr,
 	.set_wptr = vcn_v5_0_1_unified_ring_set_wptr,
-	.emit_frame_size =
-		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
-		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
-		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
-		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
-		1, /* vcn_v2_0_enc_ring_insert_end */
+	.emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+			   SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+			   4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+			   5 +
+			   5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+			   1, /* vcn_v2_0_enc_ring_insert_end */
 	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
 	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
 	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
-	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+	.emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush,
+	.emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush,
 	.test_ring = amdgpu_vcn_enc_ring_test_ring,
 	.test_ib = amdgpu_vcn_unified_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
@@ -922,8 +924,8 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vcn_ring_begin_use,
 	.end_use = amdgpu_vcn_ring_end_use,
-	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
-	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+	.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
+	.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
-- 
2.39.5
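For readers outside the amdgpu tree, here is a minimal standalone sketch of the pattern the patch applies: one IP version's ring-callback table pointing at emit helpers shared from an older version, with register-offset normalization gated by a runtime check and an HDP-flush callback that is deliberately a no-op. All names here (demo_ring, demo_ring_funcs, demo_normalize_offset) are hypothetical and the offset math is a placeholder; the real helpers live in vcn_v4_0_3.c and key off vcn_v4_0_3_normalizn_reqd().

/*
 * Standalone illustration only -- not kernel code. Models how VCN v5.0.1's
 * ring-funcs table can reuse VCN v4.0.3's emit helpers. Hypothetical names.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ring;

/* Analogue of a ring callback table (cf. struct amdgpu_ring_funcs). */
struct demo_ring_funcs {
	void (*emit_wreg)(struct demo_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_hdp_flush)(struct demo_ring *ring);
};

struct demo_ring {
	/* Stand-in for the condition vcn_v4_0_3_normalizn_reqd() checks. */
	bool normalizn_reqd;
	const struct demo_ring_funcs *funcs;
};

/* Placeholder transform; the real offset math is in vcn_v4_0_3.c. */
static uint32_t demo_normalize_offset(uint32_t reg)
{
	return reg & 0xffff;
}

/* Older-style helper: emits the register offset as given. */
static void demo_v2_emit_wreg(struct demo_ring *ring, uint32_t reg, uint32_t val)
{
	(void)ring;
	printf("wreg raw        reg=0x%05x val=0x%08x\n", reg, val);
}

/* v4.0.3-style helper: normalizes the offset first when required. */
static void demo_v4_0_3_emit_wreg(struct demo_ring *ring, uint32_t reg, uint32_t val)
{
	if (ring->normalizn_reqd)
		reg = demo_normalize_offset(reg);
	printf("wreg normalized reg=0x%05x val=0x%08x\n", reg, val);
}

/*
 * v4.0.3-style HDP flush: intentionally empty, mirroring the workaround of
 * not emitting any HDP flush through the VCN ring.
 */
static void demo_v4_0_3_emit_hdp_flush(struct demo_ring *ring)
{
	(void)ring;
}

/* The "v5.0.1" table simply points at the shared "v4.0.3" helpers. */
static const struct demo_ring_funcs demo_v5_0_1_funcs = {
	.emit_wreg = demo_v4_0_3_emit_wreg,
	.emit_hdp_flush = demo_v4_0_3_emit_hdp_flush,
};

int main(void)
{
	struct demo_ring ring = {
		.normalizn_reqd = true,
		.funcs = &demo_v5_0_1_funcs,
	};

	/* Same register write, with and without normalization. */
	demo_v2_emit_wreg(&ring, 0x1abcd, 0xdeadbeef);
	ring.funcs->emit_wreg(&ring, 0x1abcd, 0xdeadbeef);
	ring.funcs->emit_hdp_flush(&ring);
	return 0;
}

This is the point of dropping "static" from the v4.0.3 helpers and declaring them in vcn_v4_0_3.h: the v5.0.1 ring-funcs table can be wired to them directly, picking up offset normalization and the HDP-flush workaround without duplicating the functions.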