On 2025-01-31 11:57, Alex Deucher wrote:
Pass the vcn instance structure to these functions rather
than adev and the instance number.

TODO: clean up the function internals to use the vinst state
directly rather than accessing it indirectly via adev->vcn.inst[].
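
For illustration, the conversion follows roughly this shape (a minimal
sketch of the pattern in the hunks below; vcn_v5_0_1_example() is a
hypothetical stand-in, not a function in the driver):

    /* before: callers pass adev plus an instance index */
    static void vcn_v5_0_1_example(struct amdgpu_device *adev, int inst);

    /* after: callers pass the per-instance structure, and the helper
     * re-derives adev and the index locally for now (see the TODO above)
     */
    static void vcn_v5_0_1_example(struct amdgpu_vcn_inst *vinst)
    {
            struct amdgpu_device *adev = vinst->adev;
            int inst = vinst->inst;

            /* ... program the instance using adev + inst as before ... */
    }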

Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>


Reviewed-by: Boyuan Zhang <boyuan.zh...@amd.com>


---
  drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c | 71 ++++++++++++++-----------
  1 file changed, 41 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index f49fdf2bb6e33..8eccb45b04d2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -282,13 +282,14 @@ static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block)
  /**
   * vcn_v5_0_1_mc_resume - memory controller programming
   *
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
   *
   * Let the VCN memory controller know it's offsets
   */
-static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_mc_resume(struct amdgpu_vcn_inst *vinst)
  {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst = vinst->inst;
        uint32_t offset, size, vcn_inst;
        const struct common_firmware_header *hdr;
@@ -344,14 +345,16 @@ static void vcn_v5_0_1_mc_resume(struct amdgpu_device *adev, int inst)
  /**
   * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode
   *
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
   * @indirect: indirectly write sram
   *
   * Let the VCN memory controller know it's offsets with dpg mode
   */
-static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+                                         bool indirect)
  {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst_idx = vinst->inst;
        uint32_t offset, size;
        const struct common_firmware_header *hdr;
@@ -456,38 +459,38 @@ static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
  /**
   * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating
   *
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
   *
   * Disable clock gating for VCN block
   */
-static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
  {
  }
/**
   * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating
   *
- * @adev: amdgpu_device pointer
- * @inst: instance number
+ * @vinst: VCN instance
   *
   * Enable clock gating for VCN block
   */
-static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_device *adev, int inst)
+static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
  {
  }
/**
   * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode
   *
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
   * @indirect: indirectly write sram
   *
   * Start VCN block with dpg mode
   */
-static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
+                                    bool indirect)
  {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst_idx = vinst->inst;
        volatile struct amdgpu_vcn4_fw_shared *fw_shared =
                adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
        struct amdgpu_ring *ring;
@@ -535,7 +538,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
        WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET(
                VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
-       vcn_v5_0_1_mc_resume_dpg_mode(adev, inst_idx, indirect);
+       vcn_v5_0_1_mc_resume_dpg_mode(vinst, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
        tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
@@ -589,12 +592,14 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
  /**
   * vcn_v5_0_1_start - VCN start
   *
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
   *
   * Start VCN block
   */
-static int vcn_v5_0_1_start(struct amdgpu_device *adev, int i)
+static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
  {
+       struct amdgpu_device *adev = vinst->adev;
+       int i = vinst->inst;
        volatile struct amdgpu_vcn4_fw_shared *fw_shared;
        struct amdgpu_ring *ring;
        uint32_t tmp;
@@ -606,7 +611,7 @@ static int vcn_v5_0_1_start(struct amdgpu_device *adev, int i)
        fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-               return vcn_v5_0_1_start_dpg_mode(adev, i, adev->vcn.inst[i].indirect_sram);
+               return vcn_v5_0_1_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
        vcn_inst = GET_INST(VCN, i);
@@ -639,7 +644,7 @@ static int vcn_v5_0_1_start(struct amdgpu_device *adev, int i)
                     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
-       vcn_v5_0_1_mc_resume(adev, i);
+       vcn_v5_0_1_mc_resume(vinst);
/* VCN global tiling registers */
        WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
@@ -739,13 +744,14 @@ static int vcn_v5_0_1_start(struct amdgpu_device *adev, int i)
  /**
   * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode
   *
- * @adev: amdgpu_device pointer
- * @inst_idx: instance number index
+ * @vinst: VCN instance
   *
   * Stop VCN block with dpg mode
   */
-static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
  {
+       struct amdgpu_device *adev = vinst->adev;
+       int inst_idx = vinst->inst;
        uint32_t tmp;
        int vcn_inst;
@@ -767,12 +773,14 @@ static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
  /**
   * vcn_v5_0_1_stop - VCN stop
   *
- * @adev: amdgpu_device pointer
+ * @vinst: VCN instance
   *
   * Stop VCN block
   */
-static int vcn_v5_0_1_stop(struct amdgpu_device *adev, int i)
+static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
  {
+       struct amdgpu_device *adev = vinst->adev;
+       int i = vinst->inst;
        volatile struct amdgpu_vcn4_fw_shared *fw_shared;
        uint32_t tmp;
        int r = 0, vcn_inst;
@@ -783,7 +791,7 @@ static int vcn_v5_0_1_stop(struct amdgpu_device *adev, int i)
        fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
-               vcn_v5_0_1_stop_dpg_mode(adev, i);
+               vcn_v5_0_1_stop_dpg_mode(vinst);
                return 0;
        }
@@ -1005,12 +1013,14 @@ static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
        int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+               struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
                if (enable) {
                        if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE)
                                return -EBUSY;
-                       vcn_v5_0_1_enable_clock_gating(adev, i);
+                       vcn_v5_0_1_enable_clock_gating(vinst);
                } else {
-                       vcn_v5_0_1_disable_clock_gating(adev, i);
+                       vcn_v5_0_1_disable_clock_gating(vinst);
                }
        }
@@ -1022,15 +1032,16 @@ static int vcn_v5_0_1_set_powergating_state_inst(struct amdgpu_ip_block *ip_bloc
                                                 int i)
  {
        struct amdgpu_device *adev = ip_block->adev;
+       struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
        int ret = 0;
if (state == adev->vcn.inst[i].cur_state)
                return 0;
if (state == AMD_PG_STATE_GATE)
-               ret = vcn_v5_0_1_stop(adev, i);
+               ret = vcn_v5_0_1_stop(vinst);
        else
-               ret = vcn_v5_0_1_start(adev, i);
+               ret = vcn_v5_0_1_start(vinst);
if (!ret)
                adev->vcn.inst[i].cur_state = state;
