From: "jesse.zh...@amd.com" <jesse.zh...@amd.com>

Refactor the SDMA v5.0 queue reset and stop logic to improve code
readability and maintainability. The key changes include:

1. **Generalized `sdma_v5_0_gfx_stop` Function**:
   - Added an `inst_mask` parameter so callers can stop specific SDMA
     instances rather than always stopping all of them; resetting an
     individual queue only needs to stop its own engine (see the sketch
     after this list).

2. **Simplified `sdma_v5_0_reset_queue` Function**:
   - Removed the redundant instance-lookup loop and its error check by using
     the `ring->me` field directly to identify the SDMA instance.
   - Reused `sdma_v5_0_gfx_stop` to stop the queue, removing duplicated
     register programming.
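
For illustration, a minimal standalone sketch of the resulting `inst_mask`
convention (not kernel code: `GENMASK()` is redefined locally to mimic the
kernel macro, and `num_instances`/`ring_me` stand in for
`adev->sdma.num_instances` and `ring->me`):

```c
#include <stdint.h>
#include <stdio.h>

/* local stand-in for the kernel's GENMASK(h, l) */
#define GENMASK(h, l) ((~0u >> (31u - (h))) & ~((1u << (l)) - 1u))

int main(void)
{
	unsigned int num_instances = 2;	/* e.g. two SDMA engines */
	unsigned int ring_me = 1;	/* instance a given ring lives on */

	uint32_t all = GENMASK(num_instances - 1, 0);	/* 0x3: every engine */
	uint32_t one = 1u << ring_me;			/* 0x2: one engine  */

	printf("all=0x%x one=0x%x\n", (unsigned int)all, (unsigned int)one);
	return 0;
}
```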

Signed-off-by: Jesse Zhang <jesse.zh...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 65 +++++++++++---------------
 1 file changed, 26 insertions(+), 39 deletions(-)
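
Note for reviewers: the `for_each_inst(i, inst_mask)` iterator used in the
first hunk walks the set bits of the mask. A rough standalone equivalent
(illustrative only; the real amdgpu macro differs in form) is:

```c
#include <stdint.h>
#include <stdio.h>

/* walk each instance selected by inst_mask, lowest bit first */
static void gfx_stop_sketch(uint32_t inst_mask)
{
	for (int i = 0; inst_mask != 0; i++, inst_mask >>= 1) {
		if (inst_mask & 1u)
			printf("stop SDMA instance %d\n", i);	/* disable RB/IB here */
	}
}

int main(void)
{
	gfx_stop_sketch(0x3u);		/* all engines, as sdma_v5_0_enable() now passes */
	gfx_stop_sketch(1u << 1);	/* one engine, as the queue-reset path passes */
	return 0;
}
```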
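
Similarly, the per-instance GRBM soft-reset write relies on the SDMA0 and
SDMA1 reset bits being adjacent. A hedged sketch of that bit math (the
shift value below is a placeholder, not taken from the real register
headers):

```c
#include <stdint.h>
#include <stdio.h>

/* placeholder for GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT; assumes
 * SDMA1's reset bit is the next bit up, as the expression requires */
#define SDMA0_SHIFT 20u

int main(void)
{
	for (uint32_t inst_id = 0; inst_id < 2; inst_id++) {
		uint32_t bit = 1u << SDMA0_SHIFT << inst_id;
		printf("SDMA%u soft-reset bit: 0x%08x\n", inst_id, bit);
	}
	return 0;
}
```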

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index e1348b6d9c6a..9501652f903d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -555,15 +555,15 @@ static void sdma_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
  * sdma_v5_0_gfx_stop - stop the gfx async dma engines
  *
  * @adev: amdgpu_device pointer
- *
+ * @inst_mask: mask of dma engine instances to be disabled
  * Stop the gfx async dma ring buffers (NAVI10).
  */
-static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev, uint32_t inst_mask)
 {
        u32 rb_cntl, ib_cntl;
        int i;
 
-       for (i = 0; i < adev->sdma.num_instances; i++) {
+       for_each_inst(i, inst_mask) {
                rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
                WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
@@ -655,9 +655,11 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
 {
        u32 f32_cntl;
        int i;
+       uint32_t inst_mask;
 
+       inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
        if (!enable) {
-               sdma_v5_0_gfx_stop(adev);
+               sdma_v5_0_gfx_stop(adev, inst_mask);
                sdma_v5_0_rlc_stop(adev);
        }
 
@@ -1506,40 +1508,25 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 {
        struct amdgpu_device *adev = ring->adev;
-       int i, j, r;
-       u32 rb_cntl, ib_cntl, f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+       int j, r;
+       u32 f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
+       u32 inst_id;
 
        if (amdgpu_sriov_vf(adev))
                return -EINVAL;
-
-       for (i = 0; i < adev->sdma.num_instances; i++) {
-               if (ring == &adev->sdma.instance[i].ring)
-                       break;
-       }
-
-       if (i == adev->sdma.num_instances) {
-               DRM_ERROR("sdma instance not found\n");
-               return -EINVAL;
-       }
-
+       inst_id = ring->me;
        amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
 
        /* stop queue */
-       ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
-       ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
-       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
-
-       rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
-       rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
-       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+       sdma_v5_0_gfx_stop(adev, 1 << ring->me);
 
        /* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit to 1 */
-       freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+       freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
        freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 1);
-       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+       WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
 
        for (j = 0; j < adev->usec_timeout; j++) {
-               freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+               freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
                if (REG_GET_FIELD(freeze, SDMA0_FREEZE, FROZEN) & 1)
                        break;
                udelay(1);
@@ -1547,7 +1534,7 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 
        /* check sdma copy engine all idle if frozen not received*/
        if (j == adev->usec_timeout) {
-               stat1_reg = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_STATUS1_REG));
+               stat1_reg = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_STATUS1_REG));
                if ((stat1_reg & 0x3FF) != 0x3FF) {
                        DRM_ERROR("cannot soft reset as sdma not idle\n");
                        r = -ETIMEDOUT;
@@ -1555,35 +1542,35 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
                }
        }
 
-       f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+       f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_F32_CNTL));
        f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
-       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+       WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_F32_CNTL), f32_cntl);
 
-       cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+       cntl = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_CNTL));
        cntl = REG_SET_FIELD(cntl, SDMA0_CNTL, UTC_L1_ENABLE, 0);
-       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), cntl);
+       WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_CNTL), cntl);
 
        /* soft reset SDMA_GFX_PREEMPT.IB_PREEMPT = 0 mmGRBM_SOFT_RESET.SOFT_RESET_SDMA0/1 = 1 */
-       preempt = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT));
+       preempt = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_GFX_PREEMPT));
        preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
-       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_PREEMPT), preempt);
+       WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_GFX_PREEMPT), preempt);
 
        soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
-       soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i;
+       soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << inst_id;
 
        WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
 
        udelay(50);
 
-       soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << i);
+       soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << inst_id);
        WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
 
        /* unfreeze*/
-       freeze = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE));
+       freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
        freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
-       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_FREEZE), freeze);
+       WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE), freeze);
 
-       r = sdma_v5_0_gfx_resume_instance(adev, i, true);
+       r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
 
 err0:
        amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-- 
2.25.1
