From: Jiadong Zhu <jiadong....@amd.com>

Implement MMIO-based queue reset for the gfx and compute queues.

v2: use amdgpu_gfx_rlc funcs to enter/exit safe mode.
v3: use gfx_v11_0_request_gfx_index_mutex()

Signed-off-by: Jiadong Zhu <jiadong....@amd.com>
Reviewed-by: Alex Deucher <alexander.deuc...@amd.com>
Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 78 ++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)
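
A minimal caller sketch, for context only (not part of the patch): the
mes_reset_queue_input fields are the ones used in the diff below, while the
reset_hw_queue callback on amdgpu_mes_funcs and the surrounding plumbing are
assumed here purely for illustration.

        /* Illustrative sketch: request an MMIO-based reset of one queue
         * instead of sending a MES RESET packet. Assumes reset_hw_queue is
         * wired up in amdgpu_mes_funcs; the input fields match the diff.
         */
        static int example_reset_queue_mmio(struct amdgpu_device *adev,
                                            struct amdgpu_ring *ring,
                                            uint32_t vmid)
        {
                struct mes_reset_queue_input input = {0};

                input.queue_type = ring->funcs->type;  /* gfx or compute ring */
                input.me_id = ring->me;
                input.pipe_id = ring->pipe;
                input.queue_id = ring->queue;
                input.vmid = vmid;
                input.use_mmio = true;  /* take the MMIO path, bypassing MES */

                return adev->mes.funcs->reset_hw_queue(&adev->mes, &input);
        }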

diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index bf8fb6a1becb..fb617f6cef13 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -26,6 +26,7 @@
 #include "amdgpu.h"
 #include "soc15_common.h"
 #include "soc21.h"
+#include "gfx_v11_0.h"
 #include "gc/gc_11_0_0_offset.h"
 #include "gc/gc_11_0_0_sh_mask.h"
 #include "gc/gc_11_0_0_default.h"
@@ -350,9 +351,81 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
                        offsetof(union MESAPI__REMOVE_QUEUE, api_status));
 }
 
+static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
+                                     uint32_t me_id, uint32_t pipe_id,
+                                     uint32_t queue_id, uint32_t vmid)
+{
+       struct amdgpu_device *adev = mes->adev;
+       uint32_t value;
+       int i, r = 0;
+
+       amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+       if (queue_type == AMDGPU_RING_TYPE_GFX) {
+               dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
+                        me_id, pipe_id, queue_id, vmid);
+
+               gfx_v11_0_request_gfx_index_mutex(adev, true);
+               /* all se allow writes */
+               WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
+                            (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+               value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+               if (pipe_id == 0)
+                       value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
+               else
+                       value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
+               WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
+               gfx_v11_0_request_gfx_index_mutex(adev, false);
+
+               mutex_lock(&adev->srbm_mutex);
+               soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+               /* wait till dequeue takes effect */
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
+                               break;
+                       udelay(1);
+               }
+               if (i >= adev->usec_timeout) {
+                       dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
+                       r = -ETIMEDOUT;
+               }
+
+               soc21_grbm_select(adev, 0, 0, 0, 0);
+               mutex_unlock(&adev->srbm_mutex);
+       } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+               dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
+                        me_id, pipe_id, queue_id);
+               mutex_lock(&adev->srbm_mutex);
+               soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+               WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+               WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+               /* wait till dequeue takes effect */
+               for (i = 0; i < adev->usec_timeout; i++) {
+                       if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+                               break;
+                       udelay(1);
+               }
+               if (i >= adev->usec_timeout) {
+                       dev_err(adev->dev, "failed to wait on hqd deactivate\n");
+                       r = -ETIMEDOUT;
+               }
+               soc21_grbm_select(adev, 0, 0, 0, 0);
+               mutex_unlock(&adev->srbm_mutex);
+       }
+
+       amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+       return r;
+}
+
 static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
                                    struct mes_reset_queue_input *input)
 {
        union MESAPI__RESET mes_reset_queue_pkt;
+
+       if (input->use_mmio)
+               return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
+                                                 input->me_id, input->pipe_id,
+                                                 input->queue_id, input->vmid);
 
        memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
@@ -612,6 +685,11 @@ static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
 {
        union MESAPI__RESET mes_reset_queue_pkt;
 
+       if (input->use_mmio)
+               return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
+                                                 input->me_id, input->pipe_id,
+                                                 input->queue_id, input->vmid);
+
        memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
 
        mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
-- 
2.45.2
