Configure the two MES pipes with different hardware resources: give each pipe
its own scheduler context and query status fence writeback slots, and on MES 12
program the VMID/HQD masks and aggregated doorbells only on the scheduler pipe.
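
A minimal sketch of the per-pipe writeback bookkeeping this adds in
amdgpu_mes_init() and the matching error/teardown paths; identifiers are
taken from the diff below, error reporting is omitted, and the query status
fence slots follow the same pattern:

	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
		if (amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]))
			goto error;
		adev->mes.sch_ctx_gpu_addr[i] =
			adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
		/* adev is zero-allocated, so a non-NULL CPU pointer also
		 * marks the slot as allocated for the error/fini paths.
		 */
		adev->mes.sch_ctx_ptr[i] =
			(uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];
	}
	...
error:
	for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
		if (adev->mes.sch_ctx_ptr[i])
			amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);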

Signed-off-by: Jack Xiao <jack.x...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 77 +++++++++++++++----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 12 ++--
 drivers/gpu/drm/amd/amdgpu/mes_v11_0.c  |  7 +--
 drivers/gpu/drm/amd/amdgpu/mes_v12_0.c  | 49 ++++++++--------
 4 files changed, 81 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 52277129ea5d..f2310075545b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -165,36 +165,38 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
                        adev->mes.sdma_hqd_mask[i] = 0xfc;
        }
 
-       r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs);
-       if (r) {
-               dev_err(adev->dev,
-                       "(%d) ring trail_fence_offs wb alloc failed\n", r);
-               goto error_ids;
-       }
-       adev->mes.sch_ctx_gpu_addr =
-               adev->wb.gpu_addr + (adev->mes.sch_ctx_offs * 4);
-       adev->mes.sch_ctx_ptr =
-               (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs];
+       for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+               r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
+               if (r) {
+                       dev_err(adev->dev,
+                               "(%d) ring trail_fence_offs wb alloc failed\n",
+                               r);
+                       goto error;
+               }
+               adev->mes.sch_ctx_gpu_addr[i] =
+                       adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
+               adev->mes.sch_ctx_ptr[i] =
+                       (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];
 
-       r = amdgpu_device_wb_get(adev, &adev->mes.query_status_fence_offs);
-       if (r) {
-               amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
-               dev_err(adev->dev,
-                       "(%d) query_status_fence_offs wb alloc failed\n", r);
-               goto error_ids;
+               r = amdgpu_device_wb_get(adev,
+                                &adev->mes.query_status_fence_offs[i]);
+               if (r) {
+                       dev_err(adev->dev,
+                             "(%d) query_status_fence_offs wb alloc failed\n",
+                             r);
+                       goto error;
+               }
+               adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
+                       (adev->mes.query_status_fence_offs[i] * 4);
+               adev->mes.query_status_fence_ptr[i] =
+                       (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
        }
-       adev->mes.query_status_fence_gpu_addr =
-               adev->wb.gpu_addr + (adev->mes.query_status_fence_offs * 4);
-       adev->mes.query_status_fence_ptr =
-               (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs];
 
        r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
        if (r) {
-               amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
-               amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
                dev_err(adev->dev,
                        "(%d) read_val_offs alloc failed\n", r);
-               goto error_ids;
+               goto error;
        }
        adev->mes.read_val_gpu_addr =
                adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
@@ -214,10 +216,16 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
 error_doorbell:
        amdgpu_mes_doorbell_free(adev);
 error:
-       amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
-       amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
-       amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
-error_ids:
+       for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+               if (adev->mes.sch_ctx_ptr[i])
+                       amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
+               if (adev->mes.query_status_fence_ptr[i])
+                       amdgpu_device_wb_free(adev,
+                                     adev->mes.query_status_fence_offs[i]);
+       }
+       if (adev->mes.read_val_ptr)
+               amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
+
        idr_destroy(&adev->mes.pasid_idr);
        idr_destroy(&adev->mes.gang_id_idr);
        idr_destroy(&adev->mes.queue_id_idr);
@@ -228,13 +236,22 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
 
 void amdgpu_mes_fini(struct amdgpu_device *adev)
 {
+       int i;
+
        amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
                              &adev->mes.event_log_gpu_addr,
                              &adev->mes.event_log_cpu_addr);
 
-       amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
-       amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
-       amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
+       for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+               if (adev->mes.sch_ctx_ptr[i])
+                       amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
+               if (adev->mes.query_status_fence_ptr[i])
+                       amdgpu_device_wb_free(adev,
+                                     adev->mes.query_status_fence_offs[i]);
+       }
+       if (adev->mes.read_val_ptr)
+               amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
+
        amdgpu_mes_doorbell_free(adev);
 
        idr_destroy(&adev->mes.pasid_idr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index e7915a24dfdd..c5fe404d4e12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -113,12 +113,12 @@ struct amdgpu_mes {
        uint32_t                        gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
        uint32_t                        sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
        uint32_t                        aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
-       uint32_t                        sch_ctx_offs;
-       uint64_t                        sch_ctx_gpu_addr;
-       uint64_t                        *sch_ctx_ptr;
-       uint32_t                        query_status_fence_offs;
-       uint64_t                        query_status_fence_gpu_addr;
-       uint64_t                        *query_status_fence_ptr;
+       uint32_t                        sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
+       uint64_t                        sch_ctx_gpu_addr[AMDGPU_MAX_MES_PIPES];
+       uint64_t                        *sch_ctx_ptr[AMDGPU_MAX_MES_PIPES];
+       uint32_t                        query_status_fence_offs[AMDGPU_MAX_MES_PIPES];
+       uint64_t                        query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
+       uint64_t                        *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES];
        uint32_t                        read_val_offs;
        uint64_t                        read_val_gpu_addr;
        uint32_t                        *read_val_ptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 3d103a5ca184..1b7e1faaa421 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -522,9 +522,9 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
        mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
        mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
        mes_set_hw_res_pkt.paging_vmid = 0;
-       mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
+       mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr[0];
        mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
-               mes->query_status_fence_gpu_addr;
+               mes->query_status_fence_gpu_addr[0];
 
        for (i = 0; i < MAX_COMPUTE_PIPES; i++)
                mes_set_hw_res_pkt.compute_hqd_mask[i] =
@@ -1208,9 +1208,6 @@ static int mes_v11_0_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int pipe;
 
-       amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
-       amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
-
        for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
                kfree(adev->mes.mqd_backup[pipe]);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 4218ac7d4372..aa56034f9e65 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -542,27 +542,33 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
        mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
        mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
 
-       mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
-       mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
-       mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
-       mes_set_hw_res_pkt.paging_vmid = 0;
-       mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr = mes->sch_ctx_gpu_addr;
-       mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
-               mes->query_status_fence_gpu_addr;
-
-       for (i = 0; i < MAX_COMPUTE_PIPES; i++)
-               mes_set_hw_res_pkt.compute_hqd_mask[i] =
-                       mes->compute_hqd_mask[i];
-
-       for (i = 0; i < MAX_GFX_PIPES; i++)
-               mes_set_hw_res_pkt.gfx_hqd_mask[i] = mes->gfx_hqd_mask[i];
-
-       for (i = 0; i < MAX_SDMA_PIPES; i++)
-               mes_set_hw_res_pkt.sdma_hqd_mask[i] = mes->sdma_hqd_mask[i];
+       if (pipe == AMDGPU_MES_SCHED_PIPE) {
+               mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
+               mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
+               mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
+               mes_set_hw_res_pkt.paging_vmid = 0;
+
+               for (i = 0; i < MAX_COMPUTE_PIPES; i++)
+                       mes_set_hw_res_pkt.compute_hqd_mask[i] =
+                               mes->compute_hqd_mask[i];
+
+               for (i = 0; i < MAX_GFX_PIPES; i++)
+                       mes_set_hw_res_pkt.gfx_hqd_mask[i] =
+                               mes->gfx_hqd_mask[i];
+
+               for (i = 0; i < MAX_SDMA_PIPES; i++)
+                       mes_set_hw_res_pkt.sdma_hqd_mask[i] =
+                               mes->sdma_hqd_mask[i];
+
+               for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
+                       mes_set_hw_res_pkt.aggregated_doorbells[i] =
+                               mes->aggregated_doorbells[i];
+       }
 
-       for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
-               mes_set_hw_res_pkt.aggregated_doorbells[i] =
-                       mes->aggregated_doorbells[i];
+       mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr =
+               mes->sch_ctx_gpu_addr[pipe];
+       mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
+               mes->query_status_fence_gpu_addr[pipe];
 
        for (i = 0; i < 5; i++) {
                mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
@@ -1288,9 +1294,6 @@ static int mes_v12_0_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int pipe;
 
-       amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs);
-       amdgpu_device_wb_free(adev, adev->mes.query_status_fence_offs);
-
        for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
                kfree(adev->mes.mqd_backup[pipe]);
 
-- 
2.41.0
