From: James Zhu <james....@amd.com>

Implement partition scheduling for GC(9, 4, 3).

Signed-off-by: James Zhu <james....@amd.com>
Acked-by: Lijo Lazar <lijo.la...@amd.com>
Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
 .../drm/amd/amdgpu/aqua_vanjaram_reg_init.c   | 41 +++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c 
b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
index 073ae95e6dd6..4ca932a62ce6 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram_reg_init.c
@@ -166,6 +166,46 @@ static int 
aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev)
        return aqua_vanjaram_xcp_sched_list_update(adev);
 }
 
+int aqua_vanjaram_select_scheds(
+               struct amdgpu_device *adev,
+               u32 hw_ip,
+               u32 hw_prio,
+               struct amdgpu_fpriv *fpriv,
+               unsigned int *num_scheds,
+               struct drm_gpu_scheduler ***scheds)
+{
+       u32 sel_xcp_id;
+       int i;
+
+       if (fpriv->xcp_id == ~0) {
+               u32 least_ref_cnt = ~0;
+
+               fpriv->xcp_id = 0;
+               for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
+                       u32 total_ref_cnt;
+
+                       total_ref_cnt = 
atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt);
+                       if (total_ref_cnt < least_ref_cnt) {
+                               fpriv->xcp_id = i;
+                               least_ref_cnt = total_ref_cnt;
+                       }
+               }
+       }
+       sel_xcp_id = fpriv->xcp_id;
+
+       if 
(adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
+               *num_scheds = 
adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
+               *scheds = 
adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
+               atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
+               DRM_DEBUG("Selected partition #%d", sel_xcp_id);
+       } else {
+               DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id);
+               return -ENOENT;
+       }
+
+       return 0;
+}
+
 static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev,
                                         enum amd_hw_ip_block_type block,
                                         int8_t inst)
@@ -548,6 +588,7 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
        .query_partition_mode = &aqua_vanjaram_query_partition_mode,
        .get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
        .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
+       .select_scheds = &aqua_vanjaram_select_scheds,
        .update_partition_sched_list = 
&aqua_vanjaram_update_partition_sched_list
 };
 
-- 
2.40.1

Reply via email to