On 5/28/25 10:37, Prike Liang wrote:
> The kernel driver only needs to export the available rings to Mesa
> when userq is disabled; otherwise, the userq IP mask will be cleared
> in Mesa.

Huh? That doesn't sound correct to me.

Disabling userq in Mesa when kernel queues are available is intentional 
for now.
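
To spell out what this patch changes (a condensed sketch of the GFX hunk
below; the other IP types follow the same pattern): the existing code gates
ring reporting on the per-ring no_user_submission flag, while the patch
swaps in the device-wide disable_uq toggle, so rings would only be reported
while user queues are disabled:

    /* existing: report a ring only if it is ready and accepts user submissions */
    if (adev->gfx.gfx_ring[i].sched.ready &&
        !adev->gfx.gfx_ring[i].no_user_submission)
            ++num_rings;

    /* patched: a ring is counted only while user queues are disabled */
    if (adev->gfx.gfx_ring[i].sched.ready &&
        adev->gfx.disable_uq)
            ++num_rings;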

Regards,
Christian.

> 
> Signed-off-by: Prike Liang <prike.li...@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 20 ++++++++++----------
>  1 file changed, 10 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index d2ce7d86dbc8..43d86c09d8bb 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -409,7 +409,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
>               type = AMD_IP_BLOCK_TYPE_GFX;
>               for (i = 0; i < adev->gfx.num_gfx_rings; i++)
>                       if (adev->gfx.gfx_ring[i].sched.ready &&
> -                         !adev->gfx.gfx_ring[i].no_user_submission)
> +                         adev->gfx.disable_uq)
>                               ++num_rings;
>               ib_start_alignment = 32;
>               ib_size_alignment = 32;
> @@ -418,7 +418,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
>               type = AMD_IP_BLOCK_TYPE_GFX;
>               for (i = 0; i < adev->gfx.num_compute_rings; i++)
>                       if (adev->gfx.compute_ring[i].sched.ready &&
> -                         !adev->gfx.compute_ring[i].no_user_submission)
> +                         adev->gfx.disable_uq)
>                               ++num_rings;
>               ib_start_alignment = 32;
>               ib_size_alignment = 32;
> @@ -427,7 +427,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
>               type = AMD_IP_BLOCK_TYPE_SDMA;
>               for (i = 0; i < adev->sdma.num_instances; i++)
>                       if (adev->sdma.instance[i].ring.sched.ready &&
> -                         !adev->sdma.instance[i].ring.no_user_submission)
> +                         adev->gfx.disable_uq)
>                               ++num_rings;
>               ib_start_alignment = 256;
>               ib_size_alignment = 4;
> @@ -439,7 +439,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
>                               continue;
>  
>                       if (adev->uvd.inst[i].ring.sched.ready &&
> -                         !adev->uvd.inst[i].ring.no_user_submission)
> +                         adev->gfx.disable_uq)
>                               ++num_rings;
>               }
>               ib_start_alignment = 256;
> @@ -449,7 +449,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
>               type = AMD_IP_BLOCK_TYPE_VCE;
>               for (i = 0; i < adev->vce.num_rings; i++)
>                       if (adev->vce.ring[i].sched.ready &&
> -                         !adev->vce.ring[i].no_user_submission)
> +                         adev->gfx.disable_uq)
>                               ++num_rings;
>               ib_start_alignment = 256;
>               ib_size_alignment = 4;
> @@ -462,7 +462,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
>  
>                       for (j = 0; j < adev->uvd.num_enc_rings; j++)
>                               if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
> -                                 !adev->uvd.inst[i].ring_enc[j].no_user_submission)
> +                                 adev->gfx.disable_uq)
>                                       ++num_rings;
>               }
>               ib_start_alignment = 256;
> @@ -475,7 +475,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
>                               continue;
>  
>                       if (adev->vcn.inst[i].ring_dec.sched.ready &&
> -                         !adev->vcn.inst[i].ring_dec.no_user_submission)
> +                         adev->gfx.disable_uq)
>                               ++num_rings;
>               }
>               ib_start_alignment = 256;
> @@ -489,7 +489,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
>  
>                       for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
>                               if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
> -                                 !adev->vcn.inst[i].ring_enc[j].no_user_submission)
> +                                 adev->gfx.disable_uq)
>                                       ++num_rings;
>               }
>               ib_start_alignment = 256;
> @@ -505,7 +505,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
>  
>                       for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
>                               if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
> -                                 !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
> +                                 adev->gfx.disable_uq)
>                                       ++num_rings;
>               }
>               ib_start_alignment = 256;
> @@ -514,7 +514,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
>       case AMDGPU_HW_IP_VPE:
>               type = AMD_IP_BLOCK_TYPE_VPE;
>               if (adev->vpe.ring.sched.ready &&
> -                 !adev->vpe.ring.no_user_submission)
> +                 adev->gfx.disable_uq)
>                       ++num_rings;
>               ib_start_alignment = 256;
>               ib_size_alignment = 4;
