Plumb in support for disabling kernel queues in GFX11.  We have to
bring up a GFX queue briefly in order to initialize the clear state.
After that we can disable it.

Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
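(Note for reviewers, not intended for the commit message: below is a rough,
standalone C sketch of the control flow this patch gives gfx_v11_0_cp_resume()
when kernel queues are disabled.  The struct, the ring names and the helpers
test_ring()/test_helper()/disable_kgq()/cp_resume() are simplified stand-ins
for illustration only, not the real amdgpu API; the authoritative code is in
the hunks below.)

#include <stdbool.h>
#include <stdio.h>

struct ring {
	const char *name;
	bool ready;
};

/* Stand-ins for the real amdgpu ring test / queue disable helpers. */
static int test_ring(struct ring *ring)
{
	printf("ring test on %s\n", ring->name);
	return 0;
}

static int test_helper(struct ring *ring)
{
	int r = test_ring(ring);

	/* In this model, the helper also marks the ring ready on success. */
	ring->ready = !r;
	return r;
}

static void disable_kgq(void)
{
	printf("kernel gfx queue torn back down\n");
}

static int cp_resume(bool disable_kq, struct ring *gfx, int num_gfx,
		     struct ring *kcq, int num_kcq)
{
	int i, r;

	if (disable_kq) {
		/*
		 * The gfx queue was only brought up to initialize the clear
		 * state; run a one-off ring test without setting ring->ready,
		 * then disable the kernel gfx queue again.
		 */
		for (i = 0; i < num_gfx; i++) {
			r = test_ring(&gfx[i]);
			if (r)
				return r;
		}
		disable_kgq();
	} else {
		/* Normal path: test gfx and compute rings and mark them ready. */
		for (i = 0; i < num_gfx; i++) {
			r = test_helper(&gfx[i]);
			if (r)
				return r;
		}
		for (i = 0; i < num_kcq; i++) {
			r = test_helper(&kcq[i]);
			if (r)
				return r;
		}
	}
	return 0;
}

int main(void)
{
	struct ring gfx[] = { { "gfx", false } };
	struct ring kcq[] = { { "comp", false } };

	/* Pretend the disable-kernel-queues option was set. */
	return cp_resume(true, gfx, 1, kcq, 1);
}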
 drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 77 +++++++++++++++++---------
 1 file changed, 51 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 3e49c0f399e02..e18100c5faa2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1128,6 +1128,10 @@ static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
 
        ring->ring_obj = NULL;
        ring->use_doorbell = true;
+       if (adev->gfx.disable_kq) {
+               ring->no_scheduler = true;
+               ring->no_user_submission = true;
+       }
 
        if (!ring_id)
                ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
@@ -1709,21 +1713,23 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
                }
        }
 
-       ring_id = 0;
-       /* set up the compute queues - allocate horizontally across pipes */
-       for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
-               for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
-                       for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
-                               if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
-                                                                    k, j))
-                                       continue;
+       if (!adev->gfx.disable_kq) {
+               ring_id = 0;
+               /* set up the compute queues - allocate horizontally across pipes */
+               for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
+                       for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
+                               for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
+                                       if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
+                                                                            k, j))
+                                               continue;
 
-                               r = gfx_v11_0_compute_ring_init(adev, ring_id,
-                                                               i, k, j);
-                               if (r)
-                                       return r;
+                                       r = gfx_v11_0_compute_ring_init(adev, ring_id,
+                                                                       i, k, j);
+                                       if (r)
+                                               return r;
 
-                               ring_id++;
+                                       ring_id++;
+                               }
                        }
                }
        }
@@ -1823,8 +1829,10 @@ static int gfx_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
-       for (i = 0; i < adev->gfx.num_compute_rings; i++)
-               amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+       if (!adev->gfx.disable_kq) {
+               for (i = 0; i < adev->gfx.num_compute_rings; i++)
+                       amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
+       }
 
        amdgpu_gfx_mqd_sw_fini(adev, 0);
 
@@ -4489,6 +4497,9 @@ static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
        if (!amdgpu_async_gfx_ring)
                gfx_v11_0_cp_compute_enable(adev, true);
 
+       if (adev->gfx.disable_kq)
+               return 0;
+
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
 
@@ -4561,18 +4572,29 @@ static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
                        return r;
        }
 
-       for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
-               ring = &adev->gfx.gfx_ring[i];
-               r = amdgpu_ring_test_helper(ring);
-               if (r)
-                       return r;
-       }
+       if (adev->gfx.disable_kq) {
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+                       ring = &adev->gfx.gfx_ring[i];
+                       /* we don't want to set ring->ready */
+                       r = amdgpu_ring_test_ring(ring);
+                       if (r)
+                               return r;
+               }
+               amdgpu_gfx_disable_kgq(adev, 0);
+       } else {
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+                       ring = &adev->gfx.gfx_ring[i];
+                       r = amdgpu_ring_test_helper(ring);
+                       if (r)
+                               return r;
+               }
 
-       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-               ring = &adev->gfx.compute_ring[i];
-               r = amdgpu_ring_test_helper(ring);
-               if (r)
-                       return r;
+               for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+                       ring = &adev->gfx.compute_ring[i];
+                       r = amdgpu_ring_test_helper(ring);
+                       if (r)
+                               return r;
+               }
        }
 
        return 0;
@@ -5111,6 +5133,9 @@ static int gfx_v11_0_early_init(struct amdgpu_ip_block *ip_block)
 {
        struct amdgpu_device *adev = ip_block->adev;
 
+       if (amdgpu_disable_kq == 1)
+               adev->gfx.disable_kq = true;
+
        adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
 
        adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
-- 
2.48.1
