Introduce amdgpu_device_fini_schedulers() to tear down scheduler-related
resources in one place and avoid possible invalid memory access.

drm_sched_fini() is currently called from amdgpu_fence_driver_sw_fini(),
guarded by a check on sched.ops because amdgpu overrides the meaning of
sched.ready. Move the teardown into a dedicated helper invoked from
amdgpu_device_ip_fini(), and make amdgpu_device_init_schedulers() unwind
any partially initialized schedulers on its error paths instead of
returning with them still set up.

Signed-off-by: Jiang Liu <ge...@linux.alibaba.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 29 +++++++++++++++++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  |  9 -------
 2 files changed, 26 insertions(+), 12 deletions(-)
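
Note for reviewers (below the --- marker, so ignored by git-am): the error
unwind added to amdgpu_device_init_schedulers() is the usual two-label goto
pattern. A minimal standalone sketch of the shape follows; the ring and
scheduler types and the init/fini helpers here are stand-ins for
illustration, not the real amdgpu API.

#define MAX_RINGS 8

struct ring {
        int no_scheduler;       /* mirrors amdgpu_ring->no_scheduler */
        int sched_live;         /* stands in for a live drm_gpu_scheduler */
};

static struct ring *rings[MAX_RINGS];

static int sched_init(struct ring *ring)  { ring->sched_live = 1; return 0; }
static void sched_fini(struct ring *ring) { ring->sched_live = 0; }
static int entity_init(struct ring *ring) { (void)ring; return 0; }

static int init_schedulers(void)
{
        int i, r = 0;

        for (i = 0; i < MAX_RINGS; ++i) {
                if (!rings[i] || rings[i]->no_scheduler)
                        continue;

                r = sched_init(rings[i]);
                if (r)
                        goto out_err;           /* ring i: nothing to undo */

                r = entity_init(rings[i]);
                if (r)
                        goto out_sched_fini;    /* ring i: scheduler live */
        }
        return 0;

out_sched_fini:
        sched_fini(rings[i]);           /* undo the current ring first */
out_err:
        while (i--)                     /* then every earlier ring */
                if (rings[i] && !rings[i]->no_scheduler)
                        sched_fini(rings[i]);
        return r;
}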

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ee695e70fb4f..1619bd2473c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2857,27 +2857,48 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
                if (r) {
                        DRM_ERROR("Failed to create scheduler on ring %s.\n",
                                  ring->name);
-                       return r;
+                       goto out_err;
                }
                r = amdgpu_uvd_entity_init(adev, ring);
                if (r) {
                        DRM_ERROR("Failed to create UVD scheduling entity on 
ring %s.\n",
                                  ring->name);
-                       return r;
+                       goto out_sched_fini;
                }
                r = amdgpu_vce_entity_init(adev, ring);
                if (r) {
                        DRM_ERROR("Failed to create VCE scheduling entity on 
ring %s.\n",
                                  ring->name);
-                       return r;
+                       goto out_sched_fini;
                }
        }
 
        amdgpu_xcp_update_partition_sched_list(adev);
 
        return 0;
+
+out_sched_fini:
+       drm_sched_fini(&adev->rings[i]->sched);
+out_err:
+       while (i--)
+               if (adev->rings[i] && !adev->rings[i]->no_scheduler)
+                       drm_sched_fini(&adev->rings[i]->sched);
+       return r;
 }
 
+static void amdgpu_device_fini_schedulers(struct amdgpu_device *adev)
+{
+       int i;
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               struct amdgpu_ring *ring = adev->rings[i];
+
+               if (!ring || ring->no_scheduler)
+                       continue;
+
+               drm_sched_fini(&ring->sched);
+       }
+}
 
 /**
  * amdgpu_device_ip_init - run init for hardware IPs
@@ -3424,6 +3445,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
        amdgpu_amdkfd_device_fini_sw(adev);
 
+       amdgpu_device_fini_schedulers(adev);
+
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.sw)
                        continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 2f24a6aa13bf..c95895a7b888 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -650,15 +650,6 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
                if (!ring || !ring->fence_drv.initialized)
                        continue;
 
-               /*
-                * Notice we check for sched.ops since there's some
-                * override on the meaning of sched.ready by amdgpu.
-                * The natural check would be sched.ready, which is
-                * set as drm_sched_init() finishes...
-                */
-               if (ring->sched.ops)
-                       drm_sched_fini(&ring->sched);
-
                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        dma_fence_put(ring->fence_drv.fences[j]);
                kfree(ring->fence_drv.fences);
-- 
2.43.5
