Change-Id: Ic74508ec9de0bf1c027313ce9574e6cb8ea9bb1d
Signed-off-by: Chunming Zhou <david1.z...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 34 ++++++++++++++++++++++--------
 1 file changed, 25 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1968251..e91177a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2083,6 +2083,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
        int i, r;
        int resched;
        bool need_full_reset;
+       u32 unpark_bits;
 
        if (!amdgpu_check_soft_reset(adev)) {
                DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
@@ -2104,6 +2105,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
                amd_sched_hw_job_reset(&ring->sched);
                amdgpu_ring_reset(ring);
        }
+       unpark_bits = 0;
        /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
        amdgpu_fence_driver_force_completion(adev);
        /* store modesetting */
@@ -2147,8 +2149,6 @@ retry:
                amdgpu_atombios_scratch_regs_restore(adev);
        }
        if (!r) {
-               struct amdgpu_ring *buffer_ring = adev->mman.buffer_funcs_ring;
-
                amdgpu_irq_gpu_reset_resume_helper(adev);
                r = amdgpu_ib_ring_tests(adev);
                if (r) {
@@ -2163,11 +2163,20 @@ retry:
                 */
                if (need_full_reset && !(adev->flags & AMD_IS_APU)) {
                        struct amdgpu_vm *vm, *tmp;
+                       int i;
 
                        DRM_INFO("recover page table from shadow\n");
-                       amd_sched_rq_block_entity(
-                               &buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
-                       kthread_unpark(buffer_ring->sched.thread);
+                       for (i = 0; i < adev->vm_manager.vm_pte_num_rings; i++) {
+                               struct amdgpu_ring *ring = adev->vm_manager.vm_pte_rings[i];
+
+                               amd_sched_rq_block_entity(
+                                       &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL], true);
+                               amd_sched_rq_block_entity(
+                                       &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
+                               kthread_unpark(ring->sched.thread);
+                               unpark_bits |= 1 << ring->idx;
+                       }
+
                        spin_lock(&adev->vm_list_lock);
                        list_for_each_entry_safe(vm, tmp, &adev->vm_list, list) 
{
                                spin_unlock(&adev->vm_list_lock);
@@ -2175,8 +2184,15 @@ retry:
                                spin_lock(&adev->vm_list_lock);
                        }
                        spin_unlock(&adev->vm_list_lock);
-                       amd_sched_rq_block_entity(
-                               &buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+
+                       for (i = 0; i < adev->vm_manager.vm_pte_num_rings; i++) {
+                               struct amdgpu_ring *ring = adev->vm_manager.vm_pte_rings[i];
+
+                               amd_sched_rq_block_entity(
+                                       &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL], false);
+                               amd_sched_rq_block_entity(
+                                       &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+                       }
                }
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = adev->rings[i];
@@ -2184,9 +2200,9 @@ retry:
                                continue;
 
                        DRM_INFO("ring:%d recover jobs\n", ring->idx);
-                       kthread_park(buffer_ring->sched.thread);
                        amd_sched_job_recovery(&ring->sched);
-                       kthread_unpark(ring->sched.thread);
+                       if (!((unpark_bits >> ring->idx) & 0x1))
+                               kthread_unpark(ring->sched.thread);
                }
        } else {
                dev_err(adev->dev, "asic resume failed (%d).\n", r);
-- 
1.9.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to