Guard the deferred work handlers with drm_dev_enter()/drm_dev_exit() so that
HW accesses already in flight can complete, while any further HW accesses are
blocked once the device has been removed from the PCI bus.

Signed-off-by: Andrey Grodzovsky <andrey.grodzov...@amd.com>
---
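For reference, a minimal sketch of the guard pattern that every hunk below
applies, using a hypothetical deferred work handler (my_idle_work_handler and
the my_idle_work field are placeholders, not symbols touched by this patch):

    static void my_idle_work_handler(struct work_struct *work)
    {
            struct amdgpu_device *adev =
                    container_of(work, struct amdgpu_device, my_idle_work.work);
            int idx;

            /* drm_dev_enter() returns false once the drm_device has been
             * unplugged (drm_dev_unplug() on PCI remove), so the HW access
             * below is skipped for a removed device.
             */
            if (drm_dev_enter(&adev->ddev, &idx)) {
                    /* safe to touch HW registers/rings here */
                    drm_dev_exit(idx);
            }
    }
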
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 11 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 29 ++++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c    | 26 +++++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c   | 28 ++++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c    | 55 ++++++++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c    | 43 ++++++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c    | 30 ++++++-----
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c    | 61 ++++++++++++----------
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c      | 10 +++-
 9 files changed, 189 insertions(+), 104 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 9edb35ba181b..f942496c2b35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -30,6 +30,7 @@
 #include <linux/dma-buf.h>
 #include "amdgpu_xgmi.h"
 #include <uapi/linux/kfd_ioctl.h>
+#include <drm/drm_drv.h>
 
 /* Total memory size in system memory and all GPU VRAM. Used to
  * estimate worst case amount of memory to reserve for page tables
@@ -223,9 +224,15 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
 void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+       int idx;
 
-       if (amdgpu_device_should_recover_gpu(adev))
-               amdgpu_device_gpu_recover(adev, NULL);
+       if (drm_dev_enter(&adev->ddev, &idx)) {
+
+               if (amdgpu_device_should_recover_gpu(adev))
+                       amdgpu_device_gpu_recover(adev, NULL);
+
+               drm_dev_exit(idx);
+       }
 }
 
 int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 55afc11c17e6..c30e0b0596a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2674,24 +2674,35 @@ static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
 {
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, delayed_init_work.work);
-       int r;
+       int r, idx;
 
-       r = amdgpu_ib_ring_tests(adev);
-       if (r)
-               DRM_ERROR("ib ring test failed (%d).\n", r);
+       if (drm_dev_enter(&adev->ddev, &idx)) {
+               r = amdgpu_ib_ring_tests(adev);
+               if (r)
+                       DRM_ERROR("ib ring test failed (%d).\n", r);
+
+               drm_dev_exit(idx);
+       }
 }
 
 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
 {
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
+       int idx;
+
+       if (drm_dev_enter(&adev->ddev, &idx)) {
+
+               mutex_lock(&adev->gfx.gfx_off_mutex);
+               if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
+                       if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+                               adev->gfx.gfx_off_state = true;
+               }
+               mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+               drm_dev_exit(idx);
 
-       mutex_lock(&adev->gfx.gfx_off_mutex);
-       if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
-                       adev->gfx.gfx_off_state = true;
        }
-       mutex_unlock(&adev->gfx.gfx_off_mutex);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index a922154953a7..5eda0d0fc974 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -188,8 +188,15 @@ static void amdgpu_irq_handle_ih1(struct work_struct *work)
 {
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  irq.ih1_work);
+       int idx;
 
-       amdgpu_ih_process(adev, &adev->irq.ih1);
+       if (drm_dev_enter(&adev->ddev, &idx)) {
+
+               amdgpu_ih_process(adev, &adev->irq.ih1);
+
+               drm_dev_exit(idx);
+
+       }
 }
 
 /**
@@ -203,8 +210,14 @@ static void amdgpu_irq_handle_ih2(struct work_struct *work)
 {
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  irq.ih2_work);
+       int idx;
+
+       if (drm_dev_enter(&adev->ddev, &idx)) {
+
+               amdgpu_ih_process(adev, &adev->irq.ih2);
 
-       amdgpu_ih_process(adev, &adev->irq.ih2);
+               drm_dev_exit(idx);
+       }
 }
 
 /**
@@ -218,8 +231,15 @@ static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
 {
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  irq.ih_soft_work);
+       int idx;
+
+       if (drm_dev_enter(&adev->ddev, &idx)) {
+
+               amdgpu_ih_process(adev, &adev->irq.ih_soft);
 
-       amdgpu_ih_process(adev, &adev->irq.ih_soft);
+               drm_dev_exit(idx);
+
+       }
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 8996cb4ed57a..1e8fd66c1e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -29,6 +29,7 @@
 #include "amdgpu_pm.h"
 #include "soc15d.h"
 #include "soc15_common.h"
+#include <drm/drm_drv.h>
 
 #define JPEG_IDLE_TIMEOUT      msecs_to_jiffies(1000)
 
@@ -78,20 +79,25 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, jpeg.idle_work.work);
        unsigned int fences = 0;
-       unsigned int i;
+       unsigned int i, idx;
 
-       for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
-               if (adev->jpeg.harvest_config & (1 << i))
-                       continue;
+       if (drm_dev_enter(&adev->ddev, &idx)) {
 
-               fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);
-       }
+               for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+                       if (adev->jpeg.harvest_config & (1 << i))
+                               continue;
 
-       if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
-               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
-                                                      AMD_PG_STATE_GATE);
-       else
-               schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
+                       fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);
+               }
+
+               if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
+                       amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
+                                                              AMD_PG_STATE_GATE);
+               else
+                       schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
+
+               drm_dev_exit(idx);
+       }
 }
 
 void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index c0a16eac4923..97a6c028ac74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/reboot.h>
 #include <linux/syscalls.h>
+#include <drm/drm_drv.h>
 
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
@@ -1334,7 +1335,15 @@ static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
        struct ras_manager *obj =
                container_of(data, struct ras_manager, ih_data);
 
-       amdgpu_ras_interrupt_handler(obj);
+       int idx;
+
+       if (drm_dev_enter(&obj->adev->ddev, &idx)) {
+
+               amdgpu_ras_interrupt_handler(obj);
+
+               drm_dev_exit(idx);
+
+       }
 }
 
 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
@@ -1565,31 +1574,37 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
        struct amdgpu_device *remote_adev = NULL;
        struct amdgpu_device *adev = ras->adev;
        struct list_head device_list, *device_list_handle =  NULL;
+       int idx;
 
-       if (!ras->disable_ras_err_cnt_harvest) {
-               struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+       if (drm_dev_enter(&adev->ddev, &idx)) {
 
-               /* Build list of devices to query RAS related errors */
-               if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
-                       device_list_handle = &hive->device_list;
-               } else {
-                       INIT_LIST_HEAD(&device_list);
-                       list_add_tail(&adev->gmc.xgmi.head, &device_list);
-                       device_list_handle = &device_list;
-               }
+               if (!ras->disable_ras_err_cnt_harvest) {
+                       struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
 
-               list_for_each_entry(remote_adev,
-                               device_list_handle, gmc.xgmi.head) {
-                       amdgpu_ras_query_err_status(remote_adev);
-                       amdgpu_ras_log_on_err_counter(remote_adev);
+                       /* Build list of devices to query RAS related errors */
+                       if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+                               device_list_handle = &hive->device_list;
+                       } else {
+                               INIT_LIST_HEAD(&device_list);
+                               list_add_tail(&adev->gmc.xgmi.head, &device_list);
+                               device_list_handle = &device_list;
+                       }
+
+                       list_for_each_entry(remote_adev,
+                                       device_list_handle, gmc.xgmi.head) {
+                               amdgpu_ras_query_err_status(remote_adev);
+                               amdgpu_ras_log_on_err_counter(remote_adev);
+                       }
+
+                       amdgpu_put_xgmi_hive(hive);
                }
 
-               amdgpu_put_xgmi_hive(hive);
-       }
+               if (amdgpu_device_should_recover_gpu(ras->adev))
+                       amdgpu_device_gpu_recover(ras->adev, NULL);
+               atomic_set(&ras->in_recovery, 0);
 
-       if (amdgpu_device_should_recover_gpu(ras->adev))
-               amdgpu_device_gpu_recover(ras->adev, NULL);
-       atomic_set(&ras->in_recovery, 0);
+               drm_dev_exit(idx);
+       }
 }
 
 /* alloc/realloc bps array */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index df47f5ffa08f..19790afd2893 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1234,29 +1234,36 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, uvd.idle_work.work);
        unsigned fences = 0, i, j;
+       int idx;
 
-       for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
-               if (adev->uvd.harvest_config & (1 << i))
-                       continue;
-               fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
-               for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
-                       fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
+       if (drm_dev_enter(&adev->ddev, &idx)) {
+
+               for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+                       if (adev->uvd.harvest_config & (1 << i))
+                               continue;
+                       fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
+                       for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
+                               fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
+                       }
                }
-       }
 
-       if (fences == 0) {
-               if (adev->pm.dpm_enabled) {
-                       amdgpu_dpm_enable_uvd(adev, false);
+               if (fences == 0) {
+                       if (adev->pm.dpm_enabled) {
+                               amdgpu_dpm_enable_uvd(adev, false);
+                       } else {
+                               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+                               /* shutdown the UVD block */
+                               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                                      AMD_PG_STATE_GATE);
+                               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                                      AMD_CG_STATE_GATE);
+                       }
                } else {
-                       amdgpu_asic_set_uvd_clocks(adev, 0, 0);
-                       /* shutdown the UVD block */
-                       amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-                                                              AMD_PG_STATE_GATE);
-                       amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
-                                                              AMD_CG_STATE_GATE);
+                       schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
                }
-       } else {
-               schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+
+               drm_dev_exit(idx);
+
        }
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 833203401ef4..81ad937936bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -340,22 +340,28 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, vce.idle_work.work);
        unsigned i, count = 0;
+       int idx;
 
-       for (i = 0; i < adev->vce.num_rings; i++)
-               count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
+       if (drm_dev_enter(&adev->ddev, &idx)) {
 
-       if (count == 0) {
-               if (adev->pm.dpm_enabled) {
-                       amdgpu_dpm_enable_vce(adev, false);
+               for (i = 0; i < adev->vce.num_rings; i++)
+                       count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
+
+               if (count == 0) {
+                       if (adev->pm.dpm_enabled) {
+                               amdgpu_dpm_enable_vce(adev, false);
+                       } else {
+                               amdgpu_asic_set_vce_clocks(adev, 0, 0);
+                               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                                      AMD_PG_STATE_GATE);
+                               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                                      AMD_CG_STATE_GATE);
+                       }
                } else {
-                       amdgpu_asic_set_vce_clocks(adev, 0, 0);
-                       amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                                              AMD_PG_STATE_GATE);
-                       amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
-                                                              AMD_CG_STATE_GATE);
+                       schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
                }
-       } else {
-               schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
+
+               drm_dev_exit(idx);
        }
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index b42db22761b8..0e7404653ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -342,42 +342,47 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
                container_of(work, struct amdgpu_device, vcn.idle_work.work);
        unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
        unsigned int i, j;
-       int r = 0;
+       int idx, r = 0;
 
-       for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-               if (adev->vcn.harvest_config & (1 << j))
-                       continue;
+       if (drm_dev_enter(&adev->ddev, &idx)) {
 
-               for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
-                       fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
-               }
+               for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
+                       if (adev->vcn.harvest_config & (1 << j))
+                               continue;
 
-               if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
-                       struct dpg_pause_state new_state;
+                       for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+                               fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]);
 
-                       if (fence[j] ||
-                               unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
-                               new_state.fw_based = VCN_DPG_STATE__PAUSE;
-                       else
-                               new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+                       if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)    {
+                               struct dpg_pause_state new_state;
+
+                               if (fence[j] ||
+                                       unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
+                                       new_state.fw_based = VCN_DPG_STATE__PAUSE;
+                               else
+                                       new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
 
-                       adev->vcn.pause_dpg_mode(adev, j, &new_state);
+                               adev->vcn.pause_dpg_mode(adev, j, &new_state);
+                       }
+
+                       fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
+                       fences += fence[j];
                }
 
-               fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec);
-               fences += fence[j];
-       }
+               if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
+                       amdgpu_gfx_off_ctrl(adev, true);
+                       amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+                              AMD_PG_STATE_GATE);
+                       r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+                                       false);
+                       if (r)
+                               dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
+               } else {
+                       schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+               }
+
+               drm_dev_exit(idx);
 
-       if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
-               amdgpu_gfx_off_ctrl(adev, true);
-               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
-                      AMD_PG_STATE_GATE);
-               r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
-                               false);
-               if (r)
-                       dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
-       } else {
-               schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
        }
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 84d2eaa38101..4799290e5625 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -57,6 +57,8 @@
 
 #include "ivsrcid/ivsrcid_vislands30.h"
 
+#include <drm/drm_drv.h>
+
 #define GFX8_NUM_GFX_RINGS     1
 #define GFX8_MEC_HPD_SIZE 4096
 
@@ -6793,8 +6795,14 @@ static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
 
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
        struct sq_work *sq_work = container_of(work, struct sq_work, work);
+       int idx;
+
+       if (drm_dev_enter(&adev->ddev, &idx)) {
 
-       gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
+               gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
+
+               drm_dev_exit(idx);
+       }
 }
 
 static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
-- 
2.25.1
