The trap handler is set per-process and per-device, and is unrelated
to queue management.

Move the implementation closer to the TMA setup code.

Signed-off-by: Jay Cornwall <jay.cornwall@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c      |  6 +----
 .../drm/amd/amdkfd/kfd_device_queue_manager.c | 22 -------------------
 .../drm/amd/amdkfd/kfd_device_queue_manager.h |  5 -----
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  4 ++++
 drivers/gpu/drm/amd/amdkfd/kfd_process.c      | 19 ++++++++++++++++
 5 files changed, 24 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 8cc51cec988a..6802c616e10e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -556,11 +556,7 @@ static int kfd_ioctl_set_trap_handler(struct file *filep,
                goto out;
        }
 
-       if (dev->dqm->ops.set_trap_handler(dev->dqm,
-                                       &pdd->qpd,
-                                       args->tba_addr,
-                                       args->tma_addr))
-               err = -EINVAL;
+       kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
 
 out:
        mutex_unlock(&p->mutex);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c37e9c4b1fb4..6bb778f24441 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1596,26 +1596,6 @@ static bool set_cache_memory_policy(struct 
device_queue_manager *dqm,
        return retval;
 }
 
-static int set_trap_handler(struct device_queue_manager *dqm,
-                               struct qcm_process_device *qpd,
-                               uint64_t tba_addr,
-                               uint64_t tma_addr)
-{
-       uint64_t *tma;
-
-       if (dqm->dev->cwsr_enabled) {
-               /* Jump from CWSR trap handler to user trap */
-               tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
-               tma[0] = tba_addr;
-               tma[1] = tma_addr;
-       } else {
-               qpd->tba_addr = tba_addr;
-               qpd->tma_addr = tma_addr;
-       }
-
-       return 0;
-}
-
 static int process_termination_nocpsch(struct device_queue_manager *dqm,
                struct qcm_process_device *qpd)
 {
@@ -1859,7 +1839,6 @@ struct device_queue_manager 
*device_queue_manager_init(struct kfd_dev *dev)
                dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
                dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
                dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
-               dqm->ops.set_trap_handler = set_trap_handler;
                dqm->ops.process_termination = process_termination_cpsch;
                dqm->ops.evict_process_queues = evict_process_queues_cpsch;
                dqm->ops.restore_process_queues = restore_process_queues_cpsch;
@@ -1878,7 +1857,6 @@ struct device_queue_manager 
*device_queue_manager_init(struct kfd_dev *dev)
                dqm->ops.initialize = initialize_nocpsch;
                dqm->ops.uninitialize = uninitialize;
                dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
-               dqm->ops.set_trap_handler = set_trap_handler;
                dqm->ops.process_termination = process_termination_nocpsch;
                dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
                dqm->ops.restore_process_queues =
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 16262e5d93f5..aee033b1d148 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -121,11 +121,6 @@ struct device_queue_manager_ops {
                                           void __user *alternate_aperture_base,
                                           uint64_t alternate_aperture_size);
 
-       int     (*set_trap_handler)(struct device_queue_manager *dqm,
-                                   struct qcm_process_device *qpd,
-                                   uint64_t tba_addr,
-                                   uint64_t tma_addr);
-
        int (*process_termination)(struct device_queue_manager *dqm,
                        struct qcm_process_device *qpd);
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index e2ebd5a1d4de..8f839154bf1f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -944,6 +944,10 @@ bool interrupt_is_wanted(struct kfd_dev *dev,
 /* amdkfd Apertures */
 int kfd_init_apertures(struct kfd_process *process);
 
+void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
+                                 uint64_t tba_addr,
+                                 uint64_t tma_addr);
+
 /* Queue Context Management */
 int init_queue(struct queue **q, const struct queue_properties *properties);
 void uninit_queue(struct queue *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index a4d7682289bb..ff3e76450b66 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1162,6 +1162,25 @@ static int kfd_process_device_init_cwsr_dgpu(struct 
kfd_process_device *pdd)
        return 0;
 }
 
+void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
+                                 uint64_t tba_addr,
+                                 uint64_t tma_addr)
+{
+       if (qpd->cwsr_kaddr) {
+               /* KFD trap handler is bound, record as second-level TBA/TMA
+                * in first-level TMA. First-level trap will jump to second.
+                */
+               uint64_t *tma =
+                       (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
+               tma[0] = tba_addr;
+               tma[1] = tma_addr;
+       } else {
+               /* No trap handler bound, bind as first-level TBA/TMA. */
+               qpd->tba_addr = tba_addr;
+               qpd->tma_addr = tma_addr;
+       }
+}
+
 /*
  * On return the kfd_process is fully operational and will be freed when the
  * mm is released
-- 
2.17.1

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Reply via email to