Gangs are sets of userqs that schedule together.  You
specify the primary and secondary queues and the scheduler
will make sure they always run at the same time.

v2: handle setting up the gang again after unmaps

Signed-off-by: Alex Deucher <alexander.deuc...@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 95 +++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h |  2 +
 2 files changed, 97 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index e56fae10400db..e42b8cd78884c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -92,19 +92,39 @@ amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
        struct amdgpu_device *adev = uq_mgr->adev;
        const struct amdgpu_userq_funcs *userq_funcs =
                adev->userq_funcs[queue->queue_type];
+       struct amdgpu_usermode_queue *secondary_queue = NULL;
        bool gpu_reset = false;
        int r = 0;
 
+       if (queue->gang_primary)
+               secondary_queue = queue->gang_secondary;
+
        if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
                r = userq_funcs->unmap(uq_mgr, queue);
                if (r) {
                        queue->state = AMDGPU_USERQ_STATE_HUNG;
                        gpu_reset = amdgpu_userq_queue_reset_helper(uq_mgr, 
queue);
+                       if (gpu_reset)
+                               goto reset;
                } else {
                        queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
                }
        }
+       if (secondary_queue) {
+               if (secondary_queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+                       r = userq_funcs->unmap(uq_mgr, secondary_queue);
+                       if (r) {
+                               secondary_queue->state = 
AMDGPU_USERQ_STATE_HUNG;
+                               gpu_reset = 
amdgpu_userq_queue_reset_helper(uq_mgr, secondary_queue);
+                               if (gpu_reset)
+                                       goto reset;
+                       } else {
+                               secondary_queue->state = 
AMDGPU_USERQ_STATE_UNMAPPED;
+                       }
+               }
+       }
 
+reset:
        if (gpu_reset)
                amdgpu_userq_gpu_reset(adev);
 
@@ -118,19 +138,40 @@ amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
        struct amdgpu_device *adev = uq_mgr->adev;
        const struct amdgpu_userq_funcs *userq_funcs =
                adev->userq_funcs[queue->queue_type];
+       struct amdgpu_usermode_queue *secondary_queue = NULL;
        bool gpu_reset = false;
        int r = 0;
 
+       if (queue->gang_primary)
+               secondary_queue = queue->gang_secondary;
+
        if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
                r = userq_funcs->map(uq_mgr, queue);
                if (r) {
                        queue->state = AMDGPU_USERQ_STATE_HUNG;
                        gpu_reset = amdgpu_userq_queue_reset_helper(uq_mgr, 
queue);
+                       if (gpu_reset)
+                               goto reset;
                } else {
                        queue->state = AMDGPU_USERQ_STATE_MAPPED;
                }
        }
+       if (secondary_queue) {
+               if (secondary_queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
+                       r = userq_funcs->map(uq_mgr, secondary_queue);
+                       if (r) {
+                               secondary_queue->state = 
AMDGPU_USERQ_STATE_HUNG;
+                               gpu_reset = 
amdgpu_userq_queue_reset_helper(uq_mgr, secondary_queue);
+                               if (gpu_reset)
+                                       goto reset;
+                       } else {
+                               secondary_queue->state = 
AMDGPU_USERQ_STATE_MAPPED;
+                       }
+               }
+               r = userq_funcs->set_gang(uq_mgr, queue, secondary_queue);
+       }
 
+reset:
        if (gpu_reset)
                amdgpu_userq_gpu_reset(adev);
 
@@ -561,6 +602,53 @@ amdgpu_userq_query_status(struct drm_file *filp, union 
drm_amdgpu_userq *args)
        return 0;
 }
 
+static int
+amdgpu_userq_create_gang(struct drm_file *filp, union drm_amdgpu_userq *args)
+{
+       struct amdgpu_usermode_queue *primary_queue, *secondary_queue;
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
+       struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+       const struct amdgpu_userq_funcs *userq_funcs;
+       struct amdgpu_device *adev = uq_mgr->adev;
+       int primary_queue_id = args->in_cg.primary_queue_id;
+       int secondary_queue_id = args->in_cg.secondary_queue_id;
+       int r;
+
+       mutex_lock(&uq_mgr->userq_mutex);
+       primary_queue = amdgpu_userq_find(uq_mgr, primary_queue_id);
+       if (!primary_queue) {
+               dev_err(adev->dev, "Invalid gang primary queue id\n");
+               mutex_unlock(&uq_mgr->userq_mutex);
+               return -EINVAL;
+       }
+       if ((primary_queue->queue_type != AMDGPU_HW_IP_GFX) &&
+           (primary_queue->queue_type != AMDGPU_HW_IP_COMPUTE)) {
+               dev_err(adev->dev, "Invalid gang primary queue type\n");
+               mutex_unlock(&uq_mgr->userq_mutex);
+               return -EINVAL;
+       }
+       secondary_queue = amdgpu_userq_find(uq_mgr, secondary_queue_id);
+       if (!secondary_queue) {
+               dev_err(adev->dev, "Invalid gang secondary queue id\n");
+               mutex_unlock(&uq_mgr->userq_mutex);
+               return -EINVAL;
+       }
+       if ((secondary_queue->queue_type != AMDGPU_HW_IP_GFX) &&
+           (secondary_queue->queue_type != AMDGPU_HW_IP_COMPUTE)) {
+               dev_err(adev->dev, "Invalid gang secondary queue type\n");
+               mutex_unlock(&uq_mgr->userq_mutex);
+               return -EINVAL;
+       }
+
+       primary_queue->gang_primary = true;
+       primary_queue->gang_secondary = secondary_queue;
+       userq_funcs = adev->userq_funcs[primary_queue->queue_type];
+       r = userq_funcs->set_gang(uq_mgr, primary_queue, secondary_queue);
+       mutex_unlock(&uq_mgr->userq_mutex);
+
+       return r;
+}
+
 int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *filp)
 {
@@ -611,6 +699,13 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
                if (r)
                        DRM_ERROR("Failed to query usermode queue status\n");
                break;
+       case AMDGPU_USERQ_OP_CREATE_GANG:
+               if (args->in_cg.pad)
+                       return -EINVAL;
+               r = amdgpu_userq_create_gang(filp, args);
+               if (r)
+                       DRM_ERROR("Failed to create usermode queue gang\n");
+               break;
        default:
                DRM_DEBUG_DRIVER("Invalid user queue op specified: %d\n", 
args->in.op);
                return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index ca11f7748d031..40510b4d824b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -66,6 +66,8 @@ struct amdgpu_usermode_queue {
        u32                     xcp_id;
        int                     priority;
        uint64_t                generation;
+       bool                    gang_primary;
+       struct amdgpu_usermode_queue *gang_secondary;
 };
 
 struct amdgpu_userq_funcs {
-- 
2.49.0

Reply via email to