Use the GIC functions to allocate interrupt contexts for RDMA EQs. These
interrupt contexts may be shared with Ethernet EQs when MSI-X vectors
are limited.

The driver now supports allocating dedicated MSI-X for each EQ. Indicate
this capability through driver capability bits.

Signed-off-by: Long Li <[email protected]>
---
 drivers/infiniband/hw/mana/main.c | 33 ++++++++++++++++++++++++++-----
 include/net/mana/gdma.h           |  7 +++++--
 2 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index d51dd0ee85f4..0b74dd093b41 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -787,6 +787,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
 {
        struct gdma_context *gc = mdev_to_gc(mdev);
        struct gdma_queue_spec spec = {};
+       struct gdma_irq_context *gic;
        int err, i;
 
        spec.type = GDMA_EQ;
@@ -797,9 +798,15 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
        spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
        spec.eq.msix_index = 0;
 
+       gic = mana_gd_get_gic(gc, false, &spec.eq.msix_index);
+       if (!gic)
+               return -ENOMEM;
+
        err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
-       if (err)
+       if (err) {
+               mana_gd_put_gic(gc, false, 0);
                return err;
+       }
 
        mdev->eqs = kzalloc_objs(struct gdma_queue *,
                                 mdev->ib_dev.num_comp_vectors);
@@ -810,31 +817,47 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
        spec.eq.callback = NULL;
        for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
                spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
+
+               gic = mana_gd_get_gic(gc, false, &spec.eq.msix_index);
+               if (!gic) {
+                       err = -ENOMEM;
+                       goto destroy_eqs;
+               }
+
                err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->eqs[i]);
-               if (err)
+               if (err) {
+                       mana_gd_put_gic(gc, false, spec.eq.msix_index);
                        goto destroy_eqs;
+               }
        }
 
        return 0;
 
 destroy_eqs:
-       while (i-- > 0)
+       while (i-- > 0) {
                mana_gd_destroy_queue(gc, mdev->eqs[i]);
+               mana_gd_put_gic(gc, false, (i + 1) % gc->num_msix_usable);
+       }
        kfree(mdev->eqs);
 destroy_fatal_eq:
        mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+       mana_gd_put_gic(gc, false, 0);
        return err;
 }
 
 void mana_ib_destroy_eqs(struct mana_ib_dev *mdev)
 {
        struct gdma_context *gc = mdev_to_gc(mdev);
-       int i;
+       int i, msi;
 
        mana_gd_destroy_queue(gc, mdev->fatal_err_eq);
+       mana_gd_put_gic(gc, false, 0);
 
-       for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++)
+       for (i = 0; i < mdev->ib_dev.num_comp_vectors; i++) {
                mana_gd_destroy_queue(gc, mdev->eqs[i]);
+               msi = (i + 1) % gc->num_msix_usable;
+               mana_gd_put_gic(gc, false, msi);
+       }
 
        kfree(mdev->eqs);
 }
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 4e0278b00bbb..662e58f51e87 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -612,6 +612,7 @@ enum {
 #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3)
 #define GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB BIT(4)
 #define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5)
+#define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)
 
 /* Driver can handle holes (zeros) in the device list */
 #define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11)
@@ -628,7 +629,8 @@ enum {
 /* Driver detects stalled send queues and recovers them */
 #define GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY BIT(18)
 
-#define GDMA_DRV_CAP_FLAG_1_HW_VPORT_LINK_AWARE BIT(6)
+/* Driver supports separate EQ/MSIs for each vPort */
+#define GDMA_DRV_CAP_FLAG_1_EQ_MSI_UNSHARE_MULTI_VPORT BIT(19)
 
 /* Driver supports linearizing the skb when num_sge exceeds hardware limit */
 #define GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE BIT(20)
@@ -656,7 +658,8 @@ enum {
         GDMA_DRV_CAP_FLAG_1_SKB_LINEARIZE | \
         GDMA_DRV_CAP_FLAG_1_PROBE_RECOVERY | \
         GDMA_DRV_CAP_FLAG_1_HANDLE_STALL_SQ_RECOVERY | \
-        GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY)
+        GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECOVERY | \
+        GDMA_DRV_CAP_FLAG_1_EQ_MSI_UNSHARE_MULTI_VPORT)
 
 #define GDMA_DRV_CAP_FLAGS2 0
 
-- 
2.43.0


Reply via email to