From: Dave Ertman <david.m.ert...@intel.com>

Enable peer devices to request Tx RDMA queue sets (qsets) from the PF.

Add the Add Tx RDMA Queue Set admin queue command (0x0C33) along with
wrappers to enable and disable RDMA qsets in the Tx scheduler tree,
tracked under the new ICE_SCHED_NODE_OWNER_RDMA owner type. Implement
the alloc_res/free_res IIDC callbacks so a registered peer can allocate
and free RDMA qset leaf nodes, and track RDMA queue contexts per TC
alongside the existing LAN queue contexts.

Signed-off-by: Dave Ertman <david.m.ert...@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.ngu...@intel.com>
Tested-by: Andrew Bowers <andrewx.bow...@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirs...@intel.com>
---
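A minimal peer-side sketch of the new alloc_res flow, for illustration
only: example_request_one_qset() is a hypothetical helper, and it
assumes the PF's struct iidc_ops table was handed to the peer at probe
time.

    static int example_request_one_qset(struct iidc_peer_dev *peer_dev,
                                        const struct iidc_ops *ops)
    {
        struct iidc_res res = { 0 };
        int err;

        /* Request one Tx scheduler RDMA qset on TC 0 of the PF's VSI */
        res.res_type = IIDC_RDMA_QSETS_TXSCHED;
        res.cnt_req = 1;
        res.res[0].res.qsets.vsi_id = peer_dev->pf_vsi_num;
        res.res[0].res.qsets.tc = 0;
        res.res[0].res.qsets.qs_handle = 0; /* peer-chosen handle */

        /* partial allocation is not acceptable for a single qset */
        err = ops->alloc_res(peer_dev, &res, 0);
        if (err)
            return err;

        /* On success the PF has written the scheduler node TEID into
         * res.res[0].res.qsets.teid; the same qset params are later
         * passed back through ops->free_res() to release the leaf node.
         */
        return 0;
    }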
 drivers/net/ethernet/intel/ice/ice.h          |   1 +
 .../net/ethernet/intel/ice/ice_adminq_cmd.h   |  32 +++
 drivers/net/ethernet/intel/ice/ice_common.c   | 188 ++++++++++++++
 drivers/net/ethernet/intel/ice/ice_common.h   |   9 +
 drivers/net/ethernet/intel/ice/ice_idc.c      | 244 ++++++++++++++++++
 drivers/net/ethernet/intel/ice/ice_sched.c    |  69 ++++-
 drivers/net/ethernet/intel/ice/ice_switch.c   |   4 +
 drivers/net/ethernet/intel/ice/ice_switch.h   |   2 +
 drivers/net/ethernet/intel/ice/ice_type.h     |   3 +
 9 files changed, 547 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 73366009ef03..6ad1894eca3f 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -296,6 +296,7 @@ struct ice_vsi {
        u16 req_rxq;                     /* User requested Rx queues */
        u16 num_rx_desc;
        u16 num_tx_desc;
+       u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
        struct ice_tc_cfg tc_cfg;
        struct bpf_prog *xdp_prog;
        struct ice_ring **xdp_rings;     /* XDP ring array */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 51baab0621a2..a1066c4bf40d 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1536,6 +1536,36 @@ struct ice_aqc_dis_txq {
        struct ice_aqc_dis_txq_item qgrps[1];
 };
 
+/* Add Tx RDMA Queue Set (indirect 0x0C33) */
+struct ice_aqc_add_rdma_qset {
+       u8 num_qset_grps;
+       u8 reserved[7];
+       __le32 addr_high;
+       __le32 addr_low;
+};
+
+/* This is the descriptor of each qset entry for the Add Tx RDMA Queue Set
+ * command (0x0C33). Only used within struct ice_aqc_add_rdma_qset_data.
+ */
+struct ice_aqc_add_tx_rdma_qset_entry {
+       __le16 tx_qset_id;
+       u8 rsvd[2];
+       __le32 qset_teid;
+       struct ice_aqc_txsched_elem info;
+};
+
+/* The format of the command buffer for Add Tx RDMA Queue Set (0x0C33)
+ * is an array of the following structs. Please note that the length of
+ * each struct ice_aqc_add_rdma_qset_data is variable due to the variable
+ * number of qsets in each group!
+ */
+struct ice_aqc_add_rdma_qset_data {
+       __le32 parent_teid;
+       __le16 num_qsets;
+       u8 rsvd[2];
+       struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[1];
+};
+
 /* Configure Firmware Logging Command (indirect 0xFF09)
  * Logging Information Read Response (indirect 0xFF10)
  * Note: The 0xFF10 command has no input parameters.
@@ -1732,6 +1762,7 @@ struct ice_aq_desc {
                struct ice_aqc_get_set_rss_key get_set_rss_key;
                struct ice_aqc_add_txqs add_txqs;
                struct ice_aqc_dis_txqs dis_txqs;
+               struct ice_aqc_add_rdma_qset add_rdma_qset;
                struct ice_aqc_add_get_update_free_vsi vsi_cmd;
                struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
                struct ice_aqc_fw_logging fw_logging;
@@ -1867,6 +1898,7 @@ enum ice_adminq_opc {
        /* Tx queue handling commands/events */
        ice_aqc_opc_add_txqs                            = 0x0C30,
        ice_aqc_opc_dis_txqs                            = 0x0C31,
+       ice_aqc_opc_add_rdma_qset                       = 0x0C33,
 
        /* package commands */
        ice_aqc_opc_download_pkg                        = 0x0C40,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2dca49aed5bb..c760fae4aed4 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2917,6 +2917,59 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
        return status;
 }
 
+/**
+ * ice_aq_add_rdma_qsets
+ * @hw: pointer to the hardware structure
+ * @num_qset_grps: Number of RDMA Qset groups
+ * @qset_list: list of qset groups to be added
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add Tx RDMA Qsets (0x0C33)
+ */
+static enum ice_status
+ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
+                     struct ice_aqc_add_rdma_qset_data *qset_list,
+                     u16 buf_size, struct ice_sq_cd *cd)
+{
+       struct ice_aqc_add_rdma_qset_data *list;
+       u16 i, sum_header_size, sum_q_size = 0;
+       struct ice_aqc_add_rdma_qset *cmd;
+       struct ice_aq_desc desc;
+
+       cmd = &desc.params.add_rdma_qset;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
+
+       if (!qset_list)
+               return ICE_ERR_PARAM;
+
+       if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
+               return ICE_ERR_PARAM;
+
+       sum_header_size = num_qset_grps *
+               (sizeof(*qset_list) - sizeof(*qset_list->rdma_qsets));
+
+       list = qset_list;
+       for (i = 0; i < num_qset_grps; i++) {
+               struct ice_aqc_add_tx_rdma_qset_entry *qset = list->rdma_qsets;
+               u16 num_qsets = le16_to_cpu(list->num_qsets);
+
+               sum_q_size += num_qsets * sizeof(*qset);
+               list = (struct ice_aqc_add_rdma_qset_data *)
+                       (qset + num_qsets);
+       }
+
+       if (buf_size != (sum_header_size + sum_q_size))
+               return ICE_ERR_PARAM;
+
+       desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+       cmd->num_qset_grps = num_qset_grps;
+
+       return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
+}
+
 /* End of FW Admin Queue command wrappers */
 
 /**
@@ -3388,6 +3441,141 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
                              ICE_SCHED_NODE_OWNER_LAN);
 }
 
+/**
+ * ice_cfg_vsi_rdma - configure the VSI RDMA queues
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap
+ * @max_rdmaqs: max RDMA queues array per TC
+ *
+ * This function adds/updates the VSI RDMA queues per TC.
+ */
+enum ice_status
+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
+                u16 *max_rdmaqs)
+{
+       return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
+                             ICE_SCHED_NODE_OWNER_RDMA);
+}
+
+/**
+ * ice_ena_vsi_rdma_qset
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @rdma_qset: pointer to RDMA qset
+ * @num_qsets: number of RDMA qsets
+ * @qset_teid: pointer to qset node teids
+ *
+ * This function adds RDMA qsets for the given VSI and TC
+ */
+enum ice_status
+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+                     u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
+{
+       struct ice_aqc_txsched_elem_data node = { 0 };
+       struct ice_aqc_add_rdma_qset_data *buf;
+       struct ice_sched_node *parent;
+       enum ice_status status;
+       struct ice_hw *hw;
+       u16 i, buf_size;
+
+       if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+               return ICE_ERR_CFG;
+       hw = pi->hw;
+
+       if (!ice_is_vsi_valid(hw, vsi_handle))
+               return ICE_ERR_PARAM;
+
+       buf_size = struct_size(buf, rdma_qsets, num_qsets - 1);
+       buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!buf)
+               return ICE_ERR_NO_MEMORY;
+       mutex_lock(&pi->sched_lock);
+
+       parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
+                                           ICE_SCHED_NODE_OWNER_RDMA);
+       if (!parent) {
+               status = ICE_ERR_PARAM;
+               goto rdma_error_exit;
+       }
+       buf->parent_teid = parent->info.node_teid;
+       node.parent_teid = parent->info.node_teid;
+
+       buf->num_qsets = cpu_to_le16(num_qsets);
+       for (i = 0; i < num_qsets; i++) {
+               buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
+               buf->rdma_qsets[i].info.valid_sections =
+                                               ICE_AQC_ELEM_VALID_GENERIC;
+       }
+       status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
+       if (status) {
+               ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
+               goto rdma_error_exit;
+       }
+       node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+       for (i = 0; i < num_qsets; i++) {
+               node.node_teid = buf->rdma_qsets[i].qset_teid;
+               status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
+                                           &node);
+               if (status)
+                       break;
+               qset_teid[i] = le32_to_cpu(node.node_teid);
+       }
+rdma_error_exit:
+       mutex_unlock(&pi->sched_lock);
+       kfree(buf);
+       return status;
+}
+
+/**
+ * ice_dis_vsi_rdma_qset - free RDMA resources
+ * @pi: port_info struct
+ * @count: number of RDMA qsets to free
+ * @qset_teid: TEID of qset node
+ * @q_id: list of queue IDs being disabled
+ */
+enum ice_status
+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
+                     u16 *q_id)
+{
+       struct ice_aqc_dis_txq_item qg_list;
+       enum ice_status status = 0;
+       u16 qg_size;
+       int i;
+
+       if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+               return ICE_ERR_CFG;
+
+       qg_size = sizeof(qg_list);
+
+       mutex_lock(&pi->sched_lock);
+
+       for (i = 0; i < count; i++) {
+               struct ice_sched_node *node;
+
+               node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
+               if (!node)
+                       continue;
+
+               qg_list.parent_teid = node->info.parent_teid;
+               qg_list.num_qs = 1;
+               qg_list.q_id[0] =
+                       cpu_to_le16(q_id[i] |
+                                   ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
+
+               status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, qg_size,
+                                           ICE_NO_RESET, 0, NULL);
+               if (status)
+                       break;
+
+               ice_free_sched_node(pi, node);
+       }
+
+       mutex_unlock(&pi->sched_lock);
+       return status;
+}
+
 /**
  * ice_replay_pre_init - replay pre initialization
  * @hw: pointer to the HW struct
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 8104f3d64d96..db63fd6b5608 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -125,6 +125,15 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
                  bool write, struct ice_sq_cd *cd);
 
 enum ice_status
+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
+                u16 *max_rdmaqs);
+enum ice_status
+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+                     u16 *rdma_qset, u16 num_qsets, u32 *qset_teid);
+enum ice_status
+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
+                     u16 *q_id);
+enum ice_status
 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
                u16 *q_handle, u16 *q_ids, u32 *q_teids,
                enum ice_disq_rst_src rst_src, u16 vmvf_num,
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 499c1b77dfc9..05fa5c61e2d3 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -388,6 +388,248 @@ ice_unroll_peer(struct ice_peer_dev_int *peer_dev_int,
        return 0;
 }
 
+/**
+ * ice_find_vsi - Find the VSI from VSI ID
+ * @pf: The PF pointer to search in
+ * @vsi_num: The VSI ID to search for
+ */
+static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
+{
+       int i;
+
+       ice_for_each_vsi(pf, i)
+               if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
+                       return pf->vsi[i];
+       return NULL;
+}
+
+/**
+ * ice_peer_alloc_rdma_qsets - Allocate Leaf Nodes for RDMA Qset
+ * @peer_dev: peer that is requesting the Leaf Nodes
+ * @res: Resources to be allocated
+ * @partial_acceptable: If partial allocation is acceptable to the peer
+ *
+ * This function allocates Leaf Nodes for given RDMA Qset resources
+ * for the peer device.
+ */
+static int
+ice_peer_alloc_rdma_qsets(struct iidc_peer_dev *peer_dev, struct iidc_res *res,
+                         int __always_unused partial_acceptable)
+{
+       u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
+       enum ice_status status;
+       struct ice_vsi *vsi;
+       struct device *dev;
+       struct ice_pf *pf;
+       int i, ret = 0;
+       u32 *qset_teid;
+       u16 *qs_handle;
+
+       if (!ice_validate_peer_dev(peer_dev) || !res)
+               return -EINVAL;
+
+       pf = pci_get_drvdata(peer_dev->pdev);
+       dev = ice_pf_to_dev(pf);
+
+       if (res->cnt_req > ICE_MAX_TXQ_PER_TXQG)
+               return -EINVAL;
+
+       qset_teid = kcalloc(res->cnt_req, sizeof(*qset_teid), GFP_KERNEL);
+       if (!qset_teid)
+               return -ENOMEM;
+
+       qs_handle = kcalloc(res->cnt_req, sizeof(*qs_handle), GFP_KERNEL);
+       if (!qs_handle) {
+               kfree(qset_teid);
+               return -ENOMEM;
+       }
+
+       ice_for_each_traffic_class(i)
+               max_rdmaqs[i] = 0;
+
+       for (i = 0; i < res->cnt_req; i++) {
+               struct iidc_rdma_qset_params *qset;
+
+               qset = &res->res[i].res.qsets;
+               if (qset->vsi_id != peer_dev->pf_vsi_num) {
+                       dev_err(dev, "RDMA QSet invalid VSI requested\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+               max_rdmaqs[qset->tc]++;
+               qs_handle[i] = qset->qs_handle;
+       }
+
+       vsi = ice_find_vsi(pf, peer_dev->pf_vsi_num);
+       if (!vsi) {
+               dev_err(dev, "RDMA QSet invalid VSI\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+                                 max_rdmaqs);
+       if (status) {
+               dev_err(dev, "Failed VSI RDMA qset config\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       for (i = 0; i < res->cnt_req; i++) {
+               struct iidc_rdma_qset_params *qset;
+
+               qset = &res->res[i].res.qsets;
+               status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx,
+                                              qset->tc, &qs_handle[i], 1,
+                                              &qset_teid[i]);
+               if (status) {
+                       dev_err(dev, "Failed VSI RDMA qset enable\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+               vsi->qset_handle[qset->tc] = qset->qs_handle;
+               qset->teid = qset_teid[i];
+       }
+
+out:
+       kfree(qset_teid);
+       kfree(qs_handle);
+       return ret;
+}
+
+/**
+ * ice_peer_free_rdma_qsets - Free leaf nodes for RDMA Qset
+ * @peer_dev: peer that requested qsets to be freed
+ * @res: Resource to be freed
+ */
+static int
+ice_peer_free_rdma_qsets(struct iidc_peer_dev *peer_dev, struct iidc_res *res)
+{
+       enum ice_status status;
+       int count, i, ret = 0;
+       struct ice_vsi *vsi;
+       struct device *dev;
+       struct ice_pf *pf;
+       u16 vsi_id;
+       u32 *teid;
+       u16 *q_id;
+
+       if (!ice_validate_peer_dev(peer_dev) || !res)
+               return -EINVAL;
+
+       pf = pci_get_drvdata(peer_dev->pdev);
+       dev = ice_pf_to_dev(pf);
+
+       count = res->res_allocated;
+       if (count > ICE_MAX_TXQ_PER_TXQG)
+               return -EINVAL;
+
+       teid = kcalloc(count, sizeof(*teid), GFP_KERNEL);
+       if (!teid)
+               return -ENOMEM;
+
+       q_id = kcalloc(count, sizeof(*q_id), GFP_KERNEL);
+       if (!q_id) {
+               kfree(teid);
+               return -ENOMEM;
+       }
+
+       vsi_id = res->res[0].res.qsets.vsi_id;
+       vsi = ice_find_vsi(pf, vsi_id);
+       if (!vsi) {
+               dev_err(dev, "RDMA Invalid VSI\n");
+               ret = -EINVAL;
+               goto rdma_free_out;
+       }
+
+       for (i = 0; i < count; i++) {
+               struct iidc_rdma_qset_params *qset;
+
+               qset = &res->res[i].res.qsets;
+               if (qset->vsi_id != vsi_id) {
+                       dev_err(dev, "RDMA Invalid VSI ID\n");
+                       ret = -EINVAL;
+                       goto rdma_free_out;
+               }
+               q_id[i] = qset->qs_handle;
+               teid[i] = qset->teid;
+
+               vsi->qset_handle[qset->tc] = 0;
+       }
+
+       status = ice_dis_vsi_rdma_qset(vsi->port_info, count, teid, q_id);
+       if (status)
+               ret = -EINVAL;
+
+rdma_free_out:
+       kfree(teid);
+       kfree(q_id);
+
+       return ret;
+}
+
+/**
+ * ice_peer_alloc_res - Allocate requested resources for peer device
+ * @peer_dev: peer that is requesting resources
+ * @res: Resources to be allocated
+ * @partial_acceptable: If partial allocation is acceptable to the peer
+ *
+ * This function allocates requested resources for the peer device.
+ */
+static int
+ice_peer_alloc_res(struct iidc_peer_dev *peer_dev, struct iidc_res *res,
+                  int partial_acceptable)
+{
+       struct ice_pf *pf;
+       int ret;
+
+       if (!ice_validate_peer_dev(peer_dev) || !res)
+               return -EINVAL;
+
+       pf = pci_get_drvdata(peer_dev->pdev);
+       if (!ice_pf_state_is_nominal(pf))
+               return -EBUSY;
+
+       switch (res->res_type) {
+       case IIDC_RDMA_QSETS_TXSCHED:
+               ret = ice_peer_alloc_rdma_qsets(peer_dev, res,
+                                               partial_acceptable);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * ice_peer_free_res - Free given resources
+ * @peer_dev: peer that is requesting freeing of resources
+ * @res: Resources to be freed
+ *
+ * Free/Release resources allocated to given peer device.
+ */
+static int
+ice_peer_free_res(struct iidc_peer_dev *peer_dev, struct iidc_res *res)
+{
+       int ret;
+
+       if (!ice_validate_peer_dev(peer_dev) || !res)
+               return -EINVAL;
+
+       switch (res->res_type) {
+       case IIDC_RDMA_QSETS_TXSCHED:
+               ret = ice_peer_free_rdma_qsets(peer_dev, res);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
 /**
  * ice_peer_unregister - request to unregister peer
  * @peer_dev: peer device
@@ -511,6 +753,8 @@ ice_peer_update_vsi_filter(struct iidc_peer_dev *peer_dev,
 
 /* Initialize the ice_ops struct, which is used in 'ice_init_peer_devices' */
 static const struct iidc_ops ops = {
+       .alloc_res                      = ice_peer_alloc_res,
+       .free_res                       = ice_peer_free_res,
        .peer_register                  = ice_peer_register,
        .peer_unregister                = ice_peer_unregister,
        .update_vsi_filter              = ice_peer_update_vsi_filter,
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index eae707ddf8e8..2f618d051b56 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -577,6 +577,50 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
        return 0;
 }
 
+/**
+ * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+       struct ice_vsi_ctx *vsi_ctx;
+       struct ice_q_ctx *q_ctx;
+
+       vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+       if (!vsi_ctx)
+               return ICE_ERR_PARAM;
+       /* allocate RDMA queue contexts */
+       if (!vsi_ctx->rdma_q_ctx[tc]) {
+               vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
+                                                      new_numqs,
+                                                      sizeof(*q_ctx),
+                                                      GFP_KERNEL);
+               if (!vsi_ctx->rdma_q_ctx[tc])
+                       return ICE_ERR_NO_MEMORY;
+               vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
+               return 0;
+       }
+       /* number of queues increased; update the queue contexts */
+       if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
+               u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];
+
+               q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+                                    sizeof(*q_ctx), GFP_KERNEL);
+               if (!q_ctx)
+                       return ICE_ERR_NO_MEMORY;
+               memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
+                      prev_num * sizeof(*q_ctx));
+               devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
+               vsi_ctx->rdma_q_ctx[tc] = q_ctx;
+               vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
+       }
+       return 0;
+}
+
 /**
  * ice_aq_rl_profile - performs a rate limiting task
  * @hw: pointer to the HW struct
@@ -1599,13 +1643,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
        if (!vsi_ctx)
                return ICE_ERR_PARAM;
 
-       prev_numqs = vsi_ctx->sched.max_lanq[tc];
+       if (owner == ICE_SCHED_NODE_OWNER_LAN)
+               prev_numqs = vsi_ctx->sched.max_lanq[tc];
+       else
+               prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
        /* num queues are not changed or less than the previous number */
        if (new_numqs <= prev_numqs)
                return status;
-       status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
-       if (status)
-               return status;
+       if (owner == ICE_SCHED_NODE_OWNER_LAN) {
+               status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+               if (status)
+                       return status;
+       } else {
+               status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
+               if (status)
+                       return status;
+       }
 
        if (new_numqs)
                ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
@@ -1620,7 +1673,10 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
                                               new_num_nodes, owner);
        if (status)
                return status;
-       vsi_ctx->sched.max_lanq[tc] = new_numqs;
+       if (owner == ICE_SCHED_NODE_OWNER_LAN)
+               vsi_ctx->sched.max_lanq[tc] = new_numqs;
+       else
+               vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
 
        return 0;
 }
@@ -1686,6 +1742,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
                 * recreate the child nodes all the time in these cases.
                 */
                vsi_ctx->sched.max_lanq[tc] = 0;
+               vsi_ctx->sched.max_rdmaq[tc] = 0;
        }
 
        /* update the VSI child nodes */
@@ -1817,6 +1874,8 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
                }
                if (owner == ICE_SCHED_NODE_OWNER_LAN)
                        vsi_ctx->sched.max_lanq[i] = 0;
+               else
+                       vsi_ctx->sched.max_rdmaq[i] = 0;
        }
        status = 0;
 
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index cf8e1553599a..eeb1b0e6f716 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -310,6 +310,10 @@ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
                        devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
                        vsi->lan_q_ctx[i] = NULL;
                }
+               if (vsi->rdma_q_ctx[i]) {
+                       devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
+                       vsi->rdma_q_ctx[i] = NULL;
+               }
        }
 }
 
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 96010d3d96fd..acd2f150c30b 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -26,6 +26,8 @@ struct ice_vsi_ctx {
        u8 vf_num;
        u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
        struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
+       u16 num_rdma_q_entries[ICE_MAX_TRAFFIC_CLASS];
+       struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];
 };
 
 enum ice_sw_fwd_act_type {
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 42b2d700bc1f..3ada92536540 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -45,6 +45,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
 #define ICE_DBG_FLOW           BIT_ULL(9)
 #define ICE_DBG_SW             BIT_ULL(13)
 #define ICE_DBG_SCHED          BIT_ULL(14)
+#define ICE_DBG_RDMA           BIT_ULL(15)
 #define ICE_DBG_PKG            BIT_ULL(16)
 #define ICE_DBG_RES            BIT_ULL(17)
 #define ICE_DBG_AQ_MSG         BIT_ULL(24)
@@ -282,6 +283,7 @@ struct ice_sched_node {
        u8 tc_num;
        u8 owner;
 #define ICE_SCHED_NODE_OWNER_LAN       0
+#define ICE_SCHED_NODE_OWNER_RDMA      2
 };
 
 /* Access Macros for Tx Sched Elements data */
@@ -353,6 +355,7 @@ struct ice_sched_vsi_info {
        struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
        struct list_head list_entry;
        u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
+       u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
 };
 
 /* driver defines the policy */
-- 
2.26.2
