This patch adds some code while also reorganizing some of the existing code
to configure resources for a PCI function. Key changes in the patch:
1) Move the function reset HWRM call to init path from dev_start path.
2) Eliminate unused bnxt_vf_info structure.
3) Set aside resources like Tx, Rx rings, MAC address count, stats context
etc. for VFs
4) Fix PF resource allocation when there are no VFs.

Signed-off-by: Stephen Hurd <stephen.h...@broadcom.com>
Signed-off-by: Ajit Khaparde <ajit.khapa...@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |  34 ++---
 drivers/net/bnxt/bnxt_ethdev.c |  65 ++++++---
 drivers/net/bnxt/bnxt_filter.c |  30 +---
 drivers/net/bnxt/bnxt_hwrm.c   | 324 +++++++++++++++++++++++++++++++++++++----
 drivers/net/bnxt/bnxt_hwrm.h   |   2 +
 drivers/net/bnxt/bnxt_vnic.c   |  40 +----
 6 files changed, 355 insertions(+), 140 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 4418c7f..7fa0d8a 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -54,41 +54,20 @@ enum bnxt_hw_context {
        HW_CONTEXT_IS_LB    = 3,
 };
 
-struct bnxt_vf_info {
-       uint16_t                fw_fid;
-       uint8_t                 mac_addr[ETHER_ADDR_LEN];
-       uint16_t                max_rsscos_ctx;
-       uint16_t                max_cp_rings;
-       uint16_t                max_tx_rings;
-       uint16_t                max_rx_rings;
-       uint16_t                max_l2_ctx;
-       uint16_t                max_vnics;
-       uint16_t                vlan;
-       struct bnxt_pf_info     *pf;
-};
-
 struct bnxt_pf_info {
 #define BNXT_FIRST_PF_FID      1
 #define BNXT_MAX_VFS(bp)       (bp->pf.max_vfs)
 #define BNXT_FIRST_VF_FID      128
 #define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
 #define BNXT_PF_RINGS_AVAIL(bp)        (bp->pf.max_cp_rings - 
BNXT_PF_RINGS_USED(bp))
-       uint32_t                fw_fid;
+
        uint8_t                 port_id;
-       uint8_t                 mac_addr[ETHER_ADDR_LEN];
-       uint16_t                max_rsscos_ctx;
-       uint16_t                max_cp_rings;
-       uint16_t                max_tx_rings;
-       uint16_t                max_rx_rings;
-       uint16_t                max_l2_ctx;
-       uint16_t                max_vnics;
        uint16_t                first_vf_id;
        uint16_t                active_vfs;
        uint16_t                max_vfs;
        void                    *vf_req_buf;
        phys_addr_t             vf_req_buf_dma_addr;
        uint32_t                vf_req_fwd[8];
-       struct bnxt_vf_info     *vf;
 };
 
 /* Max wait time is 10 * 100ms = 1s */
@@ -174,8 +153,17 @@ struct bnxt {
        struct bnxt_link_info   link_info;
        struct bnxt_cos_queue_info      cos_queue[BNXT_COS_QUEUE_COUNT];
 
+       uint16_t                fw_fid;
+       uint8_t                 dflt_mac_addr[ETHER_ADDR_LEN];
+       uint16_t                max_rsscos_ctx;
+       uint16_t                max_cp_rings;
+       uint16_t                max_tx_rings;
+       uint16_t                max_rx_rings;
+       uint16_t                max_l2_ctx;
+       uint16_t                max_vnics;
+       uint16_t                max_stat_ctx;
+       uint16_t                vlan;
        struct bnxt_pf_info             pf;
-       struct bnxt_vf_info             vf;
        uint8_t                 port_partition_type;
        uint8_t                 dev_stopped;
 };
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 6167443..667ba12 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -340,18 +340,12 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev 
*eth_dev,
        dev_info->max_hash_mac_addrs = 0;
 
        /* PF/VF specifics */
-       if (BNXT_PF(bp)) {
-               dev_info->max_rx_queues = bp->pf.max_rx_rings;
-               dev_info->max_tx_queues = bp->pf.max_tx_rings;
-               dev_info->max_vfs = bp->pf.active_vfs;
-               dev_info->reta_size = bp->pf.max_rsscos_ctx;
-               max_vnics = bp->pf.max_vnics;
-       } else {
-               dev_info->max_rx_queues = bp->vf.max_rx_rings;
-               dev_info->max_tx_queues = bp->vf.max_tx_rings;
-               dev_info->reta_size = bp->vf.max_rsscos_ctx;
-               max_vnics = bp->vf.max_vnics;
-       }
+       if (BNXT_PF(bp))
+               dev_info->max_vfs = bp->pf.max_vfs;
+       dev_info->max_rx_queues = bp->max_rx_rings;
+       dev_info->max_tx_queues = bp->max_tx_rings;
+       dev_info->reta_size = bp->max_rsscos_ctx;
+       max_vnics = bp->max_vnics;
 
        /* Fast path specifics */
        dev_info->min_rx_bufsize = 1;
@@ -483,12 +477,6 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
        int rc;
 
        bp->dev_stopped = 0;
-       rc = bnxt_hwrm_func_reset(bp);
-       if (rc) {
-               RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
-               rc = -1;
-               goto error;
-       }
 
        rc = bnxt_setup_int(bp);
        if (rc)
@@ -1091,6 +1079,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 
        bp = eth_dev->data->dev_private;
 
+       bp->dev_stopped = 1;
+
        if (bnxt_vf_pciid(pci_dev->id.device_id))
                bp->flags |= BNXT_FLAG_VF;
 
@@ -1123,6 +1113,11 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
                RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
                goto error_free;
        }
+       if (bp->max_tx_rings == 0) {
+               RTE_LOG(ERR, PMD, "No TX rings available!\n");
+               rc = -EBUSY;
+               goto error_free;
+       }
        eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
                                        ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
        if (eth_dev->data->mac_addrs == NULL) {
@@ -1133,10 +1128,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
                goto error_free;
        }
        /* Copy the permanent MAC from the qcap response address now. */
-       if (BNXT_PF(bp))
-               memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
-       else
-               memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
+       memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
        memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
        bp->grp_info = rte_zmalloc("bnxt_grp_info",
                                sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
@@ -1162,7 +1154,34 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
                pci_dev->mem_resource[0].phys_addr,
                pci_dev->mem_resource[0].addr);
 
-       bp->dev_stopped = 0;
+       rc = bnxt_hwrm_func_reset(bp);
+       if (rc) {
+               RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
+               rc = -1;
+               goto error_free;
+       }
+
+       if (BNXT_PF(bp)) {
+               if (bp->pf.active_vfs) {
+                       RTE_LOG(ERR, PMD, "PF has %d active VFs\n",
+                               bp->pf.active_vfs);
+                       // TODO: Deallocate VF resources?
+               }
+               if (bp->pdev->max_vfs) {
+                       rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
+                       if (rc) {
+                               RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
+                               goto error_free;
+                       }
+               } else {
+                       rc = bnxt_hwrm_allocate_pf_only(bp);
+                       if (rc) {
+                               RTE_LOG(ERR, PMD,
+                                       "Failed to allocate PF resources\n");
+                               goto error_free;
+                       }
+               }
+       }
 
        return 0;
 
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index df1042c..137c7b7 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -73,15 +73,7 @@ void bnxt_init_filters(struct bnxt *bp)
        struct bnxt_filter_info *filter;
        int i, max_filters;
 
-       if (BNXT_PF(bp)) {
-               struct bnxt_pf_info *pf = &bp->pf;
-
-               max_filters = pf->max_l2_ctx;
-       } else {
-               struct bnxt_vf_info *vf = &bp->vf;
-
-               max_filters = vf->max_l2_ctx;
-       }
+       max_filters = bp->max_l2_ctx;
        STAILQ_INIT(&bp->free_filter_list);
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
@@ -122,15 +114,7 @@ void bnxt_free_filter_mem(struct bnxt *bp)
                return;
 
        /* Ensure that all filters are freed */
-       if (BNXT_PF(bp)) {
-               struct bnxt_pf_info *pf = &bp->pf;
-
-               max_filters = pf->max_l2_ctx;
-       } else {
-               struct bnxt_vf_info *vf = &bp->vf;
-
-               max_filters = vf->max_l2_ctx;
-       }
+       max_filters = bp->max_l2_ctx;
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
@@ -155,15 +139,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
        struct bnxt_filter_info *filter_mem;
        uint16_t max_filters;
 
-       if (BNXT_PF(bp)) {
-               struct bnxt_pf_info *pf = &bp->pf;
-
-               max_filters = pf->max_l2_ctx;
-       } else {
-               struct bnxt_vf_info *vf = &bp->vf;
-
-               max_filters = vf->max_l2_ctx;
-       }
+       max_filters = bp->max_l2_ctx;
        /* Allocate memory for VNIC pool and filter pool */
        filter_mem = rte_zmalloc("bnxt_filter_info",
                                 max_filters * sizeof(struct bnxt_filter_info),
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 3849d1a..2a24610 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -286,32 +286,21 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 
        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
-               struct bnxt_pf_info *pf = &bp->pf;
-
-               pf->fw_fid = rte_le_to_cpu_32(resp->fid);
-               pf->port_id = resp->port_id;
-               memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
-               pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
-               pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
-               pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
-               pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
-               pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-               pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
-               pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
-               pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
-       } else {
-               struct bnxt_vf_info *vf = &bp->vf;
-
-               vf->fw_fid = rte_le_to_cpu_32(resp->fid);
-               memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
-               vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
-               vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
-               vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
-               vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
-               vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-               vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+               bp->pf.port_id = resp->port_id;
+               bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+               bp->pf.max_vfs = rte_le_to_cpu_16(resp->max_vfs);
        }
 
+       bp->fw_fid = rte_le_to_cpu_32(resp->fid);
+       memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
+       bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+       bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+       bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+       bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+       bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+       bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+       bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+
        return rc;
 }
 
@@ -345,7 +334,8 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp, 
uint32_t flags,
        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.flags = flags;
        req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
-                       HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
+                       HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD |
+                       HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD;
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;
@@ -1512,12 +1502,10 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 
        HWRM_CHECK_RESULT;
 
-       if (BNXT_VF(bp)) {
-               struct bnxt_vf_info *vf = &bp->vf;
-
-               /* Hard Coded.. 0xfff VLAN ID mask */
-               vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
-       }
+       /* Hard Coded.. 0xfff VLAN ID mask */
+       bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+       if (BNXT_PF(bp))
+               bp->pf.active_vfs = rte_le_to_cpu_16(resp->alloc_vfs);
 
        switch (resp->port_partition_type) {
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
@@ -1532,3 +1520,277 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 
        return rc;
 }
+
+static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
+                                  struct hwrm_func_qcaps_output *qcaps)
+{
+       qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
+       memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
+              sizeof(qcaps->mac_address));
+       qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
+       qcaps->max_rx_rings = fcfg->num_rx_rings;
+       qcaps->max_tx_rings = fcfg->num_tx_rings;
+       qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
+       qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
+       qcaps->max_vfs = 0;
+       qcaps->first_vf_id = 0;
+       qcaps->max_vnics = fcfg->num_vnics;
+       qcaps->max_decap_records = 0;
+       qcaps->max_encap_records = 0;
+       qcaps->max_tx_wm_flows = 0;
+       qcaps->max_tx_em_flows = 0;
+       qcaps->max_rx_wm_flows = 0;
+       qcaps->max_rx_em_flows = 0;
+       qcaps->max_flow_id = 0;
+       qcaps->max_mcast_filters = fcfg->num_mcast_filters;
+       qcaps->max_sp_tx_rings = 0;
+       qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
+}
+
+static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings, bool std_mode)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+       if (std_mode)
+               req.flags = rte_cpu_to_le_32(
+                       HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE);
+       req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu +
+                               ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE);
+       req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu +
+                               ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE);
+       req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
+       req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
+       req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
+       req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
+       req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
+       req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
+       req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
+       req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
+       req.fid = rte_cpu_to_le_16(0xffff);
+
+       HWRM_PREP(req, FUNC_CFG, -1, resp);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       return rc;
+}
+
+static void populate_vf_func_cfg_req(struct bnxt *bp,
+                                    struct hwrm_func_cfg_input *req,
+                                    int num_vfs)
+{
+       req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
+                       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+
+       req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu +
+                               ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE);
+       req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu +
+                               ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE);
+       req->num_rsscos_ctxs =
+               rte_cpu_to_le_16(bp->max_rsscos_ctx / (num_vfs + 1));
+       req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
+       req->num_cmpl_rings =
+               rte_cpu_to_le_16(bp->max_cp_rings / (num_vfs + 1));
+       req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
+       req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
+       req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
+       req->num_vnics = rte_cpu_to_le_16(bp->max_vnics / (num_vfs + 1));
+       req->num_hw_ring_grps =
+               rte_cpu_to_le_16(bp->max_ring_grps / (num_vfs + 1));
+}
+
+static void add_random_mac_if_needed(struct bnxt *bp,
+                                    struct hwrm_func_cfg_input *cfg_req,
+                                    int vf)
+{
+       struct hwrm_func_qcfg_input req = {0};
+       struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       /* Check for zero MAC address */
+       HWRM_PREP(req, FUNC_QCFG, -1, resp);
+       req.fid = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       if (rc) {
+               RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+       } else if (resp->error_code) {
+               rc = rte_le_to_cpu_16(resp->error_code);
+               RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+       } else {
+               if (memcmp(resp->mac_address,
+                          "\x00\x00\x00\x00\x00", 6) == 0) {
+                       cfg_req->enables |= rte_cpu_to_le_32(
+                               HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+                       eth_random_addr(cfg_req->dflt_mac_addr);
+               } else {
+                       memcpy(cfg_req->dflt_mac_addr,
+                              resp->mac_address, sizeof(resp->mac_address));
+               }
+       }
+}
+
+static void reserve_resources_from_vf(struct bnxt *bp,
+                                     struct hwrm_func_cfg_input *cfg_req,
+                                     int vf)
+{
+       struct hwrm_func_qcaps_input req = {0};
+       struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       /* Get the actual allocated values now */
+       HWRM_PREP(req, FUNC_QCAPS, -1, resp);
+       req.fid = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       if (rc) {
+               RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
+               copy_func_cfg_to_qcaps(cfg_req, resp);
+       } else if (resp->error_code) {
+               rc = rte_le_to_cpu_16(resp->error_code);
+               RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
+               copy_func_cfg_to_qcaps(cfg_req, resp);
+       }
+
+       bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
+       bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
+       bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
+       bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
+       bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
+       bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
+       bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
+       bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
+}
+
+static int update_pf_resource_max(struct bnxt *bp)
+{
+       struct hwrm_func_qcfg_input req = {0};
+       struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       int rc;
+
+       /* And copy the allocated numbers into the pf struct */
+       HWRM_PREP(req, FUNC_QCFG, -1, resp);
+       req.fid = rte_cpu_to_le_16(0xffff);
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+       HWRM_CHECK_RESULT;
+
+       bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
+       bp->pf.active_vfs = rte_le_to_cpu_16(resp->alloc_vfs);
+       /* TODO: Only TX ring value reflects actual allocation */
+       //bp->max_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
+       //bp->max_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
+       //bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
+       //bp->max_vnics = rte_le_to_cpu_16(resp->alloc_vnics);
+       //bp->max_l2_ctx = rte_le_to_cpu_16(resp->alloc_l2_ctx);
+       //bp->max_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
+
+       return rc;
+}
+
+int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
+{
+       int rc;
+
+       if (!BNXT_PF(bp)) {
+               RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
+               return -1;
+       }
+
+       rc = bnxt_hwrm_func_qcaps(bp);
+       if (rc)
+               return rc;
+
+       rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings, false);
+       return rc;
+}
+
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
+{
+       struct hwrm_func_cfg_input req = {0};
+       struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       int i;
+       int rc = 0;
+
+       if (!BNXT_PF(bp)) {
+               RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
+               return -1;
+       }
+
+       rc = bnxt_hwrm_func_qcaps(bp);
+       if (rc)
+               return rc;
+
+       bp->pf.active_vfs = 0;
+
+       /*
+        * First, configure the PF to only use one TX ring.  This ensures that
+        * there are enough rings for all VFs.
+        *
+        * If we don't do this, when we call func_alloc() later, we will lock
+        * extra rings to the PF that won't be available during func_cfg() of
+        * the VFs.
+        *
+        * This has been fixed with firmware versions above 20.6.54
+        */
+       rc = bnxt_hwrm_pf_func_cfg(bp, 1, true);
+       if (rc)
+               return rc;
+
+       populate_vf_func_cfg_req(bp, &req, num_vfs);
+
+       for (i = 0; i < num_vfs; i++) {
+               add_random_mac_if_needed(bp, &req, i);
+
+               HWRM_PREP(req, FUNC_CFG, -1, resp);
+               req.fid = rte_cpu_to_le_16(bp->pf.first_vf_id + i);
+               rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+               /* Clear enable flag for next pass */
+               req.enables &= ~rte_cpu_to_le_32(
+                               HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
+
+               if (rc || resp->error_code) {
+                       RTE_LOG(ERR, PMD,
+                               "Failed to initialize VF %d.\n", i);
+                       RTE_LOG(ERR, PMD,
+                               "Not all VFs available.\n");
+                       break;
+               }
+
+               reserve_resources_from_vf(bp, &req, i);
+       }
+
+       /*
+        * Now configure the PF to use "the rest" of the resources
+        * We're using STD_TX_RING_MODE here though which will limit the TX
+        * rings.  This will allow QoS to function properly.  Not setting this
+        * will cause PF rings to break bandwidth settings.
+        */
+       rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings, true);
+       if (rc)
+               return rc;
+
+       rc = update_pf_resource_max(bp);
+
+       return rc;
+}
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 6519ef2..1639e84 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -101,5 +101,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp);
 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
 int bnxt_hwrm_func_qcfg(struct bnxt *bp);
+int bnxt_hwrm_allocate_pf_only(struct bnxt *bp);
+int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs);
 
 #endif
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 33fdde2..139e4bf 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -69,15 +69,7 @@ void bnxt_init_vnics(struct bnxt *bp)
        uint16_t max_vnics;
        int i, j;
 
-       if (BNXT_PF(bp)) {
-               struct bnxt_pf_info *pf = &bp->pf;
-
-               max_vnics = pf->max_vnics;
-       } else {
-               struct bnxt_vf_info *vf = &bp->vf;
-
-               max_vnics = vf->max_vnics;
-       }
+       max_vnics = bp->max_vnics;
        STAILQ_INIT(&bp->free_vnic_list);
        for (i = 0; i < max_vnics; i++) {
                vnic = &bp->vnic_info[i];
@@ -181,15 +173,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
        uint16_t max_vnics;
        int i;
 
-       if (BNXT_PF(bp)) {
-               struct bnxt_pf_info *pf = &bp->pf;
-
-               max_vnics = pf->max_vnics;
-       } else {
-               struct bnxt_vf_info *vf = &bp->vf;
-
-               max_vnics = vf->max_vnics;
-       }
+       max_vnics = bp->max_vnics;
        snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
                 "bnxt_%04x:%02x:%02x:%02x_vnicattr", pdev->addr.domain,
                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
@@ -232,15 +216,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
        if (bp->vnic_info == NULL)
                return;
 
-       if (BNXT_PF(bp)) {
-               struct bnxt_pf_info *pf = &bp->pf;
-
-               max_vnics = pf->max_vnics;
-       } else {
-               struct bnxt_vf_info *vf = &bp->vf;
-
-               max_vnics = vf->max_vnics;
-       }
+       max_vnics = bp->max_vnics;
        for (i = 0; i < max_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
@@ -258,15 +234,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
        struct bnxt_vnic_info *vnic_mem;
        uint16_t max_vnics;
 
-       if (BNXT_PF(bp)) {
-               struct bnxt_pf_info *pf = &bp->pf;
-
-               max_vnics = pf->max_vnics;
-       } else {
-               struct bnxt_vf_info *vf = &bp->vf;
-
-               max_vnics = vf->max_vnics;
-       }
+       max_vnics = bp->max_vnics;
        /* Allocate memory for VNIC pool and filter pool */
        vnic_mem = rte_zmalloc("bnxt_vnic_info",
                               max_vnics * sizeof(struct bnxt_vnic_info), 0);
-- 
2.10.1 (Apple Git-78)

Reply via email to