Refactor code to support the P7 device family.
The changes include support for RSS, VNIC allocation and TPA.
Remove an unnecessary check that disabled vector mode support
for some device families.

Signed-off-by: Ajit Khaparde <ajit.khapa...@broadcom.com>
---
 drivers/net/bnxt/bnxt.h        |  6 +++---
 drivers/net/bnxt/bnxt_ethdev.c | 29 +++++++++--------------------
 drivers/net/bnxt/bnxt_flow.c   |  2 +-
 drivers/net/bnxt/bnxt_hwrm.c   | 26 ++++++++++++++------------
 drivers/net/bnxt/bnxt_ring.c   |  6 +++---
 drivers/net/bnxt/bnxt_rxq.c    |  2 +-
 drivers/net/bnxt/bnxt_rxr.c    |  6 +++---
 drivers/net/bnxt/bnxt_vnic.c   |  6 +++---
 8 files changed, 37 insertions(+), 46 deletions(-)

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 3a1d8a6ff6..7439ecf4fa 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -107,11 +107,11 @@
 #define TPA_MAX_SEGS           5 /* 32 segments in log2 units */
 
 #define BNXT_TPA_MAX_AGGS(bp) \
-       (BNXT_CHIP_P5(bp) ? TPA_MAX_AGGS_TH : \
+       (BNXT_CHIP_P5_P7(bp) ? TPA_MAX_AGGS_TH : \
                             TPA_MAX_AGGS)
 
 #define BNXT_TPA_MAX_SEGS(bp) \
-       (BNXT_CHIP_P5(bp) ? TPA_MAX_SEGS_TH : \
+       (BNXT_CHIP_P5_P7(bp) ? TPA_MAX_SEGS_TH : \
                              TPA_MAX_SEGS)
 
 /*
@@ -938,7 +938,7 @@ inline uint16_t bnxt_max_rings(struct bnxt *bp)
         * RSS table size in P5 is 512.
         * Cap max Rx rings to the same value for RSS.
         */
-       if (BNXT_CHIP_P5(bp))
+       if (BNXT_CHIP_P5_P7(bp))
                max_rx_rings = RTE_MIN(max_rx_rings, BNXT_RSS_TBL_SIZE_P5);
 
        max_tx_rings = RTE_MIN(max_tx_rings, max_rx_rings);
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index bd30e9fd3e..d79396b009 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -212,7 +212,7 @@ uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
        unsigned int num_rss_rings = RTE_MIN(bp->rx_nr_rings,
                                             BNXT_RSS_TBL_SIZE_P5);
 
-       if (!BNXT_CHIP_P5(bp))
+       if (!BNXT_CHIP_P5_P7(bp))
                return 1;
 
        return RTE_ALIGN_MUL_CEIL(num_rss_rings,
@@ -222,7 +222,7 @@ uint16_t bnxt_rss_ctxts(const struct bnxt *bp)
 
 uint16_t bnxt_rss_hash_tbl_size(const struct bnxt *bp)
 {
-       if (!BNXT_CHIP_P5(bp))
+       if (!BNXT_CHIP_P5_P7(bp))
                return HW_HASH_INDEX_SIZE;
 
        return bnxt_rss_ctxts(bp) * BNXT_RSS_ENTRIES_PER_CTX_P5;
@@ -765,7 +765,7 @@ static int bnxt_start_nic(struct bnxt *bp)
        /* P5 does not support ring groups.
         * But we will use the array to save RSS context IDs.
         */
-       if (BNXT_CHIP_P5(bp))
+       if (BNXT_CHIP_P5_P7(bp))
                bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
 
        rc = bnxt_vnic_queue_db_init(bp);
@@ -1247,12 +1247,6 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 {
        struct bnxt *bp = eth_dev->data->dev_private;
 
-       /* Disable vector mode RX for Stingray2 for now */
-       if (BNXT_CHIP_SR2(bp)) {
-               bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
-               return bnxt_recv_pkts;
-       }
-
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
        /* Vector mode receive cannot be enabled if scattered rx is in use. */
        if (eth_dev->data->scattered_rx)
@@ -1317,16 +1311,11 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 }
 
 static eth_tx_burst_t
-bnxt_transmit_function(struct rte_eth_dev *eth_dev)
+bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
 {
-       struct bnxt *bp = eth_dev->data->dev_private;
-
-       /* Disable vector mode TX for Stingray2 for now */
-       if (BNXT_CHIP_SR2(bp))
-               return bnxt_xmit_pkts;
-
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
        uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;
+       struct bnxt *bp = eth_dev->data->dev_private;
 
        /*
         * Vector mode transmit can be enabled only if not using scatter rx
@@ -2091,7 +2080,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
                        continue;
 
                rxq = bnxt_qid_to_rxq(bp, reta_conf[idx].reta[sft]);
-               if (BNXT_CHIP_P5(bp)) {
+               if (BNXT_CHIP_P5_P7(bp)) {
                        vnic->rss_table[i * 2] =
                                rxq->rx_ring->rx_ring_struct->fw_ring_id;
                        vnic->rss_table[i * 2 + 1] =
@@ -2138,7 +2127,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
                if (reta_conf[idx].mask & (1ULL << sft)) {
                        uint16_t qid;
 
-                       if (BNXT_CHIP_P5(bp))
+                       if (BNXT_CHIP_P5_P7(bp))
                                qid = bnxt_rss_to_qid(bp,
                                                      vnic->rss_table[i * 2]);
                        else
@@ -3224,7 +3213,7 @@ bnxt_rx_queue_count_op(void *rx_queue)
                        break;
 
                case CMPL_BASE_TYPE_RX_TPA_END:
-                       if (BNXT_CHIP_P5(rxq->bp)) {
+                       if (BNXT_CHIP_P5_P7(rxq->bp)) {
                                struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end;
 
                                p5_tpa_end = (void *)rxcmp;
@@ -3335,7 +3324,7 @@ bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
                        if (desc == offset)
                                return RTE_ETH_RX_DESC_DONE;
 
-                       if (BNXT_CHIP_P5(rxq->bp)) {
+                       if (BNXT_CHIP_P5_P7(rxq->bp)) {
                                struct rx_tpa_v2_end_cmpl_hi *p5_tpa_end;
 
                                p5_tpa_end = (void *)rxcmp;
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 28dd5ae6cb..15f0e1b308 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -1199,7 +1199,7 @@ bnxt_vnic_rss_cfg_update(struct bnxt *bp,
                if (i == bp->rx_cp_nr_rings)
                        return 0;
 
-               if (BNXT_CHIP_P5(bp)) {
+               if (BNXT_CHIP_P5_P7(bp)) {
                        rxq = bp->rx_queues[idx];
                        vnic->rss_table[rss_idx * 2] =
                                rxq->rx_ring->rx_ring_struct->fw_ring_id;
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index fe9e629892..2d0a7a2731 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -853,7 +853,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
        bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
        bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-       if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
+       if (!BNXT_CHIP_P5_P7(bp) && !bp->pdev->max_vfs)
                bp->max_l2_ctx += bp->max_rx_em_flows;
        if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
                bp->max_vnics = rte_le_to_cpu_16(BNXT_MAX_VNICS_COS_CLASSIFY);
@@ -1187,7 +1187,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
         * So use the value provided by func_qcaps.
         */
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
-       if (!BNXT_CHIP_P5(bp) && !bp->pdev->max_vfs)
+       if (!BNXT_CHIP_P5_P7(bp) && !bp->pdev->max_vfs)
                bp->max_l2_ctx += bp->max_rx_em_flows;
        if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY)
                bp->max_vnics = rte_le_to_cpu_16(BNXT_MAX_VNICS_COS_CLASSIFY);
@@ -1744,7 +1744,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
-               if (BNXT_CHIP_P5(bp)) {
+               if (BNXT_CHIP_P5_P7(bp)) {
                        mb_pool = bp->rx_queues[0]->mb_pool;
                        rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
                                      RTE_PKTMBUF_HEADROOM;
@@ -2118,7 +2118,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 
        HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
 
-       if (BNXT_CHIP_P5(bp)) {
+       if (BNXT_CHIP_P5_P7(bp)) {
                int dflt_rxq = vnic->start_grp_id;
                struct bnxt_rx_ring_info *rxr;
                struct bnxt_cp_ring_info *cpr;
@@ -2304,7 +2304,7 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
        int rc = 0;
 
-       if (BNXT_CHIP_P5(bp)) {
+       if (BNXT_CHIP_P5_P7(bp)) {
                int j;
 
                for (j = 0; j < vnic->num_lb_ctxts; j++) {
@@ -2556,7 +2556,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
        struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-       if (BNXT_CHIP_P5(bp) && !bp->max_tpa_v2) {
+       if ((BNXT_CHIP_P5(bp) || BNXT_CHIP_P7(bp)) && !bp->max_tpa_v2) {
                if (enable)
                        PMD_DRV_LOG(ERR, "No HW support for LRO\n");
                return -ENOTSUP;
@@ -2584,6 +2584,9 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
                req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
                req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
                req.min_agg_len = rte_cpu_to_le_32(512);
+
+               if (BNXT_CHIP_P5_P7(bp))
+                       req.max_aggs = rte_cpu_to_le_16(bp->max_tpa_v2);
        }
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
@@ -2836,7 +2839,7 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
        ring = rxr ? rxr->ag_ring_struct : NULL;
        if (ring != NULL && cpr != NULL) {
                bnxt_hwrm_ring_free(bp, ring,
-                                   BNXT_CHIP_P5(bp) ?
+                                   BNXT_CHIP_P5_P7(bp) ?
                                    HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
                                    HWRM_RING_FREE_INPUT_RING_TYPE_RX,
                                    cpr->cp_ring_struct->fw_ring_id);
@@ -3356,8 +3359,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
 
        /* Get user requested autoneg setting */
        autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
-
-       if (BNXT_CHIP_P5(bp) &&
+       if (BNXT_CHIP_P5_P7(bp) &&
            dev_conf->link_speeds & RTE_ETH_LINK_SPEED_40G) {
                /* 40G is not supported as part of media auto detect.
                 * The speed should be forced and autoneg disabled
@@ -5348,7 +5350,7 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        if (!(vnic->rss_table && vnic->hash_type))
                return 0;
 
-       if (BNXT_CHIP_P5(bp))
+       if (BNXT_CHIP_P5_P7(bp))
                return bnxt_vnic_rss_configure_p5(bp, vnic);
 
        /*
@@ -5440,7 +5442,7 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
        int rc;
 
        /* Set ring coalesce parameters only for 100G NICs */
-       if (BNXT_CHIP_P5(bp)) {
+       if (BNXT_CHIP_P5_P7(bp)) {
                if (bnxt_hwrm_set_coal_params_p5(bp, &req))
                        return -1;
        } else if (bnxt_stratus_device(bp)) {
@@ -5470,7 +5472,7 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
        int total_alloc_len;
        int rc, i, tqm_rings;
 
-       if (!BNXT_CHIP_P5(bp) ||
+       if (!BNXT_CHIP_P5_P7(bp) ||
            bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
            BNXT_VF(bp) ||
            bp->ctx)
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 6dacb1b37f..90cad6c9c6 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -57,7 +57,7 @@ int bnxt_alloc_ring_grps(struct bnxt *bp)
        /* P5 does not support ring groups.
         * But we will use the array to save RSS context IDs.
         */
-       if (BNXT_CHIP_P5(bp)) {
+       if (BNXT_CHIP_P5_P7(bp)) {
                bp->max_ring_grps = BNXT_MAX_RSS_CTXTS_P5;
        } else if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
                /* 1 ring is for default completion ring */
@@ -354,7 +354,7 @@ static void bnxt_set_db(struct bnxt *bp,
                        uint32_t fid,
                        uint32_t ring_mask)
 {
-       if (BNXT_CHIP_P5(bp)) {
+       if (BNXT_CHIP_P5_P7(bp)) {
                int db_offset = DB_PF_OFFSET;
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
@@ -559,7 +559,7 @@ static int bnxt_alloc_rx_agg_ring(struct bnxt *bp, int queue_index)
 
        ring->fw_rx_ring_id = rxr->rx_ring_struct->fw_ring_id;
 
-       if (BNXT_CHIP_P5(bp)) {
+       if (BNXT_CHIP_P5_P7(bp)) {
                ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG;
                hw_stats_ctx_id = cpr->hw_stats_ctx_id;
        } else {
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 0d0b5e28e4..575e7f193f 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -600,7 +600,7 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
                        if (bp->rx_queues[i]->rx_started)
                                active_queue_cnt++;
 
-               if (BNXT_CHIP_P5(bp)) {
+               if (BNXT_CHIP_P5_P7(bp)) {
                        /*
                         * For P5, we need to ensure that the VNIC default
                         * receive ring corresponds to an active receive queue.
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 0cabfb583c..9d45065f28 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -334,7 +334,7 @@ static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
        uint16_t cp_cons, ag_cons;
        struct rx_pkt_cmpl *rxcmp;
        struct rte_mbuf *last = mbuf;
-       bool is_p5_tpa = tpa_info && BNXT_CHIP_P5(rxq->bp);
+       bool is_p5_tpa = tpa_info && BNXT_CHIP_P5_P7(rxq->bp);
 
        for (i = 0; i < agg_buf; i++) {
                struct rte_mbuf **ag_buf;
@@ -395,7 +395,7 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
                struct rx_tpa_end_cmpl *tpa_end = cmp;
 
-               if (BNXT_CHIP_P5(bp))
+               if (BNXT_CHIP_P5_P7(bp))
                        return 0;
 
                agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
@@ -430,7 +430,7 @@ static inline struct rte_mbuf *bnxt_tpa_end(
                return NULL;
        }
 
-       if (BNXT_CHIP_P5(rxq->bp)) {
+       if (BNXT_CHIP_P5_P7(rxq->bp)) {
                struct rx_tpa_v2_end_cmpl *th_tpa_end;
                struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
 
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index d40daf631e..bf93120d28 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -143,7 +143,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp, bool reconfig)
 
        entry_length = HW_HASH_KEY_SIZE;
 
-       if (BNXT_CHIP_P5(bp))
+       if (BNXT_CHIP_P5_P7(bp))
                rss_table_size = BNXT_RSS_TBL_SIZE_P5 *
                                 2 * sizeof(*vnic->rss_table);
        else
@@ -418,8 +418,8 @@ static
 int32_t bnxt_vnic_populate_rss_table(struct bnxt *bp,
                                     struct bnxt_vnic_info *vnic)
 {
-       /* RSS table population is different for p4 and p5 platforms */
-       if (BNXT_CHIP_P5(bp))
+       /* RSS table population is different for p4 and p5, p7 platforms */
+       if (BNXT_CHIP_P5_P7(bp))
                return bnxt_vnic_populate_rss_table_p5(bp, vnic);
 
        return bnxt_vnic_populate_rss_table_p4(bp, vnic);
-- 
2.39.2 (Apple Git-143)

Attachment: smime.p7s
Description: S/MIME Cryptographic Signature

Reply via email to