Add Rx burst multi-segment version for CN10K.

Signed-off-by: Nithin Dabilpuram <ndabilpu...@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
---
 doc/guides/nics/cnxk.rst              |  2 ++
 doc/guides/nics/features/cnxk.ini     |  2 ++
 doc/guides/nics/features/cnxk_vec.ini |  1 +
 doc/guides/nics/features/cnxk_vf.ini  |  2 ++
 drivers/net/cnxk/cn10k_rx.c           | 27 ++++++++++++++++++
 drivers/net/cnxk/cn10k_rx.h           | 54 +++++++++++++++++++++++++++++++++--
 6 files changed, 86 insertions(+), 2 deletions(-)
diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 4f1b58c..789ec29 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -17,11 +17,13 @@ Features
 Features of the CNXK Ethdev PMD are:
 
 - Packet type information
+- Jumbo frames
 - SR-IOV VF
 - Lock-free Tx queue
 - Multiple queues for TX and RX
 - Receiver Side Scaling (RSS)
 - Link state information
+- Scatter-Gather IO support
 
 Prerequisites
 -------------
diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini
index 712f8d5..23564b7 100644
--- a/doc/guides/nics/features/cnxk.ini
+++ b/doc/guides/nics/features/cnxk.ini
@@ -15,6 +15,8 @@ Runtime Tx queue setup = Y
 Queue start/stop     = Y
 RSS hash             = Y
 Inner RSS            = Y
+Jumbo frame          = Y
+Scattered Rx         = Y
 Packet type parsing  = Y
 Linux                = Y
 ARMv8                = Y
diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini
index 82f2af0..421048d 100644
--- a/doc/guides/nics/features/cnxk_vec.ini
+++ b/doc/guides/nics/features/cnxk_vec.ini
@@ -15,6 +15,7 @@ Runtime Tx queue setup = Y
 Queue start/stop     = Y
 RSS hash             = Y
 Inner RSS            = Y
+Jumbo frame          = Y
 Packet type parsing  = Y
 Linux                = Y
 ARMv8                = Y
diff --git a/doc/guides/nics/features/cnxk_vf.ini b/doc/guides/nics/features/cnxk_vf.ini
index 61fed11..e901fa2 100644
--- a/doc/guides/nics/features/cnxk_vf.ini
+++ b/doc/guides/nics/features/cnxk_vf.ini
@@ -14,6 +14,8 @@ Runtime Tx queue setup = Y
 Queue start/stop     = Y
 RSS hash             = Y
 Inner RSS            = Y
+Jumbo frame          = Y
+Scattered Rx         = Y
 Packet type parsing  = Y
 Linux                = Y
 ARMv8                = Y
diff --git a/drivers/net/cnxk/cn10k_rx.c b/drivers/net/cnxk/cn10k_rx.c
index 1ff1b04..b98e7a1 100644
--- a/drivers/net/cnxk/cn10k_rx.c
+++ b/drivers/net/cnxk/cn10k_rx.c
@@ -88,6 +88,15 @@ nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
 		void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts)     \
 	{                                                                      \
 		return nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags));        \
+	}                                                                      \
+									       \
+	static uint16_t __rte_noinline __rte_hot                              \
+		cn10k_nix_recv_pkts_mseg_##name(void *rx_queue,                \
+						struct rte_mbuf **rx_pkts,     \
+						uint16_t pkts)                 \
+	{                                                                      \
+		return nix_recv_pkts(rx_queue, rx_pkts, pkts,                  \
+				     (flags) | NIX_RX_MULTI_SEG_F);            \
 	}
 
 NIX_RX_FASTPATH_MODES
@@ -110,6 +119,8 @@ pick_rx_func(struct rte_eth_dev *eth_dev,
 void
 cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 {
+	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
 	const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2] = {
 #define R(name, f3, f2, f1, f0, flags)                                         \
 	[f3][f2][f1][f0] = cn10k_nix_recv_pkts_##name,
@@ -118,6 +129,22 @@ cn10k_eth_set_rx_function(struct rte_eth_dev *eth_dev)
 #undef R
 	};
 
+	const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2] = {
+#define R(name, f3, f2, f1, f0, flags)                                         \
+	[f3][f2][f1][f0] = cn10k_nix_recv_pkts_mseg_##name,
+
+		NIX_RX_FASTPATH_MODES
+#undef R
+	};
+
 	pick_rx_func(eth_dev, nix_eth_rx_burst);
+
+	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+		pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
+
+	/* Copy multi seg version with no offload for tear down sequence */
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		dev->rx_pkt_burst_no_offload =
+			nix_eth_rx_burst_mseg[0][0][0][0];
 	rte_mb();
 }
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index f43f320..7887a81 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -98,6 +98,52 @@ nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
 }
 
 static __rte_always_inline void
+nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
+		    uint64_t rearm)
+{
+	const rte_iova_t *iova_list;
+	struct rte_mbuf *head;
+	const rte_iova_t *eol;
+	uint8_t nb_segs;
+	uint64_t sg;
+
+	sg = *(const uint64_t *)(rx + 1);
+	nb_segs = (sg >> 48) & 0x3;
+	mbuf->nb_segs = nb_segs;
+	mbuf->data_len = sg & 0xFFFF;
+	sg = sg >> 16;
+
+	eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1));
+	/* Skip SG_S and first IOVA */
+	iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
+	nb_segs--;
+
+	rearm = rearm & ~0xFFFF;
+
+	head = mbuf;
+	while (nb_segs) {
+		mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
+		mbuf = mbuf->next;
+
+		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+
+		mbuf->data_len = sg & 0xFFFF;
+		sg = sg >> 16;
+		*(uint64_t *)(&mbuf->rearm_data) = rearm;
+		nb_segs--;
+		iova_list++;
+
+		if (!nb_segs && (iova_list + 1 < eol)) {
+			sg = *(const uint64_t *)(iova_list);
+			nb_segs = (sg >> 48) & 0x3;
+			head->nb_segs += nb_segs;
+			iova_list = (const rte_iova_t *)(iova_list + 1);
+		}
+	}
+	mbuf->next = NULL;
+}
+
+static __rte_always_inline void
 cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
 		      struct rte_mbuf *mbuf, const void *lookup_mem,
 		      const uint64_t val, const uint16_t flag)
@@ -131,8 +177,12 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
 
 	*(uint64_t *)(&mbuf->rearm_data) = val;
 	mbuf->pkt_len = len;
-	mbuf->data_len = len;
-	mbuf->next = NULL;
+	if (flag & NIX_RX_MULTI_SEG_F) {
+		nix_cqe_xtract_mseg(rx, mbuf, val);
+	} else {
+		mbuf->data_len = len;
+		mbuf->next = NULL;
+	}
 }
 
 #define RSS_F NIX_RX_OFFLOAD_RSS_F
-- 
2.8.4
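
For context, the multi-segment burst functions are only installed when the
application requests scattered Rx, per the DEV_RX_OFFLOAD_SCATTER check in
cn10k_eth_set_rx_function() above. Below is a minimal sketch of such a
configuration using the standard ethdev API of this release; the function
name, port id and queue counts are hypothetical, not part of this patch.

#include <string.h>
#include <rte_ethdev.h>

/* Request scattered Rx at configure time; with this offload set,
 * cn10k_eth_set_rx_function() picks the _mseg Rx burst functions
 * instead of the single-segment ones.
 */
static int
configure_scatter_rx(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.offloads = DEV_RX_OFFLOAD_SCATTER;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}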