[PATCH v9 1/1] app/testpmd: support multiple mbuf pools per Rx queue
Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD/NIC to choose a memory pool based on the packet's length. On multiple mempool support enabled, populate mempool array accordingly. Also, print pool name on which packet is received. Signed-off-by: Hanumanth Pothula --- app/test-pmd/testpmd.c | 40 app/test-pmd/testpmd.h | 3 +++ app/test-pmd/util.c| 4 ++-- 3 files changed, 33 insertions(+), 14 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 5b0f0838dc..1549551640 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2647,10 +2647,16 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; unsigned int i, mp_n; int ret; - if (rx_pkt_nb_segs <= 1 || + /* For multiple mempools per Rx queue support, +* rx_pkt_nb_segs greater than 1 and +* Rx offload flag, RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT won't be set. +* @see rte_eth_rxconf::rx_mempools +*/ + if (rx_pkt_nb_segs <= 1 && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; @@ -2668,20 +2674,30 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, */ mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; - if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { - rx_seg->proto_hdr = rx_pkt_hdr_protos[i]; + + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + /* Handle zero as mbuf data buffer size. */ + rx_seg->offset = i < rx_pkt_nb_offs ? + rx_pkt_seg_offsets[i] : 0; + rx_seg->mp = mpx ? mpx : mp; + if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { + rx_seg->proto_hdr = rx_pkt_hdr_protos[i]; + } else { + rx_seg->length = rx_pkt_seg_lengths[i] ? +rx_pkt_seg_lengths[i] : +mbuf_data_size[mp_n]; + } } else { - rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; + rx_mempool[i] = mpx ? 
mpx : mp; } } - rx_conf->rx_nseg = rx_pkt_nb_segs; - rx_conf->rx_seg = rx_useg; + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + rx_conf->rx_nseg = rx_pkt_nb_segs; + rx_conf->rx_seg = rx_useg; + } else { + rx_conf->rx_mempools = rx_mempool; + rx_conf->rx_nmempool = rx_pkt_nb_segs; + } ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, socket_id, rx_conf, NULL); rx_conf->rx_seg = NULL; diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index e65be323b8..14be10dcef 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -80,6 +80,9 @@ extern uint8_t cl_quit; #define MIN_TOTAL_NUM_MBUFS 1024 +/* Maximum number of pools supported per Rx queue */ +#define MAX_MEMPOOL 8 + typedef uint8_t lcoreid_t; typedef uint16_t portid_t; typedef uint16_t queueid_t; diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index fd98e8b51d..f9df5f69ef 100644 --- a/app/test-pmd/util.c +++ b/app/test-pmd/util.c @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], print_ether_addr(" - dst=", ð_hdr->dst_addr, print_buf, buf_size, &cur_len); MKDUMPSTR(print_buf, buf_size, cur_len, - " - type=0x%04x - length=%u - nb_segs=%d", - eth_type, (unsigned int) mb->pkt_len, + " - pool=%s - type=0x%04x - length=%u - nb_segs=%d", + mb->pool->name, eth_type, (unsigned int) mb->pkt_len, (int)mb->nb_segs); ol_flags = mb->ol_flags; if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) { -- 2.25.1
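For reference, a minimal application-side sketch of the per-queue multi-mempool setup this patch exercises, using the rte_eth_rxconf::rx_mempools and rx_nmempool fields from the companion ethdev change. Pool names, element counts, data-room sizes and the descriptor count are illustrative assumptions, not values from the patch.

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static int
setup_multi_pool_rxq(uint16_t port_id, uint16_t queue_id, int socket_id)
{
	struct rte_mempool *pools[2];
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	/* Small-packet pool: 2K data room plus mbuf headroom. */
	pools[0] = rte_pktmbuf_pool_create("pool_2k", 4096, 256, 0,
			2048 + RTE_PKTMBUF_HEADROOM, socket_id);
	/* Large-packet pool: 16K data room for bigger frames. */
	pools[1] = rte_pktmbuf_pool_create("pool_16k", 1024, 64, 0,
			16384 + RTE_PKTMBUF_HEADROOM, socket_id);
	if (pools[0] == NULL || pools[1] == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	rxconf = dev_info.default_rxconf;
	rxconf.rx_mempools = pools;	/* per-queue pool array */
	rxconf.rx_nmempool = 2;		/* valid entries in the array */

	/* mp argument is NULL: the pools come from rxconf instead. */
	return rte_eth_rx_queue_setup(port_id, queue_id, 512, socket_id,
				      &rxconf, NULL);
}

A PMD with this capability can then steer short packets into pool_2k and larger frames into pool_16k, which is exactly what the pool name printed by dump_pkt_burst() makes visible.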
[PATCH v10 1/1] app/testpmd: support multiple mbuf pools per Rx queue
Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD/NIC to choose a memory pool based on the packet's length. On multiple mempool support enabled, populate mempool array accordingly. Also, print pool name on which packet is received. Signed-off-by: Hanumanth Pothula v9: - Populate multi-mempool array based on mbuf_data_size_n instead of rx_pkt_nb_segs. --- app/test-pmd/testpmd.c | 62 ++ app/test-pmd/testpmd.h | 3 ++ app/test-pmd/util.c| 4 +-- 3 files changed, 44 insertions(+), 25 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 5b0f0838dc..40e8522e49 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2647,11 +2647,17 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; unsigned int i, mp_n; int ret; - if (rx_pkt_nb_segs <= 1 || - (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { + /* Verify Rx queue configuration is single pool and segment or +* multiple pool/segment. +* @see rte_eth_rxconf::rx_mempools +* @see rte_eth_rxconf::rx_seg +*/ + if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || + (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT != 0))) { rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -2659,29 +2665,39 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, rx_conf, mp); goto exit; } - for (i = 0; i < rx_pkt_nb_segs; i++) { - struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; - struct rte_mempool *mpx; - /* -* Use last valid pool for the segments with number -* exceeding the pool index. -*/ - mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; - mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; - if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { - rx_seg->proto_hdr = rx_pkt_hdr_protos[i]; - } else { - rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + for (i = 0; i < rx_pkt_nb_segs; i++) { + struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + /* +* Use last valid pool for the segments with number +* exceeding the pool index. +*/ + mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; + mpx = mbuf_pool_find(socket_id, mp_n); + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + /** +* On Segment length zero, update length as, +* buffer size - headroom size +* to make sure enough space is accomidate for header. +*/ + rx_seg->length = rx_pkt_seg_lengths[i] ? +rx_pkt_seg_lengths[i] : +mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM; + rx_seg->offset = i < rx_pkt_nb_offs ? +rx_pkt_seg_offsets[i] : 0; + rx_seg->mp = mpx ? mpx : mp; + } + } + rx_conf->rx_nseg = rx_pkt_nb_segs; + rx_conf->rx_seg = rx_useg; + } else { + for (i = 0; i < mbuf_data_size_n; i++) { + mpx = mbuf_pool_find(socket_id, i); + rx_mempool[i] = mpx ? mpx : mp; } + rx_conf->rx_mempools = rx_mempool; + rx_conf->rx_nmempool = mbuf_data_size_n; } - rx_conf->rx_nseg = rx_pkt_nb_segs; - rx_conf->rx_seg = rx_useg; ret = rte_eth_rx_queue_setup(port_id, rx_queue
[PATCH v11 1/1] app/testpmd: support multiple mbuf pools per Rx queue
Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD/NIC to choose a memory pool based on the packet's length. On multiple mempool support enabled, populate mempool array accordingly. Also, print pool name on which packet is received. Signed-off-by: Hanumanth Pothula v11: - Resolve compilation and warning. v10: - Populate multi-mempool array based on mbuf_data_size_n instead of rx_pkt_nb_segs. --- app/test-pmd/testpmd.c | 63 +++--- app/test-pmd/testpmd.h | 3 ++ app/test-pmd/util.c| 4 +-- 3 files changed, 45 insertions(+), 25 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 5b0f0838dc..62f7c9dba8 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2647,11 +2647,18 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; + struct rte_mempool *mpx; unsigned int i, mp_n; int ret; - if (rx_pkt_nb_segs <= 1 || - (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { + /* Verify Rx queue configuration is single pool and segment or +* multiple pool/segment. +* @see rte_eth_rxconf::rx_mempools +* @see rte_eth_rxconf::rx_seg +*/ + if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || + ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -2659,29 +2666,39 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, rx_conf, mp); goto exit; } - for (i = 0; i < rx_pkt_nb_segs; i++) { - struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; - struct rte_mempool *mpx; - /* -* Use last valid pool for the segments with number -* exceeding the pool index. -*/ - mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; - mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; - if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { - rx_seg->proto_hdr = rx_pkt_hdr_protos[i]; - } else { - rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + for (i = 0; i < rx_pkt_nb_segs; i++) { + struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + /* +* Use last valid pool for the segments with number +* exceeding the pool index. +*/ + mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; + mpx = mbuf_pool_find(socket_id, mp_n); + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + /** +* On Segment length zero, update length as, +* buffer size - headroom size +* to make sure enough space is accomidate for header. +*/ + rx_seg->length = rx_pkt_seg_lengths[i] ? +rx_pkt_seg_lengths[i] : +mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM; + rx_seg->offset = i < rx_pkt_nb_offs ? +rx_pkt_seg_offsets[i] : 0; + rx_seg->mp = mpx ? mpx : mp; + } + } + rx_conf->rx_nseg = rx_pkt_nb_segs; + rx_conf->rx_seg = rx_useg; + } else { + for (i = 0; i < mbuf_data_size_n; i++) { + mpx = mbuf_pool_find(socket_id, i); + rx_mempool[i] = mpx ? mpx : mp; } + rx_conf->rx_mempools = rx_mempool; + rx_conf->rx_nmempool = mbuf_data_size_n; } - rx_conf->rx_nseg = rx_pkt_nb_segs; - rx_conf-&
[PATCH] common/cnxk: return on failure to init ROC model
Return an error when RoC model initialization fails, instead of silently continuing. Signed-off-by: Hanumanth Pothula --- drivers/common/cnxk/roc_platform.c | 7 ++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/common/cnxk/roc_platform.c b/drivers/common/cnxk/roc_platform.c index ebb6225f4d..443aa8d396 100644 --- a/drivers/common/cnxk/roc_platform.c +++ b/drivers/common/cnxk/roc_platform.c @@ -37,7 +37,12 @@ roc_plt_init(void) plt_err("Failed to reserve mem for roc_model"); return -ENOMEM; } - roc_model_init(mz->addr); + if (roc_model_init(mz->addr)) { + plt_err("Failed to init roc_model"); + + rte_memzone_free(mz); + return -EINVAL; + } } } else { if (mz == NULL) { -- 2.25.1
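The error path frees the roc_model memzone so that a later roc_plt_init() attempt starts clean rather than finding a stale reservation. A caller-side sketch under that assumption; the probe function is illustrative, while roc_plt_init() and plt_err() are the real cnxk helpers.

#include "roc_api.h"

static int
dev_probe_common(void)
{
	int rc;

	rc = roc_plt_init();
	if (rc) {
		/* With this fix, failure leaves no stale memzone behind,
		 * so a later retry of the probe is safe.
		 */
		plt_err("platform init failed, rc=%d", rc);
		return rc;
	}
	return 0;
}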
[PATCH] common/cnxk: add lower bound check for SSO resources
A crash is observed when the second test case runs after the first test case has consumed all SSO HWGRP and HWS resources, leaving none for the second. Make sure a test case is processed only when valid HWGRP and HWS counts are available, by adding a lower-bound check for both. Signed-off-by: Hanumanth Pothula --- drivers/common/cnxk/roc_sso.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c index f8a0a96533..8997e6f999 100644 --- a/drivers/common/cnxk/roc_sso.c +++ b/drivers/common/cnxk/roc_sso.c @@ -598,9 +598,9 @@ roc_sso_rsrc_init(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp) struct sso_lf_alloc_rsp *rsp_hwgrp; int rc; - if (roc_sso->max_hwgrp < nb_hwgrp) + if (!nb_hwgrp || roc_sso->max_hwgrp < nb_hwgrp) return -ENOENT; - if (roc_sso->max_hws < nb_hws) + if (!nb_hws || roc_sso->max_hws < nb_hws) return -ENOENT; rc = sso_rsrc_attach(roc_sso, SSO_LF_TYPE_HWS, nb_hws); -- 2.25.1
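An illustrative caller mirroring the new lower-bound rule: attach HWS/HWGRP only when the requested counts are non-zero and within the advertised maxima. roc_sso_rsrc_init() and the roc_sso fields come from the driver; the harness function is an assumption.

#include <errno.h>
#include "roc_api.h"

static int
run_sso_case(struct roc_sso *roc_sso, uint8_t nb_hws, uint16_t nb_hwgrp)
{
	int rc;

	if (nb_hws == 0 || nb_hwgrp == 0 ||
	    nb_hws > roc_sso->max_hws || nb_hwgrp > roc_sso->max_hwgrp)
		return -ENOENT;	/* nothing (or too little) left to attach */

	rc = roc_sso_rsrc_init(roc_sso, nb_hws, nb_hwgrp);
	if (rc)
		return rc;
	/* ... run the case, then release HWS/HWGRP for the next one ... */
	return 0;
}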
[PATCH v1 1/1] drivers: remove implementation of Rx metadata negotiation
Presently, Rx metadata is sent to PMD by default, leading to a performance drop as processing for the same in Rx path takes extra cycles. Hence, removing driver implementation of Rx metadata negotiation and falling back to old implementation where mark actions are tracked as part of the flow rule. Signed-off-by: Hanumanth Pothula --- drivers/common/cnxk/roc_npc.c | 19 +++ drivers/common/cnxk/roc_npc.h | 3 +++ drivers/common/cnxk/roc_npc_priv.h | 1 + drivers/common/cnxk/version.map| 2 ++ drivers/net/cnxk/cn10k_ethdev.c| 26 -- drivers/net/cnxk/cn10k_flow.c | 19 +++ drivers/net/cnxk/cn9k_ethdev.c | 25 - drivers/net/cnxk/cn9k_flow.c | 20 drivers/net/cnxk/cnxk_ethdev.h | 1 - 9 files changed, 64 insertions(+), 52 deletions(-) diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c index a795114326..47536c8ce8 100644 --- a/drivers/common/cnxk/roc_npc.c +++ b/drivers/common/cnxk/roc_npc.c @@ -5,6 +5,23 @@ #include "roc_api.h" #include "roc_priv.h" +int +roc_npc_mark_actions_get(struct roc_npc *roc_npc) +{ + struct npc *npc = roc_npc_to_npc_priv(roc_npc); + + return npc->mark_actions; +} + +int +roc_npc_mark_actions_sub_return(struct roc_npc *roc_npc, uint32_t count) +{ + struct npc *npc = roc_npc_to_npc_priv(roc_npc); + + npc->mark_actions -= count; + return npc->mark_actions; +} + int roc_npc_vtag_actions_get(struct roc_npc *roc_npc) { @@ -488,12 +505,14 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, } mark = act_mark->id + 1; req_act |= ROC_NPC_ACTION_TYPE_MARK; + npc->mark_actions += 1; flow->match_id = mark; break; case ROC_NPC_ACTION_TYPE_FLAG: mark = NPC_FLOW_FLAG_VAL; req_act |= ROC_NPC_ACTION_TYPE_FLAG; + npc->mark_actions += 1; break; case ROC_NPC_ACTION_TYPE_COUNT: diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h index 5e07e26a91..61d0628f5f 100644 --- a/drivers/common/cnxk/roc_npc.h +++ b/drivers/common/cnxk/roc_npc.h @@ -397,6 +397,9 @@ int __roc_api roc_npc_mcam_free_all_resources(struct roc_npc *roc_npc); void __roc_api roc_npc_flow_dump(FILE *file, struct roc_npc *roc_npc); void __roc_api roc_npc_flow_mcam_dump(FILE *file, struct roc_npc *roc_npc, struct roc_npc_flow *mcam); +int __roc_api roc_npc_mark_actions_get(struct roc_npc *roc_npc); +int __roc_api roc_npc_mark_actions_sub_return(struct roc_npc *roc_npc, + uint32_t count); int __roc_api roc_npc_vtag_actions_get(struct roc_npc *roc_npc); int __roc_api roc_npc_vtag_actions_sub_return(struct roc_npc *roc_npc, uint32_t count); diff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h index 08d763eeb4..714dcb09c9 100644 --- a/drivers/common/cnxk/roc_npc_priv.h +++ b/drivers/common/cnxk/roc_npc_priv.h @@ -393,6 +393,7 @@ struct npc { uint16_t flow_prealloc_size;/* Pre allocated mcam size */ uint16_t flow_max_priority; /* Max priority for flow */ uint16_t switch_header_type; /* Supported switch header type */ + uint32_t mark_actions; uint32_t vtag_strip_actions; /* vtag insert/strip actions */ uint16_t pf_func;/* pf_func of device */ npc_dxcfg_t prx_dxcfg; /* intf, lid, lt, extract */ diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map index 5d2b75fb5a..3eff3870d1 100644 --- a/drivers/common/cnxk/version.map +++ b/drivers/common/cnxk/version.map @@ -344,6 +344,8 @@ INTERNAL { roc_npc_flow_parse; roc_npc_get_low_priority_mcam; roc_npc_init; + roc_npc_mark_actions_get; + roc_npc_mark_actions_sub_return; roc_npc_vtag_actions_get; roc_npc_vtag_actions_sub_return; roc_npc_mcam_alloc_entries; diff --git 
a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c index b84fed6d90..512b9c2597 100644 --- a/drivers/net/cnxk/cn10k_ethdev.c +++ b/drivers/net/cnxk/cn10k_ethdev.c @@ -39,9 +39,6 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev) if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) flags |= NIX_RX_OFFLOAD_SECURITY_F; - if (dev->rx_mark_update) - flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F; - return flags; } @@ -562,27 +559,6 @@ cn10k_nix_dev_start(struct rte_eth_dev *eth_dev) return 0; } -static int -cn10k_nix_rx_metadata_negotiate(struct rte_eth_d
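A hedged sketch of how the restored counters are consumed on the flow-destroy side: once the last MARK/FLAG action is gone, the Rx path no longer needs mark processing. Only roc_npc_mark_actions_sub_return() comes from the patch; the helper and the flag-clearing step are illustrative.

#include <stdbool.h>
#include "roc_api.h"

static bool
npc_flow_drop_mark_user(struct roc_npc *roc_npc, uint32_t nb_marks)
{
	/* Returns the count remaining after subtracting this flow's share. */
	if (roc_npc_mark_actions_sub_return(roc_npc, nb_marks) == 0) {
		/* Last MARK/FLAG user gone: the driver can now clear
		 * NIX_RX_OFFLOAD_MARK_UPDATE_F from its Rx flags.
		 */
		return false;
	}
	return true;	/* other flows still need mark updates */
}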
[PATCH v2 1/1] drivers: remove implementation of Rx metadata negotiation
Presently, Rx metadata is sent to PMD by default, leading to a performance drop as processing for the same in Rx path takes extra cycles. Hence, removing driver implementation of Rx metadata negotiation and falling back to old implementation where mark actions are tracked as part of the flow rule. Signed-off-by: Hanumanth Pothula --- v2: Remove explicit initializations. --- drivers/common/cnxk/roc_npc.c | 19 +++ drivers/common/cnxk/roc_npc.h | 3 +++ drivers/common/cnxk/roc_npc_priv.h | 1 + drivers/common/cnxk/version.map| 2 ++ drivers/net/cnxk/cn10k_ethdev.c| 26 -- drivers/net/cnxk/cn10k_flow.c | 19 +++ drivers/net/cnxk/cn9k_ethdev.c | 25 - drivers/net/cnxk/cn9k_flow.c | 20 drivers/net/cnxk/cnxk_ethdev.h | 1 - 9 files changed, 64 insertions(+), 52 deletions(-) diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c index a795114326..47536c8ce8 100644 --- a/drivers/common/cnxk/roc_npc.c +++ b/drivers/common/cnxk/roc_npc.c @@ -5,6 +5,23 @@ #include "roc_api.h" #include "roc_priv.h" +int +roc_npc_mark_actions_get(struct roc_npc *roc_npc) +{ + struct npc *npc = roc_npc_to_npc_priv(roc_npc); + + return npc->mark_actions; +} + +int +roc_npc_mark_actions_sub_return(struct roc_npc *roc_npc, uint32_t count) +{ + struct npc *npc = roc_npc_to_npc_priv(roc_npc); + + npc->mark_actions -= count; + return npc->mark_actions; +} + int roc_npc_vtag_actions_get(struct roc_npc *roc_npc) { @@ -488,12 +505,14 @@ npc_parse_actions(struct roc_npc *roc_npc, const struct roc_npc_attr *attr, } mark = act_mark->id + 1; req_act |= ROC_NPC_ACTION_TYPE_MARK; + npc->mark_actions += 1; flow->match_id = mark; break; case ROC_NPC_ACTION_TYPE_FLAG: mark = NPC_FLOW_FLAG_VAL; req_act |= ROC_NPC_ACTION_TYPE_FLAG; + npc->mark_actions += 1; break; case ROC_NPC_ACTION_TYPE_COUNT: diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h index 5e07e26a91..61d0628f5f 100644 --- a/drivers/common/cnxk/roc_npc.h +++ b/drivers/common/cnxk/roc_npc.h @@ -397,6 +397,9 @@ int __roc_api roc_npc_mcam_free_all_resources(struct roc_npc *roc_npc); void __roc_api roc_npc_flow_dump(FILE *file, struct roc_npc *roc_npc); void __roc_api roc_npc_flow_mcam_dump(FILE *file, struct roc_npc *roc_npc, struct roc_npc_flow *mcam); +int __roc_api roc_npc_mark_actions_get(struct roc_npc *roc_npc); +int __roc_api roc_npc_mark_actions_sub_return(struct roc_npc *roc_npc, + uint32_t count); int __roc_api roc_npc_vtag_actions_get(struct roc_npc *roc_npc); int __roc_api roc_npc_vtag_actions_sub_return(struct roc_npc *roc_npc, uint32_t count); diff --git a/drivers/common/cnxk/roc_npc_priv.h b/drivers/common/cnxk/roc_npc_priv.h index 08d763eeb4..714dcb09c9 100644 --- a/drivers/common/cnxk/roc_npc_priv.h +++ b/drivers/common/cnxk/roc_npc_priv.h @@ -393,6 +393,7 @@ struct npc { uint16_t flow_prealloc_size;/* Pre allocated mcam size */ uint16_t flow_max_priority; /* Max priority for flow */ uint16_t switch_header_type; /* Supported switch header type */ + uint32_t mark_actions; uint32_t vtag_strip_actions; /* vtag insert/strip actions */ uint16_t pf_func;/* pf_func of device */ npc_dxcfg_t prx_dxcfg; /* intf, lid, lt, extract */ diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map index 5d2b75fb5a..3eff3870d1 100644 --- a/drivers/common/cnxk/version.map +++ b/drivers/common/cnxk/version.map @@ -344,6 +344,8 @@ INTERNAL { roc_npc_flow_parse; roc_npc_get_low_priority_mcam; roc_npc_init; + roc_npc_mark_actions_get; + roc_npc_mark_actions_sub_return; roc_npc_vtag_actions_get; roc_npc_vtag_actions_sub_return; 
roc_npc_mcam_alloc_entries; diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c index b84fed6d90..512b9c2597 100644 --- a/drivers/net/cnxk/cn10k_ethdev.c +++ b/drivers/net/cnxk/cn10k_ethdev.c @@ -39,9 +39,6 @@ nix_rx_offload_flags(struct rte_eth_dev *eth_dev) if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) flags |= NIX_RX_OFFLOAD_SECURITY_F; - if (dev->rx_mark_update) - flags |= NIX_RX_OFFLOAD_MARK_UPDATE_F; - return flags; } @@ -562,27 +559,6 @@ cn10k_nix_dev_start(struct rte_eth_dev *eth_dev) return 0; } -static int -cn10k_nix_rx_metad
[PATCH v1 1/1] net/thunderx: update DMAC control register appropriately
By default dmac control register is set to reject packets on mac address match, leading all unicast packets to drop. Update DMAC control register to allow packets on MAC address match rather than dropping. Signed-off-by: Hanumanth Pothula --- drivers/net/thunderx/base/nicvf_mbox.c | 12 drivers/net/thunderx/base/nicvf_mbox.h | 10 ++ drivers/net/thunderx/nicvf_ethdev.c| 26 ++ 3 files changed, 48 insertions(+) diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c index 5993eec4e6..0e0176974d 100644 --- a/drivers/net/thunderx/base/nicvf_mbox.c +++ b/drivers/net/thunderx/base/nicvf_mbox.c @@ -485,3 +485,15 @@ nicvf_mbox_reset_xcast(struct nicvf *nic) mbx.msg.msg = NIC_MBOX_MSG_RESET_XCAST; nicvf_mbox_send_msg_to_pf(nic, &mbx); } + +int +nicvf_mbox_set_xcast(struct nicvf *nic, uint8_t mode, uint64_t mac) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; + mbx.xcast.mode = mode; + mbx.xcast.mac = mac; + + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h index 322c8159cb..47f3d13755 100644 --- a/drivers/net/thunderx/base/nicvf_mbox.h +++ b/drivers/net/thunderx/base/nicvf_mbox.h @@ -45,6 +45,8 @@ #defineNIC_MBOX_MSG_CFG_DONE 0xF0/* VF configuration done */ #defineNIC_MBOX_MSG_SHUTDOWN 0xF1/* VF is being shutdown */ #define NIC_MBOX_MSG_RESET_XCAST 0xF2/* Reset DCAM filtering mode */ +#defineNIC_MBOX_MSG_ADD_MCAST 0xF3/* ADD MAC to DCAM filters */ +#defineNIC_MBOX_MSG_SET_XCAST 0xF4/* Set MCAST/BCAST Rx mode */ #defineNIC_MBOX_MSG_MAX0x100 /* Maximum number of messages */ /* Get vNIC VF configuration */ @@ -190,6 +192,12 @@ struct change_link_mode_msg { }; +struct xcast { + uint8_tmsg; + uint8_tmode; + uint64_t mac:48; +}; + struct nic_mbx { /* 128 bit shared memory between PF and each VF */ union { @@ -209,6 +217,7 @@ union { struct reset_stat_cfg reset_stat; struct set_link_state set_link; struct change_link_mode_msg mode; + struct xcast xcast; }; }; @@ -239,5 +248,6 @@ void nicvf_mbox_cfg_done(struct nicvf *nic); void nicvf_mbox_link_change(struct nicvf *nic); void nicvf_mbox_reset_xcast(struct nicvf *nic); int nicvf_mbox_change_mode(struct nicvf *nic, struct change_link_mode *cfg); +int nicvf_mbox_set_xcast(struct nicvf *nic, uint8_t mode, uint64_t mac); #endif /* __THUNDERX_NICVF_MBOX__ */ diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c index a504d41dfe..49016327a0 100644 --- a/drivers/net/thunderx/nicvf_ethdev.c +++ b/drivers/net/thunderx/nicvf_ethdev.c @@ -58,6 +58,10 @@ RTE_LOG_REGISTER_SUFFIX(nicvf_logtype_driver, driver, NOTICE); #define NICVF_QLM_MODE_SGMII 7 #define NICVF_QLM_MODE_XFI 12 +#define BCAST_ACCEPT 0x01 +#define CAM_ACCEPT(1 << 3) +#define BGX_MCAST_MODE(x) ((x) << 1) + enum nicvf_link_speed { NICVF_LINK_SPEED_SGMII, NICVF_LINK_SPEED_XAUI, @@ -2183,9 +2187,22 @@ nicvf_eth_dev_uninit(struct rte_eth_dev *dev) nicvf_dev_close(dev); return 0; } + +static inline uint64_t ether_addr_to_u64(uint8_t *addr) +{ + uint64_t u = 0; + int i; + + for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) + u = u << 8 | addr[i]; + + return u; +} + static int nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) { + uint8_t dmac_ctrl_reg = 0; int ret; struct rte_pci_device *pci_dev; struct nicvf *nic = nicvf_pmd_priv(eth_dev); @@ -2309,6 +2326,15 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) goto malloc_fail; } + /* set DMAC CTRL reg to allow MAC */ + dmac_ctrl_reg = BCAST_ACCEPT | BGX_MCAST_MODE(2) | 
CAM_ACCEPT; + ret = nicvf_mbox_set_xcast(nic, dmac_ctrl_reg, + ether_addr_to_u64(nic->mac_addr)); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to set mac addr"); + goto malloc_fail; + } + ret = nicvf_set_first_skip(eth_dev); if (ret) { PMD_INIT_LOG(ERR, "Failed to configure first skip"); -- 2.25.1
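A recap of the DMAC control value the patch programs, in isolation. The three defines mirror those added in nicvf_ethdev.c; the wrapper function name is an illustrative assumption.

#define BCAST_ACCEPT      0x01
#define BGX_MCAST_MODE(x) ((x) << 1)
#define CAM_ACCEPT        (1 << 3)

static int
nicvf_allow_own_mac(struct nicvf *nic)
{
	/* Accept broadcast, multicast mode 2, and unicast CAM matches. */
	uint8_t mode = BCAST_ACCEPT | BGX_MCAST_MODE(2) | CAM_ACCEPT;

	/* The 48-bit MAC is packed MSB-first into a u64, as in the patch. */
	return nicvf_mbox_set_xcast(nic, mode,
				    ether_addr_to_u64(nic->mac_addr));
}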
[PATCH v3 1/3] ethdev: introduce pool sort capability
This patch adds support for the pool sort capability. Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD to choose a memory pool based on the packet's length. This is often useful for saving the memory where the application can create a different pool to steer the specific size of the packet, thus enabling effective use of memory. For example, let's say HW has a capability of three pools, - pool-1 size is 2K - pool-2 size is > 2K and < 4K - pool-3 size is > 4K Here, pool-1 can accommodate packets with sizes < 2K pool-2 can accommodate packets with sizes > 2K and < 4K pool-3 can accommodate packets with sizes > 4K With pool sort capability enabled in SW, an application may create three pools of different sizes and send them to PMD. Allowing PMD to program HW based on packet lengths. So that packets with less than 2K are received on pool-1, packets with lengths between 2K and 4K are received on pool-2 and finally packets greater than 4K are received on pool-3. The following two capabilities are added to the rte_eth_rxseg_capa structure, 1. pool_sort --> tells pool sort capability is supported by HW. 2. max_npool --> max number of pools supported by HW. Defined new structure rte_eth_rxseg_sort, to be used only when pool sort capability is present. If required this may be extended further to support more configurations. Signed-off-by: Hanumanth Pothula v3: - Implemented Pool Sort capability as new Rx offload capability, RTE_ETH_RX_OFFLOAD_BUFFER_SORT. v2: - Along with spec changes, uploading testpmd and driver changes. --- lib/ethdev/rte_ethdev.c | 69 ++--- lib/ethdev/rte_ethdev.h | 24 +- 2 files changed, 88 insertions(+), 5 deletions(-) diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 1979dc0850..5152c08f1e 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -1634,6 +1634,58 @@ rte_eth_dev_is_removed(uint16_t port_id) return ret; } +static int +rte_eth_rx_queue_check_sort(const struct rte_eth_rxseg_sort *rx_seg, +uint16_t n_seg, uint32_t *mbp_buf_size, +const struct rte_eth_dev_info *dev_info) +{ + const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; + uint16_t seg_idx; + + if (!seg_capa->multi_pools || n_seg > seg_capa->max_npool) { + RTE_ETHDEV_LOG(ERR, + "Invalid capabilities, multi_pools:%d different length segments %u exceed supported %u\n", + seg_capa->multi_pools, n_seg, seg_capa->max_nseg); + return -EINVAL; + } + + for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { + struct rte_mempool *mpl = rx_seg[seg_idx].mp; + uint32_t length = rx_seg[seg_idx].length; + + if (mpl == NULL) { + RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); + return -EINVAL; + } + + if (mpl->private_data_size < + sizeof(struct rte_pktmbuf_pool_private)) { + RTE_ETHDEV_LOG(ERR, + "%s private_data_size %u < %u\n", + mpl->name, mpl->private_data_size, + (unsigned int)sizeof + (struct rte_pktmbuf_pool_private)); + return -ENOSPC; + } + + *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); + /* On segment length == 0, update segment's length with +* the pool's length - headeroom space, to make sure enough +* space is accomidate for header. +**/ + length = length != 0 ? 
length : (*mbp_buf_size - RTE_PKTMBUF_HEADROOM); + if (*mbp_buf_size < length + RTE_PKTMBUF_HEADROOM) { + RTE_ETHDEV_LOG(ERR, + "%s mbuf_data_room_size %u < %u))\n", + mpl->name, *mbp_buf_size, + length); + return -EINVAL; + } + } + + return 0; +} + static int rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg, uint32_t *mbp_buf_size, @@ -1764,7 +1816,6 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, return -EINVAL; } } else { - const struct rte_eth_rxseg_split *rx_seg; uint16_t n_seg; /* Extended multi-segment configura
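The zero-length rule in rte_eth_rx_queue_check_sort(), restated as a standalone helper: length == 0 means "whole mbuf data room minus headroom", and the pool's data room must then hold length plus headroom. The helper name is illustrative; the calls and macro are standard DPDK.

#include <errno.h>
#include <rte_mbuf.h>

static int
check_sort_seg(struct rte_mempool *mp, uint32_t length)
{
	uint32_t buf_size = rte_pktmbuf_data_room_size(mp);

	if (length == 0)
		length = buf_size - RTE_PKTMBUF_HEADROOM;
	if (buf_size < length + RTE_PKTMBUF_HEADROOM)
		return -EINVAL;	/* pool too small for this size class */
	return 0;
}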
[PATCH v3 2/3] app/testpmd: add support for pool sort capability
This patch adds support for the pool sort capability. Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD to choose a memory pool based on the packet's length. Populate Rx Sort/Split attributes based on the Rx offload value. Also, print pool name on which packet is received. Signed-off-by: Hanumanth Pothula --- app/test-pmd/testpmd.c | 31 ++- app/test-pmd/util.c| 4 ++-- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index addcbcac85..57f1d806b1 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2661,7 +2661,8 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, int ret; if (rx_pkt_nb_segs <= 1 || - (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { + (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT || +rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SORT) == 0) { rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -2670,7 +2671,8 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, goto exit; } for (i = 0; i < rx_pkt_nb_segs; i++) { - struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + struct rte_eth_rxseg_split *rx_split = &rx_useg[i].split; + struct rte_eth_rxseg_sort *rx_sort = &rx_useg[i].sort; struct rte_mempool *mpx; /* * Use last valid pool for the segments with number @@ -2678,13 +2680,24 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, */ mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + /** +* On Segment length zero, update length as, +* buffer size - headroom size +* to make sure enough space is accomidate for header. +*/ + rx_split->length = rx_pkt_seg_lengths[i] ? + rx_pkt_seg_lengths[i] : + mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM; + rx_split->offset = i < rx_pkt_nb_offs ? + rx_pkt_seg_offsets[i] : 0; + rx_split->mp = mpx ? mpx : mp; + } else if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SORT) { + rx_sort->length = rx_pkt_seg_lengths[i] ? + rx_pkt_seg_lengths[i] : + mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM; + rx_sort->mp = mpx ? mpx : mp; + } } rx_conf->rx_nseg = rx_pkt_nb_segs; rx_conf->rx_seg = rx_useg; diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index fd98e8b51d..f9df5f69ef 100644 --- a/app/test-pmd/util.c +++ b/app/test-pmd/util.c @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], print_ether_addr(" - dst=", ð_hdr->dst_addr, print_buf, buf_size, &cur_len); MKDUMPSTR(print_buf, buf_size, cur_len, - " - type=0x%04x - length=%u - nb_segs=%d", - eth_type, (unsigned int) mb->pkt_len, + " - pool=%s - type=0x%04x - length=%u - nb_segs=%d", + mb->pool->name, eth_type, (unsigned int) mb->pkt_len, (int)mb->nb_segs); ol_flags = mb->ol_flags; if (ol_flags & RTE_MBUF_F_RX_RSS_HASH) { -- 2.25.1
[PATCH v3 3/3] net/cnxk: introduce pool sort capability
Presently, HW is programmed only to receive packets from LPB pool. Making all packets received from LPB pool. But, CNXK HW supports two pools, - SPB -> packets with smaller size (less than 4K) - LPB -> packets with bigger size (greater than 4K) Patch enables pool sorting capability, pool is selected based on packet's length. So, basically, PMD programs HW for receiving packets from both SPB and LPB pools based on the packet's length. This is achieved by enabling rx buffer split offload, RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT. This allows the application to send more than one pool(in our case two) to the driver, with different segment(packet) lengths, which helps the driver to configure both pools based on segment lengths. This is often useful for saving the memory where the application can create a different pool to steer the specific size of the packet, thus enabling effective use of memory. Signed-off-by: Hanumanth Pothula --- doc/guides/nics/features/cnxk.ini | 1 + doc/guides/nics/features/cnxk_vec.ini | 1 + drivers/net/cnxk/cnxk_ethdev.c| 93 --- drivers/net/cnxk/cnxk_ethdev.h| 4 +- drivers/net/cnxk/cnxk_ethdev_ops.c| 7 ++ 5 files changed, 96 insertions(+), 10 deletions(-) diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini index 1876fe86c7..e1584ed740 100644 --- a/doc/guides/nics/features/cnxk.ini +++ b/doc/guides/nics/features/cnxk.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features. ; [Features] +pool sort = Y Speed capabilities = Y Rx interrupt = Y Lock-free Tx queue = Y diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini index 5d0976e6ce..a63d35aae7 100644 --- a/doc/guides/nics/features/cnxk_vec.ini +++ b/doc/guides/nics/features/cnxk_vec.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features. 
; [Features] +pool sort = Y Speed capabilities = Y Rx interrupt = Y Lock-free Tx queue = Y diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c index cfcc4df916..376c5274d3 100644 --- a/drivers/net/cnxk/cnxk_ethdev.c +++ b/drivers/net/cnxk/cnxk_ethdev.c @@ -537,6 +537,64 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid) plt_free(txq_sp); } +static int +cnxk_nix_process_rx_conf(const struct rte_eth_rxconf *rx_conf, +struct rte_mempool **lpb_pool, struct rte_mempool **spb_pool, +uint16_t *lpb_len, uint16_t *spb_len) +{ + struct rte_eth_rxseg_sort rx_seg0; + struct rte_eth_rxseg_sort rx_seg1; + const char *platform_ops; + struct rte_mempool_ops *ops; + + if (*lpb_pool || !rx_conf->rx_seg || rx_conf->rx_nseg != CNXK_NIX_NUM_POOLS_MAX || + !rx_conf->rx_seg[0].sort.mp || !rx_conf->rx_seg[1].sort.mp) { + plt_err("invalid arguments"); + return -EINVAL; + } + + rx_seg0 = rx_conf->rx_seg[0].sort; + rx_seg1 = rx_conf->rx_seg[1].sort; + + if (rx_seg0.length >= rx_seg0.mp->elt_size || rx_seg1.length >= rx_seg1.mp->elt_size) { + plt_err("mismatch in packet length & pool length seg0_len:%u pool0_len:%u"\ + "seg1_len:%u pool1_len:%u", rx_seg0.length, rx_seg0.mp->elt_size, + rx_seg1.length, rx_seg1.mp->elt_size); + return -EINVAL; + } + + if (rx_seg0.length > rx_seg1.length) { + *lpb_pool = rx_seg0.mp; + *spb_pool = rx_seg1.mp; + + *lpb_len = rx_seg0.length; + *spb_len = rx_seg1.length; + } else { + *lpb_pool = rx_seg1.mp; + *spb_pool = rx_seg0.mp; + + *lpb_len = rx_seg1.length; + *spb_len = rx_seg0.length; + } + + if ((*spb_pool)->pool_id == 0) { + plt_err("Invalid pool_id"); + return -EINVAL; + } + + platform_ops = rte_mbuf_platform_mempool_ops(); + ops = rte_mempool_get_ops((*spb_pool)->ops_index); + if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) { + plt_err("mempool ops should be of cnxk_npa type"); + return -EINVAL; + } + + plt_info("spb_pool:%s lpb_pool:%s lpb_len:%u spb_len:%u\n", (*spb_pool)->name, +(*lpb_pool)->name, *lpb_len, *spb_len); + + return 0; +} + int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint32_t nb_desc, uint16_t fp_rx_q_sz, @@ -553,6 +611,10 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t first_skip; int rc = -EINVAL; size_t rxq_sz; + uint16_t lpb_len = 0; + ui
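A minimal restatement of the SPB/LPB selection rule in cnxk_nix_process_rx_conf(): the sort segment with the larger length supplies the large-packet-buffer (LPB) pool, the other the small-packet-buffer (SPB) pool. rte_eth_rxseg_sort is the layout proposed in patch 1/3 of this series; the helper name is an assumption.

#include <rte_ethdev.h>

static void
pick_spb_lpb(const struct rte_eth_rxseg_sort *s0,
	     const struct rte_eth_rxseg_sort *s1,
	     struct rte_mempool **lpb, struct rte_mempool **spb)
{
	if (s0->length > s1->length) {
		*lpb = s0->mp;	/* bigger size class -> LPB */
		*spb = s1->mp;
	} else {
		*lpb = s1->mp;
		*spb = s0->mp;
	}
}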
[PATCH v4 1/3] ethdev: add support for multiple mbuf pools per Rx queue
This patch adds support for multiple mempool capability. Some of the HW has support for choosing memory pools based on the packet's size. Thiscapability allows PMD to choose a memory pool based on the packet's length. This is often useful for saving the memory where the application can create a different pool to steer the specific size of the packet, thus enabling effective use of memory. For example, let's say HW has a capability of three pools, - pool-1 size is 2K - pool-2 size is > 2K and < 4K - pool-3 size is > 4K Here, pool-1 can accommodate packets with sizes < 2K pool-2 can accommodate packets with sizes > 2K and < 4K pool-3 can accommodate packets with sizes > 4K With multiple mempool capability enabled in SW, an application may create three pools of different sizes and send them to PMD. Allowing PMD to program HW based on the packet lengths. So that packets with less than 2K are received on pool-1, packets with lengths between 2K and 4K are received on pool-2 and finally packets greater than 4K are received on pool-3. Signed-off-by: Hanumanth Pothula v4: - Renamed Offload capability name from RTE_ETH_RX_OFFLOAD_BUFFER_SORT to RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL. - In struct rte_eth_rxconf, defined new pointer, which holds array of type struct rte_eth_rx_mempool(memory pools). This array is used by PMD to program multiple mempools. v3: - Implemented Pool Sort capability as new Rx offload capability, RTE_ETH_RX_OFFLOAD_BUFFER_SORT. v2: - Along with spec changes, uploading testpmd and driver changes. --- lib/ethdev/rte_ethdev.c | 78 ++--- lib/ethdev/rte_ethdev.h | 24 + 2 files changed, 89 insertions(+), 13 deletions(-) diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 1979dc0850..8618d6b01d 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -1634,6 +1634,45 @@ rte_eth_dev_is_removed(uint16_t port_id) return ret; } +static int +rte_eth_rx_queue_check_mempool(const struct rte_eth_rx_mempool *rx_mempool, + uint16_t n_pool, uint32_t *mbp_buf_size, + const struct rte_eth_dev_info *dev_info) +{ + uint16_t pool_idx; + + if (n_pool > dev_info->max_pools) { + RTE_ETHDEV_LOG(ERR, + "Invalid capabilities, max pools supported %u\n", + dev_info->max_pools); + return -EINVAL; + } + + for (pool_idx = 0; pool_idx < n_pool; pool_idx++) { + struct rte_mempool *mpl = rx_mempool[pool_idx].mp; + + if (mpl == NULL) { + RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); + return -EINVAL; + } + + *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); + if (*mbp_buf_size < dev_info->min_rx_bufsize + + RTE_PKTMBUF_HEADROOM) { + RTE_ETHDEV_LOG(ERR, + "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", + mpl->name, *mbp_buf_size, + RTE_PKTMBUF_HEADROOM + dev_info->min_rx_bufsize, + RTE_PKTMBUF_HEADROOM, + dev_info->min_rx_bufsize); + return -EINVAL; + } + + } + + return 0; +} + static int rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg, uint32_t *mbp_buf_size, @@ -1733,7 +1772,8 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, if (mp != NULL) { /* Single pool configuration check. 
*/ - if (rx_conf != NULL && rx_conf->rx_nseg != 0) { + if (rx_conf != NULL && + (rx_conf->rx_nseg != 0 || rx_conf->rx_npool)) { RTE_ETHDEV_LOG(ERR, "Ambiguous segment configuration\n"); return -EINVAL; @@ -1763,30 +1803,42 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, dev_info.min_rx_bufsize); return -EINVAL; } - } else { - const struct rte_eth_rxseg_split *rx_seg; - uint16_t n_seg; + } else if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT || + rx_conf->offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) { - /* Extended multi-segment configuration check. */ - if (rx_conf == NULL || rx_conf->rx_seg == NULL || rx_conf->rx_nseg == 0) { + /* Extende
[PATCH v4 2/3] app/testpmd: add support for multiple mbuf pools per Rx queue
This patch adds support for the mulitiple mempool. Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD to choose a memory pool based on the packet's length. On multiple mempool support enabled, populate mempool array and also print pool name on which packet is received. Signed-off-by: Hanumanth Pothula --- app/test-pmd/testpmd.c | 41 + app/test-pmd/testpmd.h | 3 +++ app/test-pmd/util.c| 4 ++-- 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 77741fc41f..d16a552e6d 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2624,11 +2624,13 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + struct rte_eth_rx_mempool rx_mempool[MAX_MEMPOOL] = {}; unsigned int i, mp_n; int ret; if (rx_pkt_nb_segs <= 1 || - (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { + (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT || +rx_conf->offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) == 0) { rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -2637,7 +2639,8 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, goto exit; } for (i = 0; i < rx_pkt_nb_segs; i++) { - struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + struct rte_eth_rxseg_split *rx_split = &rx_useg[i].split; + struct rte_eth_rx_mempool *mempool = &rx_mempool[i]; struct rte_mempool *mpx; /* * Use last valid pool for the segments with number @@ -2645,16 +2648,30 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, */ mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; - } - rx_conf->rx_nseg = rx_pkt_nb_segs; - rx_conf->rx_seg = rx_useg; + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + /** +* On Segment length zero, update length as, +* buffer size - headroom size +* to make sure enough space is accomidate for header. +*/ + rx_split->length = rx_pkt_seg_lengths[i] ? + rx_pkt_seg_lengths[i] : + mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM; + rx_split->offset = i < rx_pkt_nb_offs ? + rx_pkt_seg_offsets[i] : 0; + rx_split->mp = mpx ? mpx : mp; + } + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) + mempool->mp = mpx ? 
mpx : mp; + } + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + rx_conf->rx_nseg = rx_pkt_nb_segs; + rx_conf->rx_seg = rx_useg; + } + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) { + rx_conf->rx_mempool = rx_mempool; + rx_conf->rx_npool = rx_pkt_nb_segs; + } ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, socket_id, rx_conf, NULL); rx_conf->rx_seg = NULL; diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index ddf5e21849..15a26171e2 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -82,6 +82,9 @@ extern uint8_t cl_quit; #define MIN_TOTAL_NUM_MBUFS 1024 +/* Maximum number of pools supprted per Rx queue */ +#define MAX_MEMPOOL 8 + typedef uint8_t lcoreid_t; typedef uint16_t portid_t; typedef uint16_t queueid_t; diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index fd98e8b51d..f9df5f69ef 100644 --- a/app/test-pmd/util.c +++ b/app/test-pmd/util.c @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], print_ether_addr(" - dst=", ð_hdr->dst_addr, print_b
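One defensive detail worth noting (not in the patch): rx_mempool[] holds MAX_MEMPOOL (8) entries while the fill loop runs rx_pkt_nb_segs times, so a clamp along these lines would keep the array in bounds if a user configures more segments than pools. Purely a hypothetical hardening sketch.

/* Hypothetical clamp, not part of the patch. */
static inline unsigned int
clamp_pool_count(unsigned int nb_segs)
{
	return nb_segs > MAX_MEMPOOL ? MAX_MEMPOOL : nb_segs;
}

/* usage sketch: rx_conf->rx_npool = clamp_pool_count(rx_pkt_nb_segs); */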
[PATCH v4 3/3] net/cnxk: add support for multiple mbuf pools
Presently, HW is programmed only to receive packets from LPB pool. Making all packets received from LPB pool. But, CNXK HW supports two pools, - SPB -> packets with smaller size (less than 4K) - LPB -> packets with bigger size (greater than 4K) Patch enables multiple mempool capability, pool is selected based on the packet's length. So, basically, PMD programs HW for receiving packets from both SPB and LPB pools based on the packet's length. This is achieved by enabling rx multiple mempool offload, RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL. This allows the application to send more than one pool(in our case two) to the driver, with different segment(packet) lengths, which helps the driver to configure both pools based on segment lengths. This is often useful for saving the memory where the application can create a different pool to steer the specific size of the packet, thus enabling effective use of memory. Signed-off-by: Hanumanth Pothula --- doc/guides/nics/features/cnxk.ini | 1 + doc/guides/nics/features/cnxk_vec.ini | 1 + drivers/net/cnxk/cnxk_ethdev.c| 77 +++ drivers/net/cnxk/cnxk_ethdev.h| 4 +- drivers/net/cnxk/cnxk_ethdev_ops.c| 3 ++ 5 files changed, 76 insertions(+), 10 deletions(-) diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini index 1876fe86c7..ed778ba398 100644 --- a/doc/guides/nics/features/cnxk.ini +++ b/doc/guides/nics/features/cnxk.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features. ; [Features] +multiple mempools= Y Speed capabilities = Y Rx interrupt = Y Lock-free Tx queue = Y diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini index 5d0976e6ce..c2270fe338 100644 --- a/doc/guides/nics/features/cnxk_vec.ini +++ b/doc/guides/nics/features/cnxk_vec.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features. 
; [Features] +multiple mempools= Y Speed capabilities = Y Rx interrupt = Y Lock-free Tx queue = Y diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c index a089cc463b..5c962d6388 100644 --- a/drivers/net/cnxk/cnxk_ethdev.c +++ b/drivers/net/cnxk/cnxk_ethdev.c @@ -537,6 +537,51 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid) plt_free(txq_sp); } +static int +cnxk_nix_process_rx_conf(const struct rte_eth_rxconf *rx_conf, +struct rte_mempool **lpb_pool, struct rte_mempool **spb_pool) +{ + struct rte_mempool *pool0; + struct rte_mempool *pool1; + const char *platform_ops; + struct rte_mempool_ops *ops; + + if (*lpb_pool || !rx_conf->rx_mempool || + rx_conf->rx_npool != CNXK_NIX_NUM_POOLS_MAX) { + plt_err("invalid arguments"); + return -EINVAL; + } + + pool0 = rx_conf->rx_mempool[0].mp; + pool1 = rx_conf->rx_mempool[1].mp; + + if (pool0->elt_size > pool1->elt_size) { + *lpb_pool = pool0; + *spb_pool = pool1; + + } else { + *lpb_pool = pool1; + *spb_pool = pool0; + } + + if ((*spb_pool)->pool_id == 0) { + plt_err("Invalid pool_id"); + return -EINVAL; + } + + platform_ops = rte_mbuf_platform_mempool_ops(); + ops = rte_mempool_get_ops((*spb_pool)->ops_index); + if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) { + plt_err("mempool ops should be of cnxk_npa type"); + return -EINVAL; + } + + plt_info("spb_pool:%s lpb_pool:%s lpb_len:%u spb_len:%u\n", (*spb_pool)->name, +(*lpb_pool)->name, (*lpb_pool)->elt_size, (*spb_pool)->elt_size); + + return 0; +} + int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint32_t nb_desc, uint16_t fp_rx_q_sz, @@ -553,6 +598,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t first_skip; int rc = -EINVAL; size_t rxq_sz; + struct rte_mempool *lpb_pool = mp; + struct rte_mempool *spb_pool = NULL; /* Sanity checks */ if (rx_conf->rx_deferred_start == 1) { @@ -560,15 +607,21 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, goto fail; } + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) { + rc = cnxk_nix_process_rx_conf(rx_conf, &lpb_pool, &spb_pool); + if (rc) + goto fail; + } + platform_ops = rte_mbuf_platform_mempool_ops(); /* This driver needs cnxk_npa mempool ops to work */ - ops = rte_mempool_get_ops(mp->ops_index); + ops = rte_mempool_get_ops(lpb_pool->ops_index);
[PATCH v5 1/3] ethdev: support multiple mbuf pools per Rx queue
This patch adds support for the multiple mempool capability. Some HW can choose a memory pool based on the packet's size; this capability allows the PMD to select a memory pool based on the packet's length. This is often useful for saving memory: the application can create different pools to steer specific packet sizes, enabling effective use of memory. For example, let's say HW has a capability of three pools, - pool-1 size is 2K - pool-2 size is > 2K and < 4K - pool-3 size is > 4K Here, pool-1 can accommodate packets with sizes < 2K pool-2 can accommodate packets with sizes > 2K and < 4K pool-3 can accommodate packets with sizes > 4K With the multiple mempool capability enabled in SW, an application may create three pools of different sizes and pass them to the PMD, allowing the PMD to program the HW based on the packet lengths, so that packets shorter than 2K are received on pool-1, packets with lengths between 2K and 4K on pool-2, and packets longer than 4K on pool-3. Signed-off-by: Hanumanth Pothula v5: - Declared memory pools as struct rte_mempool **rx_mempools rather than as struct rte_mempool *mp. - Added the feature in release notes. - Updated conditions and strings as per review comments. v4: - Renamed the offload capability from RTE_ETH_RX_OFFLOAD_BUFFER_SORT to RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL. - In struct rte_eth_rxconf, defined a new pointer which holds an array of type struct rte_eth_rx_mempool (memory pools). This array is used by the PMD to program multiple mempools. v3: - Implemented pool sort capability as a new Rx offload capability, RTE_ETH_RX_OFFLOAD_BUFFER_SORT. v2: - Along with spec changes, uploading testpmd and driver changes. --- doc/guides/rel_notes/release_22_11.rst | 6 +++ lib/ethdev/rte_ethdev.c| 74 ++ lib/ethdev/rte_ethdev.h| 22 3 files changed, 92 insertions(+), 10 deletions(-) diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst index 2e076ba2ad..26ca22efe0 100644 --- a/doc/guides/rel_notes/release_22_11.rst +++ b/doc/guides/rel_notes/release_22_11.rst @@ -55,6 +55,12 @@ New Features Also, make sure to start the actual text at the margin. === +* **Added ethdev support for multiple mbuf pools per Rx queue.** + + * Added new Rx offload flag ``RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL`` to support +multiple mbuf pools per Rx queue. This capability allows a PMD to choose +a memory pool based on the packet's length. + * **Updated Wangxun ngbe driver.** * Added support to set device link down/up.
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 1979dc0850..eed4834e6b 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -1634,6 +1634,44 @@ rte_eth_dev_is_removed(uint16_t port_id) return ret; } +static int +rte_eth_rx_queue_check_mempool(struct rte_mempool **rx_mempool, + uint16_t n_pool, uint32_t *mbp_buf_size, + const struct rte_eth_dev_info *dev_info) +{ + uint16_t pool_idx; + + if (n_pool > dev_info->max_pools) { + RTE_ETHDEV_LOG(ERR, + "Too many Rx mempools %u vs maximum %u\n", + n_pool, dev_info->max_pools); + return -EINVAL; + } + + for (pool_idx = 0; pool_idx < n_pool; pool_idx++) { + struct rte_mempool *mpl = rx_mempool[pool_idx]; + + if (mpl == NULL) { + RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n"); + return -EINVAL; + } + + *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); + if (*mbp_buf_size < dev_info->min_rx_bufsize + + RTE_PKTMBUF_HEADROOM) { + RTE_ETHDEV_LOG(ERR, + "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", + mpl->name, *mbp_buf_size, + RTE_PKTMBUF_HEADROOM + dev_info->min_rx_bufsize, + RTE_PKTMBUF_HEADROOM, + dev_info->min_rx_bufsize); + return -EINVAL; + } + } + + return 0; +} + static int rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg, uint32_t *mbp_buf_size, @@ -1733,9 +1771,12 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, if (mp != NULL) { /* Single pool configuration check. */ -
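A sketch of sizing an Rx pool so it passes the new rte_eth_rx_queue_check_mempool() test, i.e. data room >= min_rx_bufsize + RTE_PKTMBUF_HEADROOM. The helper name, cache size and counts are illustrative assumptions; the ethdev calls are standard.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static struct rte_mempool *
create_rx_pool(uint16_t port_id, const char *name, unsigned int n,
	       uint32_t data_room, int socket_id)
{
	struct rte_eth_dev_info dev_info;
	uint32_t min_room;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return NULL;

	min_room = dev_info.min_rx_bufsize + RTE_PKTMBUF_HEADROOM;
	if (data_room < min_room)
		data_room = min_room;	/* would otherwise fail with -EINVAL */

	return rte_pktmbuf_pool_create(name, n, 256, 0,
				       (uint16_t)data_room, socket_id);
}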
[PATCH v5 2/3] net/cnxk: support multiple mbuf pools per Rx queue
Presently, HW is programmed only to receive packets from LPB pool. Making all packets received from LPB pool. But, CNXK HW supports two pools, - SPB -> packets with smaller size (less than 4K) - LPB -> packets with bigger size (greater than 4K) Patch enables multiple mempool capability, pool is selected based on the packet's length. So, basically, PMD programs HW for receiving packets from both SPB and LPB pools based on the packet's length. This is achieved by enabling rx multiple mempool offload, RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL. This allows the application to send more than one pool(in our case two) to the driver, with different segment(packet) lengths, which helps the driver to configure both pools based on segment lengths. This is often useful for saving the memory where the application can create a different pool to steer the specific size of the packet, thus enabling effective use of memory. Signed-off-by: Hanumanth Pothula --- doc/guides/nics/features/cnxk.ini | 1 + doc/guides/nics/features/cnxk_vec.ini | 1 + drivers/net/cnxk/cnxk_ethdev.c| 84 --- drivers/net/cnxk/cnxk_ethdev.h| 4 +- drivers/net/cnxk/cnxk_ethdev_ops.c| 3 + 5 files changed, 83 insertions(+), 10 deletions(-) diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini index 1876fe86c7..ed778ba398 100644 --- a/doc/guides/nics/features/cnxk.ini +++ b/doc/guides/nics/features/cnxk.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features. ; [Features] +multiple mempools= Y Speed capabilities = Y Rx interrupt = Y Lock-free Tx queue = Y diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini index 5d0976e6ce..c2270fe338 100644 --- a/doc/guides/nics/features/cnxk_vec.ini +++ b/doc/guides/nics/features/cnxk_vec.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features. 
; [Features] +multiple mempools= Y Speed capabilities = Y Rx interrupt = Y Lock-free Tx queue = Y diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c index ce896338d9..6d525036ff 100644 --- a/drivers/net/cnxk/cnxk_ethdev.c +++ b/drivers/net/cnxk/cnxk_ethdev.c @@ -541,6 +541,58 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid) plt_free(txq_sp); } +static int +cnxk_nix_process_rx_conf(const struct rte_eth_rxconf *rx_conf, +struct rte_mempool **lpb_pool, +struct rte_mempool **spb_pool) +{ + struct rte_mempool *pool0; + struct rte_mempool *pool1; + struct rte_mempool **mp = rx_conf->rx_mempools; + const char *platform_ops; + struct rte_mempool_ops *ops; + + if (*lpb_pool || + rx_conf->rx_npool != CNXK_NIX_NUM_POOLS_MAX) { + plt_err("invalid arguments"); + return -EINVAL; + } + + if (mp == NULL || mp[0] == NULL || mp[1] == NULL) { + plt_err("invalid memory pools\n"); + return -EINVAL; + } + + pool0 = mp[0]; + pool1 = mp[1]; + + if (pool0->elt_size > pool1->elt_size) { + *lpb_pool = pool0; + *spb_pool = pool1; + + } else { + *lpb_pool = pool1; + *spb_pool = pool0; + } + + if ((*spb_pool)->pool_id == 0) { + plt_err("Invalid pool_id"); + return -EINVAL; + } + + platform_ops = rte_mbuf_platform_mempool_ops(); + ops = rte_mempool_get_ops((*spb_pool)->ops_index); + if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) { + plt_err("mempool ops should be of cnxk_npa type"); + return -EINVAL; + } + + plt_info("spb_pool:%s lpb_pool:%s lpb_len:%u spb_len:%u\n", (*spb_pool)->name, +(*lpb_pool)->name, (*lpb_pool)->elt_size, (*spb_pool)->elt_size); + + return 0; +} + int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint32_t nb_desc, uint16_t fp_rx_q_sz, @@ -557,6 +609,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t first_skip; int rc = -EINVAL; size_t rxq_sz; + struct rte_mempool *lpb_pool = mp; + struct rte_mempool *spb_pool = NULL; /* Sanity checks */ if (rx_conf->rx_deferred_start == 1) { @@ -564,15 +618,21 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, goto fail; } + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) { + rc = cnxk_nix_process_rx_conf(rx_conf, &lpb_pool, &spb_pool); + if (rc) + goto fail; + } + platform_ops = rte_mbuf_platform_mem
[PATCH v5 3/3] app/testpmd: support multiple mbuf pools per Rx queue
This patch adds support for the multiple mempool feature. Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD to choose a memory pool based on the packet's length. On multiple mempool support enabled, populate mempool array and also print pool name on which packet is received. Signed-off-by: Hanumanth Pothula --- app/test-pmd/testpmd.c | 44 ++ app/test-pmd/testpmd.h | 3 +++ app/test-pmd/util.c| 4 ++-- 3 files changed, 37 insertions(+), 14 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 77741fc41f..1dbddf7b43 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2624,11 +2624,13 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; unsigned int i, mp_n; int ret; if (rx_pkt_nb_segs <= 1 || - (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { + (rx_conf->offloads & (RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT | +RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL)) == 0) { rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -2637,7 +2639,9 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, goto exit; } for (i = 0; i < rx_pkt_nb_segs; i++) { - struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + struct rte_eth_rxseg_split *rx_split = &rx_useg[i].split; + struct rte_mempool *mempool; + struct rte_mempool *mpx; /* * Use last valid pool for the segments with number @@ -2645,16 +2649,32 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, */ mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; - } - rx_conf->rx_nseg = rx_pkt_nb_segs; - rx_conf->rx_seg = rx_useg; + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + /** +* On segment length zero, update the length as +* buffer size minus headroom size, +* to make sure enough space is accommodated for the header. +*/ + rx_split->length = rx_pkt_seg_lengths[i] ? + rx_pkt_seg_lengths[i] : + mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM; + rx_split->offset = i < rx_pkt_nb_offs ? + rx_pkt_seg_offsets[i] : 0; + rx_split->mp = mpx ? mpx : mp; + } + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) { + mempool = mpx ? mpx : mp; + rx_mempool[i] = mempool; + } + } + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + rx_conf->rx_nseg = rx_pkt_nb_segs; + rx_conf->rx_seg = rx_useg; + } + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) { + rx_conf->rx_mempools = rx_mempool; + rx_conf->rx_npool = rx_pkt_nb_segs; + } ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, socket_id, rx_conf, NULL); rx_conf->rx_seg = NULL; diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index ddf5e21849..15a26171e2 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -82,6 +82,9 @@ extern uint8_t cl_quit; #define MIN_TOTAL_NUM_MBUFS 1024 +/* Maximum number of pools supported per Rx queue */ +#define MAX_MEMPOOL 8 + typedef uint8_t lcoreid_t; typedef uint16_t portid_t; typedef uint16_t queueid_t; diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index fd98e8b51d..f9df5f69ef 100644 --- a/app/test-pmd/util.c +++ b/app/test-pmd/util.c @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], print_ether_addr(" - dst=", &eth_hdr->dst_addr, print_buf, buf_size,
[PATCH v6 1/3] ethdev: support multiple mbuf pools per Rx queue
This patch adds support for the multiple mempool capability. Some of the HW has support for choosing memory pools based on the packet's size. The capability allows PMD to choose a memory pool based on the packet's length. This is often useful for saving memory, where the application can create a different pool to steer a specific size of packet, thus enabling effective use of memory. For example, let's say HW has a capability of three pools, - pool-1 size is 2K - pool-2 size is > 2K and < 4K - pool-3 size is > 4K Here, pool-1 can accommodate packets with sizes < 2K pool-2 can accommodate packets with sizes > 2K and < 4K pool-3 can accommodate packets with sizes > 4K With the multiple mempool capability enabled in SW, an application may create three pools of different sizes and send them to the PMD, allowing the PMD to program HW based on the packet lengths, so that packets with less than 2K are received on pool-1, packets with lengths between 2K and 4K are received on pool-2, and finally packets greater than 4K are received on pool-3. Signed-off-by: Hanumanth Pothula v6: - Updated release notes, release_22_11.rst. v5: - Declared memory pools as struct rte_mempool **rx_mempools rather than as struct rte_mempool *mp. - Added the feature in release notes. - Updated conditions and strings as per review comments. v4: - Renamed Offload capability name from RTE_ETH_RX_OFFLOAD_BUFFER_SORT to RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL. - In struct rte_eth_rxconf, defined new pointer, which holds array of type struct rte_eth_rx_mempool (memory pools). This array is used by PMD to program multiple mempools. v3: - Implemented Pool Sort capability as new Rx offload capability, RTE_ETH_RX_OFFLOAD_BUFFER_SORT. v2: - Along with spec changes, uploading testpmd and driver changes. --- doc/guides/rel_notes/release_22_11.rst | 6 +++ lib/ethdev/rte_ethdev.c| 74 ++ lib/ethdev/rte_ethdev.h| 22 3 files changed, 92 insertions(+), 10 deletions(-) diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst index 2e076ba2ad..8bb19155d9 100644 --- a/doc/guides/rel_notes/release_22_11.rst +++ b/doc/guides/rel_notes/release_22_11.rst @@ -55,6 +55,12 @@ New Features Also, make sure to start the actual text at the margin. === +* **Added ethdev support for multiple mbuf pools per Rx queue.** + + * Added new Rx offload flag ``RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL`` to support +multiple mbuf pools per Rx queue. This capability allows PMD to choose +a memory pool based on the packet's length. + * **Updated Wangxun ngbe driver.** * Added support to set device link down/up.
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 1979dc0850..eed4834e6b 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -1634,6 +1634,44 @@ rte_eth_dev_is_removed(uint16_t port_id) return ret; } +static int +rte_eth_rx_queue_check_mempool(struct rte_mempool **rx_mempool, + uint16_t n_pool, uint32_t *mbp_buf_size, + const struct rte_eth_dev_info *dev_info) +{ + uint16_t pool_idx; + + if (n_pool > dev_info->max_pools) { + RTE_ETHDEV_LOG(ERR, + "Too many Rx mempools %u vs maximum %u\n", + n_pool, dev_info->max_pools); + return -EINVAL; + } + + for (pool_idx = 0; pool_idx < n_pool; pool_idx++) { + struct rte_mempool *mpl = rx_mempool[pool_idx]; + + if (mpl == NULL) { + RTE_ETHDEV_LOG(ERR, "null Rx mempool pointer\n"); + return -EINVAL; + } + + *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); + if (*mbp_buf_size < dev_info->min_rx_bufsize + + RTE_PKTMBUF_HEADROOM) { + RTE_ETHDEV_LOG(ERR, + "%s mbuf_data_room_size %u < %u (RTE_PKTMBUF_HEADROOM=%u + min_rx_bufsize(dev)=%u)\n", + mpl->name, *mbp_buf_size, + RTE_PKTMBUF_HEADROOM + dev_info->min_rx_bufsize, + RTE_PKTMBUF_HEADROOM, + dev_info->min_rx_bufsize); + return -EINVAL; + } + } + + return 0; +} + static int rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg, uint32_t *mbp_buf_size, @@ -1733,9 +1771,12 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, if (mp != NULL) { /* Single pool confi
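For illustration, a minimal sketch of how an application might consume the API proposed above. Everything below is hypothetical: pool names, counts and sizes are invented, the offload flag RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL exists only in this revision of the series, and the count field rx_npool was renamed rx_nmempool in later revisions.

	#include <errno.h>
	#include <rte_ethdev.h>
	#include <rte_lcore.h>
	#include <rte_mbuf.h>

	static int
	setup_multi_pool_rxq(uint16_t port_id)
	{
		struct rte_eth_dev_info dev_info;
		struct rte_eth_rxconf rxconf;
		struct rte_mempool *pools[2];
		int ret;

		/* Two pools with different data room sizes (values illustrative). */
		pools[0] = rte_pktmbuf_pool_create("rx_pool_2k", 8192, 256, 0,
				2048 + RTE_PKTMBUF_HEADROOM, rte_socket_id());
		pools[1] = rte_pktmbuf_pool_create("rx_pool_4k", 8192, 256, 0,
				4096 + RTE_PKTMBUF_HEADROOM, rte_socket_id());
		if (pools[0] == NULL || pools[1] == NULL)
			return -ENOMEM;

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			return ret;

		rxconf = dev_info.default_rxconf;
		rxconf.offloads |= RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL;
		rxconf.rx_mempools = pools;	/* array of pool pointers */
		rxconf.rx_npool = 2;		/* rx_nmempool in later revisions */

		/* The single-pool argument is NULL when a pool array is given. */
		return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
					      &rxconf, NULL);
	}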
[PATCH v6 2/3] net/cnxk: support multiple mbuf pools per Rx queue
Presently, HW is programmed only to receive packets from the LPB pool, so all packets are received on the LPB pool. But CNXK HW supports two pools, - SPB -> packets with smaller size (less than 4K) - LPB -> packets with bigger size (greater than 4K) This patch enables the multiple mempool capability, where a pool is selected based on the packet's length. So, basically, the PMD programs HW for receiving packets from both SPB and LPB pools based on the packet's length. This is achieved by enabling the Rx multiple mempool offload, RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL. This allows the application to send more than one pool (in our case two) to the driver, with different segment (packet) lengths, which helps the driver to configure both pools based on segment lengths. This is often useful for saving memory, where the application can create a different pool to steer a specific size of packet, thus enabling effective use of memory. Signed-off-by: Hanumanth Pothula --- doc/guides/nics/features/cnxk.ini | 1 + doc/guides/nics/features/cnxk_vec.ini | 1 + drivers/net/cnxk/cnxk_ethdev.c| 84 --- drivers/net/cnxk/cnxk_ethdev.h| 4 +- drivers/net/cnxk/cnxk_ethdev_ops.c| 3 + 5 files changed, 83 insertions(+), 10 deletions(-) diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini index 1876fe86c7..ed778ba398 100644 --- a/doc/guides/nics/features/cnxk.ini +++ b/doc/guides/nics/features/cnxk.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features. ; [Features] +multiple mempools= Y Speed capabilities = Y Rx interrupt = Y Lock-free Tx queue = Y diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini index 5d0976e6ce..c2270fe338 100644 --- a/doc/guides/nics/features/cnxk_vec.ini +++ b/doc/guides/nics/features/cnxk_vec.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features.
; [Features] +multiple mempools= Y Speed capabilities = Y Rx interrupt = Y Lock-free Tx queue = Y diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c index ce896338d9..6d525036ff 100644 --- a/drivers/net/cnxk/cnxk_ethdev.c +++ b/drivers/net/cnxk/cnxk_ethdev.c @@ -541,6 +541,58 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid) plt_free(txq_sp); } +static int +cnxk_nix_process_rx_conf(const struct rte_eth_rxconf *rx_conf, +struct rte_mempool **lpb_pool, +struct rte_mempool **spb_pool) +{ + struct rte_mempool *pool0; + struct rte_mempool *pool1; + struct rte_mempool **mp = rx_conf->rx_mempools; + const char *platform_ops; + struct rte_mempool_ops *ops; + + if (*lpb_pool || + rx_conf->rx_npool != CNXK_NIX_NUM_POOLS_MAX) { + plt_err("invalid arguments"); + return -EINVAL; + } + + if (mp == NULL || mp[0] == NULL || mp[1] == NULL) { + plt_err("invalid memory pools\n"); + return -EINVAL; + } + + pool0 = mp[0]; + pool1 = mp[1]; + + if (pool0->elt_size > pool1->elt_size) { + *lpb_pool = pool0; + *spb_pool = pool1; + + } else { + *lpb_pool = pool1; + *spb_pool = pool0; + } + + if ((*spb_pool)->pool_id == 0) { + plt_err("Invalid pool_id"); + return -EINVAL; + } + + platform_ops = rte_mbuf_platform_mempool_ops(); + ops = rte_mempool_get_ops((*spb_pool)->ops_index); + if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) { + plt_err("mempool ops should be of cnxk_npa type"); + return -EINVAL; + } + + plt_info("spb_pool:%s lpb_pool:%s lpb_len:%u spb_len:%u\n", (*spb_pool)->name, +(*lpb_pool)->name, (*lpb_pool)->elt_size, (*spb_pool)->elt_size); + + return 0; +} + int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint32_t nb_desc, uint16_t fp_rx_q_sz, @@ -557,6 +609,8 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t first_skip; int rc = -EINVAL; size_t rxq_sz; + struct rte_mempool *lpb_pool = mp; + struct rte_mempool *spb_pool = NULL; /* Sanity checks */ if (rx_conf->rx_deferred_start == 1) { @@ -564,15 +618,21 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, goto fail; } + if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) { + rc = cnxk_nix_process_rx_conf(rx_conf, &lpb_pool, &spb_pool); + if (rc) + goto fail; + } + platform_ops = rte_mbuf_platform_mem
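To make the driver's pool-selection rule concrete, here is a hypothetical pool pair for this PMD; names and sizes are invented for illustration. Note that cnxk_nix_process_rx_conf() orders the pair by elt_size itself, so SPB and LPB may sit in either array slot, and on this platform rte_pktmbuf_pool_create() should resolve to the cnxk_npa mempool ops that the driver checks for.

	struct rte_mempool *mps[2];

	/* Small-packet pool: many small buffers (sizes illustrative). */
	mps[0] = rte_pktmbuf_pool_create("spb", 16384, 256, 0,
			2048 + RTE_PKTMBUF_HEADROOM, rte_socket_id());
	/* Large-packet pool: fewer jumbo-sized buffers. */
	mps[1] = rte_pktmbuf_pool_create("lpb", 2048, 256, 0,
			9200 + RTE_PKTMBUF_HEADROOM, rte_socket_id());
	/* rx_conf.rx_mempools = mps; rx_conf.rx_npool = 2, matching
	 * CNXK_NIX_NUM_POOLS_MAX, is what cnxk_nix_process_rx_conf() expects. */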
[PATCH v6 3/3] app/testpmd: support multiple mbuf pools per Rx queue
This patch adds support for the multiple mempool feature. Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD to choose a memory pool based on the packet's length. On multiple mempool support enabled, populate mempool array and also print pool name on which packet is received. Signed-off-by: Hanumanth Pothula --- app/test-pmd/testpmd.c | 44 ++ app/test-pmd/testpmd.h | 3 +++ app/test-pmd/util.c| 4 ++-- 3 files changed, 37 insertions(+), 14 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 77741fc41f..1dbddf7b43 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2624,11 +2624,13 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; unsigned int i, mp_n; int ret; if (rx_pkt_nb_segs <= 1 || - (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { + (rx_conf->offloads & (RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT | +RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL)) == 0) { rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -2637,7 +2639,9 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, goto exit; } for (i = 0; i < rx_pkt_nb_segs; i++) { - struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + struct rte_eth_rxseg_split *rx_split = &rx_useg[i].split; + struct rte_mempool *mempool; + struct rte_mempool *mpx; /* * Use last valid pool for the segments with number @@ -2645,16 +2649,32 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, */ mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; - } - rx_conf->rx_nseg = rx_pkt_nb_segs; - rx_conf->rx_seg = rx_useg; + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + /** +* On segment length zero, update the length as +* buffer size minus headroom size, +* to make sure enough space is accommodated for the header. +*/ + rx_split->length = rx_pkt_seg_lengths[i] ? + rx_pkt_seg_lengths[i] : + mbuf_data_size[mp_n] - RTE_PKTMBUF_HEADROOM; + rx_split->offset = i < rx_pkt_nb_offs ? + rx_pkt_seg_offsets[i] : 0; + rx_split->mp = mpx ? mpx : mp; + } + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) { + mempool = mpx ? mpx : mp; + rx_mempool[i] = mempool; + } + } + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + rx_conf->rx_nseg = rx_pkt_nb_segs; + rx_conf->rx_seg = rx_useg; + } + if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_MUL_MEMPOOL) { + rx_conf->rx_mempools = rx_mempool; + rx_conf->rx_npool = rx_pkt_nb_segs; + } ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, socket_id, rx_conf, NULL); rx_conf->rx_seg = NULL; diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index ddf5e21849..15a26171e2 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -82,6 +82,9 @@ extern uint8_t cl_quit; #define MIN_TOTAL_NUM_MBUFS 1024 +/* Maximum number of pools supported per Rx queue */ +#define MAX_MEMPOOL 8 + typedef uint8_t lcoreid_t; typedef uint16_t portid_t; typedef uint16_t queueid_t; diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c index fd98e8b51d..f9df5f69ef 100644 --- a/app/test-pmd/util.c +++ b/app/test-pmd/util.c @@ -150,8 +150,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], print_ether_addr(" - dst=", &eth_hdr->dst_addr, print_buf, buf_size,
[PATCH v3 1/1] app/testpmd: control passing Rx metadata to PMD
Presently, Rx metadata is sent to the PMD by default, leading to a performance drop, as processing it in the Rx path takes extra cycles. Hence, introduce the command line argument 'nic-to-pmd-rx-metadata' to control passing Rx metadata to the PMD. By default it is disabled. Signed-off-by: Hanumanth Pothula v3: - Updated run_app.rst with the new command line argument, nic-to-pmd-rx-metadata. - Updated commit text. v2: - took care of alignment issues - renamed command line argument from rx-metadata to nic-to-pmd-rx-metadata - renamed variable name from rx-metadata to nic_to_pmd_rx_metadata --- app/test-pmd/parameters.c | 4 app/test-pmd/testpmd.c| 6 +- app/test-pmd/testpmd.h| 2 ++ doc/guides/testpmd_app_ug/run_app.rst | 3 +++ 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index cfd7cd1e50..2b1314771f 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -202,6 +202,7 @@ usage(char* progname) printf(" --hairpin-mode=0xXX: bitmask set the hairpin port mode.\n" "0x10 - explicit Tx rule, 0x02 - hairpin ports paired\n" "0x01 - hairpin ports loop, 0x00 - hairpin port self\n"); + printf(" --nic-to-pmd-rx-metadata: let the NIC deliver per-packet Rx metadata to PMD\n"); } #ifdef RTE_LIB_CMDLINE @@ -695,6 +696,7 @@ launch_args_parse(int argc, char** argv) { "record-burst-stats", 0, 0, 0 }, { PARAM_NUM_PROCS, 1, 0, 0 }, { PARAM_PROC_ID,1, 0, 0 }, + { "nic-to-pmd-rx-metadata", 0, 0, 0 }, { 0, 0, 0, 0 }, }; @@ -1434,6 +1436,8 @@ launch_args_parse(int argc, char** argv) num_procs = atoi(optarg); if (!strcmp(lgopts[opt_idx].name, PARAM_PROC_ID)) proc_id = atoi(optarg); + if (!strcmp(lgopts[opt_idx].name, "nic-to-pmd-rx-metadata")) + nic_to_pmd_rx_metadata = 1; break; case 'h': usage(argv[0]); diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 77741fc41f..938072bb88 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -411,6 +411,9 @@ uint8_t clear_ptypes = true; /* Hairpin ports configuration mode. */ uint16_t hairpin_mode; +/* Send Rx metadata */ +uint8_t nic_to_pmd_rx_metadata; + /* Pretty printing of ethdev events */ static const char * const eth_event_desc[] = { [RTE_ETH_EVENT_UNKNOWN] = "unknown", @@ -1595,7 +1598,8 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id) int ret; int i; - eth_rx_metadata_negotiate_mp(pid); + if (nic_to_pmd_rx_metadata) + eth_rx_metadata_negotiate_mp(pid); port->dev_conf.txmode = tx_mode; port->dev_conf.rxmode = rx_mode; diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index ddf5e21849..8522024364 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -615,6 +615,8 @@ extern struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */ extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */ +extern uint8_t nic_to_pmd_rx_metadata; + #ifdef RTE_LIB_GRO #define GRO_DEFAULT_ITEM_NUM_PER_FLOW 32 #define GRO_DEFAULT_FLOW_NUM (RTE_GRO_MAX_BURST_ITEM_NUM / \ diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst index 8b41b960c8..24e2e8aa69 100644 --- a/doc/guides/testpmd_app_ug/run_app.rst +++ b/doc/guides/testpmd_app_ug/run_app.rst @@ -539,6 +539,9 @@ The command line options are: The default value is 0. Hairpin will use single port mode and implicit Tx flow mode. +* ``--nic-to-pmd-rx-metadata`` + +Enable passing Rx metadata to PMD. Testpmd Multi-Process Command-line Options ~~ -- 2.25.1
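As a usage note: with this patch applied, testpmd no longer negotiates Rx metadata at startup unless the new flag is passed. An illustrative invocation (device address made up) would be: dpdk-testpmd -a 0002:02:00.0 -- -i --nic-to-pmd-rx-metadata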
[PATCH v12 1/1] app/testpmd: support multiple mbuf pools per Rx queue
Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD/NIC to choose a memory pool based on the packet's length. On multiple mempool support enabled, populate mempool array accordingly. Also, print pool name on which packet is received. Signed-off-by: Hanumanth Pothula v12: - Process multi-segment configuration on number segments (rx_pkt_nb_segs) greater than 1 or buffer split offload flag (RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) set. v11: - Resolve compilation and warning. v10: - Populate multi-mempool array based on mbuf_data_size_n instead of rx_pkt_nb_segs. --- app/test-pmd/testpmd.c | 61 +++--- app/test-pmd/testpmd.h | 3 +++ app/test-pmd/util.c| 4 +-- 3 files changed, 44 insertions(+), 24 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 5b0f0838dc..cb3b6be8db 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2647,11 +2647,19 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; + struct rte_mempool *mpx; unsigned int i, mp_n; int ret; - if (rx_pkt_nb_segs <= 1 || - (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { + /* Verify Rx queue configuration is single pool and segment or +* multiple pool/segment. +* @see rte_eth_rxconf::rx_mempools +* @see rte_eth_rxconf::rx_seg +*/ + if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || + ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { + /* Single pool/segment configuration */ rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -2659,33 +2667,42 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, rx_conf, mp); goto exit; } - for (i = 0; i < rx_pkt_nb_segs; i++) { - struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; - struct rte_mempool *mpx; - /* -* Use last valid pool for the segments with number -* exceeding the pool index. -*/ - mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; - mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; - if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { - rx_seg->proto_hdr = rx_pkt_hdr_protos[i]; - } else { + if (rx_pkt_nb_segs > 1 || + rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + /* multi-segment configuration */ + for (i = 0; i < rx_pkt_nb_segs; i++) { + struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + /* +* Use last valid pool for the segments with number +* exceeding the pool index. +*/ + mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; + mpx = mbuf_pool_find(socket_id, mp_n); + /* Handle zero as mbuf data buffer size. */ rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; +rx_pkt_seg_lengths[i] : +mbuf_data_size[mp_n]; + rx_seg->offset = i < rx_pkt_nb_offs ? +rx_pkt_seg_offsets[i] : 0; + rx_seg->mp = mpx ? mpx : mp; + } + rx_conf->rx_nseg = rx_pkt_nb_segs; + rx_conf->rx_seg = rx_useg; + } else { + /* multi-pool configuration */ + for (i = 0; i < mbuf_data_size_n; i++) { + mpx = mbuf_pool_find(socket_id, i); + rx_mempool[i] = mpx ? 
mpx : mp; } + rx_conf->rx_mempools = rx_mempool; + rx_conf->rx_nmempool = mbuf_data_size_n; } - rx_conf->rx_nseg = rx_pkt_nb_segs; - rx_conf->rx_seg = rx_useg; ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
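To summarize the selection logic of this revision (testpmd invocations are illustrative): a single --mbuf-size with no split configuration takes the single pool/segment path; more than one segment (--rxpkts) or the buffer-split offload takes the multi-segment path; several --mbuf-size values without a split configuration take the multi-pool path. For example, a hypothetical run such as dpdk-testpmd -l 0-1 -- -i --mbuf-size=2048,4096 would populate rx_mempools with one pool per mbuf size (mbuf_data_size_n = 2).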
[PATCH v13 1/1] app/testpmd: support multiple mbuf pools per Rx queue
Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD/NIC to choose a memory pool based on the packet's length. On multiple mempool support enabled, populate mempool array accordingly. Also, print pool name on which packet is received. Signed-off-by: Hanumanth Pothula v13: - Make sure protocol-based header split feature is not broken by updating changes with latest code base. v12: - Process multi-segment configuration on number segments (rx_pkt_nb_segs) greater than 1 or buffer split offload flag (RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) set. v11: - Resolve compilation and warning. v10: - Populate multi-mempool array based on mbuf_data_size_n instead of rx_pkt_nb_segs. --- app/test-pmd/testpmd.c | 65 -- app/test-pmd/testpmd.h | 3 ++ app/test-pmd/util.c| 4 +-- 3 files changed, 48 insertions(+), 24 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 5b0f0838dc..78ea19fcbb 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2647,11 +2647,19 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; + struct rte_mempool *mpx; unsigned int i, mp_n; int ret; - if (rx_pkt_nb_segs <= 1 || - (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { + /* Verify Rx queue configuration is single pool and segment or +* multiple pool/segment. +* @see rte_eth_rxconf::rx_mempools +* @see rte_eth_rxconf::rx_seg +*/ + if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || + ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { + /* Single pool/segment configuration */ rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -2659,33 +2667,46 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, rx_conf, mp); goto exit; } - for (i = 0; i < rx_pkt_nb_segs; i++) { - struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; - struct rte_mempool *mpx; - /* -* Use last valid pool for the segments with number -* exceeding the pool index. -*/ - mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; - mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; - if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { - rx_seg->proto_hdr = rx_pkt_hdr_protos[i]; - } else { - rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; + + if (rx_pkt_nb_segs > 1 || + rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + for (i = 0; i < rx_pkt_nb_segs; i++) { + struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + /* +* Use last valid pool for the segments with number +* exceeding the pool index. +*/ + mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; + mpx = mbuf_pool_find(socket_id, mp_n); + /* Handle zero as mbuf data buffer size. */ + rx_seg->offset = i < rx_pkt_nb_offs ? + rx_pkt_seg_offsets[i] : 0; + rx_seg->mp = mpx ? mpx : mp; + if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { + rx_seg->proto_hdr = rx_pkt_hdr_protos[i]; + } else { + rx_seg->length = rx_pkt_seg_lengths[i] ? 
+ rx_pkt_seg_lengths[i] : + mbuf_data_size[mp_n]; + } } - } rx_conf->rx_nseg = rx_pkt_nb_segs; rx_conf->rx_seg = rx_useg; + } else { + /* multi-pool configuration */ + for (i = 0; i < mbuf_data_size_n; i++) { + mpx = mbuf_pool_find(socket
[PATCH v14 1/1] app/testpmd: support multiple mbuf pools per Rx queue
Some of the HW has support for choosing memory pools based on the packet's size. The pool sort capability allows PMD/NIC to choose a memory pool based on the packet's length. On multiple mempool support enabled, populate mempool array accordingly. Also, print pool name on which packet is received. Signed-off-by: Hanumanth Pothula v14: - Rebased on tip of next-net/main v13: - Make sure protocol-based header split feature is not broken by updating changes with latest code base. v12: - Process multi-segment configuration on number segments (rx_pkt_nb_segs) greater than 1 or buffer split offload flag (RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) set. v11: - Resolve compilation and warning. v10: - Populate multi-mempool array based on mbuf_data_size_n instead of rx_pkt_nb_segs. --- app/test-pmd/testpmd.c | 70 +++--- app/test-pmd/testpmd.h | 3 ++ app/test-pmd/util.c| 4 +-- 3 files changed, 51 insertions(+), 26 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index d494870e59..ef281ccd20 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2653,12 +2653,20 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; + struct rte_mempool *mpx; unsigned int i, mp_n; uint32_t prev_hdrs = 0; int ret; - if (rx_pkt_nb_segs <= 1 || - (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { + /* Verify Rx queue configuration is single pool and segment or +* multiple pool/segment. +* @see rte_eth_rxconf::rx_mempools +* @see rte_eth_rxconf::rx_seg +*/ + if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || + ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { + /* Single pool/segment configuration */ rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, @@ -2666,34 +2674,48 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, rx_conf, mp); goto exit; } - for (i = 0; i < rx_pkt_nb_segs; i++) { - struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; - struct rte_mempool *mpx; - /* -* Use last valid pool for the segments with number -* exceeding the pool index. -*/ - mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; - mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; - if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { - rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs; - prev_hdrs |= rx_seg->proto_hdr; - } else { - rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; + + if (rx_pkt_nb_segs > 1 || + rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + /* multi-segment configuration */ + for (i = 0; i < rx_pkt_nb_segs; i++) { + struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + /* +* Use last valid pool for the segments with number +* exceeding the pool index. +*/ + mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; + mpx = mbuf_pool_find(socket_id, mp_n); + /* Handle zero as mbuf data buffer size. */ + rx_seg->offset = i < rx_pkt_nb_offs ? + rx_pkt_seg_offsets[i] : 0; + rx_seg->mp = mpx ? mpx : mp; + if (rx_pkt_hdr_protos[i] != 0 && rx_pkt_seg_lengths[i] == 0) { + rx_seg->proto_hdr = rx_pkt_hdr_protos[i] & ~prev_hdrs; + prev_hdrs |= rx_seg->proto_hdr; + } else { + rx_seg->length = rx_pkt_seg_lengths[i] ? 
+ rx_pkt_seg_lengths[i] : + mbuf_data_size[mp_n]; + } + } +
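The prev_hdrs accumulation above is what keeps protocol-based buffer split intact: each segment's proto_hdr must advertise only the headers not already claimed by earlier segments. A standalone sketch of that masking, with invented protocol values and using the RTE_PTYPE_* masks that rte_eth_rxseg_split::proto_hdr carries:

	#include <rte_mbuf_ptype.h>

	uint32_t protos[2] = {
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,            /* seg 0 */
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
		RTE_PTYPE_L4_UDP,                                  /* seg 1 */
	};
	uint32_t prev_hdrs = 0;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		uint32_t hdr = protos[i] & ~prev_hdrs; /* seg 1 keeps only L4_UDP */
		prev_hdrs |= hdr;
		/* rx_seg[i].proto_hdr = hdr; */
	}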
[PATCH v1 1/1] app/testpmd: add valid check to verify multi mempool feature
Validate ethdev parameter 'max_rx_mempools' to know whether device supports multi-mempool feature or not. Bugzilla ID: 1128 Signed-off-by: Hanumanth Pothula --- app/test-pmd/testpmd.c | 8 +++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 78ea19fcbb..79c0951b62 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2648,16 +2648,22 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, { union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; + struct rte_eth_dev_info dev_info; struct rte_mempool *mpx; unsigned int i, mp_n; int ret; + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + /* Verify Rx queue configuration is single pool and segment or * multiple pool/segment. +* @see rte_eth_dev_info::max_rx_mempools * @see rte_eth_rxconf::rx_mempools * @see rte_eth_rxconf::rx_seg */ - if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || + if (!(dev_info.max_rx_mempools != 0) && !(rx_pkt_nb_segs > 1 || ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { /* Single pool/segment configuration */ rx_conf->rx_seg = NULL; -- 2.25.1
[PATCH v2 1/1] app/testpmd: add valid check to verify multi mempool feature
Validate ethdev parameter 'max_rx_mempools' to know whether device supports multi-mempool feature or not. Bugzilla ID: 1128 Signed-off-by: Hanumanth Pothula v2: - Rebased on tip of next-net/main --- app/test-pmd/testpmd.c | 8 +++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 4e25f77c6a..fd634bd5e6 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2655,16 +2655,22 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; struct rte_mempool *mpx; + struct rte_eth_dev_info dev_info; unsigned int i, mp_n; uint32_t prev_hdrs = 0; int ret; + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + /* Verify Rx queue configuration is single pool and segment or * multiple pool/segment. +* @see rte_eth_dev_info::max_rx_mempools * @see rte_eth_rxconf::rx_mempools * @see rte_eth_rxconf::rx_seg */ - if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || + if (!(dev_info.max_rx_mempools != 0) && !(rx_pkt_nb_segs > 1 || ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { /* Single pool/segment configuration */ rx_conf->rx_seg = NULL; -- 2.25.1
[PATCH v3 1/1] app/testpmd: add valid check to verify multi mempool feature
Validate ethdev parameter 'max_rx_mempools' to know whether device supports multi-mempool feature or not. Bugzilla ID: 1128 Signed-off-by: Hanumanth Pothula v3: - Simplified conditional check. - Corrected spell, whether. v2: - Rebased on tip of next-net/main. --- app/test-pmd/testpmd.c | 8 +++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 4e25f77c6a..6c3d0948ec 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2655,16 +2655,22 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; struct rte_mempool *mpx; + struct rte_eth_dev_info dev_info; unsigned int i, mp_n; uint32_t prev_hdrs = 0; int ret; + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + /* Verify Rx queue configuration is single pool and segment or * multiple pool/segment. +* @see rte_eth_dev_info::max_rx_mempools * @see rte_eth_rxconf::rx_mempools * @see rte_eth_rxconf::rx_seg */ - if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || + if ((dev_info.max_rx_mempools == 0) && !(rx_pkt_nb_segs > 1 || ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { /* Single pool/segment configuration */ rx_conf->rx_seg = NULL; -- 2.25.1
[PATCH v4 1/1] app/testpmd: add valid check to verify multi mempool feature
Validate ethdev parameter 'max_rx_mempools' to know whether device supports multi-mempool feature or not. Bugzilla ID: 1128 Signed-off-by: Hanumanth Pothula v4: - updated if condition. v3: - Simplified conditional check. - Corrected spell, whether. v2: - Rebased on tip of next-net/main. --- app/test-pmd/testpmd.c | 10 -- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 4e25f77c6a..9fc14e6d6b 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2655,17 +2655,23 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; struct rte_mempool *mpx; + struct rte_eth_dev_info dev_info; unsigned int i, mp_n; uint32_t prev_hdrs = 0; int ret; + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + /* Verify Rx queue configuration is single pool and segment or * multiple pool/segment. +* @see rte_eth_dev_info::max_rx_mempools * @see rte_eth_rxconf::rx_mempools * @see rte_eth_rxconf::rx_seg */ - if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || - ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { + if ((dev_info.max_rx_mempools == 0) && (rx_pkt_nb_segs < 1 || + ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0))) { /* Single pool/segment configuration */ rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; -- 2.25.1
[PATCH v4 1/1] app/testpmd: add valid check to verify multi mempool feature
Validate ethdev parameter 'max_rx_mempools' to know whether device supports multi-mempool feature or not. Bugzilla ID: 1128 Signed-off-by: Hanumanth Pothula v4: - updated if condition. v3: - Simplified conditional check. - Corrected spell, whether. v2: - Rebased on tip of next-net/main. --- app/test-pmd/testpmd.c | 10 -- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 4e25f77c6a..c1b4dbd716 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -2655,17 +2655,23 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; struct rte_mempool *mpx; + struct rte_eth_dev_info dev_info; unsigned int i, mp_n; uint32_t prev_hdrs = 0; int ret; + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + /* Verify Rx queue configuration is single pool and segment or * multiple pool/segment. +* @see rte_eth_dev_info::max_rx_mempools * @see rte_eth_rxconf::rx_mempools * @see rte_eth_rxconf::rx_seg */ - if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || - ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { + if ((dev_info.max_rx_mempools == 0) && (rx_pkt_nb_segs <= 1 || + ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0))) { /* Single pool/segment configuration */ rx_conf->rx_seg = NULL; rx_conf->rx_nseg = 0; -- 2.25.1
[PATCH v5 1/1] app/testpmd: add valid check to verify multi mempool feature
Validate ethdev parameter 'max_rx_mempools' to know whether device supports multi-mempool feature or not. Also, add new testpmd command line argument, multi-mempool, to control multi-mempool feature. By default it is disabled. Bugzilla ID: 1128 Fixes: 4f04edcda769 ("app/testpmd: support multiple mbuf pools per Rx queue") Signed-off-by: Hanumanth Pothula --- v5: - Added testpmd argument to enable multi-mempool feature. - Simplified logic to distinguish between multi-mempool, multi-segment and single pool/segment. v4: - updated if condition. v3: - Simplified conditional check. - Corrected spell, whether. v2: - Rebased on tip of next-net/main. --- app/test-pmd/parameters.c | 3 ++ app/test-pmd/testpmd.c| 58 +-- app/test-pmd/testpmd.h| 1 + 3 files changed, 41 insertions(+), 21 deletions(-) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index aed4cdcb84..26d6450db4 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -700,6 +700,7 @@ launch_args_parse(int argc, char** argv) { "rx-mq-mode", 1, 0, 0 }, { "record-core-cycles", 0, 0, 0 }, { "record-burst-stats", 0, 0, 0 }, + { "multi-mempool", 0, 0, 0 }, { PARAM_NUM_PROCS, 1, 0, 0 }, { PARAM_PROC_ID,1, 0, 0 }, { 0, 0, 0, 0 }, }; @@ -1449,6 +1450,8 @@ launch_args_parse(int argc, char** argv) record_core_cycles = 1; if (!strcmp(lgopts[opt_idx].name, "record-burst-stats")) record_burst_stats = 1; + if (!strcmp(lgopts[opt_idx].name, "multi-mempool")) + multi_mempool = 1; if (!strcmp(lgopts[opt_idx].name, PARAM_NUM_PROCS)) num_procs = atoi(optarg); if (!strcmp(lgopts[opt_idx].name, PARAM_PROC_ID)) proc_id = atoi(optarg); diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 4e25f77c6a..9dfc4c9d0e 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -497,6 +497,11 @@ uint8_t record_burst_stats; */ uint32_t rxq_share; +/* + * Multi-mempool support, disabled by default. + */ +uint8_t multi_mempool; + unsigned int num_sockets = 0; unsigned int socket_ids[RTE_MAX_NUMA_NODES]; @@ -2655,28 +2660,23 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; struct rte_mempool *rx_mempool[MAX_MEMPOOL] = {}; struct rte_mempool *mpx; + struct rte_eth_dev_info dev_info; unsigned int i, mp_n; uint32_t prev_hdrs = 0; int ret; + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + /* Verify Rx queue configuration is single pool and segment or * multiple pool/segment. 
+* @see rte_eth_dev_info::max_rx_mempools * @see rte_eth_rxconf::rx_mempools * @see rte_eth_rxconf::rx_seg */ - if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || - ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { - /* Single pool/segment configuration */ - rx_conf->rx_seg = NULL; - rx_conf->rx_nseg = 0; - ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, -nb_rx_desc, socket_id, -rx_conf, mp); - goto exit; - } - - if (rx_pkt_nb_segs > 1 || - rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + if ((rx_pkt_nb_segs > 1) && + (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { /* multi-segment configuration */ for (i = 0; i < rx_pkt_nb_segs; i++) { struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; @@ -2701,7 +2701,14 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, } rx_conf->rx_nseg = rx_pkt_nb_segs; rx_conf->rx_seg = rx_useg; - } else { + rx_conf->rx_mempools = NULL; + rx_conf->rx_nmempool = 0; + ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, + socket_id, rx_conf, NULL); + rx_conf->rx_seg = NULL; + rx_conf->rx_nseg = 0; + } else if ((multi_mempool == 1) && (dev_info.max_rx_mempools != 0) && + (mbuf_data_size_n > 1)) { /* multi-pool configuration */ for (i = 0; i < mbuf_data_size_n; i
[PATCH v6 1/1] app/testpmd: add valid check to verify multi mempool feature
Validate ethdev parameter 'max_rx_mempools' to know whether device supports multi-mempool feature or not. Also, add new testpmd command line argument, multi-mempool, to control multi-mempool feature. By default it is disabled. Bugzilla ID: 1128 Fixes: 4f04edcda769 ("app/testpmd: support multiple mbuf pools per Rx queue") Signed-off-by: Hanumanth Pothula --- v6: - Updated run_app.rst file with multi-mempool argument. - Defined and populated multi_mempool along with related arguments. - Invoked rte_eth_dev_info_get() within the multi-mempool condition. v5: - Added testpmd argument to enable multi-mempool feature. - Simplified logic to distinguish between multi-mempool, multi-segment and single pool/segment. v4: - updated if condition. v3: - Simplified conditional check. - Corrected spell, whether. v2: - Rebased on tip of next-net/main. --- app/test-pmd/parameters.c | 4 ++ app/test-pmd/testpmd.c| 66 +-- app/test-pmd/testpmd.h| 1 + doc/guides/testpmd_app_ug/run_app.rst | 4 ++ 4 files changed, 50 insertions(+), 25 deletions(-) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index aed4cdcb84..d0f7b2f11d 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -155,6 +155,7 @@ usage(char* progname) printf(" --rxhdrs=eth[,ipv4]*: set RX segment protocol to split.\n"); printf(" --txpkts=X[,Y]*: set TX segment sizes" " or total packet length.\n"); + printf(" --multi-mempool: enable multi-mempool support\n"); printf(" --txonly-multi-flow: generate multiple flows in txonly mode\n"); printf(" --tx-ip=src,dst: IP addresses in Tx-only mode\n"); printf(" --tx-udp=src[,dst]: UDP ports in Tx-only mode\n"); @@ -669,6 +670,7 @@ launch_args_parse(int argc, char** argv) { "rxpkts", 1, 0, 0 }, { "rxhdrs", 1, 0, 0 }, { "txpkts", 1, 0, 0 }, + { "multi-mempool", 0, 0, 0 }, { "txonly-multi-flow", 0, 0, 0 }, { "rxq-share", 2, 0, 0 }, { "eth-link-speed", 1, 0, 0 }, @@ -1295,6 +1297,8 @@ launch_args_parse(int argc, char** argv) else rte_exit(EXIT_FAILURE, "bad txpkts\n"); } + if (!strcmp(lgopts[opt_idx].name, "multi-mempool")) + multi_mempool = 1; if (!strcmp(lgopts[opt_idx].name, "txonly-multi-flow")) txonly_multi_flow = 1; if (!strcmp(lgopts[opt_idx].name, "rxq-share")) { diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 4e25f77c6a..0bf2e4bd0d 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -245,6 +245,7 @@ uint32_t max_rx_pkt_len; */ uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT]; uint8_t rx_pkt_nb_segs; /**< Number of segments to split */ +uint8_t multi_mempool; /**< Enables multi-mempool feature */ uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT]; uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */ uint32_t rx_pkt_hdr_protos[MAX_SEGS_BUFFER_SPLIT]; @@ -258,6 +259,8 @@ uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { }; uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ + + enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; /**< Split policy for packets to TX. */ @@ -2659,24 +2662,9 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint32_t prev_hdrs = 0; int ret; - /* Verify Rx queue configuration is single pool and segment or -* multiple pool/segment. 
-* @see rte_eth_rxconf::rx_mempools -* @see rte_eth_rxconf::rx_seg -*/ - if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || - ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { - /* Single pool/segment configuration */ - rx_conf->rx_seg = NULL; - rx_conf->rx_nseg = 0; - ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, -nb_rx_desc, socket_id, -rx_conf, mp); - goto exit; - } - if (rx_pkt_nb_segs > 1 || - rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) { + if ((rx_pkt_nb_segs > 1) && + (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT)) { /* multi-segment configuration */
[PATCH v7 1/1] app/testpmd: add valid check to verify multi mempool feature
Validate ethdev parameter 'max_rx_mempools' to know whether device supports multi-mempool feature or not. Also, add new testpmd command line argument, multi-mempool, to control multi-mempool feature. By default it is disabled. Bugzilla ID: 1128 Fixes: 4f04edcda769 ("app/testpmd: support multiple mbuf pools per Rx queue") Signed-off-by: Hanumanth Pothula --- v7: - Updated testpmd argument name from multi-mempool to multi-rx-mempool. - Updated definition of testpmd argument, mbuf-size. - Resolved indentation issues. v6: - Updated run_app.rst file with multi-mempool argument. - Defined and populated multi_mempool along with related arguments. - Invoked rte_eth_dev_info_get() within the multi-mempool condition. v5: - Added testpmd argument to enable multi-mempool feature. - Simplified logic to distinguish between multi-mempool, multi-segment and single pool/segment. v4: - updated if condition. v3: - Simplified conditional check. - Corrected spell, whether. v2: - Rebased on tip of next-net/main. --- app/test-pmd/parameters.c | 7 ++- app/test-pmd/testpmd.c| 64 --- app/test-pmd/testpmd.h| 1 + doc/guides/testpmd_app_ug/run_app.rst | 4 ++ 4 files changed, 50 insertions(+), 26 deletions(-) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index aed4cdcb84..af9ec39cf9 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -88,7 +88,8 @@ usage(char* progname) "in NUMA mode.\n"); printf(" --mbuf-size=N,[N1[,..Nn]: set the data size of mbuf to " "N bytes. If multiple numbers are specified the extra pools " - "will be created to receive with packet split features\n"); + "will be created to receive packets based on the features " + "supported, like buffer-split, multi-mempool.\n"); printf(" --total-num-mbufs=N: set the number of mbufs to be allocated " "in mbuf pools.\n"); printf(" --max-pkt-len=N: set the maximum size of packet to N bytes.\n"); @@ -155,6 +156,7 @@ usage(char* progname) printf(" --rxhdrs=eth[,ipv4]*: set RX segment protocol to split.\n"); printf(" --txpkts=X[,Y]*: set TX segment sizes" " or total packet length.\n"); + printf(" --multi-rx-mempool: enable multi-mempool support\n"); printf(" --txonly-multi-flow: generate multiple flows in txonly mode\n"); printf(" --tx-ip=src,dst: IP addresses in Tx-only mode\n"); printf(" --tx-udp=src[,dst]: UDP ports in Tx-only mode\n"); @@ -669,6 +671,7 @@ launch_args_parse(int argc, char** argv) { "rxpkts", 1, 0, 0 }, { "rxhdrs", 1, 0, 0 }, { "txpkts", 1, 0, 0 }, + { "multi-rx-mempool", 0, 0, 0 }, { "txonly-multi-flow", 0, 0, 0 }, { "rxq-share", 2, 0, 0 }, { "eth-link-speed", 1, 0, 0 }, @@ -1295,6 +1298,8 @@ launch_args_parse(int argc, char** argv) else rte_exit(EXIT_FAILURE, "bad txpkts\n"); } + if (!strcmp(lgopts[opt_idx].name, "multi-rx-mempool")) + multi_rx_mempool = 1; if (!strcmp(lgopts[opt_idx].name, "txonly-multi-flow")) txonly_multi_flow = 1; if (!strcmp(lgopts[opt_idx].name, "rxq-share")) { diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 4e25f77c6a..716937925e 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -245,6 +245,7 @@ uint32_t max_rx_pkt_len; */ uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT]; uint8_t rx_pkt_nb_segs; /**< Number of segments to split */ +uint8_t multi_rx_mempool; /**< Enables multi-mempool feature */ uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT]; uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */ uint32_t rx_pkt_hdr_protos[MAX_SEGS_BUFFER_SPLIT]; @@ -2659,24 +2660,9 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint32_t prev_hdrs = 0; int 
ret; - /* Verify Rx queue configuration is single pool and segment or -* multiple pool/segment. -* @see rte_eth_rxconf::rx_mempools -* @see rte_eth_rxconf::rx_seg -*/ - if (!(mbuf_data_size_n > 1) && !(rx_pkt_nb_segs > 1 || - ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) != 0))) { - /* Single po
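A possible invocation of this version (addresses and sizes illustrative): dpdk-testpmd -a 0002:02:00.0 -- -i --multi-rx-mempool --mbuf-size=2048,4096. With --multi-rx-mempool omitted and no buffer-split configuration, the same command falls back to the single pool/segment path.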
[PATCH v4 1/2] ethdev: control Rx metadata negotiation
Presently, Rx metadata is sent to the PMD by default, leading to a performance drop, as processing it in the Rx path takes extra cycles. Hence, adding a new eth port configuration field, 'nic_to_pmd_rx_metadata', to control NIC to PMD Rx metadata negotiation. Also, reset the dev_configured flag as part of device reset; this helps in reconfiguring ports cleanly. Rx metadata negotiation, rte_eth_rx_metadata_negotiate(), is allowed only when dev_configured flag is reset. Signed-off-by: Hanumanth Pothula --- v4: - As per spec rte_eth_rx_metadata_negotiate() is processed only when dev_configured is set. Hence can't enable automatically when a flow command requests metadata. - Add new testpmd command to allow NIC to PMD Rx metadata negotiation. v3: - Updated run_app.rst with the new command line argument, nic-to-pmd-rx-metadata. - Updated commit text. v2: - took care of alignment issues - renamed command line argument from rx-metadata to nic-to-pmd-rx-metadata - renamed variable name from rx-metadata to nic_to_pmd_rx_metadata --- lib/ethdev/rte_ethdev.c | 2 ++ lib/ethdev/rte_ethdev.h | 1 + 2 files changed, 3 insertions(+) diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 5d5e18db1e..18c59044bc 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -1629,6 +1629,8 @@ rte_eth_dev_reset(uint16_t port_id) port_id, rte_strerror(-ret)); } ret = dev->dev_ops->dev_reset(dev); + if (!ret) + dev->data->dev_configured = 0; return eth_err(port_id, ret); } diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h index c129ca1eaf..f1160a8aca 100644 --- a/lib/ethdev/rte_ethdev.h +++ b/lib/ethdev/rte_ethdev.h @@ -1487,6 +1487,7 @@ struct rte_eth_conf { is needed,and the variable must be set RTE_ETH_DCB_PFC_SUPPORT. */ uint32_t dcb_capability_en; struct rte_eth_intr_conf intr_conf; /**< Interrupt mode configuration. */ + uint8_t nic_to_pmd_rx_metadata; /**< send rx metadata to PMD. */ }; /** -- 2.25.1
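A sketch of the reconfiguration sequence this change is meant to unlock; the application code below is hypothetical and error handling is trimmed. Because rte_eth_rx_metadata_negotiate() is honoured only before the device is configured, clearing dev_configured on reset lets the negotiation be redone:

	uint16_t port_id = 0;			/* illustrative */
	struct rte_eth_conf dev_conf = {0};
	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
			    RTE_ETH_RX_METADATA_USER_MARK;

	rte_eth_dev_stop(port_id);
	rte_eth_dev_reset(port_id);		/* now clears dev_configured */
	rte_eth_rx_metadata_negotiate(port_id, &features);
	rte_eth_dev_configure(port_id, 1, 1, &dev_conf);
	/* ... queue setup and rte_eth_dev_start() follow ... */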
[PATCH v4 2/2] app/testpmd: add command to process Rx metadata negotiation
Presently, Rx metadata is sent to the PMD by default, leading to a performance drop, as processing it in the Rx path takes extra cycles. Hence, add a new testpmd command, 'enable port nic_to_pmd_rx_metadata'. This command enables sending Rx metadata to the PMD, so that Rx metadata flow command requests are processed. Signed-off-by: Hanumanth Pothula --- app/test-pmd/cmdline.c | 58 + app/test-pmd/config.c | 9 app/test-pmd/testpmd.c | 5 +- doc/guides/testpmd_app_ug/testpmd_funcs.rst | 6 +++ 4 files changed, 76 insertions(+), 2 deletions(-) diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index b32dc8bfd4..56946b2520 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -610,6 +610,9 @@ static void cmd_help_long_parsed(void *parsed_result, "set port (port_id) fec_mode auto|off|rs|baser\n" "set fec mode for a specific port\n\n" + "enable port nic_to_pmd_rx_metadata" + "Allow nic to pmd Rx metadata negotiation\n\n" + , list_pkt_forwarding_modes() ); } @@ -12621,6 +12624,60 @@ static cmdline_parse_inst_t cmd_show_port_flow_transfer_proxy = { } }; +/* Allow negotiating Rx metadata between NIC and PMD */ +struct cmd_config_port_rx_metadata { + cmdline_fixed_string_t enable; + cmdline_fixed_string_t port; + uint16_t port_id; + cmdline_fixed_string_t nic_to_pmd_rx_metadata; +}; + +static void +cmd_config_port_rx_metadata_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_port_rx_metadata *res = parsed_result; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + if (!port_is_stopped(res->port_id)) { + fprintf(stderr, "Please stop port %u first\n", res->port_id); + return; + } + + ports[res->port_id].dev_conf.nic_to_pmd_rx_metadata = 1; + + reset_port(res->port_id); +} + + +static cmdline_parse_token_string_t cmd_config_port_rx_metadata_enable = + TOKEN_STRING_INITIALIZER(struct cmd_config_port_rx_metadata, enable, + "enable"); +static cmdline_parse_token_string_t cmd_config_port_rx_metadata_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_port_rx_metadata, port, + "port"); +static cmdline_parse_token_num_t cmd_config_port_rx_metadata_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_port_rx_metadata, port_id, + RTE_UINT16); +static cmdline_parse_token_string_t cmd_config_port_rx_metadata_nic_to_pmd_rx_metadata = + TOKEN_STRING_INITIALIZER(struct cmd_config_port_rx_metadata, nic_to_pmd_rx_metadata, + "nic_to_pmd_rx_metadata"); + +static cmdline_parse_inst_t cmd_config_port_rx_metadata_parse = { + .f = cmd_config_port_rx_metadata_parsed, + .data = NULL, + .help_str = "enable port nic_to_pmd_rx_metadata", + .tokens = { + (void *)&cmd_config_port_rx_metadata_enable, + (void *)&cmd_config_port_rx_metadata_port, + (void *)&cmd_config_port_rx_metadata_id, + (void *)&cmd_config_port_rx_metadata_nic_to_pmd_rx_metadata, + NULL, + }, +}; + /* */ /* list of instructions */ @@ -12851,6 +12908,7 @@ static cmdline_parse_ctx_t builtin_ctx[] = { (cmdline_parse_inst_t *)&cmd_show_capability, (cmdline_parse_inst_t *)&cmd_set_flex_is_pattern, (cmdline_parse_inst_t *)&cmd_set_flex_spec_pattern, + (cmdline_parse_inst_t *)&cmd_config_port_rx_metadata_parse, NULL, }; diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index acccb6b035..47fce3accb 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -3249,6 +3249,15 @@ port_flow_create(portid_t port_id, } id = port->flow_list->id + 1; } + + if (port->dev_conf.nic_to_pmd_rx_metadata == 0 && + (actions->type == RTE_FLOW_ACTION_TYPE_MARK || + actions->type == 
RTE_FLOW_ACTION_TYPE_FLAG)) { + fprintf(stderr, + "rx metadata is not negotiated with PMD\n"); + return -EINVAL; + } + if (tunnel_ops->enabled) { pft = port_flow_tunnel_offload
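An illustrative interactive session once this command exists (port number made up; the handler resets the port itself, which is why the port must be stopped first):

	testpmd> port stop 0
	testpmd> enable port 0 nic_to_pmd_rx_metadata
	testpmd> port start 0
	testpmd> flow create 0 ingress pattern eth / end actions mark id 1 / queue index 0 / end

Without the enable step, the mark/flag flow above is rejected with "rx metadata is not negotiated with PMD".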
[PATCH v5 1/2] ethdev: fix ethdev configuration state on reset
Presently, on device reset, the ethdev configuration state, dev_configured, is not reset. Reset it as part of device reset to make sure device reconfiguration happens cleanly. Signed-off-by: Hanumanth Pothula --- v5: - Move nic-to-pmd-rx-metadata declaration to struct rte_port. v4: - As per spec rte_eth_rx_metadata_negotiate() is processed only when dev_configured is set. Hence can't enable automatically when a flow command requests metadata. - Add new testpmd command to allow NIC to PMD Rx metadata negotiation. v3: - Updated run_app.rst with the new command line argument, nic-to-pmd-rx-metadata. - Updated commit text. v2: - took care of alignment issues - renamed command line argument from rx-metadata to nic-to-pmd-rx-metadata - renamed variable name from rx-metadata to nic_to_pmd_rx_metadata --- lib/ethdev/rte_ethdev.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 5d5e18db1e..18c59044bc 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -1629,6 +1629,8 @@ rte_eth_dev_reset(uint16_t port_id) port_id, rte_strerror(-ret)); } ret = dev->dev_ops->dev_reset(dev); + if (!ret) + dev->data->dev_configured = 0; return eth_err(port_id, ret); } -- 2.25.1
[PATCH v5 2/2] app/testpmd: add command to process Rx metadata negotiation
Presently, Rx metadata is sent to the PMD by default, leading to a performance drop, as processing it in the Rx path takes extra cycles. Hence, add a new testpmd command, 'enable port nic_to_pmd_rx_metadata'. This command enables sending Rx metadata to the PMD, so that Rx metadata flow command requests are processed. Signed-off-by: Hanumanth Pothula --- app/test-pmd/cmdline.c | 58 + app/test-pmd/config.c | 9 app/test-pmd/testpmd.c | 5 +- app/test-pmd/testpmd.h | 1 + doc/guides/testpmd_app_ug/testpmd_funcs.rst | 6 +++ 5 files changed, 77 insertions(+), 2 deletions(-) diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index b32dc8bfd4..e3abc9e830 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -610,6 +610,9 @@ static void cmd_help_long_parsed(void *parsed_result, "set port (port_id) fec_mode auto|off|rs|baser\n" "set fec mode for a specific port\n\n" + "enable port nic_to_pmd_rx_metadata" + "Allow nic to pmd Rx metadata negotiation\n\n" + , list_pkt_forwarding_modes() ); } @@ -12621,6 +12624,60 @@ static cmdline_parse_inst_t cmd_show_port_flow_transfer_proxy = { } }; +/* Allow negotiating Rx metadata between NIC and PMD */ +struct cmd_config_port_rx_metadata { + cmdline_fixed_string_t enable; + cmdline_fixed_string_t port; + uint16_t port_id; + cmdline_fixed_string_t nic_to_pmd_rx_metadata; +}; + +static void +cmd_config_port_rx_metadata_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_port_rx_metadata *res = parsed_result; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + if (!port_is_stopped(res->port_id)) { + fprintf(stderr, "Please stop port %u first\n", res->port_id); + return; + } + + ports[res->port_id].nic_to_pmd_rx_metadata = 1; + + reset_port(res->port_id); +} + + +static cmdline_parse_token_string_t cmd_config_port_rx_metadata_enable = + TOKEN_STRING_INITIALIZER(struct cmd_config_port_rx_metadata, enable, + "enable"); +static cmdline_parse_token_string_t cmd_config_port_rx_metadata_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_port_rx_metadata, port, + "port"); +static cmdline_parse_token_num_t cmd_config_port_rx_metadata_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_port_rx_metadata, port_id, + RTE_UINT16); +static cmdline_parse_token_string_t cmd_config_port_rx_metadata_nic_to_pmd_rx_metadata = + TOKEN_STRING_INITIALIZER(struct cmd_config_port_rx_metadata, nic_to_pmd_rx_metadata, + "nic_to_pmd_rx_metadata"); + +static cmdline_parse_inst_t cmd_config_port_rx_metadata_parse = { + .f = cmd_config_port_rx_metadata_parsed, + .data = NULL, + .help_str = "enable port nic_to_pmd_rx_metadata", + .tokens = { + (void *)&cmd_config_port_rx_metadata_enable, + (void *)&cmd_config_port_rx_metadata_port, + (void *)&cmd_config_port_rx_metadata_id, + (void *)&cmd_config_port_rx_metadata_nic_to_pmd_rx_metadata, + NULL, + }, +}; + /* */ /* list of instructions */ @@ -12851,6 +12908,7 @@ static cmdline_parse_ctx_t builtin_ctx[] = { (cmdline_parse_inst_t *)&cmd_show_capability, (cmdline_parse_inst_t *)&cmd_set_flex_is_pattern, (cmdline_parse_inst_t *)&cmd_set_flex_spec_pattern, + (cmdline_parse_inst_t *)&cmd_config_port_rx_metadata_parse, NULL, }; diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index acccb6b035..60df47407e 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -3249,6 +3249,15 @@ port_flow_create(portid_t port_id, } id = port->flow_list->id + 1; } + + if (port->nic_to_pmd_rx_metadata == 0 && + (actions->type == RTE_FLOW_ACTION_TYPE_MARK || + 
actions->type == RTE_FLOW_ACTION_TYPE_FLAG)) { + fprintf(stderr, + "rx metadata is not negotiated with PMD\n"); + return -EINVAL; + } + if (tunnel_ops->enabled) {
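For context, the testpmd command above only gates a call into the ethdev negotiation API. Below is a minimal sketch of how an application might negotiate Rx metadata delivery itself, assuming DPDK 21.11 or later (rte_eth_rx_metadata_negotiate() must be called before rte_eth_dev_configure()); the helper name is illustrative:

#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: ask the PMD to deliver user FLAG/MARK metadata per packet.
 * 'features' is updated in place with the subset the PMD agreed to. */
static int
negotiate_rx_metadata(uint16_t port_id)
{
	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
			    RTE_ETH_RX_METADATA_USER_MARK;
	int ret = rte_eth_rx_metadata_negotiate(port_id, &features);

	if (ret == -ENOTSUP)
		return 0; /* PMD does not support negotiation at all */
	if (ret != 0)
		return ret;
	if ((features & RTE_ETH_RX_METADATA_USER_MARK) == 0)
		printf("MARK delivery not negotiated; MARK rules may fail\n");
	return 0;
}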
[PATCH] app/testpmd: add command line argument 'rx-metadata'
A performance drop is observed when per-packet Rx metadata (offloads) is sent to the PMD. Hence, introduce the command line argument 'rx-metadata' to control passing Rx metadata to the PMD. By default it is disabled. Signed-off-by: Hanumanth Pothula --- app/test-pmd/parameters.c | 4 app/test-pmd/testpmd.c| 6 +- app/test-pmd/testpmd.h| 2 ++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index e3c9757f3f..daf1218977 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -213,6 +213,7 @@ usage(char* progname) printf(" --hairpin-mode=0xXX: bitmask set the hairpin port mode.\n" "0x10 - explicit Tx rule, 0x02 - hairpin ports paired\n" "0x01 - hairpin ports loop, 0x00 - hairpin port self\n"); + printf(" --rx-metadata: send rx metadata to driver\n"); } #ifdef RTE_LIB_CMDLINE @@ -710,6 +711,7 @@ launch_args_parse(int argc, char** argv) { "record-burst-stats", 0, 0, 0 }, { PARAM_NUM_PROCS, 1, 0, 0 }, { PARAM_PROC_ID,1, 0, 0 }, + { "rx-metadata",0, 0, 0 }, { 0, 0, 0, 0 }, }; @@ -1510,6 +1512,8 @@ launch_args_parse(int argc, char** argv) num_procs = atoi(optarg); if (!strcmp(lgopts[opt_idx].name, PARAM_PROC_ID)) proc_id = atoi(optarg); + if (!strcmp(lgopts[opt_idx].name, "rx-metadata")) + rx_metadata_negotiate = 1; break; case 'h': usage(argv[0]); diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index addcbcac85..ebbde5dfc9 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -411,6 +411,9 @@ uint8_t clear_ptypes = true; /* Hairpin ports configuration mode. */ uint16_t hairpin_mode; +/* send rx metadata */ +uint8_t rx_metadata_negotiate; + /* Pretty printing of ethdev events */ static const char * const eth_event_desc[] = { [RTE_ETH_EVENT_UNKNOWN] = "unknown", @@ -1628,7 +1631,8 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id) int ret; int i; - eth_rx_metadata_negotiate_mp(pid); + if (rx_metadata_negotiate) + eth_rx_metadata_negotiate_mp(pid); port->dev_conf.txmode = tx_mode; port->dev_conf.rxmode = rx_mode; diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index fb2f5195d3..8a9168c51e 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -621,6 +621,8 @@ extern struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */ extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */ +extern uint8_t rx_metadata_negotiate; + #ifdef RTE_LIB_GRO #define GRO_DEFAULT_ITEM_NUM_PER_FLOW 32 #define GRO_DEFAULT_FLOW_NUM (RTE_GRO_MAX_BURST_ITEM_NUM / \ -- 2.25.1
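The option itself is a plain flag-style long option. A stripped-down sketch of the parsing pattern the patch relies on (the names mirror the patch, but the option table below is illustrative, not testpmd's full table):

#include <getopt.h>
#include <string.h>
#include <stdint.h>

static uint8_t rx_metadata_negotiate; /* 0 by default: metadata stays off */

/* Sketch: a no-argument long option that latches a global flag. */
static void
parse_args(int argc, char **argv)
{
	static const struct option lgopts[] = {
		{ "rx-metadata", 0, 0, 0 },
		{ 0, 0, 0, 0 },
	};
	int opt, opt_idx;

	while ((opt = getopt_long(argc, argv, "", lgopts, &opt_idx)) != -1) {
		if (opt == 0 && !strcmp(lgopts[opt_idx].name, "rx-metadata"))
			rx_metadata_negotiate = 1;
	}
}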
[PATCH] app/testpmd: add command line argument 'nic-to-pmd-rx-metadata'
Presently, Rx metadata is sent to the PMD by default, leading to a performance drop as processing it in the Rx path takes extra cycles. Hence, introduce the command line argument 'nic-to-pmd-rx-metadata' to control passing Rx metadata to the PMD. By default it is disabled. Signed-off-by: Hanumanth Pothula v2: - addressed alignment issues - renamed command line argument from rx-metadata to nic-to-pmd-rx-metadata - renamed variable name from rx-metadata to nic_to_pmd_rx_metadata --- app/test-pmd/parameters.c | 4 app/test-pmd/testpmd.c| 6 +- app/test-pmd/testpmd.h| 2 ++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index e3c9757f3f..a381945492 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -213,6 +213,7 @@ usage(char* progname) printf(" --hairpin-mode=0xXX: bitmask set the hairpin port mode.\n" "0x10 - explicit Tx rule, 0x02 - hairpin ports paired\n" "0x01 - hairpin ports loop, 0x00 - hairpin port self\n"); + printf(" --nic-to-pmd-rx-metadata: let the NIC deliver per-packet Rx metadata to PMD\n"); } #ifdef RTE_LIB_CMDLINE @@ -710,6 +711,7 @@ launch_args_parse(int argc, char** argv) { "record-burst-stats", 0, 0, 0 }, { PARAM_NUM_PROCS, 1, 0, 0 }, { PARAM_PROC_ID,1, 0, 0 }, + { "nic-to-pmd-rx-metadata", 0, 0, 0 }, { 0, 0, 0, 0 }, }; @@ -1510,6 +1512,8 @@ launch_args_parse(int argc, char** argv) num_procs = atoi(optarg); if (!strcmp(lgopts[opt_idx].name, PARAM_PROC_ID)) proc_id = atoi(optarg); + if (!strcmp(lgopts[opt_idx].name, "nic-to-pmd-rx-metadata")) + nic_to_pmd_rx_metadata = 1; break; case 'h': usage(argv[0]); diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index addcbcac85..2b17d4f757 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -411,6 +411,9 @@ uint8_t clear_ptypes = true; /* Hairpin ports configuration mode. */ uint16_t hairpin_mode; +/* Send Rx metadata */ +uint8_t nic_to_pmd_rx_metadata; + /* Pretty printing of ethdev events */ static const char * const eth_event_desc[] = { [RTE_ETH_EVENT_UNKNOWN] = "unknown", @@ -1628,7 +1631,8 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id) int ret; int i; - eth_rx_metadata_negotiate_mp(pid); + if (nic_to_pmd_rx_metadata) + eth_rx_metadata_negotiate_mp(pid); port->dev_conf.txmode = tx_mode; port->dev_conf.rxmode = rx_mode; diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index fb2f5195d3..294a9c8cf4 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -621,6 +621,8 @@ extern struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */ extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */ +extern uint8_t nic_to_pmd_rx_metadata; + #ifdef RTE_LIB_GRO #define GRO_DEFAULT_ITEM_NUM_PER_FLOW 32 #define GRO_DEFAULT_FLOW_NUM (RTE_GRO_MAX_BURST_ITEM_NUM / \ -- 2.25.1
[PATCH v2 1/2] version: 22.11-rc0
From: David Marchand Start a new release cycle with empty release notes. The ABI version becomes 23.0. The map files are updated to the new ABI major number (23). The ABI exceptions are dropped and CI ABI checks are disabled because compatibility is not preserved. Special handling of removed drivers is also dropped in check-abi.sh and a note has been added in libabigail.abignore as a reminder. Signed-off-by: David Marchand Acked-by: Thomas Monjalon --- .github/workflows/build.yml| 4 +- .travis.yml| 21 +--- ABI_VERSION| 2 +- VERSION| 2 +- devtools/check-abi.sh | 4 - devtools/libabigail.abignore | 37 -- doc/guides/rel_notes/index.rst | 1 + doc/guides/rel_notes/release_22_11.rst | 136 + drivers/baseband/acc100/version.map| 2 +- drivers/baseband/fpga_5gnr_fec/version.map | 2 +- drivers/baseband/fpga_lte_fec/version.map | 2 +- drivers/baseband/la12xx/version.map| 2 +- drivers/baseband/null/version.map | 2 +- drivers/baseband/turbo_sw/version.map | 2 +- drivers/bus/fslmc/version.map | 2 +- drivers/bus/ifpga/version.map | 2 +- drivers/bus/pci/version.map| 2 +- drivers/bus/vdev/version.map | 2 +- drivers/bus/vmbus/version.map | 2 +- drivers/common/qat/version.map | 2 +- drivers/compress/isal/version.map | 2 +- drivers/compress/mlx5/version.map | 2 +- drivers/compress/octeontx/version.map | 2 +- drivers/compress/zlib/version.map | 2 +- drivers/crypto/armv8/version.map | 2 +- drivers/crypto/bcmfs/version.map | 2 +- drivers/crypto/caam_jr/version.map | 2 +- drivers/crypto/ccp/version.map | 2 +- drivers/crypto/ipsec_mb/version.map| 2 +- drivers/crypto/mlx5/version.map| 2 +- drivers/crypto/mvsam/version.map | 2 +- drivers/crypto/nitrox/version.map | 2 +- drivers/crypto/null/version.map| 2 +- drivers/crypto/octeontx/version.map| 2 +- drivers/crypto/openssl/version.map | 2 +- drivers/crypto/scheduler/version.map | 2 +- drivers/crypto/virtio/version.map | 2 +- drivers/dma/cnxk/version.map | 2 +- drivers/dma/dpaa/version.map | 2 +- drivers/dma/dpaa2/version.map | 2 +- drivers/dma/hisilicon/version.map | 2 +- drivers/dma/idxd/version.map | 2 +- drivers/dma/ioat/version.map | 2 +- drivers/dma/skeleton/version.map | 2 +- drivers/event/dlb2/version.map | 2 +- drivers/event/dpaa/version.map | 2 +- drivers/event/dpaa2/version.map| 2 +- drivers/event/dsw/version.map | 2 +- drivers/event/octeontx/version.map | 2 +- drivers/event/opdl/version.map | 2 +- drivers/event/skeleton/version.map | 2 +- drivers/event/sw/version.map | 2 +- drivers/gpu/cuda/version.map | 2 +- drivers/mempool/bucket/version.map | 2 +- drivers/mempool/dpaa2/version.map | 2 +- drivers/mempool/octeontx/version.map | 2 +- drivers/mempool/ring/version.map | 2 +- drivers/mempool/stack/version.map | 2 +- drivers/net/af_packet/version.map | 2 +- drivers/net/af_xdp/version.map | 2 +- drivers/net/ark/version.map| 2 +- drivers/net/atlantic/version.map | 2 +- drivers/net/avp/version.map| 2 +- drivers/net/axgbe/version.map | 2 +- drivers/net/bnx2x/version.map | 2 +- drivers/net/bnxt/version.map | 2 +- drivers/net/bonding/version.map| 2 +- drivers/net/cnxk/version.map | 12 +- drivers/net/cxgbe/version.map | 2 +- drivers/net/dpaa/version.map | 2 +- drivers/net/dpaa2/version.map | 2 +- drivers/net/e1000/version.map | 2 +- drivers/net/ena/version.map| 2 +- drivers/net/enetc/version.map | 2 +- drivers/net/enetfec/version.map| 2 +- drivers/net/enic/version.map | 2 +- drivers/net/failsafe/version.map | 2 +- drivers/net/fm10k/version.map | 2 +- drivers/net/hinic/version.map | 2 +- drivers/net/hns3/version.map | 2 +- drivers/net/i40e/version.map | 2 +- 
drivers/net/iavf/version.map | 2 +- drivers/net/ice/version.map| 2 +- drivers/net/igc/version.map| 2 +- drivers/net/ionic/version.map | 2 +- drivers
[PATCH v2 1/1] app/testpmd: add command line argument 'nic-to-pmd-rx-metadata'
Presently, Rx metadata is sent to the PMD by default, leading to a performance drop as processing it in the Rx path takes extra cycles. Hence, introduce the command line argument 'nic-to-pmd-rx-metadata' to control passing Rx metadata to the PMD. By default it is disabled. Signed-off-by: Hanumanth Pothula v2: - addressed alignment issues - renamed command line argument from rx-metadata to nic-to-pmd-rx-metadata - renamed variable name from rx-metadata to nic_to_pmd_rx_metadata --- app/test-pmd/parameters.c | 4 app/test-pmd/testpmd.c| 6 +- app/test-pmd/testpmd.h| 2 ++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index e3c9757f3f..a381945492 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -213,6 +213,7 @@ usage(char* progname) printf(" --hairpin-mode=0xXX: bitmask set the hairpin port mode.\n" "0x10 - explicit Tx rule, 0x02 - hairpin ports paired\n" "0x01 - hairpin ports loop, 0x00 - hairpin port self\n"); + printf(" --nic-to-pmd-rx-metadata: let the NIC deliver per-packet Rx metadata to PMD\n"); } #ifdef RTE_LIB_CMDLINE @@ -710,6 +711,7 @@ launch_args_parse(int argc, char** argv) { "record-burst-stats", 0, 0, 0 }, { PARAM_NUM_PROCS, 1, 0, 0 }, { PARAM_PROC_ID,1, 0, 0 }, + { "nic-to-pmd-rx-metadata", 0, 0, 0 }, { 0, 0, 0, 0 }, }; @@ -1510,6 +1512,8 @@ launch_args_parse(int argc, char** argv) num_procs = atoi(optarg); if (!strcmp(lgopts[opt_idx].name, PARAM_PROC_ID)) proc_id = atoi(optarg); + if (!strcmp(lgopts[opt_idx].name, "nic-to-pmd-rx-metadata")) + nic_to_pmd_rx_metadata = 1; break; case 'h': usage(argv[0]); diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index addcbcac85..2b17d4f757 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -411,6 +411,9 @@ uint8_t clear_ptypes = true; /* Hairpin ports configuration mode. */ uint16_t hairpin_mode; +/* Send Rx metadata */ +uint8_t nic_to_pmd_rx_metadata; + /* Pretty printing of ethdev events */ static const char * const eth_event_desc[] = { [RTE_ETH_EVENT_UNKNOWN] = "unknown", @@ -1628,7 +1631,8 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id) int ret; int i; - eth_rx_metadata_negotiate_mp(pid); + if (nic_to_pmd_rx_metadata) + eth_rx_metadata_negotiate_mp(pid); port->dev_conf.txmode = tx_mode; port->dev_conf.rxmode = rx_mode; diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index fb2f5195d3..294a9c8cf4 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -621,6 +621,8 @@ extern struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */ extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. */ +extern uint8_t nic_to_pmd_rx_metadata; + #ifdef RTE_LIB_GRO #define GRO_DEFAULT_ITEM_NUM_PER_FLOW 32 #define GRO_DEFAULT_FLOW_NUM (RTE_GRO_MAX_BURST_ITEM_NUM / \ -- 2.25.1
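Once negotiation is enabled and a MARK flow rule is installed, the negotiated metadata surfaces on received mbufs. A small sketch of what a consumer might check, assuming the standard mbuf layout (the handler name is hypothetical):

#include <stdio.h>
#include <rte_mbuf.h>

/* Sketch: with MARK delivery negotiated, the 32-bit mark set by a
 * matching flow rule arrives in mbuf->hash.fdir.hi, flagged by
 * RTE_MBUF_F_RX_FDIR_ID in ol_flags. */
static void
handle_rx_burst(struct rte_mbuf **pkts, uint16_t nb)
{
	uint16_t i;

	for (i = 0; i < nb; i++) {
		if (pkts[i]->ol_flags & RTE_MBUF_F_RX_FDIR_ID)
			printf("pkt %u mark %u\n", i, pkts[i]->hash.fdir.hi);
	}
}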
[PATCH v1 1/1] ethdev: introduce pool sort capability
Presently, the 'Buffer Split' feature supports sending multiple segments of the received packet to the PMD, which programs the HW to receive the packet in segments from different pools. This patch extends the feature to support the pool sort capability. Some HW has support for choosing memory pools based on the packet's size. The pool sort capability allows the PMD to choose a memory pool based on the packet's length. This is often useful for saving memory: the application can create different pools to steer specific packet sizes, thus enabling effective use of memory. For example, let's say HW has a capability of three pools, - pool-1 size is 2K - pool-2 size is > 2K and < 4K - pool-3 size is > 4K Here, pool-1 can accommodate packets with sizes < 2K, pool-2 can accommodate packets with sizes > 2K and < 4K, and pool-3 can accommodate packets with sizes > 4K. With the pool sort capability enabled in SW, an application may create three pools of different sizes and send them to the PMD, allowing the PMD to program the HW based on packet lengths, so that packets shorter than 2K are received on pool-1, packets with lengths between 2K and 4K are received on pool-2, and packets greater than 4K are received on pool-3. The following two capabilities are added to the rte_eth_rxseg_capa structure, 1. pool_sort --> indicates the pool sort capability is supported by HW. 2. max_npool --> max number of pools supported by HW. A new structure, rte_eth_rxseg_sort, is defined, to be used only when the pool sort capability is present. If required, this may be extended further to support more configurations. Signed-off-by: Hanumanth Pothula --- lib/ethdev/rte_ethdev.c | 81 ++--- lib/ethdev/rte_ethdev.h | 46 +-- 2 files changed, 119 insertions(+), 8 deletions(-) diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 1979dc0850..e21a651787 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -1634,6 +1634,54 @@ rte_eth_dev_is_removed(uint16_t port_id) return ret; } +static int +rte_eth_rx_queue_check_sort(const struct rte_eth_rxseg_sort *rx_seg, +uint16_t n_seg, uint32_t *mbp_buf_size, +const struct rte_eth_dev_info *dev_info) +{ + const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; + uint16_t seg_idx; + + if (!seg_capa->multi_pools || n_seg > seg_capa->max_npool) { + RTE_ETHDEV_LOG(ERR, + "Invalid capabilities, multi_pools:%d different length segments %u exceed supported %u\n", + seg_capa->multi_pools, n_seg, seg_capa->max_npool); + return -EINVAL; + } + + for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { + struct rte_mempool *mpl = rx_seg[seg_idx].mp; + uint32_t length = rx_seg[seg_idx].length; + + if (mpl == NULL) { + RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); + return -EINVAL; + } + + if (mpl->private_data_size < + sizeof(struct rte_pktmbuf_pool_private)) { + RTE_ETHDEV_LOG(ERR, + "%s private_data_size %u < %u\n", + mpl->name, mpl->private_data_size, + (unsigned int)sizeof + (struct rte_pktmbuf_pool_private)); + return -ENOSPC; + } + + *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); + length = length != 0 ? length : (*mbp_buf_size - RTE_PKTMBUF_HEADROOM); + if (*mbp_buf_size < length + RTE_PKTMBUF_HEADROOM) { + RTE_ETHDEV_LOG(ERR, + "%s mbuf_data_room_size %u < %u\n", + mpl->name, *mbp_buf_size, + length); + return -EINVAL; + } + } + + return 0; +} + static int rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg, uint32_t *mbp_buf_size, @@ -1693,7 +1741,11 @@ rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, } offset += seg_idx != 0 ? 0 : RTE_PKTMBUF_HEADROOM; *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); - length = length != 0 ? length : *mbp_buf_size; + /* On segment length == 0, update segment's length with +* the pool's length - headroom space, to make sure enough +
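As a worked example of the pool layout the commit message describes, an application could create the small and large pools like this; the pool names, element counts, and sizes below are illustrative only, and the data room must include RTE_PKTMBUF_HEADROOM:

#include <rte_lcore.h>
#include <rte_mbuf.h>

/* Sketch: two pools sized so the HW can sort ~2K packets into one and
 * jumbo (up to ~9K) packets into the other. */
static int
create_sort_pools(struct rte_mempool **small_pool,
		  struct rte_mempool **large_pool)
{
	*small_pool = rte_pktmbuf_pool_create("pool_2k", 8192, 256, 0,
			RTE_PKTMBUF_HEADROOM + 2048, rte_socket_id());
	*large_pool = rte_pktmbuf_pool_create("pool_9k", 2048, 256, 0,
			RTE_PKTMBUF_HEADROOM + 9216, rte_socket_id());
	return (*small_pool != NULL && *large_pool != NULL) ? 0 : -1;
}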
[PATCH v2 1/3] ethdev: introduce pool sort capability
Presently, the 'Buffer Split' feature supports sending multiple segments of the received packet to the PMD, which programs the HW to receive the packet in segments from different pools. This patch extends the feature to support the pool sort capability. Some HW has support for choosing memory pools based on the packet's size. The pool sort capability allows the PMD to choose a memory pool based on the packet's length. This is often useful for saving memory: the application can create different pools to steer specific packet sizes, thus enabling effective use of memory. For example, let's say HW has a capability of three pools, - pool-1 size is 2K - pool-2 size is > 2K and < 4K - pool-3 size is > 4K Here, pool-1 can accommodate packets with sizes < 2K, pool-2 can accommodate packets with sizes > 2K and < 4K, and pool-3 can accommodate packets with sizes > 4K. With the pool sort capability enabled in SW, an application may create three pools of different sizes and send them to the PMD, allowing the PMD to program the HW based on packet lengths, so that packets shorter than 2K are received on pool-1, packets with lengths between 2K and 4K are received on pool-2, and packets greater than 4K are received on pool-3. The following two capabilities are added to the rte_eth_rxseg_capa structure, 1. pool_sort --> indicates the pool sort capability is supported by HW. 2. max_npool --> max number of pools supported by HW. A new structure, rte_eth_rxseg_sort, is defined, to be used only when the pool sort capability is present. If required, this may be extended further to support more configurations. Signed-off-by: Hanumanth Pothula v2: - Along with spec changes, uploading testpmd and driver changes. --- lib/ethdev/rte_ethdev.c | 87 +++-- lib/ethdev/rte_ethdev.h | 45 +++-- 2 files changed, 118 insertions(+), 14 deletions(-) diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index 1979dc0850..7fd5443eb8 100644 --- a/lib/ethdev/rte_ethdev.c +++ b/lib/ethdev/rte_ethdev.c @@ -1635,7 +1635,55 @@ rte_eth_dev_is_removed(uint16_t port_id) } static int -rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, +rte_eth_rx_queue_check_sort(const struct rte_eth_rxseg *rx_seg, +uint16_t n_seg, uint32_t *mbp_buf_size, +const struct rte_eth_dev_info *dev_info) +{ + const struct rte_eth_rxseg_capa *seg_capa = &dev_info->rx_seg_capa; + uint16_t seg_idx; + + if (!seg_capa->multi_pools || n_seg > seg_capa->max_npool) { + RTE_ETHDEV_LOG(ERR, + "Invalid capabilities, multi_pools:%d different length segments %u exceed supported %u\n", + seg_capa->multi_pools, n_seg, seg_capa->max_npool); + return -EINVAL; + } + + for (seg_idx = 0; seg_idx < n_seg; seg_idx++) { + struct rte_mempool *mpl = rx_seg[seg_idx].sort.mp; + uint32_t length = rx_seg[seg_idx].sort.length; + + if (mpl == NULL) { + RTE_ETHDEV_LOG(ERR, "null mempool pointer\n"); + return -EINVAL; + } + + if (mpl->private_data_size < + sizeof(struct rte_pktmbuf_pool_private)) { + RTE_ETHDEV_LOG(ERR, + "%s private_data_size %u < %u\n", + mpl->name, mpl->private_data_size, + (unsigned int)sizeof + (struct rte_pktmbuf_pool_private)); + return -ENOSPC; + } + + *mbp_buf_size = rte_pktmbuf_data_room_size(mpl); + length = length != 0 ? length : (*mbp_buf_size - RTE_PKTMBUF_HEADROOM); + if (*mbp_buf_size < length + RTE_PKTMBUF_HEADROOM) { + RTE_ETHDEV_LOG(ERR, + "%s mbuf_data_room_size %u < %u\n", + mpl->name, *mbp_buf_size, + length); + return -EINVAL; + } + } + + return 0; +} + +static int +rte_eth_rx_queue_check_split(const struct rte_eth_rxseg *rx_seg, uint16_t n_seg, uint32_t *mbp_buf_size, const struct rte_eth_dev_info *dev_info) { @@ -1654,12 +1702,12 @@ rte_eth_rx_queue_check_split(const struct rte_eth_rxseg_split *rx_seg, * Check the sizes and offsets against buffer sizes * for each segment specified in extended configuration. */ - mp_first = rx_seg[0].mp; + mp_first = rx_seg[0].split.mp; offset_mask = RTE_BIT
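The per-segment validation added above boils down to one rule. Restated as a standalone helper for clarity (a sketch, not the patch's exact code): a zero length falls back to the pool's data room minus headroom, and the data room must cover the length plus headroom:

#include <errno.h>
#include <stdint.h>
#include <rte_mbuf.h>

/* Sketch of the sort-segment validation rule. */
static int
check_sort_seg(struct rte_mempool *mp, uint32_t length)
{
	uint32_t room = rte_pktmbuf_data_room_size(mp);

	if (length == 0)
		length = room - RTE_PKTMBUF_HEADROOM;
	return (room < length + RTE_PKTMBUF_HEADROOM) ? -EINVAL : 0;
}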
[PATCH v2 2/3] app/testpmd: add command line argument 'rxseg-mode'
With the 'rxseg-mode' command line argument, the application can choose either the buffer split or the pool sort capability. This might be helpful if HW has support for both capabilities and the application wants to enable one of them. By default, the buffer-split capability is enabled; to enable the pool-sort capability, pass the command line argument '--rxseg-mode=2'. Signed-off-by: Hanumanth Pothula --- app/test-pmd/parameters.c | 16 app/test-pmd/testpmd.c| 35 ++- app/test-pmd/testpmd.h| 2 ++ app/test-pmd/util.c | 4 ++-- 4 files changed, 46 insertions(+), 11 deletions(-) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index e3c9757f3f..c3876a9b5f 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -213,6 +213,9 @@ usage(char* progname) printf(" --hairpin-mode=0xXX: bitmask set the hairpin port mode.\n" "0x10 - explicit Tx rule, 0x02 - hairpin ports paired\n" "0x01 - hairpin ports loop, 0x00 - hairpin port self\n"); + printf(" --rxseg-mode: provide rxseg capability\n" + "1 - Buffer-split capability\n" + "2 - Pool-sort capability\n"); } #ifdef RTE_LIB_CMDLINE @@ -710,6 +713,7 @@ launch_args_parse(int argc, char** argv) { "record-burst-stats", 0, 0, 0 }, { PARAM_NUM_PROCS, 1, 0, 0 }, { PARAM_PROC_ID,1, 0, 0 }, + { "rxseg-mode", 1, 0, 0 }, { 0, 0, 0, 0 }, }; @@ -1510,6 +1514,18 @@ launch_args_parse(int argc, char** argv) num_procs = atoi(optarg); if (!strcmp(lgopts[opt_idx].name, PARAM_PROC_ID)) proc_id = atoi(optarg); + if (!strcmp(lgopts[opt_idx].name, "rxseg-mode")) { + char *end = NULL; + unsigned int n; + + errno = 0; + n = strtoul(optarg, &end, 0); + if (errno != 0 || end == optarg || + n < RTE_ETH_RXSEG_MODE_SPLIT || n > RTE_ETH_RXSEG_MODE_SORT) + rte_exit(EXIT_FAILURE, "invalid rxseg mode\n"); + else + rxseg_mode = (uint8_t)n; + } break; case 'h': usage(argv[0]); diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index addcbcac85..b5b4fcd66e 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -411,6 +411,9 @@ uint8_t clear_ptypes = true; /* Hairpin ports configuration mode. */ uint16_t hairpin_mode; +/* send Rxseg mode */ +uint8_t rxseg_mode = RTE_ETH_RXSEG_MODE_SPLIT; + /* Pretty printing of ethdev events */ static const char * const eth_event_desc[] = { [RTE_ETH_EVENT_UNKNOWN] = "unknown", @@ -2656,7 +2659,7 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id, struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { - union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + struct rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; unsigned int i, mp_n; int ret; @@ -2670,24 +2673,38 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, goto exit; } for (i = 0; i < rx_pkt_nb_segs; i++) { - struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + struct rte_eth_rxseg_split *rx_split = &rx_useg[i].split; + struct rte_eth_rxseg_sort *rx_sort = &rx_useg[i].sort; struct rte_mempool *mpx; + /* * Use last valid pool for the segments with number * exceeding the pool index. */ mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i; mpx = mbuf_pool_find(socket_id, mp_n); - /* Handle zero as mbuf data buffer size. */ - rx_seg->length = rx_pkt_seg_lengths[i] ? - rx_pkt_seg_lengths[i] : - mbuf_data_size[mp_n]; - rx_seg->offset = i < rx_pkt_nb_offs ? - rx_pkt_seg_offsets[i] : 0; - rx_seg->mp = mpx ? mpx : mp; + if (rxseg_mode == RTE_ETH_RXSEG_MODE_SPLIT) { + /** +* On Segment length zero, update length as, +* buffer size - headroom size
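The strtoul() block above is the usual safe-parse idiom. Isolated into a helper (a sketch; the 1..2 bounds stand in for RTE_ETH_RXSEG_MODE_SPLIT and RTE_ETH_RXSEG_MODE_SORT as proposed in this series):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Sketch: reject non-numeric, empty, trailing-garbage, and
 * out-of-range values instead of trusting atoi(). */
static int
parse_rxseg_mode(const char *arg, uint8_t *mode)
{
	char *end = NULL;
	unsigned long n;

	errno = 0;
	n = strtoul(arg, &end, 0);
	if (errno != 0 || end == arg || *end != '\0' || n < 1 || n > 2)
		return -1;
	*mode = (uint8_t)n;
	return 0;
}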
[PATCH v2 3/3] net/cnxk: introduce pool sort capability
Presently, HW is programmed to receive packets only from the LPB pool, so all packets are received on the LPB pool. But CNXK HW supports two pools, - SPB -> packets with smaller size (less than 4K) - LPB -> packets with bigger size (greater than 4K) This patch enables the pool sort capability: the pool is selected based on the packet's length. So, basically, the PMD programs the HW to receive packets from both the SPB and LPB pools based on the packet's length. This is achieved by enabling the Rx buffer split offload, RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT. This allows the application to send more than one pool (in our case, two) to the driver, with different segment (packet) lengths, which helps the driver to configure both pools based on segment lengths. This is often useful for saving memory, where the application can create different pools to steer specific packet sizes, thus enabling effective use of memory. Signed-off-by: Hanumanth Pothula --- doc/guides/nics/features/cnxk.ini | 1 + doc/guides/nics/features/cnxk_vec.ini | 1 + drivers/net/cnxk/cnxk_ethdev.c| 93 --- drivers/net/cnxk/cnxk_ethdev.h| 4 +- drivers/net/cnxk/cnxk_ethdev_ops.c| 7 ++ 5 files changed, 96 insertions(+), 10 deletions(-) diff --git a/doc/guides/nics/features/cnxk.ini b/doc/guides/nics/features/cnxk.ini index 1876fe86c7..e1584ed740 100644 --- a/doc/guides/nics/features/cnxk.ini +++ b/doc/guides/nics/features/cnxk.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features. ; [Features] +pool sort = Y Speed capabilities = Y Rx interrupt = Y Lock-free Tx queue = Y diff --git a/doc/guides/nics/features/cnxk_vec.ini b/doc/guides/nics/features/cnxk_vec.ini index 5d0976e6ce..a63d35aae7 100644 --- a/doc/guides/nics/features/cnxk_vec.ini +++ b/doc/guides/nics/features/cnxk_vec.ini @@ -4,6 +4,7 @@ ; Refer to default.ini for the full list of available PMD features. ; [Features] +pool sort = Y Speed capabilities = Y Rx interrupt = Y Lock-free Tx queue = Y diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c index 24182909f1..6bf04dde96 100644 --- a/drivers/net/cnxk/cnxk_ethdev.c +++ b/drivers/net/cnxk/cnxk_ethdev.c @@ -537,6 +537,64 @@ cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid) plt_free(txq_sp); } +static int +cnxk_nix_process_rx_conf(const struct rte_eth_rxconf *rx_conf, +struct rte_mempool **lpb_pool, struct rte_mempool **spb_pool, +uint16_t *lpb_len, uint16_t *spb_len) +{ + struct rte_eth_rxseg_sort rx_seg0; + struct rte_eth_rxseg_sort rx_seg1; + const char *platform_ops; + struct rte_mempool_ops *ops; + + if (*lpb_pool || !rx_conf->rx_seg || rx_conf->rx_nseg != CNXK_NIX_NUM_POOLS_MAX || + !rx_conf->rx_seg[0].sort.mp || !rx_conf->rx_seg[1].sort.mp) { + plt_err("invalid arguments"); + return -EINVAL; + } + + rx_seg0 = rx_conf->rx_seg[0].sort; + rx_seg1 = rx_conf->rx_seg[1].sort; + + if (rx_seg0.length >= rx_seg0.mp->elt_size || rx_seg1.length >= rx_seg1.mp->elt_size) { + plt_err("mismatch in packet length & pool length seg0_len:%u pool0_len:%u "\ + "seg1_len:%u pool1_len:%u", rx_seg0.length, rx_seg0.mp->elt_size, + rx_seg1.length, rx_seg1.mp->elt_size); + return -EINVAL; + } + + if (rx_seg0.length > rx_seg1.length) { + *lpb_pool = rx_seg0.mp; + *spb_pool = rx_seg1.mp; + + *lpb_len = rx_seg0.length; + *spb_len = rx_seg1.length; + } else { + *lpb_pool = rx_seg1.mp; + *spb_pool = rx_seg0.mp; + + *lpb_len = rx_seg1.length; + *spb_len = rx_seg0.length; + } + + if ((*spb_pool)->pool_id == 0) { + plt_err("Invalid pool_id"); + return -EINVAL; + } + + platform_ops = rte_mbuf_platform_mempool_ops(); + ops = rte_mempool_get_ops((*spb_pool)->ops_index); + if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) { + plt_err("mempool ops should be of cnxk_npa type"); + return -EINVAL; + } + + plt_info("spb_pool:%s lpb_pool:%s lpb_len:%u spb_len:%u\n", (*spb_pool)->name, +(*lpb_pool)->name, *lpb_len, *spb_len); + + return 0; +} + int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint32_t nb_desc, uint16_t fp_rx_q_sz, @@ -553,6 +611,10 @@ cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid, uint16_t first_skip; int rc = -EINVAL; size_t rxq_sz; + uint16_t lpb_len = 0; + ui
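The heart of cnxk_nix_process_rx_conf() is the ordering rule: whichever user pool carries the larger segment length becomes the LPB (large packet) pool, the other the SPB pool. A reduced sketch of just that rule (the struct and names below are illustrative, not driver code):

#include <stdint.h>
#include <rte_mempool.h>

struct pool_pair { struct rte_mempool *lpb, *spb; };

/* Sketch: classify two user pools by their segment lengths. */
static struct pool_pair
classify_pools(struct rte_mempool *mp0, uint32_t len0,
	       struct rte_mempool *mp1, uint32_t len1)
{
	struct pool_pair p;

	if (len0 > len1) {
		p.lpb = mp0;
		p.spb = mp1;
	} else {
		p.lpb = mp1;
		p.spb = mp0;
	}
	return p;
}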
[PATCH v1 1/1] net/cnxk: fix failure to configure large Rx/Tx queue counts
While configuring NIX, the local variables 'nb_rxq' and 'nb_txq' are declared as 8-bit variables, leading to an integer overflow when an application requests an Rxq/Txq count greater than 255. Hence, declare the local variables 'nb_rxq' and 'nb_txq' as 16-bit variables. Also, during cleanup, make sure the PFC tree is not created. Signed-off-by: Hanumanth Pothula --- drivers/net/cnxk/cnxk_ethdev.c | 2 +- drivers/net/cnxk/cnxk_ethdev_ops.c | 6 -- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c index cfcc4df916..c0a8e901a3 100644 --- a/drivers/net/cnxk/cnxk_ethdev.c +++ b/drivers/net/cnxk/cnxk_ethdev.c @@ -1074,7 +1074,7 @@ cnxk_nix_configure(struct rte_eth_dev *eth_dev) struct roc_nix_fc_cfg fc_cfg = {0}; struct roc_nix *nix = &dev->nix; struct rte_ether_addr *ea; - uint8_t nb_rxq, nb_txq; + uint16_t nb_rxq, nb_txq; uint64_t rx_cfg; void *qs; int rc; diff --git a/drivers/net/cnxk/cnxk_ethdev_ops.c b/drivers/net/cnxk/cnxk_ethdev_ops.c index 1592971073..b417d61771 100644 --- a/drivers/net/cnxk/cnxk_ethdev_ops.c +++ b/drivers/net/cnxk/cnxk_ethdev_ops.c @@ -1142,8 +1142,10 @@ nix_priority_flow_ctrl_sq_conf(struct rte_eth_dev *eth_dev, uint16_t qid, if (qid >= eth_dev->data->nb_tx_queues) return -ENOTSUP; - /* Check if RX pause frame is enabled or not */ - if (!pfc->rx_pause_en) { + /* Check if RX pause frame is enabled or not and +* confirm the user requested PFC. +*/ + if (!pfc->rx_pause_en && rx_pause) { if ((roc_nix_tm_tree_type_get(nix) == ROC_NIX_TM_DEFAULT) && eth_dev->data->nb_tx_queues > 1) { /* -- 2.25.1
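The overflow is easy to reproduce in isolation. A minimal standalone program showing the truncation the type change avoids:

#include <stdint.h>
#include <stdio.h>

/* Demonstrates the truncation the patch fixes: storing a queue count
 * above 255 in a uint8_t silently wraps. */
int main(void)
{
	uint16_t requested = 256;
	uint8_t nb_rxq8 = (uint8_t)requested;   /* old type: wraps to 0 */
	uint16_t nb_rxq16 = requested;          /* fixed type: keeps 256 */

	printf("8-bit: %u, 16-bit: %u\n", nb_rxq8, nb_rxq16);
	return 0;
}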
[PATCH v1 1/1] net/cnxk: program DF bit appropriately in vector mode
In vector mode, the DF bit is not programmed correctly because the return value of vsetq_lane_u64(), which holds the updated vector, is ignored; this leads the HW to free mbufs even though the NIX_TX_OFFLOAD_MBUF_NOFF_F flag is set. Hence, save the return value of vsetq_lane_u64() so that the DF bit is programmed correctly. Signed-off-by: Hanumanth Pothula --- drivers/net/cnxk/cn10k_tx.h | 8 drivers/net/cnxk/cn9k_tx.h | 8 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h index ea13866b20..fb33e7150b 100644 --- a/drivers/net/cnxk/cn10k_tx.h +++ b/drivers/net/cnxk/cn10k_tx.h @@ -2477,28 +2477,28 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, mbuf3 = (uint64_t *)tx_pkts[3]; if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0)) - vsetq_lane_u64(0x8, xmask01, 0); + xmask01 = vsetq_lane_u64(0x8, xmask01, 0); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf0)->pool, (void **)&mbuf0, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1)) - vsetq_lane_u64(0x8, xmask01, 1); + xmask01 = vsetq_lane_u64(0x8, xmask01, 1); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf1)->pool, (void **)&mbuf1, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2)) - vsetq_lane_u64(0x8, xmask23, 0); + xmask23 = vsetq_lane_u64(0x8, xmask23, 0); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf2)->pool, (void **)&mbuf2, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3)) - vsetq_lane_u64(0x8, xmask23, 1); + xmask23 = vsetq_lane_u64(0x8, xmask23, 1); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf3)->pool, diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h index 6ce81f5c96..a609814dfb 100644 --- a/drivers/net/cnxk/cn9k_tx.h +++ b/drivers/net/cnxk/cn9k_tx.h @@ -1705,28 +1705,28 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, mbuf3 = (uint64_t *)tx_pkts[3]; if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0)) - vsetq_lane_u64(0x8, xmask01, 0); + xmask01 = vsetq_lane_u64(0x8, xmask01, 0); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf0)->pool, (void **)&mbuf0, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1)) - vsetq_lane_u64(0x8, xmask01, 1); + xmask01 = vsetq_lane_u64(0x8, xmask01, 1); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf1)->pool, (void **)&mbuf1, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2)) - vsetq_lane_u64(0x8, xmask23, 0); + xmask23 = vsetq_lane_u64(0x8, xmask23, 0); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf2)->pool, (void **)&mbuf2, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3)) - vsetq_lane_u64(0x8, xmask23, 1); + xmask23 = vsetq_lane_u64(0x8, xmask23, 1); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf3)->pool, -- 2.25.1
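The underlying gotcha is that NEON intrinsics are pure functions: vsetq_lane_u64() returns the modified vector instead of mutating its argument. A reduced sketch (ARM-only; the lane-selection logic is illustrative):

#include <arm_neon.h>

/* Sketch: the result of vsetq_lane_u64() must be assigned back;
 * calling it for its side effect, as the old code did, is a no-op. */
static uint64x2_t
set_df_bit(uint64x2_t xmask, int use_lane1)
{
	/* Wrong: vsetq_lane_u64(0x8, xmask, 0); -- result discarded. */
	if (use_lane1)
		xmask = vsetq_lane_u64(0x8, xmask, 1);
	else
		xmask = vsetq_lane_u64(0x8, xmask, 0);
	return xmask;
}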
[PATCH] event/octeontx: resolve possible integer overflow
The last argument passed to ssovf_parsekv() is an unsigned char *, but it is accessed as an int. This can lead to an integer overflow. Hence, ensure the argument is accessed as a uint8_t, and for better error handling use strtol() instead of atoi(). Signed-off-by: Hanumanth Pothula --- drivers/event/octeontx/ssovf_evdev.c | 12 ++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c index 3a933b1db7..ccb447d33a 100644 --- a/drivers/event/octeontx/ssovf_evdev.c +++ b/drivers/event/octeontx/ssovf_evdev.c @@ -719,8 +719,16 @@ ssovf_close(struct rte_eventdev *dev) static int ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque) { - int *flag = opaque; - *flag = !!atoi(value); + uint8_t *flag = (uint8_t *)opaque; + char *end; + + errno = 0; + *flag = (uint8_t)strtol(value, &end, 0); + if ((errno != 0) || (value == end)) { + ssovf_log_err("fail to get key val ret:%d err:%d", *flag, errno); + return -EINVAL; + } + return 0; } -- 2.25.1
[PATCH v2 1/1] event/octeontx: resolve possible integer overflow
The last argument passed to ssovf_parsekv() is an unsigned char *, but it is accessed as an int. This can lead to an integer overflow. Hence, ensure the argument is accessed as a uint8_t, and for better error handling use strtoul() instead of atoi(). Signed-off-by: Hanumanth Pothula --- v2: use strtoul instead of strtol --- drivers/event/octeontx/ssovf_evdev.c | 16 +--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c index 3a933b1db7..d2ab8011e1 100644 --- a/drivers/event/octeontx/ssovf_evdev.c +++ b/drivers/event/octeontx/ssovf_evdev.c @@ -717,10 +717,20 @@ ssovf_close(struct rte_eventdev *dev) } static int -ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque) +ssovf_parsekv(const char *key, const char *value, void *opaque) { - int *flag = opaque; - *flag = !!atoi(value); + uint8_t *flag = opaque; + uint64_t v; + char *end; + + errno = 0; + v = strtoul(value, &end, 0); + if ((errno != 0) || (value == end) || *end != '\0') { + ssovf_log_err("invalid %s value %s", key, value); + return -EINVAL; + } + + *flag = !!v; return 0; } -- 2.25.1
[PATCH v4 1/1] event/octeontx: fix possible integer overflow
The last argument passed to ssovf_parsekv() is an unsigned char *, but it is accessed as an int. This can lead to an integer overflow. Hence, ensure the argument is accessed as a uint8_t, and for better error handling use strtoul() instead of atoi(). Bugzilla ID: 1512 Fixes: 3516327e00fd ("event/octeontx: add selftest to device arguments") Signed-off-by: Hanumanth Pothula --- v2: Use strtoul instead of strtol v3: Add value boundary check. Here, value can be either 0 or 1. v4: Commit text update --- drivers/event/octeontx/ssovf_evdev.c | 16 +--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c index 3a933b1db7..957fcab04e 100644 --- a/drivers/event/octeontx/ssovf_evdev.c +++ b/drivers/event/octeontx/ssovf_evdev.c @@ -717,10 +717,20 @@ ssovf_close(struct rte_eventdev *dev) } static int -ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque) +ssovf_parsekv(const char *key, const char *value, void *opaque) { - int *flag = opaque; - *flag = !!atoi(value); + uint8_t *flag = opaque; + uint64_t v; + char *end; + + errno = 0; + v = strtoul(value, &end, 0); + if ((errno != 0) || (value == end) || *end != '\0' || v > 1) { + ssovf_log_err("invalid %s value %s", key, value); + return -EINVAL; + } + + *flag = !!v; return 0; } -- 2.25.1
[PATCH v3 1/1] event/octeontx: resolve possible integer overflow
The last argument passed to ssovf_parsekv() is an unsigned char *, but it is accessed as an int. This can lead to an integer overflow. Hence, ensure the argument is accessed as a uint8_t, and for better error handling use strtoul() instead of atoi(). Signed-off-by: Hanumanth Pothula --- v2: use strtoul instead of strtol v3: Add value boundary check. Here, value can be either 0 or 1. --- drivers/event/octeontx/ssovf_evdev.c | 16 +--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c index 3a933b1db7..957fcab04e 100644 --- a/drivers/event/octeontx/ssovf_evdev.c +++ b/drivers/event/octeontx/ssovf_evdev.c @@ -717,10 +717,20 @@ ssovf_close(struct rte_eventdev *dev) } static int -ssovf_parsekv(const char *key __rte_unused, const char *value, void *opaque) +ssovf_parsekv(const char *key, const char *value, void *opaque) { - int *flag = opaque; - *flag = !!atoi(value); + uint8_t *flag = opaque; + uint64_t v; + char *end; + + errno = 0; + v = strtoul(value, &end, 0); + if ((errno != 0) || (value == end) || *end != '\0' || v > 1) { + ssovf_log_err("invalid %s value %s", key, value); + return -EINVAL; + } + + *flag = !!v; return 0; } -- 2.25.1
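For reference, a parser like ssovf_parsekv() is invoked through the kvargs machinery. A sketch of the wiring, assuming the driver's 'selftest' devargs key (as used by event_octeontx) and reusing ssovf_parsekv() from the patch above:

#include <errno.h>
#include <stdint.h>
#include <rte_kvargs.h>

/* Sketch: split "selftest=1" style devargs into key/value pairs and
 * route the value to ssovf_parsekv(), which validates and fills the
 * flag pointed to by 'opaque'. */
static int
parse_devargs(const char *args, uint8_t *selftest)
{
	struct rte_kvargs *kvlist = rte_kvargs_parse(args, NULL);
	int rc;

	if (kvlist == NULL)
		return -EINVAL;
	rc = rte_kvargs_process(kvlist, "selftest", ssovf_parsekv, selftest);
	rte_kvargs_free(kvlist);
	return rc;
}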