Adds "--rxq-share=X" parameter to enable shared RxQ, share if device supports, otherwise fallback to standard RxQ.
 app/test-pmd/config.c                 |  6 +++++-
 app/test-pmd/parameters.c             | 13 +++++++++++++
 app/test-pmd/testpmd.c                | 12 ++++++++++++
 app/test-pmd/testpmd.h                |  2 ++
 doc/guides/testpmd_app_ug/run_app.rst |  7 +++++++
 5 files changed, 39 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 9c66329e96e..96fc2ab888b 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2709,7 +2709,11 @@ rxtx_config_display(void)
 			printf("      RX threshold registers: pthresh=%d hthresh=%d "
 				" wthresh=%d\n",
 				pthresh_tmp, hthresh_tmp, wthresh_tmp);
-			printf("      RX Offloads=0x%"PRIx64"\n", offloads_tmp);
+			printf("      RX Offloads=0x%"PRIx64, offloads_tmp);
+			if (rx_conf->share_group > 0)
+				printf(" share group=%u",
+				       rx_conf->share_group);
+			printf("\n");
 		}
 
 		/* per tx queue config only for first queue to be less verbose */
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 3f94a82e321..30dae326310 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -167,6 +167,7 @@ usage(char* progname)
 	printf("  --tx-ip=src,dst: IP addresses in Tx-only mode\n");
 	printf("  --tx-udp=src[,dst]: UDP ports in Tx-only mode\n");
 	printf("  --eth-link-speed: force link speed.\n");
+	printf("  --rxq-share: number of ports per shared Rx queue group, defaults to MAX (1 group)\n");
 	printf("  --disable-link-check: disable check on link status when "
 	       "starting/stopping ports.\n");
 	printf("  --disable-device-start: do not automatically start port\n");
@@ -607,6 +608,7 @@ launch_args_parse(int argc, char** argv)
 		{ "rxpkts",			1, 0, 0 },
 		{ "txpkts",			1, 0, 0 },
 		{ "txonly-multi-flow",		0, 0, 0 },
+		{ "rxq-share",			2, 0, 0 },
 		{ "eth-link-speed",		1, 0, 0 },
 		{ "disable-link-check",		0, 0, 0 },
 		{ "disable-device-start",	0, 0, 0 },
@@ -1271,6 +1273,17 @@ launch_args_parse(int argc, char** argv)
 			}
 			if (!strcmp(lgopts[opt_idx].name, "txonly-multi-flow"))
 				txonly_multi_flow = 1;
+			if (!strcmp(lgopts[opt_idx].name, "rxq-share")) {
+				if (optarg == NULL) {
+					rxq_share = UINT32_MAX;
+				} else {
+					n = atoi(optarg);
+					if (n >= 0)
+						rxq_share = (uint32_t)n;
+					else
+						rte_exit(EXIT_FAILURE, "rxq-share must be >= 0\n");
+				}
+			}
 			if (!strcmp(lgopts[opt_idx].name, "no-flush-rx"))
 				no_flush_rx = 1;
 			if (!strcmp(lgopts[opt_idx].name, "eth-link-speed")) {
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 97ae52e17ec..9c26301d397 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -498,6 +498,11 @@ uint8_t record_core_cycles;
  */
 uint8_t record_burst_stats;
 
+/*
+ * Number of ports per shared Rx queue group, 0 to disable.
+ */
+uint32_t rxq_share;
+
 unsigned int num_sockets = 0;
 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
 
@@ -3401,6 +3406,13 @@ rxtx_port_config(struct rte_port *port)
 	for (qid = 0; qid < nb_rxq; qid++) {
 		offloads = port->rx_conf[qid].offloads;
 		port->rx_conf[qid] = port->dev_info.default_rxconf;
+
+		if (rxq_share > 0 &&
+		    (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE))
+			/* Non-zero share group to enable RxQ share. */
+			port->rx_conf[qid].share_group = nb_ports / rxq_share +
+							 1;
+
 		if (offloads != 0)
 			port->rx_conf[qid].offloads = offloads;
 
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 5863b2f43f3..3dfaaad94c0 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -477,6 +477,8 @@ extern enum tx_pkt_split tx_pkt_split;
 
 extern uint8_t txonly_multi_flow;
 
+extern uint32_t rxq_share;
+
 extern uint16_t nb_pkt_per_burst;
 extern uint16_t nb_pkt_flowgen_clones;
 extern int nb_flows_flowgen;
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 640eadeff73..ff5908dcd50 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -389,6 +389,13 @@ The command line options are:
 
     Generate multiple flows in txonly mode.
 
+*   ``--rxq-share=[X]``
+
+    Create queues in shared Rx queue mode if the device supports it. The
+    share group number grows per X ports; X defaults to MAX, meaning all
+    ports join share group 1. Use the "shared-rxq" forwarding engine,
+    which does Rx only and updates stream statistics correctly.
+
 *   ``--eth-link-speed``
 
     Set a forced link speed to the ethernet port::
-- 
2.33.0