Add support for shared Rx queue.

If shared Rx queue is enabled, group received packets by stream
according to the mbuf->port value, then forward on a per-stream basis
as before.

If shared Rx queue is not enabled, just forward on a per-stream basis.
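
For illustration only, a minimal sketch of the grouping idea. The real
logic lives in the shared do_burst_fwd() helper used by the diff below,
which is not part of this patch; the helper and parameter names here are
made up. It assumes the testpmd definitions of struct fwd_stream and
struct rte_mbuf, and for brevity reuses the caller's stream rather than
looking up the stream that owns each port.

/*
 * Illustrative sketch: forward a received burst in per-port groups,
 * handing each contiguous run of packets with the same mbuf->port to
 * the per-stream callback (rxonly_forward_stream() in this engine).
 */
static void
group_by_port_and_forward(struct fwd_stream *fs,
			  struct rte_mbuf **pkts, uint16_t nb_rx,
			  void (*fwd)(struct fwd_stream *, uint16_t,
				      struct rte_mbuf **))
{
	uint16_t i, start = 0;

	for (i = 1; i <= nb_rx; i++) {
		/* Flush the current group when the port changes or at the end. */
		if (i == nb_rx || pkts[i]->port != pkts[start]->port) {
			fwd(fs, i - start, &pkts[start]);
			start = i;
		}
	}
}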

Signed-off-by: Xueming Li <xuemi...@nvidia.com>
---
 app/test-pmd/rxonly.c | 34 +++++++++++++---------------------
 1 file changed, 13 insertions(+), 21 deletions(-)

diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index c78fc4609a..80ae0ecf93 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -41,32 +41,24 @@
 #include "testpmd.h"
 
 /*
- * Received a burst of packets.
+ * Process a burst of received packets from same stream.
  */
 static void
-pkt_burst_receive(struct fwd_stream *fs)
+rxonly_forward_stream(struct fwd_stream *fs, uint16_t nb_rx,
+                     struct rte_mbuf **pkts_burst)
 {
-       struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
-       uint16_t nb_rx;
-       uint16_t i;
-       uint64_t start_tsc = 0;
-
-       get_start_cycles(&start_tsc);
-
-       /*
-        * Receive a burst of packets.
-        */
-       nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
-                                nb_pkt_per_burst);
-       inc_rx_burst_stats(fs, nb_rx);
-       if (unlikely(nb_rx == 0))
-               return;
+       RTE_SET_USED(fs);
+       rte_pktmbuf_free_bulk(pkts_burst, nb_rx);
+}
 
-       fs->rx_packets += nb_rx;
-       for (i = 0; i < nb_rx; i++)
-               rte_pktmbuf_free(pkts_burst[i]);
 
-       get_end_cycles(fs, start_tsc);
+/*
+ * Wrapper of real fwd engine.
+ */
+static void
+pkt_burst_receive(struct fwd_stream *fs)
+{
+       return do_burst_fwd(fs, rxonly_forward_stream);
 }
 
 struct fwd_engine rx_only_engine = {
-- 
2.25.1
