vhost removes the TX burst size limit (32 pkts) and makes
a best effort to transmit all packets.

Cc: yuanhan....@linux.intel.com
Cc: maxime.coque...@redhat.com

Signed-off-by: Zhiyong Yang <zhiyong.y...@intel.com>
---
 drivers/net/vhost/rte_eth_vhost.c | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c 
b/drivers/net/vhost/rte_eth_vhost.c
index e98cffd..1e1fa34 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -52,6 +52,7 @@
 #define ETH_VHOST_QUEUES_ARG           "queues"
 #define ETH_VHOST_CLIENT_ARG           "client"
 #define ETH_VHOST_DEQUEUE_ZERO_COPY    "dequeue-zero-copy"
+#define VHOST_MAX_PKT_BURST 32
 
 static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
@@ -434,8 +435,27 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t 
nb_bufs)
                goto out;
 
        /* Enqueue packets to guest RX queue */
-       nb_tx = rte_vhost_enqueue_burst(r->vid,
-                       r->virtqueue_id, bufs, nb_bufs);
+       if (likely(nb_bufs <= VHOST_MAX_PKT_BURST))
+               nb_tx = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
+                                               bufs, nb_bufs);
+       else {
+               uint16_t nb_send = nb_bufs;
+
+               while (nb_send) {
+                       uint16_t nb_pkts;
+                       uint16_t num = (uint16_t)RTE_MIN(nb_send,
+                                       VHOST_MAX_PKT_BURST);
+
+                       nb_pkts = rte_vhost_enqueue_burst(r->vid,
+                                                         r->virtqueue_id,
+                                                         &bufs[nb_tx], num);
+
+                       nb_tx += nb_pkts;
+                       nb_send -= nb_pkts;
+                       if (nb_pkts < num)
+                               break;
+               }
+       }
 
        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;
-- 
2.7.4

Reply via email to