drain_eth_rx() uses rte_vhost_avail_entries() to count the available
entries and decide whether a retry is required. However, this function
only works with split rings; on packed rings it returns the wrong
value, causing unnecessary retries and a significant performance
penalty.

This patch fixes that by using the difference between the enqueue
and Rx burst counts as the retry condition.
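
For illustration, the retry pattern only re-offers the packets that the
previous burst could not fit and stops once everything is enqueued or the
retry budget is spent; no ring-type-specific queries such as
rte_vhost_avail_entries() are involved. Below is a minimal standalone
sketch of that pattern; try_enqueue(), BURST_SIZE and RETRY_NUM are
hypothetical stand-ins, not the helpers used in examples/vhost.

#include <stdint.h>
#include <stdio.h>

#define BURST_SIZE 32   /* hypothetical burst size */
#define RETRY_NUM   4   /* hypothetical retry budget */

/* Hypothetical enqueue helper: reports how many packets the ring accepted. */
static uint16_t
try_enqueue(void **pkts, uint16_t count)
{
	(void)pkts;
	return count / 2;       /* pretend only half of the burst fits */
}

int
main(void)
{
	void *pkts[BURST_SIZE] = { NULL };
	uint16_t rx_count = BURST_SIZE;
	uint16_t enqueue_count;
	uint32_t retry = 0;

	enqueue_count = try_enqueue(pkts, rx_count);

	/* Retry only on a shortfall, re-offering just the leftover packets. */
	while (enqueue_count < rx_count && retry++ < RETRY_NUM) {
		/* a real application would pause here, e.g. rte_delay_us() */
		enqueue_count += try_enqueue(&pkts[enqueue_count],
					     rx_count - enqueue_count);
	}

	printf("enqueued %u of %u packets\n", enqueue_count, rx_count);
	return 0;
}

The actual patch below applies the same idea using
vdev_queue_ops[vdev->vid].enqueue_pkt_burst() and rte_delay_us().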

Fixes: be800696c26e ("examples/vhost: use burst enqueue and dequeue from lib")
Cc: sta...@dpdk.org

Signed-off-by: Yuan Wang <yuanx.w...@intel.com>
Tested-by: Wei Ling <weix.l...@intel.com>
---
V4: Fix Fixes tag.
V3: Fix mbuf index.
V2: Rebase to 22.07 rc1.
---
 examples/vhost/main.c | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index e7fee5aa1b..0fa6c096c8 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -634,7 +634,7 @@ us_vhost_usage(const char *prgname)
 {
        RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
        "               --vm2vm [0|1|2]\n"
-       "               --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
+       "               --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
        "               --socket-file <path>\n"
        "               --nb-devices ND\n"
        "               -p PORTMASK: Set mask for ports to be used by 
application\n"
@@ -1383,27 +1383,21 @@ drain_eth_rx(struct vhost_dev *vdev)
        if (!rx_count)
                return;
 
-       /*
-        * When "enable_retry" is set, here we wait and retry when there
-        * is no enough free slots in the queue to hold @rx_count packets,
-        * to diminish packet loss.
-        */
-       if (enable_retry &&
-           unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
-                       VIRTIO_RXQ))) {
-               uint32_t retry;
+       enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
+                                               VIRTIO_RXQ, pkts, rx_count);
 
-               for (retry = 0; retry < burst_rx_retry_num; retry++) {
+       /* Retry if necessary */
+       if (enable_retry && unlikely(enqueue_count < rx_count)) {
+               uint32_t retry = 0;
+
+               while (enqueue_count < rx_count && retry++ < burst_rx_retry_num) {
                        rte_delay_us(burst_rx_delay_time);
-                       if (rx_count <= rte_vhost_avail_entries(vdev->vid,
-                                       VIRTIO_RXQ))
-                               break;
+                       enqueue_count += vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
+                                                       VIRTIO_RXQ, &pkts[enqueue_count],
+                                                       rx_count - enqueue_count);
                }
        }
 
-       enqueue_count = vdev_queue_ops[vdev->vid].enqueue_pkt_burst(vdev,
-                                       VIRTIO_RXQ, pkts, rx_count);
-
        if (enable_stats) {
                __atomic_add_fetch(&vdev->stats.rx_total_atomic, rx_count,
                                __ATOMIC_SEQ_CST);
-- 
2.25.1
