This patch refactors the error handling in the Vhost
dequeue path to ease its maintenance and readability.

Suggested-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/vhost/virtio_net.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index a340e5a772..3a4955fd30 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -3593,6 +3593,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        struct rte_mbuf *rarp_mbuf = NULL;
        struct vhost_virtqueue *vq;
        int16_t success = 1;
+       uint16_t nb_rx = 0;
 
        dev = get_device(vid);
        if (!dev)
@@ -3602,25 +3603,23 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                VHOST_DATA_LOG(dev->ifname, ERR,
                        "%s: built-in vhost net backend is disabled.",
                        __func__);
-               return 0;
+               goto out_no_unlock;
        }
 
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
                VHOST_DATA_LOG(dev->ifname, ERR,
                        "%s: invalid virtqueue idx %d.",
                        __func__, queue_id);
-               return 0;
+               goto out_no_unlock;
        }
 
        vq = dev->virtqueue[queue_id];
 
        if (unlikely(rte_rwlock_read_trylock(&vq->access_lock) != 0))
-               return 0;
+               goto out_no_unlock;
 
-       if (unlikely(!vq->enabled)) {
-               count = 0;
+       if (unlikely(!vq->enabled))
                goto out_access_unlock;
-       }
 
        vhost_user_iotlb_rd_lock(vq);
 
@@ -3630,7 +3629,6 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
                virtio_dev_vring_translate(dev, vq);
 
-               count = 0;
                goto out_no_unlock;
        }
 
@@ -3657,7 +3655,6 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
                if (rarp_mbuf == NULL) {
                        VHOST_DATA_LOG(dev->ifname, ERR, "failed to make RARP packet.");
-                       count = 0;
                        goto out;
                }
                /*
@@ -3672,17 +3669,17 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
        if (vq_is_packed(dev)) {
                if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-                       count = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
+                       nb_rx = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
                else
-                       count = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
+                       nb_rx = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
        } else {
                if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-                       count = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
+                       nb_rx = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
                else
-                       count = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
+                       nb_rx = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
        }
 
-       vhost_queue_stats_update(dev, vq, pkts, count);
+       vhost_queue_stats_update(dev, vq, pkts, nb_rx);
 
 out:
        vhost_user_iotlb_rd_unlock(vq);
@@ -3691,10 +3688,10 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        rte_rwlock_read_unlock(&vq->access_lock);
 
        if (unlikely(rarp_mbuf != NULL))
-               count += 1;
+               nb_rx += 1;
 
 out_no_unlock:
-       return count;
+       return nb_rx;
 }
 
 static __rte_always_inline uint16_t
-- 
2.47.1

Reply via email to