This patch renames the local variable free_entries to
avail_entries in the dequeue path.

Indeed, this variable represents the number of new packets
available in the Virtio transmit queue, so these entries
are actually used, not free.

Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
 lib/vhost/virtio_net.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 68a26eb17d..84cdf7e3b1 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -2774,7 +2774,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct 
vhost_virtqueue *vq,
        bool legacy_ol_flags)
 {
        uint16_t i;
-       uint16_t free_entries;
+       uint16_t avail_entries;
        uint16_t dropped = 0;
        static bool allocerr_warned;
 
@@ -2782,9 +2782,9 @@ virtio_dev_tx_split(struct virtio_net *dev, struct 
vhost_virtqueue *vq,
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
-       free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
+       avail_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
                        vq->last_avail_idx;
-       if (free_entries == 0)
+       if (avail_entries == 0)
                return 0;
 
        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
@@ -2792,7 +2792,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct 
vhost_virtqueue *vq,
        VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
 
        count = RTE_MIN(count, MAX_PKT_BURST);
-       count = RTE_MIN(count, free_entries);
+       count = RTE_MIN(count, avail_entries);
        VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
                        dev->ifname, count);
 
@@ -3288,7 +3288,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct 
vhost_virtqueue *vq,
 {
        static bool allocerr_warned;
        bool dropped = false;
-       uint16_t free_entries;
+       uint16_t avail_entries;
        uint16_t pkt_idx, slot_idx = 0;
        uint16_t nr_done_pkts = 0;
        uint16_t pkt_err = 0;
@@ -3302,9 +3302,9 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct 
vhost_virtqueue *vq,
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
-       free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
+       avail_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
                        vq->last_avail_idx;
-       if (free_entries == 0)
+       if (avail_entries == 0)
                goto out;
 
        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
@@ -3312,7 +3312,7 @@ virtio_dev_tx_async_split(struct virtio_net *dev, struct 
vhost_virtqueue *vq,
        async_iter_reset(async);
 
        count = RTE_MIN(count, MAX_PKT_BURST);
-       count = RTE_MIN(count, free_entries);
+       count = RTE_MIN(count, avail_entries);
        VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
                        dev->ifname, count);
 
-- 
2.35.3

Reply via email to