On 1/15/25 5:46 PM, David Marchand wrote:
On Wed, Jan 15, 2025 at 1:59 PM Maxime Coquelin
<maxime.coque...@redhat.com> wrote:

With the previous refactoring, we can now simplify the RARP
packet injection handling in both the sync and async
dequeue paths.

Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
  lib/vhost/virtio_net.c | 42 ++++++++++++++++++------------------------
  1 file changed, 18 insertions(+), 24 deletions(-)

diff --git a/lib/vhost/virtio_net.c b/lib/vhost/virtio_net.c
index 59ea2d16a5..fab45ebd54 100644
--- a/lib/vhost/virtio_net.c
+++ b/lib/vhost/virtio_net.c
@@ -3662,21 +3662,23 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                  * learning table will get updated first.
                  */
                 pkts[0] = rarp_mbuf;

Well, ideally it would be pkts[nb_rx], but see comment below.

-               vhost_queue_stats_update(dev, vq, pkts, 1);
-               pkts++;
-               count -= 1;
+               nb_rx += 1;
         }

With this change, the rarp_mbuf variable is unneeded.
You can store to pkts[nb_rx] when calling rte_net_make_rarp_packet()
(and at the same time, move the comment about injecting the packet to
the head of the array).

I agree with this further simplification.
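
Something along these lines, I suppose (untested sketch; rarp_needed is a
placeholder for the existing broadcast_rarp check, and the error handling
and label name are paraphrased from the current code):

        /* rarp_needed stands in for the existing broadcast_rarp check. */
        if (unlikely(rarp_needed)) {
                /*
                 * Inject the RARP packet at the head of the "pkts" array,
                 * so that the switch's MAC learning table gets updated
                 * first. nb_rx is still 0 at this point, so pkts[nb_rx]
                 * is the head of the array.
                 */
                pkts[nb_rx] = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
                if (pkts[nb_rx] == NULL)
                        goto out;
                nb_rx += 1;
        }

That drops the rarp_mbuf variable entirely.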

Thanks,
Maxime

         if (vq_is_packed(dev)) {
                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-                       nb_rx = virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool, pkts, count);
+                       nb_rx += virtio_dev_tx_packed_legacy(dev, vq, mbuf_pool,
+                                       pkts + nb_rx, count - nb_rx);
                 else
-                       nb_rx = virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool, pkts, count);
+                       nb_rx += virtio_dev_tx_packed_compliant(dev, vq, mbuf_pool,
+                                       pkts + nb_rx, count - nb_rx);
         } else {
                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-                       nb_rx = virtio_dev_tx_split_legacy(dev, vq, mbuf_pool, pkts, count);
+                       nb_rx += virtio_dev_tx_split_legacy(dev, vq, mbuf_pool,
+                                       pkts + nb_rx, count - nb_rx);
                 else
-                       nb_rx = virtio_dev_tx_split_compliant(dev, vq, mbuf_pool, pkts, count);
+                       nb_rx += virtio_dev_tx_split_compliant(dev, vq, mbuf_pool,
+                                       pkts + nb_rx, count - nb_rx);
         }

         vhost_queue_stats_update(dev, vq, pkts, nb_rx);
@@ -3687,9 +3689,6 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
  out_access_unlock:
         rte_rwlock_read_unlock(&vq->access_lock);

-       if (unlikely(rarp_mbuf != NULL))
-               nb_rx += 1;
-
  out_no_unlock:
         return nb_rx;
  }
@@ -4285,25 +4284,23 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
                  * learning table will get updated first.
                  */
                 pkts[0] = rarp_mbuf;
-               vhost_queue_stats_update(dev, vq, pkts, 1);
-               pkts++;
-               count -= 1;
+               nb_rx += 1;
         }

Idem.


         if (vq_is_packed(dev)) {
                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-                       nb_rx = virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
-                                       pkts, count, dma_id, vchan_id);
+                       nb_rx += virtio_dev_tx_async_packed_legacy(dev, vq, mbuf_pool,
+                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
                 else
-                       nb_rx = virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
-                                       pkts, count, dma_id, vchan_id);
+                       nb_rx += virtio_dev_tx_async_packed_compliant(dev, vq, mbuf_pool,
+                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
         } else {
                 if (dev->flags & VIRTIO_DEV_LEGACY_OL_FLAGS)
-                       nb_rx = virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
-                                       pkts, count, dma_id, vchan_id);
+                       nb_rx += virtio_dev_tx_async_split_legacy(dev, vq, mbuf_pool,
+                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
                 else
-                       nb_rx = virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
-                                       pkts, count, dma_id, vchan_id);
+                       nb_rx += virtio_dev_tx_async_split_compliant(dev, vq, mbuf_pool,
+                                       pkts + nb_rx, count - nb_rx, dma_id, vchan_id);
         }

         *nr_inflight = vq->async->pkts_inflight_n;
@@ -4315,9 +4312,6 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
  out_access_unlock:
         rte_rwlock_read_unlock(&vq->access_lock);

-       if (unlikely(rarp_mbuf != NULL))
-               nb_rx += 1;
-
  out_no_unlock:
         return nb_rx;
  }
--
2.47.1