Relax used ring contention by reusing the shadow used
ring mechanism already used by the enqueue path.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 45 ++++++++++---------------------------------
 1 file changed, 10 insertions(+), 35 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 98ad8e936..7e70a927f 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1019,35 +1019,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
        return error;
 }
 
-static __rte_always_inline void
-update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                uint32_t used_idx, uint32_t desc_idx)
-{
-       vq->used->ring[used_idx].id  = desc_idx;
-       vq->used->ring[used_idx].len = 0;
-       vhost_log_cache_used_vring(dev, vq,
-                       offsetof(struct vring_used, ring[used_idx]),
-                       sizeof(vq->used->ring[used_idx]));
-}
-
-static __rte_always_inline void
-update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
-               uint32_t count)
-{
-       if (unlikely(count == 0))
-               return;
-
-       rte_smp_wmb();
-       rte_smp_rmb();
-
-       vhost_log_cache_sync(dev, vq);
-
-       vq->used->idx += count;
-       vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
-                       sizeof(vq->used->idx));
-       vhost_vring_call(dev, vq);
-}
-
 static __rte_always_inline struct zcopy_mbuf *
 get_zmbuf(struct vhost_virtqueue *vq)
 {
@@ -1146,6 +1117,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                goto out_access_unlock;
 
        vq->batch_copy_nb_elems = 0;
+       vq->shadow_used_idx = 0;
 
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_rd_lock(vq);
@@ -1164,8 +1136,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
                        if (mbuf_is_consumed(zmbuf->mbuf)) {
                                used_idx = vq->last_used_idx++ & (vq->size - 1);
-                               update_used_ring(dev, vq, used_idx,
-                                                zmbuf->desc_idx);
+                               update_shadow_used_ring(vq, zmbuf->desc_idx, 0);
                                nr_updated += 1;
 
                                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
@@ -1176,7 +1147,9 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                        }
                }
 
-               update_used_idx(dev, vq, nr_updated);
+               flush_shadow_used_ring(dev, vq);
+               vhost_vring_call(dev, vq);
+               vq->shadow_used_idx = 0;
        }
 
        /*
@@ -1233,7 +1206,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                desc_indexes[i] = vq->avail->ring[avail_idx];
 
                if (likely(dev->dequeue_zero_copy == 0))
-                       update_used_ring(dev, vq, used_idx, desc_indexes[i]);
+                       update_shadow_used_ring(vq, desc_indexes[i], 0);
        }
 
        /* Prefetch descriptor index. */
@@ -1326,8 +1299,10 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
        if (likely(dev->dequeue_zero_copy == 0)) {
                do_data_copy_dequeue(vq);
-               vq->last_used_idx += i;
-               update_used_idx(dev, vq, i);
+               if (unlikely(i < count))
+                       vq->shadow_used_idx = i;
+               flush_shadow_used_ring(dev, vq);
+               vhost_vring_call(dev, vq);
        }
 
 out:
-- 
2.14.4

Reply via email to