Buffer vhost enqueue shadow ring updates and flush the shadow ring only
once the number of buffered descriptors exceeds one burst. This allows
virtio to receive packets at a higher rate.

Signed-off-by: Marvin Liu <yong....@intel.com>

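For reference, below is a minimal standalone sketch of the burst-aligned
flush policy this patch introduces. PACKED_DESCS_BURST, PACKED_BURST_MASK
and the simplified structures are illustrative stand-ins for the real
definitions in this series (flush_shadow() here plays the role of
flush_shadow_packed() plus do_data_copy_enqueue()); it is not the actual
vhost code.

#include <stdint.h>
#include <stdio.h>

#define PACKED_DESCS_BURST 4
#define PACKED_BURST_MASK  (PACKED_DESCS_BURST - 1)

struct shadow_elem {
	uint16_t id;
	uint32_t len;
	uint16_t count;
};

struct vq_sketch {
	uint16_t last_used_idx;
	uint16_t shadow_used_idx;      /* buffered shadow entries */
	uint16_t enqueue_shadow_count; /* descs used since last burst boundary */
	struct shadow_elem shadow[64];
};

/* buffer one used-ring update instead of flushing it immediately */
static void
update_enqueue_shadow(struct vq_sketch *vq, uint16_t id, uint32_t len,
		uint16_t count)
{
	/* align the flush point with the burst boundary of the used ring */
	if (!vq->shadow_used_idx)
		vq->enqueue_shadow_count = vq->last_used_idx & PACKED_BURST_MASK;

	vq->shadow[vq->shadow_used_idx].id = id;
	vq->shadow[vq->shadow_used_idx].len = len;
	vq->shadow[vq->shadow_used_idx].count = count;
	vq->shadow_used_idx++;

	vq->enqueue_shadow_count += count;
}

/* stand-in for the real flush: advance used index, reset the buffer */
static void
flush_shadow(struct vq_sketch *vq)
{
	uint16_t i;

	for (i = 0; i < vq->shadow_used_idx; i++)
		vq->last_used_idx += vq->shadow[i].count;
	printf("flushed %u entries, last_used_idx=%u\n",
			(unsigned)vq->shadow_used_idx,
			(unsigned)vq->last_used_idx);
	vq->shadow_used_idx = 0;
}

int main(void)
{
	struct vq_sketch vq = { 0 };
	uint16_t d;

	/* ten single-descriptor enqueues: the flush fires once per burst */
	for (d = 0; d < 10; d++) {
		update_enqueue_shadow(&vq, d, 64, 1);
		if (vq.enqueue_shadow_count >= PACKED_DESCS_BURST)
			flush_shadow(&vq);
	}
	flush_shadow(&vq); /* flush whatever is left over */
	return 0;
}

Seeding enqueue_shadow_count from last_used_idx & PACKED_BURST_MASK is
what keeps the flush point on a burst boundary of the used ring even when
the shadow buffer starts filling mid-burst.
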
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index b33f29ba0..86552cbeb 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -143,6 +143,7 @@ struct vhost_virtqueue {
                struct vring_used_elem_packed *shadow_used_packed;
        };
        uint16_t                shadow_used_idx;
+       uint16_t                enqueue_shadow_count;
        struct vhost_vring_addr ring_addrs;
 
        struct batch_copy_elem  *batch_copy_elems;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 7116c389d..dffd466d5 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -157,6 +157,24 @@ flush_shadow_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
        vhost_log_cache_sync(dev, vq);
 }
 
+static __rte_always_inline void
+update_enqueue_shadow_packed(struct vhost_virtqueue *vq, uint16_t desc_idx,
+       uint32_t len, uint16_t count)
+{
+       /* enqueue shadow flush action aligned with burst num */
+       if (!vq->shadow_used_idx)
+               vq->enqueue_shadow_count = vq->last_used_idx &
+                                               PACKED_BURST_MASK;
+
+       uint16_t i = vq->shadow_used_idx++;
+
+       vq->shadow_used_packed[i].id  = desc_idx;
+       vq->shadow_used_packed[i].len = len;
+       vq->shadow_used_packed[i].count = count;
+
+       vq->enqueue_shadow_count += count;
+}
+
 static __rte_always_inline void
 update_shadow_packed(struct vhost_virtqueue *vq,
                         uint16_t desc_idx, uint32_t len, uint16_t count)
@@ -197,6 +215,22 @@ do_data_copy_dequeue(struct vhost_virtqueue *vq)
        vq->batch_copy_nb_elems = 0;
 }
 
+static __rte_always_inline void
+flush_enqueue_packed(struct virtio_net *dev,
+       struct vhost_virtqueue *vq, uint32_t len[], uint16_t id[],
+       uint16_t count[], uint16_t num_buffers)
+{
+       int i;
+       for (i = 0; i < num_buffers; i++) {
+               update_enqueue_shadow_packed(vq, id[i], len[i], count[i]);
+
+               if (vq->enqueue_shadow_count >= PACKED_DESCS_BURST) {
+                       do_data_copy_enqueue(dev, vq);
+                       flush_shadow_packed(dev, vq);
+               }
+       }
+}
+
 /* avoid write operation when necessary, to lessen cache issues */
 #define ASSIGN_UNLESS_EQUAL(var, val) do {     \
        if ((var) != (val))                     \
@@ -798,6 +832,9 @@ vhost_enqueue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                max_tries = 1;
 
        uint16_t num_buffers = 0;
+       uint32_t buffer_len[max_tries];
+       uint16_t buffer_buf_id[max_tries];
+       uint16_t buffer_desc_count[max_tries];
 
        while (size > 0) {
                /*
@@ -820,6 +857,10 @@ vhost_enqueue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
                size -= len;
 
+               buffer_len[num_buffers] = len;
+               buffer_buf_id[num_buffers] = buf_id;
+               buffer_desc_count[num_buffers] = desc_count;
+
                avail_idx += desc_count;
                if (avail_idx >= vq->size)
                        avail_idx -= vq->size;
@@ -834,6 +875,9 @@ vhost_enqueue_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                return 0;
        }
 
+       flush_enqueue_packed(dev, vq, buffer_len, buffer_buf_id,
+                       buffer_desc_count, num_buffers);
+
        return 0;
 }
 
-- 
2.17.1