Rework the vhost zero copy dequeue path so that it follows the same
batched structure as the fast dequeue function.

Signed-off-by: Marvin Liu <yong....@intel.com>

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index a3b1e85fe..7094944cf 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2143,6 +2143,147 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
        return i;
 }
 
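+/*
+ * Walk the zero copy mbuf list; for each mbuf already consumed by the
+ * application, return its descriptor to the guest as used and release
+ * both the mbuf and its zmbuf tracking entry.
+ */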
+static __rte_always_inline void
+free_zmbuf(struct vhost_virtqueue *vq)
+{
+       struct zcopy_mbuf *next = NULL;
+       struct zcopy_mbuf *zmbuf;
+
+       for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+            zmbuf != NULL; zmbuf = next) {
+               next = TAILQ_NEXT(zmbuf, next);
+
+               uint16_t last_used_idx = vq->last_used_idx;
+
+               if (mbuf_is_consumed(zmbuf->mbuf)) {
+                       uint16_t flags = 0;
+
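+                       /* Flag bits must match the current used ring wrap state. */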
+                       if (vq->used_wrap_counter)
+                               flags = VIRTIO_TX_FLAG_PACKED;
+                       else
+                               flags = VIRTIO_TX_WRAP_FLAG_PACKED;
+
+                       vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
+                       vq->desc_packed[last_used_idx].len = 0;
+
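+                       /* Make id/len visible before flags publish the descriptor. */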
+                       rte_smp_wmb();
+                       vq->desc_packed[last_used_idx].flags = flags;
+
+                       vq->last_used_idx += zmbuf->desc_count;
+                       if (vq->last_used_idx >= vq->size) {
+                               vq->used_wrap_counter ^= 1;
+                               vq->last_used_idx -= vq->size;
+                       }
+
+                       TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+                       restore_mbuf(zmbuf->mbuf);
+                       rte_pktmbuf_free(zmbuf->mbuf);
+                       put_zmbuf(zmbuf);
+                       vq->nr_zmbuf -= 1;
+               }
+       }
+}
+
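+/*
+ * Zero copy variant of the fast dequeue path: dequeue a cacheline's
+ * worth of descriptors at once, but defer marking them used until
+ * free_zmbuf() sees the attached mbufs consumed.
+ */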
+static __rte_always_inline int
+virtio_dev_tx_fast_packed_zmbuf(struct virtio_net *dev,
+                       struct vhost_virtqueue *vq,
+                       struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts)
+{
+       struct zcopy_mbuf *zmbuf, *zmbuf1, *zmbuf2, *zmbuf3;
+       int ret;
+       uintptr_t desc_addr[4];
+       uint16_t ids[4];
+
+       uint16_t avail_idx = vq->last_avail_idx;
+
+       ret = vhost_dequeue_fast_packed(dev, vq, mbuf_pool, pkts, avail_idx,
+                               desc_addr, ids);
+
+       if (ret)
+               return ret;
+
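+       /* Track each dequeued mbuf with a zmbuf entry until it is consumed. */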
+       zmbuf = get_zmbuf(vq);
+       zmbuf1 = get_zmbuf(vq);
+       zmbuf2 = get_zmbuf(vq);
+       zmbuf3 = get_zmbuf(vq);
+
+       if (!zmbuf || !zmbuf1 || !zmbuf2 || !zmbuf3) {
+               /* Put back any zmbuf entries acquired before the failure. */
+               if (zmbuf)
+                       put_zmbuf(zmbuf);
+               if (zmbuf1)
+                       put_zmbuf(zmbuf1);
+               if (zmbuf2)
+                       put_zmbuf(zmbuf2);
+               if (zmbuf3)
+                       put_zmbuf(zmbuf3);
+               rte_pktmbuf_free(pkts[0]);
+               rte_pktmbuf_free(pkts[1]);
+               rte_pktmbuf_free(pkts[2]);
+               rte_pktmbuf_free(pkts[3]);
+               return -1;
+       }
+
+       zmbuf->mbuf = pkts[0];
+       zmbuf->desc_idx = ids[0];
+       zmbuf->desc_count = 1;
+
+       zmbuf1->mbuf = pkts[1];
+       zmbuf1->desc_idx = ids[1];
+       zmbuf1->desc_count = 1;
+
+       zmbuf2->mbuf = pkts[2];
+       zmbuf2->desc_idx = ids[2];
+       zmbuf2->desc_count = 1;
+
+       zmbuf3->mbuf = pkts[3];
+       zmbuf3->desc_idx = ids[3];
+       zmbuf3->desc_count = 1;
+
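+       /* Take an extra reference so the mbufs stay alive until consumed. */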
+       rte_mbuf_refcnt_update(pkts[0], 1);
+       rte_mbuf_refcnt_update(pkts[1], 1);
+       rte_mbuf_refcnt_update(pkts[2], 1);
+       rte_mbuf_refcnt_update(pkts[3], 1);
+
+       vq->nr_zmbuf += PACKED_DESC_PER_CACHELINE;
+       TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
+       TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf1, next);
+       TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf2, next);
+       TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf3, next);
+
+       vq->last_avail_idx += PACKED_DESC_PER_CACHELINE;
+       if (vq->last_avail_idx >= vq->size) {
+               vq->last_avail_idx -= vq->size;
+               vq->avail_wrap_counter ^= 1;
+       }
+
+       return 0;
+}
+
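+/*
+ * Zero copy variant of the normal (single buffer) dequeue path.
+ */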
+static __rte_always_inline int
+virtio_dev_tx_normal_packed_zmbuf(struct virtio_net *dev,
+                       struct vhost_virtqueue *vq,
+                       struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts)
+{
+       uint16_t buf_id, desc_count;
+       struct zcopy_mbuf *zmbuf;
+
+       if (vhost_dequeue_normal_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+                                       &desc_count))
+               return -1;
+
+       zmbuf = get_zmbuf(vq);
+       if (!zmbuf) {
+               rte_pktmbuf_free(*pkts);
+               return -1;
+       }
+       zmbuf->mbuf = *pkts;
+       zmbuf->desc_idx = buf_id;
+       zmbuf->desc_count = desc_count;
+
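+       /* Hold an extra reference until free_zmbuf() recycles the mbuf. */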
+       rte_mbuf_refcnt_update(*pkts, 1);
+
+       vq->nr_zmbuf += 1;
+       TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
+
+       vq->last_avail_idx += desc_count;
+       if (vq->last_avail_idx >= vq->size) {
+               vq->last_avail_idx -= vq->size;
+               vq->avail_wrap_counter ^= 1;
+       }
+
+       return 0;
+}
+
 uint16_t
 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
-- 
2.17.1
