Rewrite the vhost packed ring dequeue function so that it dequeues a packet without updating the used ring.

Signed-off-by: Marvin Liu <yong....@intel.com>
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index b877510da..410837122 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1613,6 +1613,62 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	return i;
 }
 
+static __rte_always_inline int
+vhost_dequeue_normal_packed(struct virtio_net *dev,
+		struct vhost_virtqueue *vq,
+		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts,
+		uint16_t *buf_id, uint16_t *desc_count)
+{
+	struct buf_vector buf_vec[BUF_VECTOR_MAX];
+	uint32_t dummy_len;
+	uint16_t nr_vec = 0;
+	int err;
+
+	if (unlikely(fill_vec_buf_packed(dev, vq,
+					vq->last_avail_idx, desc_count,
+					buf_vec, &nr_vec,
+					buf_id, &dummy_len,
+					VHOST_ACCESS_RO) < 0)) {
+		return -1;
+	}
+
+	*pkts = rte_pktmbuf_alloc(mbuf_pool);
+	if (unlikely(*pkts == NULL)) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"Failed to allocate memory for mbuf.\n");
+		return -1;
+	}
+
+	err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+			mbuf_pool);
+	if (unlikely(err)) {
+		rte_pktmbuf_free(*pkts);
+		return -1;
+	}
+
+	return 0;
+}
+
+static __rte_always_inline int
+virtio_dev_tx_normal_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts)
+{
+
+	uint16_t buf_id, desc_count;
+
+	if (vhost_dequeue_normal_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+					&desc_count))
+		return -1;
+
+	vq->last_avail_idx += desc_count;
+	if (vq->last_avail_idx >= vq->size) {
+		vq->last_avail_idx -= vq->size;
+		vq->avail_wrap_counter ^= 1;
+	}
+
+	return 0;
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
-- 
2.17.1
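
[Editor's note] For illustration only, below is a minimal sketch of how the new per-packet helper could be driven from a burst dequeue loop, with the used ring update deferred to a single batched step after the loop. The caller and the flush_used_ring_packed() helper are assumptions made for this sketch; they are not part of this patch, which only adds the two dequeue helpers above.

/*
 * Sketch only (not part of this patch): a hypothetical burst loop that
 * dequeues up to 'count' packets with virtio_dev_tx_normal_packed() and
 * leaves the used ring update to a separate, batched step.
 */
static uint16_t
virtio_dev_tx_packed_sketch(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	uint16_t pkt_idx = 0;

	while (pkt_idx < count) {
		/* Stop when no descriptor is available or an error occurs. */
		if (virtio_dev_tx_normal_packed(dev, vq, mbuf_pool,
						&pkts[pkt_idx]))
			break;
		pkt_idx++;
	}

	if (pkt_idx) {
		/*
		 * Hypothetical helper: write the consumed descriptors back to
		 * the used ring and notify the guest once for the whole burst.
		 */
		flush_used_ring_packed(dev, vq);
	}

	return pkt_idx;
}

Note that virtio_dev_tx_normal_packed() as added here discards the buf_id and desc_count it obtains, so a deferred flush like the one sketched above would additionally require recording those values (for example in a shadow used ring) while dequeuing, so that the batched write-back knows which descriptors to return to the guest.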