On Fri, Mar 16, 2018 at 04:21:15PM +0100, Jens Freimann wrote: > Implement code to dequeue and process descriptors from > the vring if VIRTIO_F_PACKED is enabled.
The feature bit is actually named VIRTIO_F_RING_PACKED in the spec, not VIRTIO_F_PACKED. > > Check if descriptor was made available by driver by looking at > VIRTIO_F_DESC_AVAIL flag in descriptor. If so dequeue and set > the used flag VIRTIO_F_DESC_USED to the current value of the > used wrap counter. > > Used ring wrap counter needs to be toggled when last descriptor is > written out. This allows the host/guest to detect new descriptors even > after the ring has wrapped. > > Signed-off-by: Jens Freimann <jfreim...@redhat.com> > --- > lib/librte_vhost/vhost.c | 1 + > lib/librte_vhost/vhost.h | 1 + > lib/librte_vhost/virtio_net.c | 228 > ++++++++++++++++++++++++++++++++++++++++++ > 3 files changed, 230 insertions(+) > > diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c > index a300812..8cba10d 100644 > --- a/lib/librte_vhost/vhost.c > +++ b/lib/librte_vhost/vhost.c > @@ -198,6 +198,7 @@ struct virtio_net * > > vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD; > vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD; > + vq->used_wrap_counter = 1; > > vhost_user_iotlb_init(dev, vring_idx); > /* Backends are set to -1 indicating an inactive device. */ > diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h > index d35c4b1..f77fefe 100644 > --- a/lib/librte_vhost/vhost.h > +++ b/lib/librte_vhost/vhost.h > @@ -108,6 +108,7 @@ struct vhost_virtqueue { > > struct batch_copy_elem *batch_copy_elems; > uint16_t batch_copy_nb_elems; > + uint32_t used_wrap_counter; I didn't look into this — is uint32_t the best choice for defining used_wrap_counter, compared with uint16_t or uint8_t? 
> > rte_rwlock_t iotlb_lock; > rte_rwlock_t iotlb_pending_lock; > diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c > index 700aca7..8f59e4f 100644 > --- a/lib/librte_vhost/virtio_net.c > +++ b/lib/librte_vhost/virtio_net.c > @@ -19,6 +19,7 @@ > > #include "iotlb.h" > #include "vhost.h" > +#include "virtio-1.1.h" > > #define MAX_PKT_BURST 32 > > @@ -1118,6 +1119,233 @@ > } > } > > +static inline uint16_t > +dequeue_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, It's better to have the word "packed" in the function name. > + struct rte_mempool *mbuf_pool, struct rte_mbuf *m, > + struct vring_desc_packed *descs) > +{ > + struct vring_desc_packed *desc; > + uint64_t desc_addr; > + uint32_t desc_avail, desc_offset; > + uint32_t mbuf_avail, mbuf_offset; > + uint32_t cpy_len; > + struct rte_mbuf *cur = m, *prev = m; > + struct virtio_net_hdr *hdr = NULL; > + uint16_t head_idx = vq->last_used_idx & (vq->size - 1); The ring size may not be a power of 2. > + int wrap_counter = vq->used_wrap_counter; > + int rc = 0; > + > + rte_spinlock_lock(&vq->access_lock); > + > + if (unlikely(vq->enabled == 0)) > + goto out; > + > + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) > + vhost_user_iotlb_rd_lock(vq); > + > + desc = &descs[vq->last_used_idx & (vq->size - 1)]; > + if (unlikely((desc->len < dev->vhost_hlen)) || > + (desc->flags & VRING_DESC_F_INDIRECT)) { > + RTE_LOG(ERR, VHOST_DATA, > + "INDIRECT not supported yet\n"); > + rc = -1; > + goto out; If INDIRECT isn't supported, we will need to make sure that INDIRECT and RING_PACKED won't be negotiated at the same time. Thanks