Hi Cheng,

> -----Original Message-----
> From: Jiang, Cheng1 <cheng1.ji...@intel.com>
> Sent: Wednesday, April 14, 2021 2:14 PM
> To: maxime.coque...@redhat.com; Xia, Chenbo <chenbo....@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu...@intel.com>; Yang, YvonneX
> <yvonnex.y...@intel.com>; Wang, Yinan <yinan.w...@intel.com>; Liu,
> Yong <yong....@intel.com>; Jiang, Cheng1 <cheng1.ji...@intel.com>
> Subject: [PATCH v7 2/4] vhost: add support for packed ring in async vhost
> 
> For now, the async vhost data path only supports the split ring. This
> patch enables the packed ring in the async vhost data path, making async
> vhost compatible with the virtio 1.1 spec.
> 
> Signed-off-by: Cheng Jiang <cheng1.ji...@intel.com>
> ---
>  lib/librte_vhost/rte_vhost_async.h |   1 +
>  lib/librte_vhost/vhost.c           |  49 ++--
>  lib/librte_vhost/vhost.h           |  15 +-
>  lib/librte_vhost/virtio_net.c      | 432 +++++++++++++++++++++++++++--
>  4 files changed, 456 insertions(+), 41 deletions(-)
> 
> diff --git a/lib/librte_vhost/rte_vhost_async.h b/lib/librte_vhost/rte_vhost_async.h
> index c855ff875..6faa31f5a 100644
> --- a/lib/librte_vhost/rte_vhost_async.h
> +++ b/lib/librte_vhost/rte_vhost_async.h
> @@ -89,6 +89,7 @@ struct rte_vhost_async_channel_ops {
>  struct async_inflight_info {
>       struct rte_mbuf *mbuf;
>       uint16_t descs; /* num of descs inflight */
> +     uint16_t nr_buffers; /* num of buffers inflight for packed ring */
>  };
> 
>  /**
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index a70fe01d8..f509186c6 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -338,19 +338,22 @@ cleanup_device(struct virtio_net *dev, int destroy)
>  }
> 
>  static void
> -vhost_free_async_mem(struct vhost_virtqueue *vq)
> +vhost_free_async_mem(struct virtio_net *dev, struct vhost_virtqueue *vq)
>  {
> -     if (vq->async_pkts_info)
> -             rte_free(vq->async_pkts_info);
> -     if (vq->async_descs_split)
> +     rte_free(vq->async_pkts_info);
> +
> +     if (vq_is_packed(dev)) {
> +             rte_free(vq->async_buffers_packed);
> +             vq->async_buffers_packed = NULL;
> +     } else {
>               rte_free(vq->async_descs_split);
> -     if (vq->it_pool)
> -             rte_free(vq->it_pool);
> -     if (vq->vec_pool)
> -             rte_free(vq->vec_pool);
> +             vq->async_descs_split = NULL;
> +     }
> +
> +     rte_free(vq->it_pool);
> +     rte_free(vq->vec_pool);
> 
>       vq->async_pkts_info = NULL;
> -     vq->async_descs_split = NULL;
>       vq->it_pool = NULL;
>       vq->vec_pool = NULL;
>  }
> @@ -360,10 +363,10 @@ free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
>  {
>       if (vq_is_packed(dev))
>               rte_free(vq->shadow_used_packed);
> -     else {
> +     else
>               rte_free(vq->shadow_used_split);
> -             vhost_free_async_mem(vq);
> -     }
> +
> +     vhost_free_async_mem(dev, vq);
>       rte_free(vq->batch_copy_elems);
>       if (vq->iotlb_pool)
>               rte_mempool_free(vq->iotlb_pool);
> @@ -1626,10 +1629,9 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
>       if (unlikely(vq == NULL || !dev->async_copy))
>               return -1;
> 
> -     /* packed queue is not supported */
> -     if (unlikely(vq_is_packed(dev) || !f.async_inorder)) {
> +     if (unlikely(!f.async_inorder)) {
>               VHOST_LOG_CONFIG(ERR,
> -                     "async copy is not supported on packed queue or non-inorder mode "
> +                     "async copy is not supported on non-inorder mode "
>                       "(vid %d, qid: %d)\n", vid, queue_id);
>               return -1;
>       }
> @@ -1667,12 +1669,19 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
>       vq->vec_pool = rte_malloc_socket(NULL,
>                       VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
>                       RTE_CACHE_LINE_SIZE, node);
> -     vq->async_descs_split = rte_malloc_socket(NULL,
> +     if (vq_is_packed(dev)) {
> +             vq->async_buffers_packed = rte_malloc_socket(NULL,
> +                     vq->size * sizeof(struct vring_used_elem_packed),
> +                     RTE_CACHE_LINE_SIZE, node);
> +     } else {
> +             vq->async_descs_split = rte_malloc_socket(NULL,
>                       vq->size * sizeof(struct vring_used_elem),
>                       RTE_CACHE_LINE_SIZE, node);
> -     if (!vq->async_descs_split || !vq->async_pkts_info ||
> -             !vq->it_pool || !vq->vec_pool) {
> -             vhost_free_async_mem(vq);
> +     }
> +
> +     if (!vq->async_buffers_packed || !vq->async_descs_split ||
async_buffers_packed and async_descs_split are members of the same union,
so only one of them is valid at a time. As is done in vhost_free_async_mem(),
do you think it's better to check each of them for NULL in its own if/else
branch?
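
Something like the following (just an untested sketch; the "alloc_failed"
local is only for illustration) is what I have in mind:

	bool alloc_failed;

	if (vq_is_packed(dev))
		alloc_failed = !vq->async_buffers_packed;
	else
		alloc_failed = !vq->async_descs_split;

	if (alloc_failed || !vq->async_pkts_info ||
			!vq->it_pool || !vq->vec_pool) {
		vhost_free_async_mem(dev, vq);
		/* log the error and return -1, as the patch already does */
	}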

> +             !vq->async_pkts_info || !vq->it_pool || !vq->vec_pool) {
> +             vhost_free_async_mem(dev, vq);
>               VHOST_LOG_CONFIG(ERR,
>                               "async register failed: cannot allocate memory for vq data "
>                               "(vid %d, qid: %d)\n", vid, queue_id);
> @@ -1728,7 +1737,7 @@ int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
>               goto out;
>       }
> 
> -     vhost_free_async_mem(vq);
> +     vhost_free_async_mem(dev, vq);
> 
>       vq->async_ops.transfer_data = NULL;
>       vq->async_ops.check_completed_copies = NULL;
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index f628714c2..673335217 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -201,9 +201,18 @@ struct vhost_virtqueue {
>       uint16_t        async_pkts_idx;
>       uint16_t        async_pkts_inflight_n;
>       uint16_t        async_last_pkts_n;
> -     struct vring_used_elem  *async_descs_split;
> -     uint16_t async_desc_idx;
> -     uint16_t last_async_desc_idx;
> +     union {
> +             struct vring_used_elem  *async_descs_split;
> +             struct vring_used_elem_packed *async_buffers_packed;
> +     };
> +     union {
> +             uint16_t async_desc_idx;
> +             uint16_t async_packed_buffer_idx;
> +     };
> +     union {
> +             uint16_t last_async_desc_idx;
> +             uint16_t last_async_buffer_idx;
> +     };
> 
>       /* vq async features */
>       bool            async_inorder;
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 438bdafd1..54e11e3a5 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -363,14 +363,14 @@ vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
>  }
> 
>  static __rte_always_inline void
> -vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> -                                struct vhost_virtqueue *vq,
> -                                uint32_t len[],
> -                                uint16_t id[],
> -                                uint16_t count[],
> +vhost_shadow_enqueue_packed(struct vhost_virtqueue *vq,
> +                                uint32_t *len,
> +                                uint16_t *id,
> +                                uint16_t *count,
>                                  uint16_t num_buffers)
>  {
>       uint16_t i;
> +
>       for (i = 0; i < num_buffers; i++) {
>               /* enqueue shadow flush action aligned with batch num */
>               if (!vq->shadow_used_idx)
> @@ -382,6 +382,17 @@ vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
>               vq->shadow_aligned_idx += count[i];
>               vq->shadow_used_idx++;
>       }
> +}
> +
> +static __rte_always_inline void
> +vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
> +                                struct vhost_virtqueue *vq,
> +                                uint32_t *len,
> +                                uint16_t *id,
> +                                uint16_t *count,
> +                                uint16_t num_buffers)
> +{
> +     vhost_shadow_enqueue_packed(vq, len, id, count, num_buffers);
> 
>       if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
>               do_data_copy_enqueue(dev, vq);
> @@ -1474,6 +1485,23 @@ store_dma_desc_info_split(struct vring_used_elem *s_ring, struct vring_used_elem
>       }
>  }
> 
> +static __rte_always_inline void
> +store_dma_desc_info_packed(struct vring_used_elem_packed *s_ring,
> +             struct vring_used_elem_packed *d_ring,
> +             uint16_t ring_size, uint16_t s_idx, uint16_t d_idx, uint16_t count)
> +{
> +     uint16_t elem_size = sizeof(struct vring_used_elem_packed);
> +
> +     if (d_idx + count <= ring_size) {
> +             rte_memcpy(d_ring + d_idx, s_ring + s_idx, count * elem_size);
> +     } else {
> +             uint16_t size = ring_size - d_idx;
> +
> +             rte_memcpy(d_ring + d_idx, s_ring + s_idx, size * elem_size);
> +             rte_memcpy(d_ring, s_ring + s_idx + size, (count - size) * elem_size);
> +     }
> +}
> +
>  static __rte_noinline uint32_t
>  virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>       struct vhost_virtqueue *vq, uint16_t queue_id,
> @@ -1641,6 +1669,330 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
>       return pkt_idx;
>  }
> 
> +static __rte_always_inline void
> +vhost_update_used_packed(struct vhost_virtqueue *vq,
> +                     struct vring_used_elem_packed *shadow_ring,
> +                     uint16_t count)
> +{
> +     int i;
> +     uint16_t used_idx = vq->last_used_idx;
> +     uint16_t head_idx = vq->last_used_idx;
> +     uint16_t head_flags = 0;
> +
> +     if (count == 0)
> +             return;
> +
> +     /* Split loop in two to save memory barriers */
> +     for (i = 0; i < count; i++) {
> +             vq->desc_packed[used_idx].id = shadow_ring[i].id;
> +             vq->desc_packed[used_idx].len = shadow_ring[i].len;
> +
> +             used_idx += shadow_ring[i].count;
> +             if (used_idx >= vq->size)
> +                     used_idx -= vq->size;
> +     }
> +
> +     /* The ordering for storing desc flags needs to be enforced. */
> +     rte_atomic_thread_fence(__ATOMIC_RELEASE);
> +
> +     for (i = 0; i < count; i++) {
> +             uint16_t flags;
> +
> +             if (vq->shadow_used_packed[i].len)
> +                     flags = VRING_DESC_F_WRITE;
> +             else
> +                     flags = 0;
> +
> +             if (vq->used_wrap_counter) {
> +                     flags |= VRING_DESC_F_USED;
> +                     flags |= VRING_DESC_F_AVAIL;
> +             } else {
> +                     flags &= ~VRING_DESC_F_USED;
> +                     flags &= ~VRING_DESC_F_AVAIL;
> +             }
> +
> +             if (i > 0) {
> +                     vq->desc_packed[vq->last_used_idx].flags = flags;
> +
No need for the blank line above.

> +             } else {
> +                     head_idx = vq->last_used_idx;
> +                     head_flags = flags;
> +             }
> +
> +             vq_inc_last_used_packed(vq, shadow_ring[i].count);
> +     }
> +
> +     vq->desc_packed[head_idx].flags = head_flags;
> +}
> +
> +static __rte_always_inline int
> +vhost_enqueue_async_single_packed(struct virtio_net *dev,
> +                         struct vhost_virtqueue *vq,
> +                         struct rte_mbuf *pkt,
> +                         struct buf_vector *buf_vec,
> +                         uint16_t *nr_descs,
> +                         uint16_t *nr_buffers,
> +                         struct vring_packed_desc *async_descs,
> +                         struct iovec *src_iovec, struct iovec *dst_iovec,
> +                         struct rte_vhost_iov_iter *src_it,
> +                         struct rte_vhost_iov_iter *dst_it)
> +{
> +     uint16_t nr_vec = 0;
> +     uint16_t avail_idx = vq->last_avail_idx;
> +     uint16_t max_tries, tries = 0;
> +     uint16_t buf_id = 0;
> +     uint32_t len = 0;
> +     uint16_t desc_count = 0;
> +     uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
> +     uint32_t buffer_len[vq->size];
> +     uint16_t buffer_buf_id[vq->size];
> +     uint16_t buffer_desc_count[vq->size];
> +     *nr_buffers = 0;
> +
> +     if (rxvq_is_mergeable(dev))
> +             max_tries = vq->size - 1;
> +     else
> +             max_tries = 1;
> +
> +     while (size > 0) {
> +             /*
> +              * if we tried all available ring items, and still
> +              * can't get enough buf, it means something abnormal
> +              * happened.
> +              */
> +             if (unlikely(++tries > max_tries))
> +                     return -1;
> +
> +             if (unlikely(fill_vec_buf_packed(dev, vq, avail_idx, &desc_count, buf_vec, &nr_vec,
> +                                             &buf_id, &len, VHOST_ACCESS_RW) < 0))
> +                     return -1;
> +
> +             len = RTE_MIN(len, size);
> +             size -= len;
> +
> +             buffer_len[*nr_buffers] = len;
> +             buffer_buf_id[*nr_buffers] = buf_id;
> +             buffer_desc_count[*nr_buffers] = desc_count;
> +             *nr_buffers += 1;
> +
> +             *nr_descs += desc_count;
> +             avail_idx += desc_count;
> +             if (avail_idx >= vq->size)
> +                     avail_idx -= vq->size;
> +     }
> +
> +     if (async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, src_iovec, dst_iovec,
> +                     src_it, dst_it) < 0)
> +             return -1;
> +     /* store descriptors for DMA */
> +     if (avail_idx >= *nr_descs) {
> +             rte_memcpy(async_descs, &vq->desc_packed[vq->last_avail_idx],
> +                     *nr_descs * sizeof(struct vring_packed_desc));
> +     } else {
> +             uint16_t nr_copy = vq->size - vq->last_avail_idx;
> +             rte_memcpy(async_descs, &vq->desc_packed[vq->last_avail_idx],
> +                     nr_copy * sizeof(struct vring_packed_desc));
> +             rte_memcpy(async_descs + nr_copy, vq->desc_packed,
> +                     (*nr_descs - nr_copy) * sizeof(struct vring_packed_desc));
> +     }
> +
> +     vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
> +
> +     return 0;
> +}
> +
> +static __rte_always_inline int16_t
> +virtio_dev_rx_async_single_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
> +                         struct rte_mbuf *pkt, uint16_t *nr_descs, uint16_t *nr_buffers,
> +                         struct vring_packed_desc *async_descs,
> +                         struct iovec *src_iovec, struct iovec *dst_iovec,
> +                         struct rte_vhost_iov_iter *src_it, struct rte_vhost_iov_iter *dst_it)
> +{
> +     struct buf_vector buf_vec[BUF_VECTOR_MAX];
> +     *nr_descs = 0;
> +     *nr_buffers = 0;
> +
> +     if (unlikely(vhost_enqueue_async_single_packed(dev, vq, pkt, buf_vec, nr_descs, nr_buffers,
> +                                              async_descs, src_iovec, dst_iovec,
> +                                              src_it, dst_it) < 0)) {
> +             VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
> +             return -1;
> +     }
> +
> +     VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
> +                     dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
> +
> +     return 0;
> +}
> +
> +static __rte_always_inline void
> +dma_error_handler_packed(struct vhost_virtqueue *vq, struct vring_packed_desc *async_descs,
> +                     uint16_t async_descs_idx, uint16_t slot_idx, uint32_t nr_err,
> +                     uint32_t *pkt_idx, uint32_t *num_async_pkts, uint32_t *num_done_pkts)
> +{
> +     uint16_t descs_err = 0;
> +     uint16_t buffers_err = 0;
> +     struct async_inflight_info *pkts_info = vq->async_pkts_info;
> +
> +     *num_async_pkts -= nr_err;
> +     *pkt_idx -= nr_err;
> +     /* calculate the sum of buffers and descs of DMA-error packets. */
> +     while (nr_err-- > 0) {
> +             descs_err += pkts_info[slot_idx % vq->size].descs;
I notice several places here use "%" to wrap the index around, but the
existing code uses "& (vq->size - 1)" instead. I think it's better to keep
this consistent.
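
For example (illustrative only; the mask form assumes vq->size is a power
of two, which is guaranteed for split rings):

	/* existing split-ring code wraps the index with a mask: */
	descs_err += pkts_info[slot_idx & (vq->size - 1)].descs;

	/* while this patch wraps it with modulo: */
	descs_err += pkts_info[slot_idx % vq->size].descs;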

Thanks,
Jiayu
