On 8/29/19 4:12 PM, JinYu wrote:
> This patch introduces three APIs to operate the inflight
> ring. Three APIs are set, set last and clear. It includes
> split and packed ring.
> 
> Signed-off-by: Lin Li <lili...@baidu.com>
> Signed-off-by: Xun Ni <ni...@baidu.com>
> Signed-off-by: Yu Zhang <zhangy...@baidu.com>
> Signed-off-by: Jin Yu <jin...@intel.com>
> ---
>  lib/librte_vhost/rte_vhost.h | 116 ++++++++++++++++
>  lib/librte_vhost/vhost.c     | 252 +++++++++++++++++++++++++++++++++++
>  2 files changed, 368 insertions(+)
> 
> diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
> index e090cdfee..d3b6eda21 100644
> --- a/lib/librte_vhost/rte_vhost.h
> +++ b/lib/librte_vhost/rte_vhost.h
> @@ -693,6 +693,122 @@ int rte_vhost_get_mem_table(int vid, struct 
> rte_vhost_memory **mem);
>  int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
>                             struct rte_vhost_vring *vring);
>  
> +/**
> + * Set split inflight descriptor.
> + *
> + * This function saves descriptors that have been consumed in the
> + * available ring
> + *
> + * @param vid
> + *  vhost device ID
> + * @param vring_idx
> + *  vring index
> + * @param idx
> + *  inflight entry index
> + * @return
> + *  0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
> +     uint16_t idx);
> +
> +/**
> + * Set packed inflight descriptor and get corresponding inflight entry
> + *
> + * This function saves descriptors that have been consumed
> + *
> + * @param vid
> + *  vhost device ID
> + * @param vring_idx
> + *  vring index
> + * @param head
> + *  head of descriptors
> + * @param last
> + *  last of descriptors
> + * @param inflight_entry
> + *  corresponding inflight entry
> + * @return
> + *  0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
> +     uint16_t head, uint16_t last, uint16_t *inflight_entry);
> +
> +/**
> + * Save the head of the list of the last batch of used descriptors.
> + *
> + * @param vid
> + *  vhost device ID
> + * @param vring_idx
> + *  vring index
> + * @param idx
> + *  descriptor entry index
> + * @return
> + *  0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_set_last_inflight_io_split(int vid,
> +     uint16_t vring_idx, uint16_t idx);
> +
> +/**
> + * Update the inflight free_head, used_idx and used_wrap_counter.
> + *
> + * This function will update status first before updating descriptors
> + * to used
> + *
> + * @param vid
> + *  vhost device ID
> + * @param vring_idx
> + *  vring index
> + * @param head
> + *  head of descriptors
> + * @return
> + *  0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_set_last_inflight_io_packed(int vid,
> +     uint16_t vring_idx, uint16_t head);
> +
> +/**
> + * Clear the split inflight status.
> + *
> + * @param vid
> + *  vhost device ID
> + * @param vring_idx
> + *  vring index
> + * @param last_used_idx
> + *  last used idx of used ring
> + * @param idx
> + *  inflight entry index
> + * @return
> + *  0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
> +     uint16_t last_used_idx, uint16_t idx);
> +
> +/**
> + * Clear the packed inflight status.
> + *
> + * @param vid
> + *  vhost device ID
> + * @param vring_idx
> + *  vring index
> + * @param head
> + *  inflight entry index
> + * @return
> + *  0 on success, -1 on failure
> + */
> +__rte_experimental
> +int
> +rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
> +     uint16_t head);
> +
>  /**
>   * Notify the guest that used descriptors have been added to the vring.  This
>   * function acts as a memory barrier.
> diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
> index 660ac2a07..58940152f 100644
> --- a/lib/librte_vhost/vhost.c
> +++ b/lib/librte_vhost/vhost.c
> @@ -783,6 +783,258 @@ rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
>       return 0;
>  }
>  
> +int
> +rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
> +     uint16_t idx)
> +{
> +     struct virtio_net *dev;
> +     struct vhost_virtqueue *vq;
> +
> +     dev = get_device(vid);
> +     if (unlikely(!dev))
> +             return -1;
> +
> +     if (unlikely(!(dev->protocol_features &
> +             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> +             return 0;
> +
> +     if (unlikely(vq_is_packed(dev)))
> +             return -1;
> +
> +     if (unlikely(vring_idx >= VHOST_MAX_VRING))
> +             return -1;
> +
> +     vq = dev->virtqueue[vring_idx];
> +     if (unlikely(!vq))
> +             return -1;
> +
> +     if (unlikely(!vq->inflight_split))
> +             return -1;
> +
> +     vq->inflight_split->desc[idx].counter = vq->global_counter++;
> +     vq->inflight_split->desc[idx].inflight = 1;
> +     return 0;
> +}
> +
> +int
> +rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
> +     uint16_t head, uint16_t last, uint16_t *inflight_entry)
> +{
> +     struct virtio_net *dev;
> +     struct vhost_virtqueue *vq;
> +     struct inflight_info_packed *inflight_info;
> +     struct vring_packed_desc *desc;
> +     uint16_t old_free_head, free_head;
> +
> +     dev = get_device(vid);
> +     if (unlikely(!dev))
> +             return -1;
> +
> +     if (unlikely(!(dev->protocol_features &
> +             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> +             return 0;
> +
> +     if (unlikely(!vq_is_packed(dev)))
> +             return -1;
> +
> +     if (unlikely(vring_idx >= VHOST_MAX_VRING))
> +             return -1;
> +
> +     vq = dev->virtqueue[vring_idx];
> +     if (unlikely(!vq))
> +             return -1;
> +
> +     inflight_info = vq->inflight_packed;
> +     if (unlikely(!inflight_info))
> +             return -1;
> +
> +     desc = vq->desc_packed;
> +     old_free_head = inflight_info->old_free_head;
> +     free_head = old_free_head;
> +
> +     /* init header descriptor */
> +     inflight_info->desc[old_free_head].num = 0;
> +     inflight_info->desc[old_free_head].counter = vq->global_counter++;
> +     inflight_info->desc[old_free_head].inflight = 1;

I think head has to be validated so that it does not cause out-of-bounds
accesses.

> +     /* save desc entry in flight entry */
> +     while (head != ((last + 1) % vq->size)) {
> +             inflight_info->desc[old_free_head].num++;
> +             inflight_info->desc[free_head].addr = desc[head].addr;
> +             inflight_info->desc[free_head].len = desc[head].len;
> +             inflight_info->desc[free_head].flags = desc[head].flags;
> +             inflight_info->desc[free_head].id = desc[head].id;
> +
> +             inflight_info->desc[old_free_head].last = free_head;
> +             free_head = inflight_info->desc[free_head].next;
> +             inflight_info->free_head = free_head;
> +             head = (head + 1) % vq->size;
> +     }
> +
> +     inflight_info->old_free_head = free_head;
> +     *inflight_entry = old_free_head;
> +
> +     return 0;
> +}
> +
> +int
> +rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
> +     uint16_t last_used_idx, uint16_t idx)
> +{
> +     struct virtio_net *dev;
> +     struct vhost_virtqueue *vq;
> +
> +     dev = get_device(vid);
> +     if (unlikely(!dev))
> +             return -1;
> +
> +     if (unlikely(!(dev->protocol_features &
> +             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> +             return 0;
> +
> +     if (unlikely(vq_is_packed(dev)))
> +             return -1;
> +
> +     if (unlikely(vring_idx >= VHOST_MAX_VRING))
> +             return -1;
> +
> +     vq = dev->virtqueue[vring_idx];
> +     if (unlikely(!vq))
> +             return -1;
> +
> +     if (unlikely(!vq->inflight_split))
> +             return -1;
> +
> +     rte_compiler_barrier();
> +
> +     vq->inflight_split->desc[idx].inflight = 0;


Maybe it would be better to check the idx value, so that it does not
cause out-of-bounds accesses.

> +
> +     rte_compiler_barrier();
> +
> +     vq->inflight_split->used_idx = last_used_idx;
> +     return 0;
> +}
> +
> +int
> +rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
> +     uint16_t head)
> +{
> +     struct virtio_net *dev;
> +     struct vhost_virtqueue *vq;
> +     struct inflight_info_packed *inflight_info;
> +
> +     dev = get_device(vid);
> +     if (unlikely(!dev))
> +             return -1;
> +
> +     if (unlikely(!(dev->protocol_features &
> +             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> +             return 0;
> +
> +     if (unlikely(!vq_is_packed(dev)))
> +             return -1;
> +
> +     if (unlikely(vring_idx >= VHOST_MAX_VRING))
> +             return -1;
> +
> +     vq = dev->virtqueue[vring_idx];
> +     if (unlikely(!vq))
> +             return -1;
> +
> +     inflight_info = vq->inflight_packed;
> +     if (unlikely(!inflight_info))
> +             return -1;
> +
> +     rte_compiler_barrier();
> +
> +     inflight_info->desc[head].inflight = 0;


Maybe it would be better to check the head value, so that it does not
cause out-of-bounds accesses.


> +     rte_compiler_barrier();
> +
> +     inflight_info->old_free_head = inflight_info->free_head;
> +     inflight_info->old_used_idx = inflight_info->used_idx;
> +     inflight_info->old_used_wrap_counter = inflight_info->used_wrap_counter;
> +
> +     return 0;
> +}
> +
> +int
> +rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
> +     uint16_t idx)
> +{
> +     struct virtio_net *dev;
> +     struct vhost_virtqueue *vq;
> +
> +     dev = get_device(vid);
> +     if (unlikely(!dev))
> +             return -1;
> +
> +     if (unlikely(!(dev->protocol_features &
> +             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> +             return 0;
> +
> +     if (unlikely(vq_is_packed(dev)))
> +             return -1;
> +
> +     if (unlikely(vring_idx >= VHOST_MAX_VRING))
> +             return -1;
> +
> +     vq = dev->virtqueue[vring_idx];
> +     if (unlikely(!vq))
> +             return -1;
> +
> +     if (unlikely(!vq->inflight_split))
> +             return -1;
> +
> +     vq->inflight_split->last_inflight_io = idx;
> +     return 0;
> +}
> +
> +int
> +rte_vhost_set_last_inflight_io_packed(int vid, uint16_t vring_idx,
> +     uint16_t head)
> +{
> +     struct virtio_net *dev;
> +     struct vhost_virtqueue *vq;
> +     struct inflight_info_packed *inflight_info;
> +     uint16_t last;
> +
> +     dev = get_device(vid);
> +     if (unlikely(!dev))
> +             return -1;
> +
> +     if (unlikely(!(dev->protocol_features &
> +             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
> +             return 0;
> +
> +     if (unlikely(!vq_is_packed(dev)))
> +             return -1;
> +
> +     if (unlikely(vring_idx >= VHOST_MAX_VRING))
> +             return -1;
> +
> +     vq = dev->virtqueue[vring_idx];
> +     if (unlikely(!vq))
> +             return -1;
> +
> +     inflight_info = vq->inflight_packed;
> +     if (unlikely(!inflight_info))
> +             return -1;
> +
> +     last = inflight_info->desc[head].last;

Ditto
> +     inflight_info->desc[last].next = inflight_info->free_head;
> +     inflight_info->free_head = head;
> +     inflight_info->used_idx += inflight_info->desc[head].num;
> +     if (inflight_info->used_idx >= inflight_info->desc_num) {
> +             inflight_info->used_idx -= inflight_info->desc_num;
> +             inflight_info->used_wrap_counter =
> +                     !inflight_info->used_wrap_counter;
> +     }
> +
> +     return 0;
> +}
> +
>  int
>  rte_vhost_vring_call(int vid, uint16_t vring_idx)
>  {
> 

Reply via email to