On 5/28/21 10:11 AM, Jiayu Hu wrote:
> This patch adds thread-unsafe versions of the async register and
> unregister functions.
> 
> Signed-off-by: Jiayu Hu <jiayu...@intel.com>
> ---
>  doc/guides/prog_guide/vhost_lib.rst |  12 +++
>  lib/vhost/rte_vhost_async.h         |  42 ++++++++++
>  lib/vhost/version.map               |   4 +
>  lib/vhost/vhost.c                   | 161 +++++++++++++++++++++++++++---------
>  4 files changed, 178 insertions(+), 41 deletions(-)
> 
> diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
> index d18fb98..6b2745f 100644
> --- a/doc/guides/prog_guide/vhost_lib.rst
> +++ b/doc/guides/prog_guide/vhost_lib.rst
> @@ -253,6 +253,12 @@ The following is an overview of some key Vhost API functions:
>      vhost invokes this function to get the copy data completed by async
>      devices.
>  
> +* ``rte_vhost_async_channel_register_thread_unsafe(vid, queue_id, features, ops)``
> +  Register a vhost queue with async copy device channel without
> +  performing any locking.
> +
> +  This function is only safe to call from within vhost callback functions.
> +
>  * ``rte_vhost_async_channel_unregister(vid, queue_id)``
>  
>    Unregister the async copy device channel from a vhost queue.
> @@ -265,6 +271,12 @@ The following is an overview of some key Vhost API functions:
>    devices for all vhost queues in destroy_device(), when a
>    virtio device is paused or shut down.
>  
> +* ``rte_vhost_async_channel_unregister_thread_unsafe(vid, queue_id)``
> +  Unregister the async copy device channel from a vhost queue without
> +  performing any locking.
> +
> +  This function is only safe to call from within vhost callback functions.
> +
>  * ``rte_vhost_submit_enqueue_burst(vid, queue_id, pkts, count, comp_pkts, comp_count)``
>  
>    Submit an enqueue request to transmit ``count`` packets from host to guest
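
Regarding the two *_thread_unsafe entries above: to make the "only safe
to call from within vhost callback functions" constraint concrete, here
is a minimal sketch of a possible call site. It assumes the standard
vring_state_changed() callback from struct vhost_device_ops; the names
app_dma_ops and app_vring_state_changed are hypothetical.

#include <stdint.h>
#include <rte_vhost_async.h>

/* Hypothetical: filled in elsewhere by the application with its
 * transfer_data and check_completed_copies callbacks. */
extern struct rte_vhost_async_channel_ops app_dma_ops;

/* Wired into the application's struct vhost_device_ops (not shown). */
static int
app_vring_state_changed(int vid, uint16_t queue_id, int enable)
{
	/* b0 of the features word: DMA supports in-order data transfer */
	uint32_t features = 1U << 0;

	/* Running inside a vhost callback, so the thread-unsafe variants
	 * can be used without taking any additional lock. */
	if (enable)
		return rte_vhost_async_channel_register_thread_unsafe(vid,
				queue_id, features, &app_dma_ops);

	return rte_vhost_async_channel_unregister_thread_unsafe(vid,
			queue_id);
}
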
> diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
> index 6faa31f..d054b5f 100644
> --- a/lib/vhost/rte_vhost_async.h
> +++ b/lib/vhost/rte_vhost_async.h
> @@ -143,6 +143,48 @@ __rte_experimental
>  int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id);
>  
>  /**
> + * register an async channel for vhost without performing any locking
> + *
> + * @note This function does not perform any locking, and is only safe to call
> + *       from within vhost callback functions.
> + *
> + * @param vid
> + *  vhost device id async channel to be attached to
> + * @param queue_id
> + *  vhost queue id async channel to be attached to
> + * @param features
> + *  DMA channel feature bit
> + *    b0       : DMA supports inorder data transfer
> + *    b1  - b15: reserved
> + *    b16 - b27: Packet length threshold for DMA transfer
> + *    b28 - b31: reserved
> + * @param ops
> + *  DMA operation callbacks
> + * @return
> + *  0 on success, -1 on failures
> + */
> +__rte_experimental
> +int rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
> +     uint32_t features, struct rte_vhost_async_channel_ops *ops);
> +
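
To make the feature bit layout above concrete: a minimal sketch of
composing the 'features' word through the rte_vhost_async_features
accessors that vhost.c uses below (f.intval, f.async_inorder,
f.async_threshold); the helper name is hypothetical and the 256-byte
threshold is only an example value.

#include <stdint.h>
#include <rte_vhost_async.h>

static inline uint32_t
app_make_async_features(void)
{
	struct rte_vhost_async_features f;

	f.intval = 0;
	f.async_inorder = 1;		/* b0: in-order data transfer */
	f.async_threshold = 256;	/* b16 - b27: packet length threshold */

	return f.intval;		/* packed value passed as 'features' */
}
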
> +/**
> + * unregister a DMA channel for vhost without performing any locking
> + *
> + * @note This function does not perform any locking, and is only safe to call
> + *       from within vhost callback functions.
> + *
> + * @param vid
> + *  vhost device id DMA channel to be detached
> + * @param queue_id
> + *  vhost queue id DMA channel to be detached
> + * @return
> + *  0 on success, -1 on failures
> + */
> +__rte_experimental
> +int rte_vhost_async_channel_unregister_thread_unsafe(int vid,
> +             uint16_t queue_id);
> +
> +/**
>   * This function submits enqueue data to async engine. Successfully
>   * enqueued packets can be transfer completed or being occupied by DMA
>   * engines, when this API returns. Transfer completed packets are returned
> diff --git a/lib/vhost/version.map b/lib/vhost/version.map
> index 9103a23..2363db8 100644
> --- a/lib/vhost/version.map
> +++ b/lib/vhost/version.map
> @@ -79,4 +79,8 @@ EXPERIMENTAL {
>  
>       # added in 21.05
>       rte_vhost_get_negotiated_protocol_features;
> +
> +     # added in 21.08
> +     rte_vhost_async_channel_register_thread_unsafe;
> +     rte_vhost_async_channel_unregister_thread_unsafe;
>  };
> diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
> index c96f633..025e150 100644
> --- a/lib/vhost/vhost.c
> +++ b/lib/vhost/vhost.c
> @@ -1609,46 +1609,20 @@ int rte_vhost_extern_callback_register(int vid,
>       return 0;
>  }
>  
> -int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
> -                                     uint32_t features,
> -                                     struct rte_vhost_async_channel_ops *ops)
> +static __rte_always_inline int
> +async_channel_register(int vid, uint16_t queue_id,
> +             struct rte_vhost_async_features f,
> +             struct rte_vhost_async_channel_ops *ops)
>  {
> -     struct vhost_virtqueue *vq;
>       struct virtio_net *dev = get_device(vid);
> -     struct rte_vhost_async_features f;
> +     struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
>       int node;
>  
> -     if (dev == NULL || ops == NULL)
> -             return -1;
> -
> -     f.intval = features;
> -
> -     if (queue_id >= VHOST_MAX_VRING)
> -             return -1;
> -
> -     vq = dev->virtqueue[queue_id];
> -
> -     if (unlikely(vq == NULL || !dev->async_copy))
> -             return -1;
> -
> -     if (unlikely(!f.async_inorder)) {
> -             VHOST_LOG_CONFIG(ERR,
> -                     "async copy is not supported on non-inorder mode "
> -                     "(vid %d, qid: %d)\n", vid, queue_id);
> -             return -1;
> -     }
> -
> -     if (unlikely(ops->check_completed_copies == NULL ||
> -             ops->transfer_data == NULL))
> -             return -1;
> -
> -     rte_spinlock_lock(&vq->access_lock);
> -
>       if (unlikely(vq->async_registered)) {
>               VHOST_LOG_CONFIG(ERR,
>                       "async register failed: channel already registered "
>                       "(vid %d, qid: %d)\n", vid, queue_id);
> -             goto reg_out;
> +             return -1;
>       }
>  
>  #ifdef RTE_LIBRTE_VHOST_NUMA
> @@ -1670,7 +1644,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
>               VHOST_LOG_CONFIG(ERR,
>                       "async register failed: cannot allocate memory for async_pkts_info "
>                       "(vid %d, qid: %d)\n", vid, queue_id);
> -             goto reg_out;
> +             return -1;
>       }
>  
>       vq->it_pool = rte_malloc_socket(NULL,
> @@ -1681,7 +1655,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
>               VHOST_LOG_CONFIG(ERR,
>                       "async register failed: cannot allocate memory for it_pool "
>                       "(vid %d, qid: %d)\n", vid, queue_id);
> -             goto reg_out;
> +             return -1;
>       }
>  
>       vq->vec_pool = rte_malloc_socket(NULL,
> @@ -1692,7 +1666,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
>               VHOST_LOG_CONFIG(ERR,
>                       "async register failed: cannot allocate memory for vec_pool "
>                       "(vid %d, qid: %d)\n", vid, queue_id);
> -             goto reg_out;
> +             return -1;
>       }
>  
>       if (vq_is_packed(dev)) {
> @@ -1704,7 +1678,7 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
>                       VHOST_LOG_CONFIG(ERR,
>                               "async register failed: cannot allocate memory for async buffers "
>                               "(vid %d, qid: %d)\n", vid, queue_id);
> -                     goto reg_out;
> +                     return -1;
>               }
>       } else {
>               vq->async_descs_split = rte_malloc_socket(NULL,
> @@ -1715,22 +1689,92 @@ int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
>                       VHOST_LOG_CONFIG(ERR,
>                               "async register failed: cannot allocate memory for async descs "
>                               "(vid %d, qid: %d)\n", vid, queue_id);
> -                     goto reg_out;
> +                     return -1;
>               }
>       }
>  
>       vq->async_ops.check_completed_copies = ops->check_completed_copies;
>       vq->async_ops.transfer_data = ops->transfer_data;
> -
>       vq->async_inorder = f.async_inorder;
>       vq->async_threshold = f.async_threshold;
> -
>       vq->async_registered = true;
>  
> -reg_out:
> +     return 0;
> +}
> +
> +int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
> +                                     uint32_t features,
> +                                     struct rte_vhost_async_channel_ops *ops)
> +{
> +     struct vhost_virtqueue *vq;
> +     struct virtio_net *dev = get_device(vid);
> +     struct rte_vhost_async_features f;
> +     int ret;
> +
> +     if (dev == NULL || ops == NULL)
> +             return -1;
> +
> +     f.intval = features;

Not directly related to this patch set, but could you please rework
struct rte_vhost_async_features? There is no point in packing the flags
and the threshold value into a single integer.

Also, the prototypes should just take the struct directly, or the struct
should have separate fields for the threshold and the features.
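
Something along these lines, to make the suggestion concrete (the names
below are hypothetical, not an existing definition):

struct rte_vhost_async_config {
	uint32_t features;		/* e.g. bit 0 = in-order DMA transfer */
	uint32_t async_threshold;	/* packet length threshold for DMA offload */
};

__rte_experimental
int rte_vhost_async_channel_register(int vid, uint16_t queue_id,
		struct rte_vhost_async_config config,
		struct rte_vhost_async_channel_ops *ops);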


> +
> +     if (queue_id >= VHOST_MAX_VRING)
> +             return -1;
> +
> +     vq = dev->virtqueue[queue_id];
> +
> +     if (unlikely(vq == NULL || !dev->async_copy))
> +             return -1;
> +
> +     if (unlikely(!f.async_inorder)) {
> +             VHOST_LOG_CONFIG(ERR,
> +                     "async copy is not supported on non-inorder mode "
> +                     "(vid %d, qid: %d)\n", vid, queue_id);
> +             return -1;
> +     }
> +
> +     if (unlikely(ops->check_completed_copies == NULL ||
> +             ops->transfer_data == NULL))
> +             return -1;
> +
> +     rte_spinlock_lock(&vq->access_lock);
> +     ret = async_channel_register(vid, queue_id, f, ops);
>       rte_spinlock_unlock(&vq->access_lock);
>  
> -     return 0;
> +     return ret;
> +}
> +
> +int rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id,
> +                                     uint32_t features,
> +                                     struct rte_vhost_async_channel_ops *ops)
> +{
> +     struct vhost_virtqueue *vq;
> +     struct virtio_net *dev = get_device(vid);
> +     struct rte_vhost_async_features f;
> +
> +     if (dev == NULL || ops == NULL)
> +             return -1;
> +
> +     f.intval = features;
> +
> +     if (queue_id >= VHOST_MAX_VRING)
> +             return -1;
> +
> +     vq = dev->virtqueue[queue_id];
> +
> +     if (unlikely(vq == NULL || !dev->async_copy))
> +             return -1;
> +
> +     if (unlikely(!f.async_inorder)) {
> +             VHOST_LOG_CONFIG(ERR,
> +                     "async copy is not supported on non-inorder mode "
> +                     "(vid %d, qid: %d)\n", vid, queue_id);
> +             return -1;
> +     }
> +
> +     if (unlikely(ops->check_completed_copies == NULL ||
> +             ops->transfer_data == NULL))
> +             return -1;
> +
> +     return async_channel_register(vid, queue_id, f, ops);
>  }
>  
>  int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
> @@ -1780,5 +1824,40 @@ int rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
>       return ret;
>  }
>  
> +int rte_vhost_async_channel_unregister_thread_unsafe(int vid,
> +             uint16_t queue_id)
> +{
> +     struct vhost_virtqueue *vq;
> +     struct virtio_net *dev = get_device(vid);
> +
> +     if (dev == NULL)
> +             return -1;
> +
> +     if (queue_id >= VHOST_MAX_VRING)
> +             return -1;
> +
> +     vq = dev->virtqueue[queue_id];
> +
> +     if (vq == NULL)
> +             return -1;
> +
> +     if (!vq->async_registered)
> +             return 0;
> +
> +     if (vq->async_pkts_inflight_n) {
> +             VHOST_LOG_CONFIG(ERR, "Failed to unregister async channel. "
> +                     "async inflight packets must be completed before unregistration.\n");
> +             return -1;
> +     }
> +
> +     vhost_free_async_mem(vq);
> +
> +     vq->async_ops.transfer_data = NULL;
> +     vq->async_ops.check_completed_copies = NULL;
> +     vq->async_registered = false;
> +
> +     return 0;
> +}
> +
>  RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
>  RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);
> 
