> -----Original Message-----
> From: Victor Kaplansky [mailto:vkapl...@redhat.com]
> Sent: Wednesday, December 6, 2017 9:56 PM
> To: dev@dpdk.org; y...@fridaylinux.org; Bie, Tiwei; Tan, Jianfeng;
> vkapl...@redhat.com
> Cc: sta...@dpdk.org; jfrei...@redhat.com; Maxime Coquelin
> Subject: [PATCH] vhost_user: protect active rings from async ring changes
> 
> When performing live migration or memory hot-plugging,
> the changes to the device and vrings made by message handler
> done independently from vring usage by PMD threads.
> 
> This causes for example segfauls during live-migration

Typo: segfauls -> segfaults?

> with MQ enable, but in general virtually any request
> sent by qemu changing the state of device can cause
> problems.
> 
> These patches fixes all above issues by adding a spinlock
> to every vring and requiring message handler to start operation
> only after ensuring that all PMD threads related to the divece

Another typo: divece -> device.

> are out of critical section accessing the vring data.
> 
> Each vring has its own lock in order to not create contention
> between PMD threads of different vrings and to prevent
> performance degradation by scaling queue pair number.

I also wonder how much overhead it brings.

Instead of locking each vring, can we just wait a while (10us, for example)
after calling the destroy_device() callback, so that every PMD thread has
enough time to get out of the critical area?

> ---
>  lib/librte_vhost/vhost.h      |  1 +
>  lib/librte_vhost/vhost_user.c | 44
> +++++++++++++++++++++++++++++++++++++++++++
>  lib/librte_vhost/virtio_net.c |  8 ++++++++
>  3 files changed, 53 insertions(+)
> 
> diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
> index 1cc81c17..812aeccd 100644
> --- a/lib/librte_vhost/vhost.h
> +++ b/lib/librte_vhost/vhost.h
> @@ -137,6 +137,7 @@ struct vhost_virtqueue {
>       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
>       int                             iotlb_cache_nr;
>       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
> +        rte_spinlock_t active_lock;
>  } __rte_cache_aligned;
> 
>  /* Old kernels have no such macros defined */
> diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
> index f4c7ce46..02a3a7d3 100644
> --- a/lib/librte_vhost/vhost_user.c
> +++ b/lib/librte_vhost/vhost_user.c
> @@ -1190,6 +1190,46 @@ vhost_user_check_and_alloc_queue_pair(struct
> virtio_net *dev, VhostUserMsg *msg)
>       return alloc_vring_queue(dev, vring_idx);
>  }
> 
> +static void
> +vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
> +{
> +     unsigned int i = 0;
> +     unsigned int vq_num = 0;
> +
> +     while (vq_num < dev->nr_vring) {
> +             struct vhost_virtqueue *vq = dev->virtqueue[i];
> +
> +             if (!vq) {
> +                     i++;
> +                     continue;
> +             }
> +
> +             rte_spinlock_lock(&vq->active_lock);
> +             vq_num++;
> +             i++;
> +     }
> +}
> +
> +static void
> +vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
> +{
> +     unsigned int i = 0;
> +     unsigned int vq_num = 0;
> +
> +     while (vq_num < dev->nr_vring) {
> +             struct vhost_virtqueue *vq = dev->virtqueue[i];
> +
> +             if (!vq) {
> +                     i++;
> +                     continue;
> +             }
> +
> +             rte_spinlock_unlock(&vq->active_lock);
> +             vq_num++;
> +             i++;
> +     }
> +}
> +
>  int
>  vhost_user_msg_handler(int vid, int fd)
>  {
> @@ -1241,6 +1281,8 @@ vhost_user_msg_handler(int vid, int fd)
>               return -1;
>       }
> 
> +     vhost_user_lock_all_queue_pairs(dev);
> +
>       switch (msg.request.master) {
>       case VHOST_USER_GET_FEATURES:
>               msg.payload.u64 = vhost_user_get_features(dev);
> @@ -1342,6 +1384,8 @@ vhost_user_msg_handler(int vid, int fd)
> 
>       }
> 
> +     vhost_user_unlock_all_queue_pairs(dev);
> +
>       if (msg.flags & VHOST_USER_NEED_REPLY) {
>               msg.payload.u64 = !!ret;
>               msg.size = sizeof(msg.payload.u64);
> diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
> index 6fee16e5..de7e38bb 100644
> --- a/lib/librte_vhost/virtio_net.c
> +++ b/lib/librte_vhost/virtio_net.c
> @@ -44,6 +44,7 @@
>  #include <rte_udp.h>
>  #include <rte_sctp.h>
>  #include <rte_arp.h>
> +#include <rte_spinlock.h>
> 
>  #include "iotlb.h"
>  #include "vhost.h"
> @@ -1180,9 +1181,12 @@ rte_vhost_dequeue_burst(int vid, uint16_t
> queue_id,
>       }
> 
>       vq = dev->virtqueue[queue_id];
> +

Remove above blank line.

>       if (unlikely(vq->enabled == 0))
>               return 0;
> 
> +     rte_spinlock_lock(&vq->active_lock);
> +
>       vq->batch_copy_nb_elems = 0;
> 
>       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
> @@ -1240,6 +1244,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t
> queue_id,
>               if (rarp_mbuf == NULL) {
>                       RTE_LOG(ERR, VHOST_DATA,
>                               "Failed to allocate memory for mbuf.\n");
> +                     rte_spinlock_unlock(&vq->active_lock);
>                       return 0;
>               }
> 
> @@ -1356,6 +1361,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t
> queue_id,
>       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
>               vhost_user_iotlb_rd_unlock(vq);
> 
> +        rte_spinlock_unlock(&vq->active_lock);
> +
>       if (unlikely(rarp_mbuf != NULL)) {
>               /*
>                * Inject it to the head of "pkts" array, so that switch's mac
> @@ -1366,5 +1373,6 @@ rte_vhost_dequeue_burst(int vid, uint16_t
> queue_id,
>               i += 1;
>       }
> 
> +

Remove above blank line.

>       return i;
>  }
> --
> Victor

Reply via email to