On Thu, Feb 22, 2018 at 07:19:09PM +0100, Maxime Coquelin wrote:
This patch implements support for the new SET_VIRTIO_STATUS
vhost-user request.

The main use for this new request is for the backend to know
when the driver sets the DRIVER_OK status bit. Starting with Virtio
1.0, we know that once the bit is set, no more queues will
be initialized.
When it happens, this patch removes all queues starting from
the first uninitialized one, so that the port starts even if
the guest driver does not use all the queues provided by QEMU.
This is for example the case with the Windows driver, which only
initializes as many queue pairs as there are vCPUs.

The second use for this request is that when the status changes to
the reset or failed state, the vhost port is stopped and the
virtqueues are cleaned up and freed.

Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
lib/librte_vhost/vhost_user.c | 98 +++++++++++++++++++++++++++++++++++++++++++
lib/librte_vhost/vhost_user.h |  5 ++-
2 files changed, 102 insertions(+), 1 deletion(-)

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index c256ebb06..7ab02c44b 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -67,6 +67,7 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
+       [VHOST_USER_SET_VIRTIO_STATUS]  = "VHOST_USER_SET_VIRTIO_STATUS",
};

static uint64_t
@@ -1244,6 +1245,100 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct 
VhostUserMsg *msg)
        return 0;
}

+static int
+vhost_user_set_virtio_status(struct virtio_net *dev, struct VhostUserMsg *msg)
+{
+       uint8_t old_status, new_status;
+       uint32_t i;
+
+       /* As per Virtio spec, the Virtio device status is 8 bits wide */
+       if (msg->payload.u64 != (uint8_t)msg->payload.u64) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                               "Invalid Virtio dev status value (%lx)\n",
+                               msg->payload.u64);
+               return -1;
+       }
+
+       new_status = msg->payload.u64;
+       old_status = dev->virtio_status;
+
+       if (new_status == old_status)
+               return 0;
+
+       RTE_LOG(DEBUG, VHOST_CONFIG,
+                       "New Virtio device status %x (was %x)\n",
+                       new_status, old_status);
+
+       dev->virtio_status = new_status;
+
+       if (new_status == 0 || new_status & VIRTIO_CONFIG_S_FAILED) {
+               /*
+                * The device moved to reset  or failed state,
+                * stop processing the virtqueues
+                */
+               if (dev->flags & VIRTIO_DEV_RUNNING) {
+                       dev->flags &= ~VIRTIO_DEV_RUNNING;
+                       dev->notify_ops->destroy_device(dev->vid);
+               }
+
+               while (dev->nr_vring > 0) {
+                       struct vhost_virtqueue *vq;
+
+                       vq = dev->virtqueue[--dev->nr_vring];
+                       if (!vq)
+                               continue;
+
+                       dev->virtqueue[dev->nr_vring] = NULL;
+                       cleanup_vq(dev, vq, 1);
+                       free_vq(vq);
+               }
+
+               return 0;
+       }
+
+       if ((dev->features & (1ULL << VIRTIO_F_VERSION_1)) &&
+                       (new_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+                       !virtio_is_ready(dev)) {
+               /*
+                * Since Virtio 1.0, we know that no more queues will be
+                * setup after guest sets DRIVER_OK. So let's remove
+                * uinitialized queues.
+                */
+               RTE_LOG(INFO, VHOST_CONFIG,
+                               "Driver is ready, but some queues aren't 
initialized\n");
+
+               /*
+                * Find the first uninitialized queue.
+                *
+                * Note: Ideally the backend implementation should
+                * support sparsed virtqueues, but as long as it is
+                * not the case, let's remove all queues after the
+                * first uninitialized one.
+                */
+               for (i = 0; i < dev->nr_vring; i++) {
+                       if (!vq_is_ready(dev->virtqueue[i]))
+                               break;
+               }
+
+               while (dev->nr_vring >= i) {
+                       struct vhost_virtqueue *vq;
+
+                       vq = dev->virtqueue[--dev->nr_vring];

If i is 0, we could access an array element out of bounds, no?

With this fixed,

Reviewed-by: Jens Freimann <jfreim...@redhat.com>
regards,
Jens

Reply via email to