From: Didier Pallard <didier.pall...@6wind.com>

Since guest_notifier_mask cannot be used in vhost-user mode due to the
buffering implied by the unix control socket, force use_mask_notifier
off on virtio devices of vhost-user interfaces, and send the correct
callfd to the vhost-user backend at vhost start.
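To illustrate the buffering problem, here is a minimal standalone
sketch (not QEMU code; the two-field mask_msg struct is a hypothetical
stand-in for the real VhostUserMsg). The point is that write() returns
as soon as the request sits in the socket buffer, while the peer only
applies it at some later time:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    /* Hypothetical stand-in for a vhost-user control message. */
    struct mask_msg {
        int request;
        int masked;
    };

    int main(void)
    {
        int sv[2];
        struct mask_msg msg = { .request = 1, .masked = 1 };
        struct mask_msg got;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
            return 1;
        }

        /* "Mask" the interrupt: this only queues bytes in the socket
         * buffer and returns immediately; nothing is masked yet. */
        if (write(sv[0], &msg, sizeof(msg)) != (ssize_t)sizeof(msg)) {
            return 1;
        }
        printf("mask request posted, not yet applied by the backend\n");

        /* The backend picks up and applies the request later. */
        if (read(sv[1], &got, sizeof(got)) != (ssize_t)sizeof(got)) {
            return 1;
        }
        printf("backend finally applied masked=%d\n", got.masked);

        close(sv[0]);
        close(sv[1]);
        return 0;
    }

In the in-kernel vhost case the mask takes effect before the call
returns, so the guest can rely on it; over a unix socket it cannot.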
Using the guest_notifier_mask function in the vhost-user case may
break the interrupt mask paradigm, because the mask/unmask is not
really done when guest_notifier_mask returns; instead, a message is
posted into a unix socket and processed later. Add a boolean flag
'use_mask_notifier' that disables the use of guest_notifier_mask in
virtio-pci.

v3 changes:
In response to Michael S. Tsirkin's comments:
- vhost_net.c: removed dependency on virtio-pci.h
- vhost.c: simplified the check for the vhost-user backend, replaced
  by checking use_mask_notifier; added a comment explaining why the
  vring for vhost-user is initialized in the unmasked state;
- cosmetic fixes.

v2 changes:
- a new boolean field is added to all virtio devices instead of
  defining a property in some virtio-pci devices.

Signed-off-by: Victor Kaplansky <vict...@redhat.com>
---
 include/hw/virtio/virtio.h |  1 +
 hw/net/vhost_net.c         | 16 ++++++++++++++--
 hw/virtio/vhost.c          | 13 +++++++++++++
 hw/virtio/virtio-pci.c     | 14 ++++++++------
 hw/virtio/virtio.c         |  1 +
 5 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 108cdb0f..3acbf999 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -90,6 +90,7 @@ struct VirtIODevice
     VMChangeStateEntry *vmstate;
     char *bus_name;
     uint8_t device_endian;
+    bool use_mask_notifier;
     QLIST_HEAD(, VirtQueue) *vector_queues;
 };
 
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 3940a04b..8c1ccadb 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -306,13 +306,25 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
     }
 
     for (j = 0; j < total_queues; j++) {
+        struct vhost_net *net;
+
         r = vhost_net_set_vnet_endian(dev, ncs[j].peer, true);
         if (r < 0) {
             goto err_endian;
         }
-        vhost_net_set_vq_index(get_vhost_net(ncs[j].peer), j * 2);
-    }
 
+        net = get_vhost_net(ncs[j].peer);
+        vhost_net_set_vq_index(net, j * 2);
+
+        /* Force use_mask_notifier off in the vhost-user case.
+         * Must be done before the set_guest_notifiers call.
+         */
+        if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+            /* Force the virtio device not to use the mask notifier */
+            dev->use_mask_notifier = false;
+        }
+    }
+
     r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
     if (r < 0) {
         error_report("Error binding guest notifier: %d", -r);
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 7dff7554..f8031fae 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -855,8 +855,21 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
     /* Clear and discard previous events if any. */
     event_notifier_test_and_clear(&vq->masked_notifier);
 
+    /* For vhost-user we set the vring in the unmasked state, since by
+     * default it is masked and guest_notifier_mask is not used anymore.
+     */
+    if (vdev->use_mask_notifier == false) {
+        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
+        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
+        if (r) {
+            r = -errno;
+            goto fail_call;
+        }
+    }
+
     return 0;
 
+fail_call:
 fail_kick:
 fail_alloc:
     cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 5494ff4a..64d263ae 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, set up irqfd now.
          * Otherwise, delay until unmasked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
             ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
             if (ret < 0) {
                 kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -822,7 +822,7 @@ undo:
         if (vector >= msix_nr_vectors_allocated(dev)) {
             continue;
         }
-        if (k->guest_notifier_mask) {
+        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
         /* If guest supports masking, clean up irqfd now.
          * Otherwise, it was cleaned when masked in the frontend.
          */
-        if (k->guest_notifier_mask) {
+        if (vdev->use_mask_notifier && k->guest_notifier_mask) {
             kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
         }
         kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, irqfd is already setup, unmask it.
      * Otherwise, set it up now.
      */
-    if (k->guest_notifier_mask) {
+    if (vdev->use_mask_notifier && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, false);
         /* Test after unmasking to avoid losing events. */
         if (k->guest_notifier_pending &&
@@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
     /* If guest supports masking, keep irqfd but mask it.
      * Otherwise, clean it up now.
      */
-    if (k->guest_notifier_mask) {
+    if (vdev->use_mask_notifier && k->guest_notifier_mask) {
         k->guest_notifier_mask(vdev, queue_no, true);
     } else {
         kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
@@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
         event_notifier_cleanup(notifier);
     }
 
-    if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
+    if (!msix_enabled(&proxy->pci_dev) &&
+        vdev->use_mask_notifier &&
+        vdc->guest_notifier_mask) {
         vdc->guest_notifier_mask(vdev, n, !assign);
     }
 
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 90f25451..c0238b39 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -792,6 +792,7 @@ void virtio_reset(void *opaque)
     vdev->queue_sel = 0;
     vdev->status = 0;
    vdev->isr = 0;
+    vdev->use_mask_notifier = true;
     vdev->config_vector = VIRTIO_NO_VECTOR;
     virtio_notify_vector(vdev, vdev->config_vector);
 

-- 
Victor
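A footnote on the callfd mechanism the patch relies on: the guest
notifier wrapped by virtio_queue_get_guest_notifier() is an eventfd,
so handing its fd to the backend as callfd lets the backend signal the
guest directly, with no mask indirection and no control-message
round-trip in between. A minimal sketch of that eventfd semantics,
assuming only plain Linux (again, not QEMU code):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t one = 1;
        uint64_t pending;

        /* The guest notifier is an eventfd like this one. */
        int callfd = eventfd(0, 0);
        if (callfd < 0) {
            return 1;
        }

        /* Backend side: raise the interrupt by writing to callfd. */
        if (write(callfd, &one, sizeof(one)) != (ssize_t)sizeof(one)) {
            return 1;
        }

        /* Consumer side: the event is visible immediately. */
        if (read(callfd, &pending, sizeof(pending)) != (ssize_t)sizeof(pending)) {
            return 1;
        }
        printf("interrupt delivered, count=%llu\n",
               (unsigned long long)pending);

        close(callfd);
        return 0;
    }

Handing this fd straight to the backend at vhost start is what makes
the unmasked-by-default vring state safe once guest_notifier_mask is
out of the picture.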