It allows the Shadow Control VirtQueue to wait for the device to use the commands that restore the net device state after a live migration.
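
As a rough illustration of the intended use (not part of this patch), a hypothetical caller such as the vdpa net CVQ restore path could pair vhost_svq_inject() with vhost_svq_poll() along these lines; apart from those two SVQ functions, every name below is made up and the usual QEMU headers are assumed:

    /*
     * Hypothetical sketch only: vhost_svq_inject()/vhost_svq_poll() come
     * from this series, everything else is illustrative.
     */
    static int example_cvq_send_and_wait(VhostShadowVirtqueue *svq,
                                         const struct iovec *iov,
                                         size_t out_num, size_t in_num)
    {
        /* Expose the command (out) and status (in) buffers to the device */
        int r = vhost_svq_inject(svq, iov, out_num, in_num);
        if (unlikely(r != 0)) {
            return r;
        }

        /* Block until the device marks the injected descriptors as used */
        return vhost_svq_poll(svq) > 0 ? 0 : -EINVAL;
    }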
Signed-off-by: Eugenio Pérez <epere...@redhat.com>
---
 hw/virtio/vhost-shadow-virtqueue.h |  1 +
 hw/virtio/vhost-shadow-virtqueue.c | 57 +++++++++++++++++++++++++++---
 2 files changed, 54 insertions(+), 4 deletions(-)

diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h
index 3c55fe2641..20ca59e9a7 100644
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -124,6 +124,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp);
 
 int vhost_svq_inject(VhostShadowVirtqueue *svq, const struct iovec *iov,
                      size_t out_num, size_t in_num);
+ssize_t vhost_svq_poll(VhostShadowVirtqueue *svq);
 void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
 void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
 void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index c535c99905..831ffb71e5 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -10,6 +10,8 @@
 #include "qemu/osdep.h"
 #include "hw/virtio/vhost-shadow-virtqueue.h"
 
+#include <glib/gpoll.h>
+
 #include "qemu/error-report.h"
 #include "qapi/error.h"
 #include "qemu/main-loop.h"
@@ -583,10 +585,11 @@ static bool vhost_svq_unmap_elem(VhostShadowVirtqueue *svq, SVQElement *svq_elem
     return true;
 }
 
-static void vhost_svq_flush(VhostShadowVirtqueue *svq,
-                            bool check_for_avail_queue)
+static size_t vhost_svq_flush(VhostShadowVirtqueue *svq,
+                              bool check_for_avail_queue)
 {
     VirtQueue *vq = svq->vq;
+    size_t ret = 0;
 
     /* Forward as many used buffers as possible. */
     do {
@@ -604,7 +607,7 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
             if (svq->copy_descs) {
                 bool ok = vhost_svq_unmap_elem(svq, svq_elem, len, true);
                 if (unlikely(!ok)) {
-                    return;
+                    return ret;
                 }
             }
 
@@ -621,10 +624,12 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
                               i, svq->vring.num);
                 virtqueue_fill(vq, elem, len, i);
                 virtqueue_flush(vq, i);
-                return;
+                return ret + 1;
             }
 
             virtqueue_fill(vq, elem, len, i++);
+
+            ret++;
         }
 
         if (i > 0) {
@@ -640,6 +645,50 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
             vhost_handle_guest_kick(svq);
         }
     } while (!vhost_svq_enable_notification(svq));
+
+    return ret;
+}
+
+/**
+ * Poll the SVQ for device used buffers.
+ *
+ * This function races with the main event loop SVQ polling, so extra
+ * synchronization is needed.
+ *
+ * Return the number of descriptors read from the device.
+ */
+ssize_t vhost_svq_poll(VhostShadowVirtqueue *svq)
+{
+    int fd = event_notifier_get_fd(&svq->hdev_call);
+    GPollFD poll_fd = {
+        .fd = fd,
+        .events = G_IO_IN,
+    };
+    assert(fd >= 0);
+    int r = g_poll(&poll_fd, 1, -1);
+
+    if (unlikely(r < 0)) {
+        error_report("Cannot poll device call fd "G_POLLFD_FORMAT": (%d) %s",
+                     poll_fd.fd, errno, g_strerror(errno));
+        return -errno;
+    }
+
+    if (r == 0) {
+        return 0;
+    }
+
+    if (unlikely(poll_fd.revents & ~(G_IO_IN))) {
+        error_report(
+            "Error polling device call fd "G_POLLFD_FORMAT": revents=%d",
+            poll_fd.fd, poll_fd.revents);
+        return -1;
+    }
+
+    /*
+     * Max return value of vhost_svq_flush is (uint16_t)-1, so it's safe to
+     * convert to ssize_t.
+     */
+    return vhost_svq_flush(svq, false);
 }
 
 /**
-- 
2.27.0