Guest notification offloading, which was introduced in v23.07, aims at offloading syscalls out of the datapath.
This patch optimizes the offloading by not offloading the guest notification for a given virtqueue if one is already being offloaded by the application. With a single VDUSE device, we can already see few notifications being suppressed when doing throughput testing with Iperf3. We can expect to see much more being suppressed when the offloading thread is under pressure. Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com> --- lib/vhost/vhost.c | 4 ++++ lib/vhost/vhost.h | 26 ++++++++++++++++++++------ 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c index eb6309b681..7794f29c18 100644 --- a/lib/vhost/vhost.c +++ b/lib/vhost/vhost.c @@ -48,6 +48,8 @@ static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = { stats.guest_notifications_offloaded)}, {"guest_notifications_error", offsetof(struct vhost_virtqueue, stats.guest_notifications_error)}, + {"guest_notifications_suppressed", offsetof(struct vhost_virtqueue, + stats.guest_notifications_suppressed)}, {"iotlb_hits", offsetof(struct vhost_virtqueue, stats.iotlb_hits)}, {"iotlb_misses", offsetof(struct vhost_virtqueue, stats.iotlb_misses)}, {"inflight_submitted", offsetof(struct vhost_virtqueue, stats.inflight_submitted)}, @@ -1516,6 +1518,8 @@ rte_vhost_notify_guest(int vid, uint16_t queue_id) rte_rwlock_read_lock(&vq->access_lock); + __atomic_store_n(&vq->irq_pending, false, __ATOMIC_RELEASE); + if (dev->backend_ops->inject_irq(dev, vq)) { if (dev->flags & VIRTIO_DEV_STATS_ENABLED) __atomic_fetch_add(&vq->stats.guest_notifications_error, diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h index 1febfcc150..75e3859e87 100644 --- a/lib/vhost/vhost.h +++ b/lib/vhost/vhost.h @@ -156,6 +156,7 @@ struct virtqueue_stats { uint64_t iotlb_misses; uint64_t inflight_submitted; uint64_t inflight_completed; + uint64_t guest_notifications_suppressed; /* Counters below are atomic, and should be incremented as such. 
*/ uint64_t guest_notifications; uint64_t guest_notifications_offloaded; @@ -365,6 +366,7 @@ struct vhost_virtqueue { struct virtqueue_stats stats; struct vhost_reconnect_vring *log; + bool irq_pending; } __rte_cache_aligned; /* Virtio device status as per Virtio specification */ @@ -930,12 +932,24 @@ vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) static __rte_always_inline void vhost_vring_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq) { - if (dev->notify_ops->guest_notify && - dev->notify_ops->guest_notify(dev->vid, vq->index)) { - if (dev->flags & VIRTIO_DEV_STATS_ENABLED) - __atomic_fetch_add(&vq->stats.guest_notifications_offloaded, - 1, __ATOMIC_RELAXED); - return; + bool expected = false; + + if (dev->notify_ops->guest_notify) { + if (__atomic_compare_exchange_n(&vq->irq_pending, &expected, true, 0, + __ATOMIC_RELEASE, __ATOMIC_RELAXED)) { + if (dev->notify_ops->guest_notify(dev->vid, vq->index)) { + if (dev->flags & VIRTIO_DEV_STATS_ENABLED) + __atomic_fetch_add(&vq->stats.guest_notifications_offloaded, + 1, __ATOMIC_RELAXED); + return; + } + + /* Offloading failed, fallback to direct IRQ injection */ + __atomic_store_n(&vq->irq_pending, 0, __ATOMIC_RELEASE); + } else { + vq->stats.guest_notifications_suppressed++; + return; + } } if (dev->backend_ops->inject_irq(dev, vq)) { -- 2.41.0