On Thu, Aug 31, 2023 at 4:44 PM Maxime Coquelin <maxime.coque...@redhat.com> wrote:
>
> Guest notifications offloading, which was introduced
> in v23.07, aims at offloading syscalls out of the datapath.
>
> This patch optimizes the offloading by not offloading the
> guest notification for a given virtqueue if one is already
> being offloaded by the application.
>
> With a single VDUSE device, we can already see a few
> notifications being suppressed when doing throughput
> testing with Iperf3. We can expect many more to be
> suppressed when the offloading thread is under pressure.
>
> Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
> ---
>  lib/vhost/vhost.c |  4 ++++
>  lib/vhost/vhost.h | 27 +++++++++++++++++++++------
>  2 files changed, 25 insertions(+), 6 deletions(-)
>
> diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
> index eb6309b681..7794f29c18 100644
> --- a/lib/vhost/vhost.c
> +++ b/lib/vhost/vhost.c
> @@ -48,6 +48,8 @@ static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = {
>                 stats.guest_notifications_offloaded)},
>         {"guest_notifications_error", offsetof(struct vhost_virtqueue,
>                 stats.guest_notifications_error)},
> +       {"guest_notifications_suppressed", offsetof(struct vhost_virtqueue,
> +               stats.guest_notifications_suppressed)},
>         {"iotlb_hits", offsetof(struct vhost_virtqueue, stats.iotlb_hits)},
>         {"iotlb_misses", offsetof(struct vhost_virtqueue, stats.iotlb_misses)},
>         {"inflight_submitted", offsetof(struct vhost_virtqueue, stats.inflight_submitted)},
> @@ -1516,6 +1518,8 @@ rte_vhost_notify_guest(int vid, uint16_t queue_id)
>
>         rte_rwlock_read_lock(&vq->access_lock);
>
> +       __atomic_store_n(&vq->irq_pending, false, __ATOMIC_RELEASE);
> +
>         if (dev->backend_ops->inject_irq(dev, vq)) {
>                 if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
>                         __atomic_fetch_add(&vq->stats.guest_notifications_error,
> diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
> index 9723429b1c..3e78379e48 100644
> --- a/lib/vhost/vhost.h
> +++ b/lib/vhost/vhost.h
> @@ -156,6 +156,7 @@ struct virtqueue_stats {
>         uint64_t iotlb_misses;
>         uint64_t inflight_submitted;
>         uint64_t inflight_completed;
> +       uint64_t guest_notifications_suppressed;
>         /* Counters below are atomic, and should be incremented as such. */
>         uint64_t guest_notifications;
>         uint64_t guest_notifications_offloaded;
> @@ -346,6 +347,8 @@ struct vhost_virtqueue {
>
>         struct vhost_vring_addr ring_addrs;
>         struct virtqueue_stats stats;
> +
> +       bool irq_pending;
>  } __rte_cache_aligned;
>
>  /* Virtio device status as per Virtio specification */
> @@ -908,12 +911,24 @@ vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
>  static __rte_always_inline void
>  vhost_vring_inject_irq(struct virtio_net *dev, struct vhost_virtqueue *vq)
>  {
> -       if (dev->notify_ops->guest_notify &&
> -                       dev->notify_ops->guest_notify(dev->vid, vq->index)) {
> -               if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
> -                       __atomic_fetch_add(&vq->stats.guest_notifications_offloaded,
> -                               1, __ATOMIC_RELAXED);
> -               return;
> +       bool expected = false;
> +
> +       if (dev->notify_ops->guest_notify) {
> +               if (__atomic_compare_exchange_n(&vq->irq_pending, &expected, true, 0,
> +                               __ATOMIC_RELEASE, __ATOMIC_RELAXED)) {
> +                       if (dev->notify_ops->guest_notify(dev->vid, vq->index)) {
> +                               if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
> +                                       __atomic_fetch_add(&vq->stats.guest_notifications_offloaded,
> +                                               1, __ATOMIC_RELAXED);
> +                               return;
> +                       }
> +
> +                       /* Offloading failed, fallback to direct IRQ injection */
> +                       __atomic_store_n(&vq->irq_pending, 0, __ATOMIC_RELEASE);

Nit: s/0/false/

> +               } else {
> +                       vq->stats.guest_notifications_suppressed++;
> +                       return;
> +               }
>         }
>
>         if (dev->backend_ops->inject_irq(dev, vq)) {
> --
> 2.41.0
>


-- 
David Marchand
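
For reference, a minimal standalone sketch of the suppression scheme the patch
implements: at most one offloaded notification may be pending per virtqueue,
and extra requests only bump a counter until the offloading thread clears the
flag and injects the interrupt. The demo_* names below are hypothetical and
simplified (no stats flag, no fallback to direct injection on offload failure),
not the actual vhost structures.

/* Sketch only: compile with gcc, single-threaded demo of the flag protocol. */
#include <stdbool.h>
#include <stdio.h>

struct demo_vq {
	bool irq_pending;         /* accessed with __atomic builtins */
	unsigned long suppressed; /* bumped only by the datapath thread */
};

/* Datapath side: request an offloaded guest notification. */
static void
demo_notify(struct demo_vq *vq)
{
	bool expected = false;

	if (__atomic_compare_exchange_n(&vq->irq_pending, &expected, true, 0,
			__ATOMIC_RELEASE, __ATOMIC_RELAXED))
		printf("notification offloaded\n");
	else
		vq->suppressed++; /* one already in flight, suppress this one */
}

/* Offloading-thread side: clear the flag, then notify the guest. */
static void
demo_notify_guest(struct demo_vq *vq)
{
	__atomic_store_n(&vq->irq_pending, false, __ATOMIC_RELEASE);
	printf("guest interrupt injected\n");
}

int
main(void)
{
	struct demo_vq vq = { false, 0 };

	demo_notify(&vq);       /* offloaded */
	demo_notify(&vq);       /* suppressed, flag still set */
	demo_notify_guest(&vq); /* completes the pending notification */
	demo_notify(&vq);       /* offloaded again */
	printf("suppressed: %lu\n", vq.suppressed);
	return 0;
}

The key point the sketch illustrates is that the flag is cleared before the
interrupt is injected, so a notification requested after that point is never
lost, only coalesced with the one being delivered.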