When VIRTIO_F_ORDER_PLATFORM (bit 36) is not negotiated, the frontend and backend are assumed to be implemented in software, that is, they run on identical CPUs in an SMP configuration. Thus a weak form of memory barriers, such as rte_smp_r/wmb rather than rte_cio_r/wmb, is sufficient for this case (vq->hw->weak_barriers == 1) and yields better performance. For that case, this patch yields even better performance by replacing the two-way barriers with C11 one-way barriers for the avail index in the split ring.
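As background (not part of the patch itself), below is a minimal standalone sketch of the release/acquire pairing the patch introduces. The ring_data/ring_idx/producer/consumer names are hypothetical and only illustrate how a __ATOMIC_RELEASE store on the writer side pairs with an __ATOMIC_ACQUIRE load on the reader side to subsume the standalone rte_smp_wmb/rte_smp_rmb barriers:

#include <stdint.h>

/* Hypothetical single-producer ring, for illustration only. */
static uint32_t ring_data[256];
static uint16_t ring_idx;

/* Writer side: the release store keeps all earlier writes (the entry
 * fill) from being reordered past the index update, which is what the
 * standalone write barrier used to guarantee.
 */
static void
producer(uint16_t slot, uint32_t val)
{
	ring_data[slot & 255] = val;		/* fill the entry first */
	__atomic_store_n(&ring_idx, (uint16_t)(slot + 1),
			 __ATOMIC_RELEASE);	/* then publish the index */
}

/* Reader side: the acquire load pairs with the release store, so the
 * entry read below is guaranteed to observe the producer's writes,
 * replacing the old volatile read followed by rte_smp_rmb().
 */
static int
consumer(uint16_t last, uint32_t *val)
{
	uint16_t idx = __atomic_load_n(&ring_idx, __ATOMIC_ACQUIRE);

	if (idx == last)
		return 0;			/* nothing new yet */
	*val = ring_data[last & 255];
	return 1;
}

On x86 both builtins map to plain accesses (a compiler barrier only); on weakly ordered CPUs such as aarch64 they compile to one-way store-release/load-acquire instructions instead of separate full barriers, which is where the performance gain comes from.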
Signed-off-by: Joyce Kong <joyce.k...@arm.com>
Reviewed-by: Gavin Hu <gavin...@arm.com>
---
 drivers/net/virtio/virtqueue.h | 20 ++++++++++++++++++--
 lib/librte_vhost/virtio_net.c  | 14 +++++---------
 2 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index f1815d3f4..131ea71e7 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -500,8 +500,24 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
 static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
-	virtio_wmb(vq->hw->weak_barriers);
-	vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+	if (vq->hw->weak_barriers) {
+		/* x86 prefers using rte_smp_wmb over __atomic_store_n as
+		 * it reports slightly better performance, which comes from
+		 * the branch saved by the compiler.
+		 * The if and else branches are identical, with the smp and
+		 * cio barriers both defined as compiler barriers on x86.
+		 */
+#ifdef RTE_ARCH_X86_64
+		rte_smp_wmb();
+		vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+#else
+		__atomic_store_n(&vq->vq_split.ring.avail->idx,
+				 vq->vq_avail_idx, __ATOMIC_RELEASE);
+#endif
+	} else {
+		rte_cio_wmb();
+		vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+	}
 }
 
 static inline void
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 7f6e7f2c1..4c5380bc1 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -991,13 +991,11 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint16_t avail_head;
 
-	avail_head = *((volatile uint16_t *)&vq->avail->idx);
-
 	/*
 	 * The ordering between avail index and
 	 * desc reads needs to be enforced.
 	 */
-	rte_smp_rmb();
+	avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
 
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
@@ -1712,16 +1710,14 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 	}
 
-	free_entries = *((volatile uint16_t *)&vq->avail->idx) -
-			vq->last_avail_idx;
-	if (free_entries == 0)
-		return 0;
-
 	/*
 	 * The ordering between avail index and
 	 * desc reads needs to be enforced.
 	 */
-	rte_smp_rmb();
+	free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
+			vq->last_avail_idx;
+	if (free_entries == 0)
+		return 0;
 
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
-- 
2.17.1