This patch moves the prefetch of the avail ring entry to after the available index is read, so that a descriptor that is not yet available does not get prefetched.
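
Both the Rx and Tx split paths end up with the same ordering; roughly (a sketch paraphrasing the hunks below, not the verbatim code, and the Tx path reads into free_entries instead of avail_head):

	avail_head = *((volatile uint16_t *)&vq->avail->idx);	/* read avail index first */
	rte_smp_rmb();	/* order the index read against the ring/descriptor reads */
	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);	/* entry now known to be published */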
Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
 lib/librte_vhost/virtio_net.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 68b72e7a5..0a860ca72 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -794,7 +794,6 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint16_t avail_head;
 
-	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 	avail_head = *((volatile uint16_t *)&vq->avail->idx);
 
 	/*
@@ -803,6 +802,8 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	 */
 	rte_smp_rmb();
 
+	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
 	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
 		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
 		uint16_t nr_vec = 0;
@@ -1378,8 +1379,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		}
 	}
 
-	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
-
 	free_entries = *((volatile uint16_t *)&vq->avail->idx) -
 			vq->last_avail_idx;
 	if (free_entries == 0)
@@ -1391,6 +1390,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	 */
 	rte_smp_rmb();
 
+	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
 	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 
 	count = RTE_MIN(count, MAX_PKT_BURST);
-- 
2.17.2