Now that we have a single function to map the descriptor buffers, let's prefetch them there, since it is the earliest place we can do it.
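For reference, a minimal sketch of what the hot path in map_one_desc() looks like with this change is given below. The chunking loop, the vhost_iova_to_vva() call and the perm/desc_len parameters are assumed context for illustration only; the rte_prefetch0() line is the only thing this patch adds.

	/* Simplified sketch of map_one_desc() after this patch (context assumed). */
	while (desc_len) {
		uint64_t desc_chunck_len = desc_len;
		uint64_t desc_addr;

		/* Translate the guest IOVA to a host virtual address. */
		desc_addr = vhost_iova_to_vva(dev, vq, desc_iova,
				&desc_chunck_len, perm);
		if (unlikely(!desc_addr))
			return -1;

		/*
		 * Prefetch the buffer as soon as its address is known,
		 * well before the copy routines dereference it.
		 */
		rte_prefetch0((void *)(uintptr_t)desc_addr);

		buf_vec[vec_id].buf_iova = desc_iova;
		buf_vec[vec_id].buf_addr = desc_addr;
		buf_vec[vec_id].buf_len = desc_chunck_len;

		desc_len -= desc_chunck_len;
		desc_iova += desc_chunck_len;
		vec_id++;
	}

Prefetching once at this single point covers all four datapaths (Rx/Tx, split and packed rings), which is why the per-caller prefetches removed below are no longer needed.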
Signed-off-by: Maxime Coquelin <maxime.coque...@redhat.com>
Reviewed-by: Tiwei Bie <tiwei....@intel.com>
---
 lib/librte_vhost/virtio_net.c | 32 ++------------------------------
 1 file changed, 2 insertions(+), 30 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 4564e9bcc9..8f0e784f77 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -286,6 +286,8 @@ map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (unlikely(!desc_addr))
 			return -1;
 
+		rte_prefetch0((void *)(uintptr_t)desc_addr);
+
 		buf_vec[vec_id].buf_iova = desc_iova;
 		buf_vec[vec_id].buf_addr = desc_addr;
 		buf_vec[vec_id].buf_len = desc_chunck_len;
@@ -666,9 +668,6 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	buf_iova = buf_vec[vec_idx].buf_iova;
 	buf_len = buf_vec[vec_idx].buf_len;
 
-	if (nr_vec > 1)
-		rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
-
 	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
 		error = -1;
 		goto out;
@@ -711,10 +710,6 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			buf_iova = buf_vec[vec_idx].buf_iova;
 			buf_len = buf_vec[vec_idx].buf_len;
 
-			/* Prefetch next buffer address. */
-			if (vec_idx + 1 < nr_vec)
-				rte_prefetch0((void *)(uintptr_t)
-						buf_vec[vec_idx + 1].buf_addr);
 			buf_offset = 0;
 			buf_avail = buf_len;
 		}
@@ -812,8 +807,6 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			break;
 		}
 
-		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
 		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
 			dev->vid, vq->last_avail_idx,
 			vq->last_avail_idx + num_buffers);
@@ -861,8 +854,6 @@ virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			break;
 		}
 
-		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
 		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
 			dev->vid, vq->last_avail_idx,
 			vq->last_avail_idx + num_buffers);
@@ -1118,9 +1109,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		goto out;
 	}
 
-	if (likely(nr_vec > 1))
-		rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
-
 	if (virtio_net_with_host_offload(dev)) {
 		if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
 			/*
@@ -1131,7 +1119,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			hdr = &tmp_hdr;
 		} else {
 			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
-			rte_prefetch0(hdr);
 		}
 	}
 
@@ -1161,9 +1148,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
 	}
 
-	rte_prefetch0((void *)(uintptr_t)
-			(buf_addr + buf_offset));
-
 	PRINT_PACKET(dev,
 			(uintptr_t)(buf_addr + buf_offset),
 			(uint32_t)buf_avail, 0);
@@ -1229,14 +1213,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			buf_iova = buf_vec[vec_idx].buf_iova;
 			buf_len = buf_vec[vec_idx].buf_len;
 
-			/*
-			 * Prefecth desc n + 1 buffer while
-			 * desc n buffer is processed.
-			 */
-			if (vec_idx + 1 < nr_vec)
-				rte_prefetch0((void *)(uintptr_t)
-						buf_vec[vec_idx + 1].buf_addr);
-
 			buf_offset = 0;
 			buf_avail = buf_len;
 
@@ -1380,8 +1356,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		if (likely(dev->dequeue_zero_copy == 0))
 			update_shadow_used_ring_split(vq, head_idx, 0);
 
-		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
 		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
 		if (unlikely(pkts[i] == NULL)) {
 			RTE_LOG(ERR, VHOST_DATA,
@@ -1491,8 +1465,6 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			update_shadow_used_ring_packed(vq, buf_id, 0,
 						desc_count);
 
-		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
-
 		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
 		if (unlikely(pkts[i] == NULL)) {
 			RTE_LOG(ERR, VHOST_DATA,
-- 
2.21.0