The array of buf_vector's is just temporary storage for information about available descriptors. It is used only locally in virtio_dev_merge_rx(), and there is no reason for that array to be shared.
Fix that by allocating local buf_vec inside virtio_dev_merge_rx().

Signed-off-by: Ilya Maximets <i.maximets at samsung.com>
---
 lib/librte_vhost/rte_virtio_net.h |  1 -
 lib/librte_vhost/vhost_rxtx.c     | 45 ++++++++++++++++++++-------------------
 2 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/lib/librte_vhost/rte_virtio_net.h b/lib/librte_vhost/rte_virtio_net.h
index 10dcb90..ae1e4fb 100644
--- a/lib/librte_vhost/rte_virtio_net.h
+++ b/lib/librte_vhost/rte_virtio_net.h
@@ -91,7 +91,6 @@ struct vhost_virtqueue {
     int kickfd;             /**< Currently unused as polling mode is enabled. */
     int enabled;
     uint64_t reserved[16];  /**< Reserve some spaces for future extension. */
-    struct buf_vector buf_vec[BUF_VECTOR_MAX];  /**< for scatter RX. */
 } __rte_cache_aligned;
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 411dd95..9095fb1 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -295,7 +295,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 static inline uint32_t __attribute__((always_inline))
 copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
     uint16_t res_base_idx, uint16_t res_end_idx,
-    struct rte_mbuf *pkt)
+    struct rte_mbuf *pkt, struct buf_vector *buf_vec)
 {
     uint32_t vec_idx = 0;
     uint32_t entry_success = 0;
@@ -325,7 +325,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
      */
     vq = dev->virtqueue[queue_id];
-    vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+    vb_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
     vb_hdr_addr = vb_addr;
     /* Prefetch buffer address. */
@@ -345,19 +345,19 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
     seg_avail = rte_pktmbuf_data_len(pkt);
     vb_offset = vq->vhost_hlen;
-    vb_avail = vq->buf_vec[vec_idx].buf_len - vq->vhost_hlen;
+    vb_avail = buf_vec[vec_idx].buf_len - vq->vhost_hlen;
     entry_len = vq->vhost_hlen;
     if (vb_avail == 0) {
         uint32_t desc_idx =
-            vq->buf_vec[vec_idx].desc_idx;
+            buf_vec[vec_idx].desc_idx;
         if ((vq->desc[desc_idx].flags &
             VRING_DESC_F_NEXT) == 0) {
             /* Update used ring with desc information */
             vq->used->ring[cur_idx & (vq->size - 1)].id
-                = vq->buf_vec[vec_idx].desc_idx;
+                = buf_vec[vec_idx].desc_idx;
             vq->used->ring[cur_idx & (vq->size - 1)].len
                 = entry_len;
@@ -367,12 +367,12 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
         }
         vec_idx++;
-        vb_addr = gpa_to_vva(dev, vq->buf_vec[vec_idx].buf_addr);
+        vb_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
         /* Prefetch buffer address. */
         rte_prefetch0((void *)(uintptr_t)vb_addr);
         vb_offset = 0;
-        vb_avail = vq->buf_vec[vec_idx].buf_len;
+        vb_avail = buf_vec[vec_idx].buf_len;
     }
     cpy_len = RTE_MIN(vb_avail, seg_avail);
@@ -399,11 +399,11 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
              * entry reach to its end.
              * But the segment doesn't complete.
              */
-            if ((vq->desc[vq->buf_vec[vec_idx].desc_idx].flags &
+            if ((vq->desc[buf_vec[vec_idx].desc_idx].flags &
                 VRING_DESC_F_NEXT) == 0) {
                 /* Update used ring with desc information */
                 vq->used->ring[cur_idx & (vq->size - 1)].id
-                    = vq->buf_vec[vec_idx].desc_idx;
+                    = buf_vec[vec_idx].desc_idx;
                 vq->used->ring[cur_idx & (vq->size - 1)].len
                     = entry_len;
                 entry_len = 0;
@@ -413,9 +413,9 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
             vec_idx++;
             vb_addr = gpa_to_vva(dev,
-                vq->buf_vec[vec_idx].buf_addr);
+                buf_vec[vec_idx].buf_addr);
             vb_offset = 0;
-            vb_avail = vq->buf_vec[vec_idx].buf_len;
+            vb_avail = buf_vec[vec_idx].buf_len;
             cpy_len = RTE_MIN(vb_avail, seg_avail);
         } else {
             /*
@@ -434,7 +434,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
              * from buf_vec.
              */
             uint32_t desc_idx =
-                vq->buf_vec[vec_idx].desc_idx;
+                buf_vec[vec_idx].desc_idx;
             if ((vq->desc[desc_idx].flags &
                 VRING_DESC_F_NEXT) == 0) {
@@ -456,9 +456,9 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
             /* Get next buffer from buf_vec. */
             vec_idx++;
             vb_addr = gpa_to_vva(dev,
-                vq->buf_vec[vec_idx].buf_addr);
+                buf_vec[vec_idx].buf_addr);
             vb_avail =
-                vq->buf_vec[vec_idx].buf_len;
+                buf_vec[vec_idx].buf_len;
             vb_offset = 0;
         }
@@ -471,7 +471,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
          */
         /* Update used ring with desc information */
         vq->used->ring[cur_idx & (vq->size - 1)].id
-            = vq->buf_vec[vec_idx].desc_idx;
+            = buf_vec[vec_idx].desc_idx;
         vq->used->ring[cur_idx & (vq->size - 1)].len = entry_len;
         entry_success++;
@@ -485,7 +485,7 @@ copy_from_mbuf_to_vring(struct virtio_net *dev, uint32_t queue_id,
 static inline void __attribute__((always_inline))
 update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
-    uint32_t *secure_len, uint32_t *vec_idx)
+    uint32_t *secure_len, uint32_t *vec_idx, struct buf_vector *buf_vec)
 {
     uint16_t wrapped_idx = id & (vq->size - 1);
     uint32_t idx = vq->avail->ring[wrapped_idx];
@@ -496,9 +496,9 @@ update_secure_len(struct vhost_virtqueue *vq, uint32_t id,
     do {
         next_desc = 0;
         len += vq->desc[idx].len;
-        vq->buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
-        vq->buf_vec[vec_id].buf_len = vq->desc[idx].len;
-        vq->buf_vec[vec_id].desc_idx = idx;
+        buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
+        buf_vec[vec_id].buf_len = vq->desc[idx].len;
+        buf_vec[vec_id].desc_idx = idx;
         vec_id++;
         if (vq->desc[idx].flags & VRING_DESC_F_NEXT) {
@@ -523,6 +523,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
     uint16_t avail_idx;
     uint16_t res_base_idx, res_cur_idx;
     uint8_t success = 0;
+    struct buf_vector buf_vec[BUF_VECTOR_MAX];
     LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_merge_rx()\n",
         dev->device_fh);
@@ -561,8 +562,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
                 if (unlikely(res_cur_idx == avail_idx))
                     goto merge_rx_exit;
-                update_secure_len(vq, res_cur_idx,
-                    &secure_len, &vec_idx);
+                update_secure_len(vq, res_cur_idx, &secure_len,
+                    &vec_idx, buf_vec);
                 res_cur_idx++;
             } while (pkt_len > secure_len);
@@ -573,7 +574,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
         } while (success == 0);
         entry_success = copy_from_mbuf_to_vring(dev, queue_id,
-            res_base_idx, res_cur_idx, pkts[pkt_idx]);
+            res_base_idx, res_cur_idx, pkts[pkt_idx], buf_vec);
         rte_smp_wmb();
-- 
2.5.0
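
For context, the essence of the change can be shown in isolation: the temporary descriptor vector becomes a stack-local array owned by the receive routine and is passed explicitly to its helpers, so no per-virtqueue state is shared. The following is a minimal standalone sketch, not the DPDK code itself; the names desc_ref, fill_desc_vector() and MAX_VEC are illustrative placeholders.

#include <stdint.h>

#define MAX_VEC 256                     /* placeholder for BUF_VECTOR_MAX */

/* Illustrative stand-in for struct buf_vector. */
struct desc_ref {
        uint64_t buf_addr;
        uint32_t buf_len;
        uint32_t desc_idx;
};

/*
 * The helper takes the caller's vector as an explicit parameter instead of
 * reaching into a shared per-virtqueue member.
 */
static uint32_t
fill_desc_vector(struct desc_ref *vec, uint32_t vec_idx,
                 uint32_t desc_idx, uint64_t addr, uint32_t len)
{
        vec[vec_idx].buf_addr = addr;
        vec[vec_idx].buf_len = len;
        vec[vec_idx].desc_idx = desc_idx;
        return vec_idx + 1;
}

/* The receive path owns the temporary vector on its own stack. */
uint32_t
merge_rx_sketch(uint64_t addr, uint32_t len)
{
        struct desc_ref vec[MAX_VEC];   /* local, like buf_vec in the patch */
        uint32_t n = 0;

        n = fill_desc_vector(vec, n, 0, addr, len);
        return n;                       /* number of vector slots filled */
}

Keeping the array local also makes the data flow explicit: whoever fills the vector hands it directly to whoever consumes it.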