Move allocation further out of the loop and perform all allocations in a single loop. Do the same for freeing packets. This prepares for the use of bulk versions of these functions.
Signed-off-by: Balazs Nemeth <bnem...@redhat.com> --- lib/librte_vhost/virtio_net.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 666e7fdb8..496f750e3 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -2471,14 +2471,13 @@ virtio_dev_tx_packed(struct virtio_net *dev, uint32_t remained = count; uint16_t i; + for (i = 0; i < count; ++i) + pkts[i] = rte_pktmbuf_alloc(mbuf_pool); + do { rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]); if (remained >= PACKED_BATCH_SIZE) { - vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) { - pkts[pkt_idx + i] = - rte_pktmbuf_alloc(mbuf_pool); - } if (!virtio_dev_tx_batch_packed(dev, vq, &pkts[pkt_idx])) { @@ -2486,19 +2485,11 @@ virtio_dev_tx_packed(struct virtio_net *dev, remained -= PACKED_BATCH_SIZE; continue; - } else { - vhost_for_each_try_unroll(i, 0, - PACKED_BATCH_SIZE) { - rte_pktmbuf_free(pkts[pkt_idx + i]); - } } } - pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool); - if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool, &pkts[pkt_idx])) { - rte_pktmbuf_free(pkts[pkt_idx]); break; } pkt_idx++; @@ -2506,6 +2497,9 @@ virtio_dev_tx_packed(struct virtio_net *dev, } while (remained); + for (i = pkt_idx; i < count; ++i) + rte_pktmbuf_free(pkts[i]); + if (vq->shadow_used_idx) { do_data_copy_dequeue(vq); -- 2.30.2