In preparation for subsequent patches, move mbuf allocation out of
virtio_dev_pktmbuf_alloc and into its callers, and rename the function
to virtio_dev_pktmbuf_prep. The function now receives an already
allocated mbuf pointer and only prepares it to hold data_len bytes.

Signed-off-by: Balazs Nemeth <bnem...@redhat.com>
---
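Not part of the patch itself: a minimal sketch of the calling
convention the rename introduces, assuming a caller with dev,
mbuf_pool and buf_len in scope. The caller now owns allocation and
must free the mbuf when prep fails:

	struct rte_mbuf *pkt = rte_pktmbuf_alloc(mbuf_pool);

	if (unlikely(pkt == NULL))
		return -1;

	/* prep only validates the mbuf: it succeeds if the tailroom
	 * already fits buf_len, if an external buffer can be attached,
	 * or if chained buffers are allowed.
	 */
	if (unlikely(virtio_dev_pktmbuf_prep(dev, pkt, buf_len))) {
		rte_pktmbuf_free(pkt);
		return -1;
	}
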
 lib/librte_vhost/virtio_net.c | 54 ++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 14 deletions(-)

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 7f621fb6d..666e7fdb8 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -2166,6 +2166,23 @@ virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
        return NULL;
 }
 
+static __rte_always_inline int
+virtio_dev_pktmbuf_prep(struct virtio_net *dev, struct rte_mbuf *pkt,
+                        uint32_t data_len)
+{
+       if (rte_pktmbuf_tailroom(pkt) >= data_len)
+               return 0;
+
+       /* attach an external buffer if supported */
+       if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+               return 0;
+
+       /* check if chained buffers are allowed */
+       if (!dev->linearbuf)
+               return 0;
+       return 1;
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -2259,7 +2276,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 static __rte_always_inline int
 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
                                 struct vhost_virtqueue *vq,
-                                struct rte_mempool *mbuf_pool,
                                 struct rte_mbuf **pkts,
                                 uint16_t avail_idx,
                                 uintptr_t *desc_addrs,
@@ -2304,8 +2320,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
        }
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-               pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
-               if (!pkts[i])
+               if (virtio_dev_pktmbuf_prep(dev, pkts[i], lens[i]))
                        goto free_buf;
        }
 
@@ -2326,16 +2341,12 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
        return 0;
 
 free_buf:
-       for (i = 0; i < PACKED_BATCH_SIZE; i++)
-               rte_pktmbuf_free(pkts[i]);
-
        return -1;
 }
 
 static __rte_always_inline int
 virtio_dev_tx_batch_packed(struct virtio_net *dev,
                           struct vhost_virtqueue *vq,
-                          struct rte_mempool *mbuf_pool,
                           struct rte_mbuf **pkts)
 {
        uint16_t avail_idx = vq->last_avail_idx;
@@ -2345,8 +2356,8 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
        uint16_t ids[PACKED_BATCH_SIZE];
        uint16_t i;
 
-       if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-                                            avail_idx, desc_addrs, ids))
+       if (vhost_reserve_avail_batch_packed(dev, vq, pkts, avail_idx,
+                                            desc_addrs, ids))
                return -1;
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -2396,8 +2407,8 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
                                         VHOST_ACCESS_RO) < 0))
                return -1;
 
-       *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-       if (unlikely(*pkts == NULL)) {
+
+       if (unlikely(virtio_dev_pktmbuf_prep(dev, *pkts, buf_len))) {
                if (!allocerr_warned) {
                        VHOST_LOG_DATA(ERR,
                                "Failed mbuf alloc of size %d from %s on %s.\n",
@@ -2416,7 +2427,6 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
                                dev->ifname);
                        allocerr_warned = true;
                }
-               rte_pktmbuf_free(*pkts);
                return -1;
        }
 
@@ -2459,22 +2469,38 @@ virtio_dev_tx_packed(struct virtio_net *dev,
 {
        uint32_t pkt_idx = 0;
        uint32_t remained = count;
+       uint16_t i;
 
        do {
                rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
 
                if (remained >= PACKED_BATCH_SIZE) {
-                       if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
+                       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+                               pkts[pkt_idx + i] =
+                                       rte_pktmbuf_alloc(mbuf_pool);
+                       }
+
+                       if (!virtio_dev_tx_batch_packed(dev, vq,
                                                        &pkts[pkt_idx])) {
                                pkt_idx += PACKED_BATCH_SIZE;
                                remained -= PACKED_BATCH_SIZE;
+
                                continue;
+                       } else {
+                               vhost_for_each_try_unroll(i, 0,
+                                       PACKED_BATCH_SIZE) {
+                                       rte_pktmbuf_free(pkts[pkt_idx + i]);
+                               }
                        }
                }
 
+               pkts[pkt_idx] = rte_pktmbuf_alloc(mbuf_pool);
+
                if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
-                                               &pkts[pkt_idx]))
+                                               &pkts[pkt_idx])) {
+                       rte_pktmbuf_free(pkts[pkt_idx]);
                        break;
+               }
                pkt_idx++;
                remained--;
 
-- 
2.30.2
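
The batch path in virtio_dev_tx_packed above calls rte_pktmbuf_alloc()
once per slot without checking for NULL before handing the array to
virtio_dev_tx_batch_packed(). A sketch of one way to guard this, using
rte_pktmbuf_alloc_bulk() and assuming the mbuf_pool, pkts, pkt_idx and
remained variables from that function; this is an illustration, not
what the patch applies:

	if (remained >= PACKED_BATCH_SIZE) {
		/* rte_pktmbuf_alloc_bulk() returns 0 only when all
		 * PACKED_BATCH_SIZE mbufs were allocated; on failure
		 * it allocates nothing, so nothing leaks.
		 */
		if (rte_pktmbuf_alloc_bulk(mbuf_pool, &pkts[pkt_idx],
					   PACKED_BATCH_SIZE))
			break;
		/* ... continue with virtio_dev_tx_batch_packed() as
		 * in the hunk above ...
		 */
	}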
