Both branches of the loop share the same logic: reserve a slot in the
Tx ring and fill the descriptor's address and length.

Move that common code into reserve_and_fill().
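
With the helper in place, the zero-copy branch reduces to the pattern
below (illustrative sketch); the copy branch makes a single call and
passes a pkt pointer for the subsequent rte_memcpy():

	/* reserve a Tx descriptor and fill addr/len for this mbuf */
	desc = reserve_and_fill(txq, mbuf, umem, NULL);
	if (!desc) {
		/* Tx ring full: kick the kernel and retry once */
		kick_tx(txq, cq);
		desc = reserve_and_fill(txq, mbuf, umem, NULL);
		if (!desc)
			goto out;
	}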

Bugzilla ID: 1440
Suggested-by: Maryam Tahhan <mtah...@redhat.com>
Signed-off-by: Ariel Otilibili <ariel.otilib...@6wind.com>
Acked-by: Maryam Tahhan <mtah...@redhat.com>
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 73 ++++++++++++++++-------------
 1 file changed, 40 insertions(+), 33 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 092bcb73aa0a..8d30d307fa0c 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -536,21 +536,49 @@ kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
                }
 }
 
+static inline struct xdp_desc *
+reserve_and_fill(struct pkt_tx_queue *txq, struct rte_mbuf *mbuf,
+                struct xsk_umem_info *umem, void **pkt_ptr)
+{
+       struct xdp_desc *desc = NULL;
+       uint64_t addr, offset;
+       uint32_t idx_tx;
+
+       if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx))
+               goto out;
+
+       desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
+       desc->len = mbuf->pkt_len;
+
+       addr = (uint64_t)mbuf - (uint64_t)umem->buffer
+               - umem->mb_pool->header_size;
+       offset = rte_pktmbuf_mtod(mbuf, uint64_t) - (uint64_t)mbuf
+               + umem->mb_pool->header_size;
+
+       if (pkt_ptr)
+               *pkt_ptr = xsk_umem__get_data(umem->buffer, addr + offset);
+
+       offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+       desc->addr = addr | offset;
+
+out:
+       return desc;
+}
+
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
 static uint16_t
 af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
        struct pkt_tx_queue *txq = queue;
        struct xsk_umem_info *umem = txq->umem;
-       struct rte_mbuf *mbuf;
+       struct rte_mbuf *mbuf, *local_mbuf = NULL;
        unsigned long tx_bytes = 0;
        int i;
-       uint32_t idx_tx;
        uint16_t count = 0;
        struct xdp_desc *desc;
-       uint64_t addr, offset;
        struct xsk_ring_cons *cq = &txq->pair->cq;
        uint32_t free_thresh = cq->size >> 1;
+       void *pkt;
 
        if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
                pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
@@ -559,51 +587,30 @@ af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                mbuf = bufs[i];
 
                if (mbuf->pool == umem->mb_pool) {
-                       if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
+                       if (!(desc = reserve_and_fill(txq, mbuf, umem, NULL))) {
                                kick_tx(txq, cq);
-                               if (!xsk_ring_prod__reserve(&txq->tx, 1,
-                                                           &idx_tx))
+                               desc = reserve_and_fill(txq, mbuf, umem, NULL);
+                               if (!desc)
                                        goto out;
                        }
-                       desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
-                       desc->len = mbuf->pkt_len;
-                       addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
-                                       umem->mb_pool->header_size;
-                       offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
-                                       (uint64_t)mbuf +
-                                       umem->mb_pool->header_size;
-                       offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
-                       desc->addr = addr | offset;
+
                        tx_bytes += desc->len;
                        count++;
                } else {
-                       struct rte_mbuf *local_mbuf =
-                                       rte_pktmbuf_alloc(umem->mb_pool);
-                       void *pkt;
-
-                       if (local_mbuf == NULL)
+                       if (!(local_mbuf = rte_pktmbuf_alloc(umem->mb_pool)))
                                goto out;
 
-                       if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
+                       desc = reserve_and_fill(txq, local_mbuf, umem, &pkt);
+                       if (!desc) {
                                rte_pktmbuf_free(local_mbuf);
                                goto out;
                        }
 
-                       desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
                        desc->len = mbuf->pkt_len;
-
-                       addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
-                                       umem->mb_pool->header_size;
-                       offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
-                                       (uint64_t)local_mbuf +
-                                       umem->mb_pool->header_size;
-                       pkt = xsk_umem__get_data(umem->buffer, addr + offset);
-                       offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
-                       desc->addr = addr | offset;
                        rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
-                                       desc->len);
-                       tx_bytes += desc->len;
+                                  desc->len);
                        rte_pktmbuf_free(mbuf);
+                       tx_bytes += desc->len;
                        count++;
                }
        }
-- 
2.30.2
