Previously we set the next pointer to NULL on allocation; we now set it to NULL on free instead, since the next pointer lives on the mbuf's second cache line and touching it at allocation time costs an extra cache-line access in the hot path.
Signed-off-by: Bruce Richardson <bruce.richardson at intel.com> --- lib/librte_mbuf/rte_mbuf.h | 4 +++- lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 18 +++++++++--------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h index a3e3e4f..01d4f6e 100644 --- a/lib/librte_mbuf/rte_mbuf.h +++ b/lib/librte_mbuf/rte_mbuf.h @@ -629,8 +629,10 @@ __rte_pktmbuf_prefree_seg(struct rte_mbuf *m) static inline void __attribute__((always_inline)) rte_pktmbuf_free_seg(struct rte_mbuf *m) { - if (likely(NULL != (m = __rte_pktmbuf_prefree_seg(m)))) + if (likely(NULL != (m = __rte_pktmbuf_prefree_seg(m)))) { + m->next = NULL; __rte_mbuf_raw_free(m); + } } /** diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c index fa3b357..f9cc352 100644 --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c @@ -145,6 +145,7 @@ ixgbe_tx_free_bufs(struct igb_tx_queue *txq) /* free buffers one at a time */ if ((txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) != 0) { for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) { + txep->mbuf->next = NULL; rte_mempool_put(txep->mbuf->pool, txep->mbuf); txep->mbuf = NULL; } @@ -250,6 +251,14 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring; uint16_t n = 0; + /* + * Begin scanning the H/W ring for done descriptors when the + * number of available descriptors drops below tx_free_thresh. For + * each done descriptor, free the associated buffer. 
+ */ + if (txq->nb_tx_free < txq->tx_free_thresh) + ixgbe_tx_free_bufs(txq); + /* Only use descriptors that are available */ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); if (unlikely(nb_pkts == 0)) @@ -313,15 +322,6 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, if (txq->tx_tail >= txq->nb_tx_desc) txq->tx_tail = 0; - /* - * Begin scanning the H/W ring for done descriptors when the - * number of available descriptors drops below tx_free_thresh. For - * each done descriptor, free the associated buffer. - */ - if (txq->nb_tx_free < txq->tx_free_thresh) - ixgbe_tx_free_bufs(txq); - - /* update tail pointer */ rte_wmb(); IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail); -- 1.9.3