Adjust the iavf driver to also use the common mbuf freeing functions on
Tx queue release/cleanup. The implementation is complicated slightly by
the need to integrate the additional "use_ctx" parameter for the iavf
code, but the changes in the other drivers are minimal - just a constant
"false" parameter.
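
For illustration, below is a minimal, self-contained sketch of the
freeing logic after this change. It uses hypothetical stand-in types
and names (struct sw_entry, release_all, free_seg) rather than the
real driver code: when use_ctx is set, each packet consumes a context
descriptor alongside its data descriptor, so descriptor-ring indices
are shifted right by one to index the (half-sized) software ring.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for the driver structures. */
    struct mbuf { uint16_t id; };
    struct sw_entry { struct mbuf *mbuf; };

    static void free_seg(struct mbuf *m) { free(m); }

    /* Sketch of the common release loop: shift descriptor indices
     * right by use_ctx so they index the software ring, then free
     * from the first in-flight entry up to the current tail,
     * wrapping once around the end of the ring if needed. */
    static void
    release_all(struct sw_entry *swr, uint16_t nb_desc,
                uint16_t tx_tail, uint16_t tx_next_dd,
                uint16_t tx_rs_thresh, bool use_ctx)
    {
            const uint16_t start =
                    (uint16_t)(tx_next_dd - tx_rs_thresh + 1) >> use_ctx;
            const uint16_t ring_sz = nb_desc >> use_ctx;
            const uint16_t end = tx_tail >> use_ctx;
            uint16_t i = start;

            if (end < i) {  /* wrapped: free up to end of ring first */
                    for (; i < ring_sz; i++) {
                            free_seg(swr[i].mbuf);
                            swr[i].mbuf = NULL;
                    }
                    i = 0;
            }
            for (; i < end; i++) {  /* then free up to the tail */
                    free_seg(swr[i].mbuf);
                    swr[i].mbuf = NULL;
            }
    }

    int main(void)
    {
            /* entries 6,7,0,1 in flight: next_dd=7, rs_thresh=2, tail=2 */
            struct sw_entry ring[8] = { {NULL} };

            ring[6].mbuf = malloc(sizeof(*ring[6].mbuf));
            ring[7].mbuf = malloc(sizeof(*ring[7].mbuf));
            ring[0].mbuf = malloc(sizeof(*ring[0].mbuf));
            ring[1].mbuf = malloc(sizeof(*ring[1].mbuf));
            release_all(ring, 8, 2, 7, 2, false);
            return 0;
    }

Since ci_txq_release_all_mbufs() is a static inline function and the
non-iavf drivers pass a constant "false", the compiler should be able
to fold the shifts away entirely for those callers.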

Signed-off-by: Bruce Richardson <bruce.richard...@intel.com>
---
 drivers/net/intel/common/tx.h                 | 27 +++++++-------
 drivers/net/intel/i40e/i40e_rxtx.c            |  6 +--
 drivers/net/intel/iavf/iavf_rxtx.c            | 37 ++-----------------
 drivers/net/intel/iavf/iavf_rxtx_vec_avx512.c | 24 +-----------
 drivers/net/intel/iavf/iavf_rxtx_vec_common.h | 18 ---------
 drivers/net/intel/iavf/iavf_rxtx_vec_sse.c    |  9 +----
 drivers/net/intel/ice/ice_dcf_ethdev.c        |  4 +-
 drivers/net/intel/ice/ice_rxtx.c              |  6 +--
 drivers/net/intel/ixgbe/ixgbe_rxtx.c          |  6 +--
 9 files changed, 31 insertions(+), 106 deletions(-)

diff --git a/drivers/net/intel/common/tx.h b/drivers/net/intel/common/tx.h
index 1bf2a61b2f..310b51adcf 100644
--- a/drivers/net/intel/common/tx.h
+++ b/drivers/net/intel/common/tx.h
@@ -271,23 +271,23 @@ ci_tx_free_bufs_vec(struct ci_tx_queue *txq, ci_desc_done_fn desc_done, bool ctx
        return txq->tx_rs_thresh;
 }
 
-#define IETH_FREE_BUFS_LOOP(txq, swr, start) do { \
+#define IETH_FREE_BUFS_LOOP(swr, nb_desc, start, end) do { \
                uint16_t i = start; \
-               if (txq->tx_tail < i) { \
-                       for (; i < txq->nb_tx_desc; i++) { \
+               if (end < i) { \
+                       for (; i < nb_desc; i++) { \
                                rte_pktmbuf_free_seg(swr[i].mbuf); \
                                swr[i].mbuf = NULL; \
                        } \
                        i = 0; \
                } \
-               for (; i < txq->tx_tail; i++) { \
+               for (; i < end; i++) { \
                        rte_pktmbuf_free_seg(swr[i].mbuf); \
                        swr[i].mbuf = NULL; \
                } \
 } while (0)
 
 static inline void
-ci_txq_release_all_mbufs(struct ci_tx_queue *txq)
+ci_txq_release_all_mbufs(struct ci_tx_queue *txq, bool use_ctx)
 {
        if (unlikely(!txq || !txq->sw_ring))
                return;
@@ -306,15 +306,14 @@ ci_txq_release_all_mbufs(struct ci_tx_queue *txq)
         *  vPMD tx will not set sw_ring's mbuf to NULL after free,
         *  so need to free remains more carefully.
         */
-       const uint16_t start = txq->tx_next_dd - txq->tx_rs_thresh + 1;
-
-       if (txq->vector_sw_ring) {
-               struct ci_tx_entry_vec *swr = txq->sw_ring_vec;
-               IETH_FREE_BUFS_LOOP(txq, swr, start);
-       } else {
-               struct ci_tx_entry *swr = txq->sw_ring;
-               IETH_FREE_BUFS_LOOP(txq, swr, start);
-       }
+       const uint16_t start = (txq->tx_next_dd - txq->tx_rs_thresh + 1) >> use_ctx;
+       const uint16_t nb_desc = txq->nb_tx_desc >> use_ctx;
+       const uint16_t end = txq->tx_tail >> use_ctx;
+
+       if (txq->vector_sw_ring)
+               IETH_FREE_BUFS_LOOP(txq->sw_ring_vec, nb_desc, start, end);
+       else
+               IETH_FREE_BUFS_LOOP(txq->sw_ring, nb_desc, start, end);
 }
 
 #endif /* _COMMON_INTEL_TX_H_ */
diff --git a/drivers/net/intel/i40e/i40e_rxtx.c b/drivers/net/intel/i40e/i40e_rxtx.c
index b70919c5dc..081d743e62 100644
--- a/drivers/net/intel/i40e/i40e_rxtx.c
+++ b/drivers/net/intel/i40e/i40e_rxtx.c
@@ -1933,7 +1933,7 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
                return err;
        }
 
-       ci_txq_release_all_mbufs(txq);
+       ci_txq_release_all_mbufs(txq, false);
        i40e_reset_tx_queue(txq);
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -2608,7 +2608,7 @@ i40e_tx_queue_release(void *txq)
                return;
        }
 
-       ci_txq_release_all_mbufs(q);
+       ci_txq_release_all_mbufs(q, false);
        rte_free(q->sw_ring);
        rte_memzone_free(q->mz);
        rte_free(q);
@@ -3071,7 +3071,7 @@ i40e_dev_clear_queues(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (!dev->data->tx_queues[i])
                        continue;
-               ci_txq_release_all_mbufs(dev->data->tx_queues[i]);
+               ci_txq_release_all_mbufs(dev->data->tx_queues[i], false);
                i40e_reset_tx_queue(dev->data->tx_queues[i]);
        }
 
diff --git a/drivers/net/intel/iavf/iavf_rxtx.c b/drivers/net/intel/iavf/iavf_rxtx.c
index 18bb17df4b..22639da965 100644
--- a/drivers/net/intel/iavf/iavf_rxtx.c
+++ b/drivers/net/intel/iavf/iavf_rxtx.c
@@ -387,24 +387,6 @@ release_rxq_mbufs(struct iavf_rx_queue *rxq)
        rxq->rx_nb_avail = 0;
 }
 
-static inline void
-release_txq_mbufs(struct ci_tx_queue *txq)
-{
-       uint16_t i;
-
-       if (!txq || !txq->sw_ring) {
-               PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
-               return;
-       }
-
-       for (i = 0; i < txq->nb_tx_desc; i++) {
-               if (txq->sw_ring[i].mbuf) {
-                       rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-                       txq->sw_ring[i].mbuf = NULL;
-               }
-       }
-}
-
 static const
 struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
        [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
@@ -413,18 +395,6 @@ struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
 #endif
 };
 
-static const
-struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
-       [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
-#ifdef RTE_ARCH_X86
-       [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
-#ifdef CC_AVX512_SUPPORT
-       [IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-#endif
-#endif
-
-};
-
 static inline void
 iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
                                    struct rte_mbuf *mb,
@@ -889,7 +859,6 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->q_set = true;
        dev->data->tx_queues[queue_idx] = txq;
        txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
-       txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
 
        if (check_tx_vec_allow(txq) == false) {
                struct iavf_adapter *ad =
@@ -1068,7 +1037,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        }
 
        txq = dev->data->tx_queues[tx_queue_id];
-       iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
+       ci_txq_release_all_mbufs(txq, txq->use_ctx);
        reset_tx_queue(txq);
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -1097,7 +1066,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
        if (!q)
                return;
 
-       iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
+       ci_txq_release_all_mbufs(q, q->use_ctx);
        rte_free(q->sw_ring);
        rte_memzone_free(q->mz);
        rte_free(q);
@@ -1114,7 +1083,7 @@ iavf_reset_queues(struct rte_eth_dev *dev)
                txq = dev->data->tx_queues[i];
                if (!txq)
                        continue;
-               iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
+               ci_txq_release_all_mbufs(txq, txq->use_ctx);
                reset_tx_queue(txq);
                dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
diff --git a/drivers/net/intel/iavf/iavf_rxtx_vec_avx512.c b/drivers/net/intel/iavf/iavf_rxtx_vec_avx512.c
index 8543490c70..007759e451 100644
--- a/drivers/net/intel/iavf/iavf_rxtx_vec_avx512.c
+++ b/drivers/net/intel/iavf/iavf_rxtx_vec_avx512.c
@@ -2357,31 +2357,11 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
        return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
 }
 
-void __rte_cold
-iavf_tx_queue_release_mbufs_avx512(struct ci_tx_queue *txq)
-{
-       unsigned int i;
-       const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
-       const uint16_t end_desc = txq->tx_tail >> txq->use_ctx; /* next empty slot */
-       const uint16_t wrap_point = txq->nb_tx_desc >> txq->use_ctx;  /* end of SW ring */
-       struct ci_tx_entry_vec *swr = (void *)txq->sw_ring;
-
-       if (!txq->sw_ring || txq->nb_tx_free == max_desc)
-               return;
-
-       i = (txq->tx_next_dd - txq->tx_rs_thresh + 1) >> txq->use_ctx;
-       while (i != end_desc) {
-               rte_pktmbuf_free_seg(swr[i].mbuf);
-               swr[i].mbuf = NULL;
-               if (++i == wrap_point)
-                       i = 0;
-       }
-}
-
 int __rte_cold
 iavf_txq_vec_setup_avx512(struct ci_tx_queue *txq)
 {
-       txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
+       txq->vector_tx = true;
+       txq->vector_sw_ring = true;
        return 0;
 }
 
diff --git a/drivers/net/intel/iavf/iavf_rxtx_vec_common.h b/drivers/net/intel/iavf/iavf_rxtx_vec_common.h
index 8ea3d4d010..8f272044b9 100644
--- a/drivers/net/intel/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/intel/iavf/iavf_rxtx_vec_common.h
@@ -60,24 +60,6 @@ _iavf_rx_queue_release_mbufs_vec(struct iavf_rx_queue *rxq)
        memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
 }
 
-static inline void
-_iavf_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
-{
-       unsigned i;
-       const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
-
-       if (!txq->sw_ring || txq->nb_tx_free == max_desc)
-               return;
-
-       i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
-       while (i != txq->tx_tail) {
-               rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-               txq->sw_ring[i].mbuf = NULL;
-               if (++i == txq->nb_tx_desc)
-                       i = 0;
-       }
-}
-
 static inline int
 iavf_rxq_vec_setup_default(struct iavf_rx_queue *rxq)
 {
diff --git a/drivers/net/intel/iavf/iavf_rxtx_vec_sse.c b/drivers/net/intel/iavf/iavf_rxtx_vec_sse.c
index 5c0b2fff46..3adf2a59e4 100644
--- a/drivers/net/intel/iavf/iavf_rxtx_vec_sse.c
+++ b/drivers/net/intel/iavf/iavf_rxtx_vec_sse.c
@@ -1458,16 +1458,11 @@ iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
        _iavf_rx_queue_release_mbufs_vec(rxq);
 }
 
-void __rte_cold
-iavf_tx_queue_release_mbufs_sse(struct ci_tx_queue *txq)
-{
-       _iavf_tx_queue_release_mbufs_vec(txq);
-}
-
 int __rte_cold
 iavf_txq_vec_setup(struct ci_tx_queue *txq)
 {
-       txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
+       txq->vector_tx = true;
+       txq->vector_sw_ring = false;
        return 0;
 }
 
diff --git a/drivers/net/intel/ice/ice_dcf_ethdev.c b/drivers/net/intel/ice/ice_dcf_ethdev.c
index 9be0a96d35..efff76afa8 100644
--- a/drivers/net/intel/ice/ice_dcf_ethdev.c
+++ b/drivers/net/intel/ice/ice_dcf_ethdev.c
@@ -501,7 +501,7 @@ ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        }
 
        txq = dev->data->tx_queues[tx_queue_id];
-       ci_txq_release_all_mbufs(txq);
+       ci_txq_release_all_mbufs(txq, false);
        reset_tx_queue(txq);
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -651,7 +651,7 @@ ice_dcf_stop_queues(struct rte_eth_dev *dev)
                txq = dev->data->tx_queues[i];
                if (!txq)
                        continue;
-               ci_txq_release_all_mbufs(txq);
+               ci_txq_release_all_mbufs(txq, false);
                reset_tx_queue(txq);
                dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
        }
diff --git a/drivers/net/intel/ice/ice_rxtx.c b/drivers/net/intel/ice/ice_rxtx.c
index 62b709c544..03a7092e79 100644
--- a/drivers/net/intel/ice/ice_rxtx.c
+++ b/drivers/net/intel/ice/ice_rxtx.c
@@ -1090,7 +1090,7 @@ ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
                return -EINVAL;
        }
 
-       ci_txq_release_all_mbufs(txq);
+       ci_txq_release_all_mbufs(txq, false);
        ice_reset_tx_queue(txq);
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -1153,7 +1153,7 @@ ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
                return -EINVAL;
        }
 
-       ci_txq_release_all_mbufs(txq);
+       ci_txq_release_all_mbufs(txq, false);
        txq->qtx_tail = NULL;
 
        return 0;
@@ -1532,7 +1532,7 @@ ice_tx_queue_release(void *txq)
                return;
        }
 
-       ci_txq_release_all_mbufs(q);
+       ci_txq_release_all_mbufs(q, false);
        rte_free(q->sw_ring);
        rte_memzone_free(q->mz);
        rte_free(q);
diff --git a/drivers/net/intel/ixgbe/ixgbe_rxtx.c b/drivers/net/intel/ixgbe/ixgbe_rxtx.c
index bf9d461b06..3b7a6a6f0e 100644
--- a/drivers/net/intel/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/intel/ixgbe/ixgbe_rxtx.c
@@ -2457,7 +2457,7 @@ static void __rte_cold
 ixgbe_tx_queue_release(struct ci_tx_queue *txq)
 {
        if (txq != NULL && txq->ops != NULL) {
-               ci_txq_release_all_mbufs(txq);
+               ci_txq_release_all_mbufs(txq, false);
                txq->ops->free_swring(txq);
                rte_memzone_free(txq->mz);
                rte_free(txq);
@@ -3364,7 +3364,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
                struct ci_tx_queue *txq = dev->data->tx_queues[i];
 
                if (txq != NULL) {
-                       ci_txq_release_all_mbufs(txq);
+                       ci_txq_release_all_mbufs(txq, false);
                        txq->ops->reset(txq);
                        dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
                }
@@ -5639,7 +5639,7 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        }
 
        if (txq->ops != NULL) {
-               ci_txq_release_all_mbufs(txq);
+               ci_txq_release_all_mbufs(txq, false);
                txq->ops->reset(txq);
        }
        dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
-- 
2.43.0
