Add support in the ice driver for the API rte_eth_tx_done_cleanup,
which lets an application request that the PMD free the mbufs of
packets already transmitted on a Tx ring. Cleanup handlers are
provided for the scalar and simple Tx paths; the vector Tx path
returns -ENOTSUP.

Signed-off-by: Chenxu Di <chenxux...@intel.com>
---
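A minimal usage sketch (note for reviewers, not part of the commit),
showing how an application could exercise this path through the
generic ethdev API. The helper name reclaim_tx_mbufs, the queue id 0
and the budget of 32 packets are placeholders for illustration only;
the port is assumed to be configured and started:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Ask the PMD to free the mbufs of up to 32 packets that were
     * already transmitted on Tx queue 0 of the given port. Returns
     * the number of packets freed, or a negative errno, e.g.
     * -ENOTSUP when the vector Tx path is selected.
     */
    static void
    reclaim_tx_mbufs(uint16_t port_id)
    {
        int nb = rte_eth_tx_done_cleanup(port_id, 0, 32);

        if (nb < 0)
            printf("tx_done_cleanup failed: %d\n", nb);
        else
            printf("freed %d packets\n", nb);
    }
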
 drivers/net/ice/ice_ethdev.c |   3 +
 drivers/net/ice/ice_rxtx.c   | 152 +++++++++++++++++++++++++++++++++++
 drivers/net/ice/ice_rxtx.h   |  10 +++
 3 files changed, 165 insertions(+)

diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index de189daba..3d586fede 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -220,6 +220,7 @@ static const struct eth_dev_ops ice_eth_dev_ops = {
        .filter_ctrl                  = ice_dev_filter_ctrl,
        .udp_tunnel_port_add          = ice_dev_udp_tunnel_port_add,
        .udp_tunnel_port_del          = ice_dev_udp_tunnel_port_del,
+       .tx_done_cleanup              = ice_tx_done_cleanup,
 };
 
 /* store statistics names and its offset in stats structure */
@@ -2137,6 +2138,8 @@ ice_dev_init(struct rte_eth_dev *dev)
        dev->tx_pkt_burst = ice_xmit_pkts;
        dev->tx_pkt_prepare = ice_prep_pkts;
 
+       ice_set_tx_done_cleanup_func(ice_tx_done_cleanup_scalar);
+
        /* for secondary processes, we don't initialise any further as primary
         * has already done this work.
         */
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 2db174456..db531d0fc 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -2643,6 +2643,155 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
        return txq->tx_rs_thresh;
 }
 
+static ice_tx_done_cleanup_t ice_tx_done_cleanup_op;
+
+int
+ice_tx_done_cleanup_scalar(struct ice_tx_queue *txq,
+                       uint32_t free_cnt)
+{
+       uint32_t pkt_cnt;
+       uint16_t i;
+       uint16_t tx_last;
+       uint16_t tx_id;
+       uint16_t nb_tx_to_clean;
+       uint16_t nb_tx_free_last;
+       struct ice_tx_entry *swr_ring = txq->sw_ring;
+
+       /* Start freeing mbufs from the descriptor following tx_tail */
+       tx_last = txq->tx_tail;
+       tx_id  = swr_ring[tx_last].next_id;
+
+       if (txq->nb_tx_free == 0)
+               if (ice_xmit_cleanup(txq))
+                       return 0;
+
+       nb_tx_to_clean = txq->nb_tx_free;
+       nb_tx_free_last = txq->nb_tx_free;
+       if (!free_cnt)
+               free_cnt = txq->nb_tx_desc;
+
+       /* Loop through swr_ring to count the number of
+        * freeable mbufs and packets.
+        */
+       for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+               for (i = 0; i < nb_tx_to_clean &&
+                       pkt_cnt < free_cnt &&
+                       tx_id != tx_last; i++) {
+                       if (swr_ring[tx_id].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+                               swr_ring[tx_id].mbuf = NULL;
+
+                               /*
+                                * last segment in the packet,
+                                * increment packet count
+                                */
+                               pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+                       }
+
+                       tx_id = swr_ring[tx_id].next_id;
+               }
+
+               if (tx_id == tx_last || txq->tx_rs_thresh >
+                       txq->nb_tx_desc - txq->nb_tx_free)
+                       break;
+
+               if (pkt_cnt < free_cnt) {
+                       if (ice_xmit_cleanup(txq))
+                               break;
+
+                       nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+                       nb_tx_free_last = txq->nb_tx_free;
+               }
+       }
+
+       PMD_TX_FREE_LOG(DEBUG,
+               "Free %u Packets successfully "
+               "(port=%d queue=%d)",
+               pkt_cnt, txq->port_id, txq->queue_id);
+
+       return (int)pkt_cnt;
+}
+
+int
+ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
+                       uint32_t free_cnt __rte_unused)
+{
+       return -ENOTSUP;
+}
+
+int
+ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
+                       uint32_t free_cnt)
+{
+       uint16_t i;
+       uint16_t tx_first;
+       uint16_t tx_id;
+       uint32_t pkt_cnt;
+       struct ice_tx_entry *swr_ring = txq->sw_ring;
+
+       /* Start freeing mbufs from tx_first */
+       tx_first = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+       tx_id  = tx_first;
+
+       /* When free_cnt is 0,
+        * assume one mbuf per packet
+        * and try to free as many packets as possible.
+        */
+       if (free_cnt == 0)
+               free_cnt = txq->nb_tx_desc;
+
+       /* Loop through swr_ring to count freeable packets */
+       for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+               if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_rs_thresh)
+                       break;
+
+               if (!ice_tx_free_bufs(txq))
+                       break;
+
+               for (i = 0; i != txq->tx_rs_thresh &&
+                       tx_id != tx_first; i++) {
+                       /* last segment in the packet,
+                        * increment packet count
+                        */
+                       pkt_cnt += (tx_id == swr_ring[tx_id].last_id);
+                       tx_id = swr_ring[tx_id].next_id;
+               }
+
+               if (tx_id == tx_first)
+                       break;
+       }
+
+       PMD_TX_FREE_LOG(DEBUG,
+               "Free %u packets successfully "
+               "(port=%d queue=%d)",
+               pkt_cnt, txq->port_id, txq->queue_id);
+
+       return (int)pkt_cnt;
+}
+
+int
+ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+       ice_tx_done_cleanup_t func = ice_get_tx_done_cleanup_func();
+
+       if (!func)
+               return -ENOTSUP;
+
+       return func(txq, free_cnt);
+}
+
+void
+ice_set_tx_done_cleanup_func(ice_tx_done_cleanup_t fn)
+{
+       ice_tx_done_cleanup_op = fn;
+}
+
+ice_tx_done_cleanup_t
+ice_get_tx_done_cleanup_func(void)
+{
+       return ice_tx_done_cleanup_op;
+}
+
 /* Populate 4 descriptors with data from 4 mbufs */
 static inline void
 tx4(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
@@ -3003,6 +3152,7 @@ ice_set_tx_function(struct rte_eth_dev *dev)
                                    ice_xmit_pkts_vec_avx2 :
                                    ice_xmit_pkts_vec;
                dev->tx_pkt_prepare = NULL;
+               ice_set_tx_done_cleanup_func(ice_tx_done_cleanup_vec);
 
                return;
        }
@@ -3012,10 +3162,12 @@ ice_set_tx_function(struct rte_eth_dev *dev)
                PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
                dev->tx_pkt_burst = ice_xmit_pkts_simple;
                dev->tx_pkt_prepare = NULL;
+               ice_set_tx_done_cleanup_func(ice_tx_done_cleanup_simple);
        } else {
                PMD_INIT_LOG(DEBUG, "Normal tx finally be used.");
                dev->tx_pkt_burst = ice_xmit_pkts;
                dev->tx_pkt_prepare = ice_prep_pkts;
+               ice_set_tx_done_cleanup_func(ice_tx_done_cleanup_scalar);
        }
 }
 
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 9e3d2cd07..151bead62 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -135,6 +135,9 @@ union ice_tx_offload {
        };
 };
 
+typedef int (*ice_tx_done_cleanup_t)(struct ice_tx_queue *txq,
+                               uint32_t free_cnt);
+
 int ice_rx_queue_setup(struct rte_eth_dev *dev,
                       uint16_t queue_idx,
                       uint16_t nb_desc,
@@ -183,6 +186,7 @@ int ice_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int ice_tx_descriptor_status(void *tx_queue, uint16_t offset);
 void ice_set_default_ptype_table(struct rte_eth_dev *dev);
 const uint32_t *ice_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+int ice_tx_done_cleanup(void *txq, uint32_t free_cnt);
 
 int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
 int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
@@ -202,4 +206,10 @@ uint16_t ice_recv_scattered_pkts_vec_avx2(void *rx_queue,
 uint16_t ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                                uint16_t nb_pkts);
 int ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc);
+void ice_set_tx_done_cleanup_func(ice_tx_done_cleanup_t fn);
+ice_tx_done_cleanup_t ice_get_tx_done_cleanup_func(void);
+int ice_tx_done_cleanup_scalar(struct ice_tx_queue *txq, uint32_t free_cnt);
+int ice_tx_done_cleanup_vec(struct ice_tx_queue *txq, uint32_t free_cnt);
+int ice_tx_done_cleanup_simple(struct ice_tx_queue *txq, uint32_t free_cnt);
+
 #endif /* _ICE_RXTX_H_ */
-- 
2.17.1
