Add a wrapper function to remove the limit on Tx burst size.
The patch makes the fm10k vector transmit path a best-effort
transmit of packets, consistent with what fm10k_xmit_pkts does.

Cc: Jing Chen <jing.d.c...@intel.com>
Signed-off-by: Zhiyong Yang <zhiyong.y...@intel.com>
---
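Note for reviewers: a minimal usage sketch of what the wrapper enables (not
part of the patch; PORT_ID, QUEUE_ID and BURST_SIZE are hypothetical
placeholders). With the wrapper in place the PMD loops over rs_thresh-sized
chunks internally, so the application can hand rte_eth_tx_burst() any burst
size and only has to handle packets the hardware genuinely could not take.

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define PORT_ID    0    /* hypothetical port */
#define QUEUE_ID   0    /* hypothetical Tx queue */
#define BURST_SIZE 256  /* may exceed the vector path's rs_thresh */

static void
send_burst(struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t sent = rte_eth_tx_burst(PORT_ID, QUEUE_ID, pkts, nb_pkts);

        /* Drop whatever the driver did not enqueue. */
        while (sent < nb_pkts)
                rte_pktmbuf_free(pkts[sent++]);
}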
 drivers/net/fm10k/fm10k.h          |  4 ++--
 drivers/net/fm10k/fm10k_ethdev.c   | 28 +++++++++++++++++++++++++---
 drivers/net/fm10k/fm10k_rxtx_vec.c |  4 ++--
 3 files changed, 29 insertions(+), 7 deletions(-)

diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index c6fed21..8e1a950 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -368,8 +368,8 @@ void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq);
 uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t);
 uint16_t fm10k_recv_scattered_pkts_vec(void *, struct rte_mbuf **,
                                        uint16_t);
-uint16_t fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-               uint16_t nb_pkts);
+uint16_t fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+                                   uint16_t nb_pkts);
 void fm10k_txq_vec_setup(struct fm10k_tx_queue *txq);
 int fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq);
 
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index c4fe746..dd4ea80 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -197,9 +197,9 @@ fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
 }
 
 uint16_t __attribute__((weak))
-fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
-               __rte_unused struct rte_mbuf **tx_pkts,
-               __rte_unused uint16_t nb_pkts)
+fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
+                          __rte_unused struct rte_mbuf **tx_pkts,
+                          __rte_unused uint16_t nb_pkts)
 {
        return 0;
 }
@@ -2741,6 +2741,28 @@ fm10k_check_ftag(struct rte_devargs *devargs)
        return 1;
 }
 
+static uint16_t
+fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+                   uint16_t nb_pkts)
+{
+       uint16_t nb_tx = 0;
+       struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
+
+       while (nb_pkts) {
+               uint16_t ret, num;
+
+               num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+               ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+                                                num);
+               nb_tx += ret;
+               nb_pkts -= ret;
+               if (ret < num)
+                       break;
+       }
+
+       return nb_tx;
+}
+
 static void __attribute__((cold))
 fm10k_set_tx_function(struct rte_eth_dev *dev)
 {
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 27f3e43..ab87206 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -800,8 +800,8 @@ tx_backlog_entry(struct rte_mbuf **txep,
 }
 
 uint16_t
-fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-                       uint16_t nb_pkts)
+fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+                          uint16_t nb_pkts)
 {
        struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
        volatile struct fm10k_tx_desc *txdp;
-- 
2.7.4
