From: Pavel Belous <pavel.bel...@aquantia.com>

Signed-off-by: Igor Russkikh <igor.russk...@aquantia.com>
---
 drivers/net/atlantic/atl_ethdev.c |  10 +
 drivers/net/atlantic/atl_rxtx.c   | 566 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 576 insertions(+)

diff --git a/drivers/net/atlantic/atl_ethdev.c b/drivers/net/atlantic/atl_ethdev.c
index bb4d96bb1..7abae2236 100644
--- a/drivers/net/atlantic/atl_ethdev.c
+++ b/drivers/net/atlantic/atl_ethdev.c
@@ -297,6 +297,11 @@ static const struct eth_dev_ops atl_eth_dev_ops = {
        .rx_queue_setup       = atl_rx_queue_setup,
        .rx_queue_release     = atl_rx_queue_release,
 
+       .tx_queue_start       = atl_tx_queue_start,
+       .tx_queue_stop        = atl_tx_queue_stop,
+       .tx_queue_setup       = atl_tx_queue_setup,
+       .tx_queue_release     = atl_tx_queue_release,
+
        .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
        .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
 
@@ -324,6 +329,8 @@ static const struct eth_dev_ops atl_eth_dev_ops = {
        .mac_addr_set         = atl_set_default_mac_addr,
        .set_mc_addr_list     = atl_dev_set_mc_addr_list,
        .rxq_info_get         = atl_rxq_info_get,
+       .txq_info_get         = atl_txq_info_get,
+
        .reta_update          = atl_reta_update,
        .reta_query           = atl_reta_query,
        .rss_hash_update      = atl_rss_hash_update,
@@ -639,6 +646,9 @@ atl_dev_start(struct rte_eth_dev *dev)
                }
        }
 
+       /* initialize transmission unit */
+       atl_tx_init(dev);
+
        /* This can fail when allocating mbufs for descriptor rings */
        err = atl_rx_init(dev);
        if (err) {
diff --git a/drivers/net/atlantic/atl_rxtx.c b/drivers/net/atlantic/atl_rxtx.c
index 6198f5dfe..bed2265b9 100644
--- a/drivers/net/atlantic/atl_rxtx.c
+++ b/drivers/net/atlantic/atl_rxtx.c
@@ -46,6 +46,20 @@
 #include "hw_atl/hw_atl_b0.h"
 #include "hw_atl/hw_atl_b0_internal.h"
 
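+/*
+ * Tx offload flags this PMD handles; anything else requested in
+ * mbuf ol_flags is rejected by atl_prep_pkts() through
+ * ATL_TX_OFFLOAD_NOTSUP_MASK.
+ */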
+#define ATL_TX_CKSUM_OFFLOAD_MASK (             \
+       PKT_TX_IP_CKSUM |                        \
+       PKT_TX_L4_MASK |                         \
+       PKT_TX_TCP_SEG)
+
+#define ATL_TX_OFFLOAD_MASK (                   \
+       PKT_TX_VLAN_PKT |                        \
+       PKT_TX_IP_CKSUM |                        \
+       PKT_TX_L4_MASK |                         \
+       PKT_TX_TCP_SEG)
+
+#define ATL_TX_OFFLOAD_NOTSUP_MASK \
+       (PKT_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)
+
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -54,6 +68,15 @@ struct atl_rx_entry {
 };
 
 /**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct atl_tx_entry {
+       struct rte_mbuf *mbuf;
+       uint16_t next_id;
+       uint16_t last_id;
+};
+
+/**
  * Structure associated with each RX queue.
  */
 struct atl_rx_queue {
@@ -72,6 +95,22 @@ struct atl_rx_queue {
        bool                    l4_csum_enabled;
 };
 
+/**
+ * Structure associated with each TX queue.
+ */
+struct atl_tx_queue {
+       struct hw_atl_txd_s     *hw_ring;
+       uint64_t                hw_ring_phys_addr;
+       struct atl_tx_entry     *sw_ring;
+       uint16_t                nb_tx_desc;
+       uint16_t                tx_tail;
+       uint16_t                tx_head;
+       uint16_t                queue_id;
+       uint16_t                port_id;
+       uint16_t                tx_free_thresh;
+       uint16_t                tx_free;
+};
+
 inline static void
 atl_reset_rx_queue(struct atl_rx_queue *rxq)
 {
@@ -169,6 +208,134 @@ atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        return 0;
 }
 
+static inline void
+atl_reset_tx_queue(struct atl_tx_queue *txq)
+{
+       struct atl_tx_entry *tx_entry;
+       union hw_atl_txc_s *txc;
+       uint16_t i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (!txq) {
+               PMD_DRV_LOG(ERR, "Pointer to txq is NULL");
+               return;
+       }
+
+       tx_entry = txq->sw_ring;
+
+       for (i = 0; i < txq->nb_tx_desc; i++) {
+               txc = (union hw_atl_txc_s *)&txq->hw_ring[i];
+               txc->flags1 = 0;
+               txc->flags2 = 2;
+       }
+
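+       /* mark all descriptors "done" so the ring starts out fully reclaimable */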
+       for (i = 0; i < txq->nb_tx_desc; i++) {
+               txq->hw_ring[i].dd = 1;
+               tx_entry[i].mbuf = NULL;
+       }
+
+       txq->tx_tail = 0;
+       txq->tx_head = 0;
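+       /* keep one slot in reserve so a full ring can be told from an empty one */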
+       txq->tx_free = txq->nb_tx_desc - 1;
+}
+
+int
+atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+                  uint16_t nb_tx_desc, unsigned int socket_id,
+                  const struct rte_eth_txconf *tx_conf)
+{
+       struct atl_tx_queue *txq;
+       const struct rte_memzone *mz;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* make sure a valid number of descriptors have been requested */
+       if (nb_tx_desc < AQ_HW_MIN_TX_RING_SIZE || nb_tx_desc > AQ_HW_MAX_TX_RING_SIZE) {
+               PMD_INIT_LOG(ERR, "Number of Tx descriptors must be "
+                       "less than or equal to %d, "
+                       "greater than or equal to %d", AQ_HW_MAX_TX_RING_SIZE, AQ_HW_MIN_TX_RING_SIZE);
+               return -EINVAL;
+       }
+
+       /*
+        * if this queue existed already, free the associated memory. The
+        * queue cannot be reused in case we need to allocate memory on a
+        * different socket than was previously used.
+        */
+       if (dev->data->tx_queues[tx_queue_id] != NULL) {
+               atl_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+               dev->data->tx_queues[tx_queue_id] = NULL;
+       }
+
+       /* allocate memory for the queue structure */
+       txq = rte_zmalloc_socket("atlantic Tx queue", sizeof(*txq), RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq == NULL) {
+               PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
+               return -ENOMEM;
+       }
+
+       /* setup queue */
+       txq->nb_tx_desc = nb_tx_desc;
+       txq->port_id = dev->data->port_id;
+       txq->queue_id = tx_queue_id;
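+       /*
+        * A zero threshold would never trigger cleanup in atl_xmit_pkts();
+        * fall back to a quarter of the ring (the fallback value is arbitrary).
+        */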
+       txq->tx_free_thresh = tx_conf->tx_free_thresh ?
+               tx_conf->tx_free_thresh : nb_tx_desc / 4;
+
+       /* allocate memory for the software ring */
+       txq->sw_ring = rte_zmalloc_socket("atlantic sw tx ring",
+                                               nb_tx_desc * sizeof(struct atl_tx_entry),
+                                               RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq->sw_ring == NULL) {
+               PMD_INIT_LOG(ERR, "Cannot allocate software ring");
+               rte_free(txq);
+               return -ENOMEM;
+       }
+
+       /*
+        * allocate memory for the hardware descriptor ring. A memzone large
+        * enough to hold the maximum ring size is requested to allow for
+        * resizing in later calls to the queue setup function.
+        */
+       mz = rte_eth_dma_zone_reserve(dev, "tx hw_ring", tx_queue_id,
+                                     HW_ATL_B0_MAX_TXD * sizeof(struct hw_atl_txd_s),
+                                     128, socket_id);
+       if (mz == NULL) {
+               PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
+               rte_free(txq->sw_ring);
+               rte_free(txq);
+               return -ENOMEM;
+       }
+       txq->hw_ring = mz->addr;
+       txq->hw_ring_phys_addr = mz->iova;
+
+       atl_reset_tx_queue(txq);
+
+       dev->data->tx_queues[tx_queue_id] = txq;
+       return 0;
+}
+
+int
+atl_tx_init(struct rte_eth_dev *eth_dev)
+{
+       struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       struct atl_tx_queue *txq;
+       uint64_t base_addr = 0;
+       int i = 0;
+       int err = 0;
+
+       PMD_INIT_FUNC_TRACE();
+
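+       /* program each Tx ring's base address and size into the hardware */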
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+               txq = eth_dev->data->tx_queues[i];
+               base_addr = txq->hw_ring_phys_addr;
+
+               err = hw_atl_b0_hw_ring_tx_init(hw, base_addr, txq->queue_id, txq->nb_tx_desc, 0, txq->port_id);
+       }
+
+       return err;
+}
+
 int
 atl_rx_init(struct rte_eth_dev *eth_dev)
 {
@@ -324,6 +491,77 @@ atl_rx_queue_release(void *rx_queue)
        }
 }
 
+static void
+atl_tx_queue_release_mbufs(struct atl_tx_queue *txq)
+{
+       int i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (txq->sw_ring != NULL) {
+               for (i = 0; i < txq->nb_tx_desc; i++) {
+                       if (txq->sw_ring[i].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                               txq->sw_ring[i].mbuf = NULL;
+                       }
+               }
+       }
+}
+
+int
+atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (tx_queue_id < dev->data->nb_tx_queues) {
+               hw_atl_b0_hw_ring_tx_start(hw, tx_queue_id);
+
+               rte_wmb();
+               hw_atl_b0_hw_tx_ring_tail_update(hw, 0, tx_queue_id);
+               dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+       } else {
+               return -1;
+       }
+
+       return 0;
+}
+
+int
+atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct atl_tx_queue *txq;
+
+       PMD_INIT_FUNC_TRACE();
+
+       txq = dev->data->tx_queues[tx_queue_id];
+
+       hw_atl_b0_hw_ring_tx_stop(hw, tx_queue_id);
+
+       atl_tx_queue_release_mbufs(txq);
+       atl_reset_tx_queue(txq);
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
+
+void
+atl_tx_queue_release(void *tx_queue)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       if (tx_queue != NULL) {
+               struct atl_tx_queue *txq = (struct atl_tx_queue *)tx_queue;
+
+               atl_tx_queue_release_mbufs(txq);
+               rte_free(txq->sw_ring);
+               rte_free(txq);
+       }
+}
+
 void
 atl_free_queues(struct rte_eth_dev *dev)
 {
@@ -337,6 +575,11 @@ atl_free_queues(struct rte_eth_dev *dev)
        }
        dev->data->nb_rx_queues = 0;
 
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               atl_tx_queue_release(dev->data->tx_queues[i]);
+               dev->data->tx_queues[i] = 0;
+       }
+       dev->data->nb_tx_queues = 0;
 }
 
 int
@@ -346,6 +589,13 @@ atl_start_queues(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               if (atl_tx_queue_start(dev, i) != 0) {
+                       PMD_DRV_LOG(ERR, "Start Tx queue %d failed", i);
+                       return -1;
+               }
+       }
+
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (atl_rx_queue_start(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Start Rx queue %d failed", i);
@@ -363,6 +613,13 @@ atl_stop_queues(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               if (atl_tx_queue_stop(dev, i) != 0) {
+                       PMD_DRV_LOG(ERR, "Stop Tx queue %d failed", i);
+                       return -1;
+               }
+       }
+
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (atl_rx_queue_stop(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Stop Rx queue %d failed", i);
@@ -387,6 +644,18 @@ atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_rxq_
        qinfo->nb_desc = rxq->nb_rx_desc;
 }
 
+void
+atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, struct rte_eth_txq_info *qinfo)
+{
+       struct atl_tx_queue *txq;
+
+       PMD_INIT_FUNC_TRACE();
+
+       txq = dev->data->tx_queues[queue_id];
+
+       qinfo->nb_desc = txq->nb_tx_desc;
+}
+
 /* Return Rx queue avail count */
 
 uint32_t
@@ -459,6 +728,31 @@ atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
        return RTE_ETH_RX_DESC_AVAIL;
 }
 
+int
+atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+       struct atl_tx_queue *txq = tx_queue;
+       struct hw_atl_txd_s *txd;
+       uint32_t idx;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (unlikely(offset >= txq->nb_tx_desc))
+               return -EINVAL;
+
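+       /*
+        * Per the ethdev convention, the offset is relative to the tail,
+        * i.e. the next descriptor the driver will write.
+        */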
+       idx = txq->tx_tail + offset;
+
+       if (idx >= txq->nb_tx_desc)
+               idx -= txq->nb_tx_desc;
+
+       txd = &txq->hw_ring[idx];
+
+       if (txd->dd)
+               return RTE_ETH_TX_DESC_DONE;
+
+       return RTE_ETH_TX_DESC_FULL;
+}
+
 static int
 atl_rx_enable_intr(struct rte_eth_dev *dev, uint16_t queue_id, bool enable)
 {
@@ -495,6 +789,46 @@ atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
        return atl_rx_enable_intr(eth_dev, queue_id, false);
 }
 
+uint16_t
+atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       int i, ret;
+       uint64_t ol_flags;
+       struct rte_mbuf *m;
+
+       PMD_INIT_FUNC_TRACE();
+
+       for (i = 0; i < nb_pkts; i++) {
+               m = tx_pkts[i];
+               ol_flags = m->ol_flags;
+
+               if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
+                       rte_errno = EINVAL;
+                       return i;
+               }
+
+               if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
+                       rte_errno = ENOTSUP;
+                       return i;
+               }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+               ret = rte_validate_tx_offload(m);
+               if (ret != 0) {
+                       rte_errno = -ret;
+                       return i;
+               }
+#endif
+               ret = rte_net_intel_cksum_prepare(m);
+               if (ret != 0) {
+                       rte_errno = -ret;
+                       return i;
+               }
+       }
+
+       return i;
+}
+
 static uint64_t
 atl_desc_to_offload_flags(struct atl_rx_queue *rxq, struct hw_atl_rxd_wb_s *rxd_wb)
 {
@@ -756,3 +1090,235 @@ atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
        return nb_rx;
 }
+
+static void
+atl_xmit_cleanup(struct atl_tx_queue *txq)
+{
+       struct atl_tx_entry *sw_ring;
+       struct hw_atl_txd_s *txd;
+       int to_clean = 0;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (txq != NULL) {
+               sw_ring = txq->sw_ring;
+               int head = txq->tx_head;
+               int cnt;
+
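+               /* first pass: count descriptors the hardware has completed (dd set) */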
+               for (cnt = head;;) {
+                       txd = &txq->hw_ring[cnt];
+
+                       if (txd->dd)
+                               to_clean++;
+
+                       cnt = (cnt + 1) % txq->nb_tx_desc;
+                       if (cnt == txq->tx_tail)
+                               break;
+               }
+
+               if (to_clean == 0)
+                       return;
+
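+               /* second pass: free mbufs and reset descriptors from head until all completed ones are reclaimed */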
+               while (to_clean) {
+                       txd = &txq->hw_ring[head];
+
+                       struct atl_tx_entry *tx_entry = &sw_ring[head];
+
+                       if (tx_entry->mbuf) {
+                               rte_pktmbuf_free_seg(tx_entry->mbuf);
+                               tx_entry->mbuf = NULL;
+                       }
+
+                       if (txd->dd)
+                               to_clean--;
+
+                       txd->buf_addr = 0;
+                       txd->flags = 0;
+
+                       head = (head + 1) % txq->nb_tx_desc;
+                       txq->tx_free++;
+               }
+
+               txq->tx_head = head;
+       }
+}
+
+static int
+atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
+{
+       uint32_t tx_cmd = 0;
+       uint64_t ol_flags = tx_pkt->ol_flags;
+
+       PMD_INIT_FUNC_TRACE();
+
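+       /*
+        * TSO and VLAN insertion are described to the NIC through a separate
+        * context descriptor carrying the header lengths, MSS and VLAN tag.
+        */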
+       if (ol_flags & PKT_TX_TCP_SEG) {
+               PMD_DRV_LOG(DEBUG, "xmit TSO pkt");
+
+               tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;
+
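+               /*
+                * Raw cmd bits from the context descriptor layout: 0x4 enables
+                * segmentation, 0x2 (OR'ed in below) selects IPv6.
+                */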
+               txc->cmd = 0x4;
+
+               if (ol_flags & PKT_TX_IPV6)
+                       txc->cmd |= 0x2;
+
+               txc->l2_len = tx_pkt->l2_len;
+               txc->l3_len = tx_pkt->l3_len;
+               txc->l4_len = tx_pkt->l4_len;
+
+               txc->mss_len = tx_pkt->tso_segsz;
+       }
+
+       if (ol_flags & PKT_TX_VLAN_PKT) {
+               tx_cmd |= tx_desc_cmd_vlan;
+               txc->vlan_tag = tx_pkt->vlan_tci;
+       }
+
+       if (tx_cmd) {
+               txc->type = tx_desc_type_ctx;
+               txc->idx = 0;
+       }
+
+       return tx_cmd;
+}
+
+static inline void
+atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd, uint32_t tx_cmd)
+{
+       txd->cmd |= tx_desc_cmd_fcs;
+       txd->cmd |= (mbuf->ol_flags & PKT_TX_IP_CKSUM) ? tx_desc_cmd_ipv4 : 0;
+       /* L4 csum requested */
+       txd->cmd |= (mbuf->ol_flags & PKT_TX_L4_MASK) ? tx_desc_cmd_l4cs : 0;
+       txd->cmd |= tx_cmd;
+}
+
+static inline void
+atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq, struct rte_mbuf *tx_pkt)
+{
+       struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(&rte_eth_devices[txq->port_id]);
+       uint32_t pay_len = 0;
+       int tail = 0;
+       struct atl_tx_entry *tx_entry;
+       uint64_t buf_dma_addr;
+       struct rte_mbuf *m_seg;
+       union hw_atl_txc_s *txc = NULL;
+       struct hw_atl_txd_s *txd = NULL;
+       u32 tx_cmd = 0U;
+       int desc_count = 0;
+
+       PMD_INIT_FUNC_TRACE();
+
+       tail = txq->tx_tail;
+
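+       /*
+        * The slot at the tail is first set up as a context descriptor.
+        * If no offload context is needed it is reused as the first data
+        * descriptor: hw_atl_txc_s and hw_atl_txd_s overlay the same ring slot.
+        */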
+       txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];
+
+       txc->flags1 = 0U;
+       txc->flags2 = 0U;
+
+       tx_cmd = atl_tso_setup(tx_pkt, txc);
+
+       if (tx_cmd) {
+               /* We've consumed the first desc, adjust counters */
+               tail = (tail + 1) % txq->nb_tx_desc;
+               txq->tx_tail = tail;
+               txq->tx_free -= 1;
+
+               txd = &txq->hw_ring[tail];
+               txd->flags = 0U;
+       } else {
+               txd = (struct hw_atl_txd_s *)txc;
+       }
+
+       txd->ct_en = !!tx_cmd;
+
+       txd->type = tx_desc_type_desc;
+
+       atl_setup_csum_offload(tx_pkt, txd, tx_cmd);
+
+       if (tx_cmd)
+               txd->ct_idx = 0;
+
+       pay_len = tx_pkt->pkt_len;
+
+       txd->pay_len = pay_len;
+
+       for (m_seg = tx_pkt; m_seg; m_seg = m_seg->next) {
+               if (desc_count > 0) {
+                       txd = &txq->hw_ring[tail];
+                       txd->flags = 0U;
+               }
+
+               buf_dma_addr = rte_mbuf_data_iova(m_seg);
+               txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
+
+               txd->type = tx_desc_type_desc;
+               txd->len = m_seg->data_len;
+               txd->pay_len = pay_len;
+
+               /* Store mbuf for freeing later */
+               tx_entry = &txq->sw_ring[tail];
+
+               if (tx_entry->mbuf)
+                       rte_pktmbuf_free_seg(tx_entry->mbuf);
+               tx_entry->mbuf = m_seg;
+
+               tail = (tail + 1) % txq->nb_tx_desc;
+
+               desc_count++;
+       }
+
+       /* Last descriptor requires EOP and WB */
+       txd->eop = 1U;
+       txd->cmd |= tx_desc_cmd_wb;
+
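+       /* publishing the new tail hands the filled descriptors over to the NIC */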
+       hw_atl_b0_hw_tx_ring_tail_update(hw, tail, txq->queue_id);
+
+       txq->tx_tail = tail;
+
+       txq->tx_free -= desc_count;
+
+       adapter->sw_stats.q_opackets[txq->queue_id]++;
+       adapter->sw_stats.q_obytes[txq->queue_id] += pay_len;
+}
+
+uint16_t
+atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct rte_eth_dev *dev = NULL;
+       struct aq_hw_s *hw = NULL;
+       struct atl_tx_queue *txq = tx_queue;
+       struct rte_mbuf *tx_pkt;
+       uint16_t nb_tx;
+
+       dev = &rte_eth_devices[txq->port_id];
+       hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_TX_LOG(DEBUG, "txq%d pkts: %d tx_free=%d tx_tail=%d tx_head=%d",
+                       txq->queue_id, nb_pkts, txq->tx_free, txq->tx_tail, txq->tx_head);
+
+       for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+               tx_pkt = *tx_pkts++;
+
+               /* Clean Tx queue if needed */
+               if (txq->tx_free < txq->tx_free_thresh)
+                       atl_xmit_cleanup(txq);
+
+               /* Check if we have enough free descriptors */
+               if (txq->tx_free < tx_pkt->nb_segs)
+                       break;
+
+               /* check mbuf is valid */
+               if ((tx_pkt->nb_segs == 0) || ((tx_pkt->nb_segs > 1) && (tx_pkt->next == NULL)))
+                       break;
+
+               /* Send the packet */
+               atl_xmit_pkt(hw, txq, tx_pkt);
+       }
+
+       PMD_TX_LOG(DEBUG, "atl_xmit_pkts %d transmitted", nb_tx);
+
+       return nb_tx;
+}
+
-- 
2.13.3.windows.1
