Stop the receive and transmit units for a specified queue, release the
mbufs held by the rings, and free the queues.

Signed-off-by: Jiawen Wu <jiawe...@trustnetic.com>
---
 drivers/net/txgbe/base/txgbe_type.h |   3 +
 drivers/net/txgbe/txgbe_ethdev.c    |   7 +
 drivers/net/txgbe/txgbe_ethdev.h    |  15 ++
 drivers/net/txgbe/txgbe_rxtx.c      | 305 +++++++++++++++++++++++++++-
 drivers/net/txgbe/txgbe_rxtx.h      |  25 +++
 5 files changed, 354 insertions(+), 1 deletion(-)
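Note for reviewers: the Rx and Tx stop paths below share one pattern. Clear
the queue's CFG enable bit, poll with a bounded budget until the hardware
acknowledges, and save/restore the ring registers around the disable so a
later queue start can re-enable cleanly. A minimal standalone sketch of the
disable-then-poll part, with a fake register and stubbed accessors standing
in for the driver's rd32()/wr32m() MMIO helpers (the mask and the 10 ms
budget mirror TXGBE_RXCFG_ENA and RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
everything else here is illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define QCFG_ENA        0x1u    /* stands in for TXGBE_RXCFG_ENA */
#define POLL_WAIT_10_MS 10      /* stands in for RTE_TXGBE_REGISTER_POLL_WAIT_10_MS */

static uint32_t fake_reg;       /* pretend queue CFG register */

static uint32_t rd32(void) { return fake_reg; }

/* Masked write, like the driver's wr32m(). */
static void wr32m(uint32_t mask, uint32_t val)
{
        fake_reg = (fake_reg & ~mask) | (val & mask);
}

static void delay_1ms(void)
{
        /* The driver calls rte_delay_ms(1); here the "hardware" simply
         * acknowledges the disable during the wait. */
        fake_reg &= ~QCFG_ENA;
}

/* Disable-then-poll, as used by both txgbe_dev_rx_queue_stop() and
 * txgbe_dev_tx_queue_stop() in this patch. */
static int queue_disable(void)
{
        int poll_ms = POLL_WAIT_10_MS;
        uint32_t cfg;

        wr32m(QCFG_ENA, 0);             /* clear the enable bit */
        do {
                delay_1ms();
                cfg = rd32();
        } while (--poll_ms && (cfg & QCFG_ENA));

        return poll_ms ? 0 : -1;        /* -1: queue never reported disabled */
}

int main(void)
{
        fake_reg = QCFG_ENA;
        printf("queue disable: %s\n", queue_disable() == 0 ? "ok" : "timed out");
        return 0;
}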

diff --git a/drivers/net/txgbe/base/txgbe_type.h b/drivers/net/txgbe/base/txgbe_type.h
index 6229d8acc..c05e8e8b1 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -467,6 +467,9 @@ struct txgbe_hw {
                TXGBE_SW_RESET,
                TXGBE_GLOBAL_RESET
        } reset_type;
+
+       u32 q_rx_regs[128 * 4];
+       u32 q_tx_regs[128 * 4];
 };
 
 #include "txgbe_regs.h"
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 4fab88c5c..80470c6e7 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -599,6 +599,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 
 error:
        PMD_INIT_LOG(ERR, "failure in txgbe_dev_start(): %d", err);
+       txgbe_dev_clear_queues(dev);
        return -EIO;
 }
 
@@ -638,6 +639,8 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
                hw->mac.disable_tx_laser(hw);
        }
 
+       txgbe_dev_clear_queues(dev);
+
        /* Clear stored conf */
        dev->data->scattered_rx = 0;
        dev->data->lro = 0;
@@ -1320,7 +1323,11 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .stats_get                  = txgbe_dev_stats_get,
        .stats_reset                = txgbe_dev_stats_reset,
        .rx_queue_start             = txgbe_dev_rx_queue_start,
+       .rx_queue_stop              = txgbe_dev_rx_queue_stop,
        .tx_queue_start             = txgbe_dev_tx_queue_start,
+       .tx_queue_stop              = txgbe_dev_tx_queue_stop,
+       .rx_queue_release           = txgbe_dev_rx_queue_release,
+       .tx_queue_release           = txgbe_dev_tx_queue_release,
        .dev_led_on                 = txgbe_dev_led_on,
        .dev_led_off                = txgbe_dev_led_off,
 };
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 2dc0327cb..f5ee1cae6 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -82,18 +82,33 @@ int txgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
 /*
  * RX/TX function prototypes
  */
+void txgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
 void txgbe_dev_free_queues(struct rte_eth_dev *dev);
 
+void txgbe_dev_rx_queue_release(void *rxq);
+
+void txgbe_dev_tx_queue_release(void *txq);
+
 int txgbe_dev_rx_init(struct rte_eth_dev *dev);
 
 void txgbe_dev_tx_init(struct rte_eth_dev *dev);
 
 int txgbe_dev_rxtx_start(struct rte_eth_dev *dev);
 
+void txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id);
+void txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+void txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id);
+
 int txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 
+int txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
 int txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
+int txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 uint16_t txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts);
 
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index ad5d1d22f..58824045b 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -15,6 +15,8 @@
 
 #include <rte_ethdev.h>
 #include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
 
 #include "txgbe_logs.h"
 #include "base/txgbe.h"
@@ -102,6 +104,22 @@ txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
        return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
 }
 
+static void __rte_cold
+txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
+{
+       if (txq != NULL && txq->ops != NULL) {
+               txq->ops->release_mbufs(txq);
+               txq->ops->free_swring(txq);
+               rte_free(txq);
+       }
+}
+
+void __rte_cold
+txgbe_dev_tx_queue_release(void *txq)
+{
+       txgbe_tx_queue_release(txq);
+}
+
 /* Takes an ethdev and a queue and sets up the tx function to be used based on
  * the queue parameters. Used in tx_queue_setup by primary process and then
  * in dev_init by secondary process when attaching to an existing ethdev.
@@ -129,10 +147,169 @@ txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
        }
 }
 
+/**
+ * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
+ *
+ * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
+ * in the sw_rsc_ring is not set to NULL but rather points to the next
+ * mbuf of this RSC aggregation (that has not been completed yet and still
+ * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
+ * will just free the first "nb_segs" segments of the cluster explicitly by
+ * calling rte_pktmbuf_free_seg().
+ *
+ * @m scattered cluster head
+ */
+static void __rte_cold
+txgbe_free_sc_cluster(struct rte_mbuf *m)
+{
+       uint16_t i, nb_segs = m->nb_segs;
+       struct rte_mbuf *next_seg;
+
+       for (i = 0; i < nb_segs; i++) {
+               next_seg = m->next;
+               rte_pktmbuf_free_seg(m);
+               m = next_seg;
+       }
+}
+
+static void __rte_cold
+txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)
+{
+       unsigned i;
+
+       if (rxq->sw_ring != NULL) {
+               for (i = 0; i < rxq->nb_rx_desc; i++) {
+                       if (rxq->sw_ring[i].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+                               rxq->sw_ring[i].mbuf = NULL;
+                       }
+               }
+               if (rxq->rx_nb_avail) {
+                       for (i = 0; i < rxq->rx_nb_avail; ++i) {
+                               struct rte_mbuf *mb;
+
+                               mb = rxq->rx_stage[rxq->rx_next_avail + i];
+                               rte_pktmbuf_free_seg(mb);
+                       }
+                       rxq->rx_nb_avail = 0;
+               }
+       }
+
+       if (rxq->sw_sc_ring)
+               for (i = 0; i < rxq->nb_rx_desc; i++)
+                       if (rxq->sw_sc_ring[i].fbuf) {
+                               txgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
+                               rxq->sw_sc_ring[i].fbuf = NULL;
+                       }
+}
+
+static void __rte_cold
+txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
+{
+       if (rxq != NULL) {
+               txgbe_rx_queue_release_mbufs(rxq);
+               rte_free(rxq->sw_ring);
+               rte_free(rxq->sw_sc_ring);
+               rte_free(rxq);
+       }
+}
+
+void __rte_cold
+txgbe_dev_rx_queue_release(void *rxq)
+{
+       txgbe_rx_queue_release(rxq);
+}
+
+/* Reset dynamic txgbe_rx_queue fields back to defaults */
+static void __rte_cold
+txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
+{
+       static const struct txgbe_rx_desc zeroed_desc = {{{0}, {0} }, {{0}, {0} } };
+       unsigned i;
+       uint16_t len = rxq->nb_rx_desc;
+
+       /*
+        * By default, the Rx queue setup function allocates enough memory for
+        * TXGBE_RING_DESC_MAX.  The Rx Burst bulk allocation function requires
+        * extra memory at the end of the descriptor ring to be zero'd out.
+        */
+       if (adapter->rx_bulk_alloc_allowed)
+               /* zero out extra memory */
+               len += RTE_PMD_TXGBE_RX_MAX_BURST;
+
+       /*
+        * Zero out HW ring memory. Zero out extra memory at the end of
+        * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
+        * reads extra memory as zeros.
+        */
+       for (i = 0; i < len; i++) {
+               rxq->rx_ring[i] = zeroed_desc;
+       }
+
+       /*
+        * initialize extra software ring entries. Space for these extra
+        * entries is always allocated
+        */
+       memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+       for (i = rxq->nb_rx_desc; i < len; ++i) {
+               rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
+       }
+
+       rxq->rx_nb_avail = 0;
+       rxq->rx_next_avail = 0;
+       rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
+       rxq->rx_tail = 0;
+       rxq->nb_rx_hold = 0;
+       rxq->pkt_first_seg = NULL;
+       rxq->pkt_last_seg = NULL;
+
+}
+
+void __rte_cold
+txgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+       unsigned i;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+       PMD_INIT_FUNC_TRACE();
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               struct txgbe_tx_queue *txq = dev->data->tx_queues[i];
+
+               if (txq != NULL) {
+                       txq->ops->release_mbufs(txq);
+                       txq->ops->reset(txq);
+               }
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
+
+               if (rxq != NULL) {
+                       txgbe_rx_queue_release_mbufs(rxq);
+                       txgbe_reset_rx_queue(adapter, rxq);
+               }
+       }
+}
+
 void
 txgbe_dev_free_queues(struct rte_eth_dev *dev)
 {
-       RTE_SET_USED(dev);
+       unsigned i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+               dev->data->rx_queues[i] = NULL;
+       }
+       dev->data->nb_rx_queues = 0;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+               dev->data->tx_queues[i] = NULL;
+       }
+       dev->data->nb_tx_queues = 0;
 }
 
 static int __rte_cold
@@ -490,6 +667,41 @@ txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
        return 0;
 }
 
+void
+txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+       u32 *reg = &hw->q_rx_regs[rx_queue_id * 4];
+       *(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
+       *(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
+       *(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
+}
+
+void
+txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
+{
+       u32 *reg = &hw->q_rx_regs[rx_queue_id * 4];
+       wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
+       wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
+       wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
+}
+
+void
+txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+       u32 *reg = &hw->q_tx_regs[tx_queue_id * 4];
+       *(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
+       *(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
+       *(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
+}
+
+void
+txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
+{
+       u32 *reg = &hw->q_tx_regs[tx_queue_id * 4];
+       wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
+       wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
+       wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
+}
 
 /*
  * Start Receive Units for specified queue.
@@ -532,6 +744,44 @@ txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        return 0;
 }
 
+/*
+ * Stop Receive Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       struct txgbe_rx_queue *rxq;
+       uint32_t rxdctl;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+
+       rxq = dev->data->rx_queues[rx_queue_id];
+
+       txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
+       wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
+
+       /* Wait until RX Enable bit clear */
+       poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+       do {
+               rte_delay_ms(1);
+               rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
+       } while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
+       if (!poll_ms)
+               PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
+
+       rte_delay_us(RTE_TXGBE_WAIT_100_US);
+       txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
+
+       txgbe_rx_queue_release_mbufs(rxq);
+       txgbe_reset_rx_queue(adapter, rxq);
+       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
+
 /*
  * Start Transmit Units for specified queue.
  */
@@ -565,3 +815,56 @@ txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        return 0;
 }
 
+/*
+ * Stop Transmit Units for specified queue.
+ */
+int __rte_cold
+txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_tx_queue *txq;
+       uint32_t txdctl;
+       uint32_t txtdh, txtdt;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+
+       txq = dev->data->tx_queues[tx_queue_id];
+
+       /* Wait until TX queue is empty */
+       poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+       do {
+               rte_delay_us(RTE_TXGBE_WAIT_100_US);
+               txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
+               txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
+       } while (--poll_ms && (txtdh != txtdt));
+       if (!poll_ms)
+               PMD_INIT_LOG(ERR,
+                       "Tx Queue %d is not empty when stopping.",
+                       tx_queue_id);
+
+       txgbe_dev_save_tx_queue(hw, txq->reg_idx);
+       wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
+
+       /* Wait until TX Enable bit clear */
+       poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
+       do {
+               rte_delay_ms(1);
+               txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
+       } while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
+       if (!poll_ms)
+               PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+                       tx_queue_id);
+
+       rte_delay_us(RTE_TXGBE_WAIT_100_US);
+       txgbe_dev_store_tx_queue(hw, txq->reg_idx);
+
+       if (txq->ops != NULL) {
+               txq->ops->release_mbufs(txq);
+               txq->ops->reset(txq);
+       }
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
+
diff --git a/drivers/net/txgbe/txgbe_rxtx.h b/drivers/net/txgbe/txgbe_rxtx.h
index b8ca83672..72cbf1f87 100644
--- a/drivers/net/txgbe/txgbe_rxtx.h
+++ b/drivers/net/txgbe/txgbe_rxtx.h
@@ -54,6 +54,7 @@ struct txgbe_rx_desc {
 #define RTE_PMD_TXGBE_RX_MAX_BURST 32
 
 #define RTE_TXGBE_REGISTER_POLL_WAIT_10_MS  10
+#define RTE_TXGBE_WAIT_100_US               100
 
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
@@ -62,6 +63,10 @@ struct txgbe_rx_entry {
        struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
 };
 
+struct txgbe_scattered_rx_entry {
+       struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
+};
+
 /**
  * Structure associated with each RX queue.
  */
@@ -70,7 +75,16 @@ struct txgbe_rx_queue {
        volatile struct txgbe_rx_desc *rx_ring; /**< RX ring virtual address. */
        uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
        struct txgbe_rx_entry *sw_ring; /**< address of RX software ring. */
+       struct txgbe_scattered_rx_entry *sw_sc_ring; /**< address of scattered Rx software ring. */
+       struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+       struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
        uint16_t            nb_rx_desc; /**< number of RX descriptors. */
+       uint16_t            rx_tail;  /**< current value of RDT register. */
+       uint16_t            nb_rx_hold; /**< number of held free RX desc. */
+       uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
+       uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
+       uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
+       uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
        uint16_t            queue_id; /**< RX queue index. */
        uint16_t            reg_idx;  /**< RX queue register index. */
        uint16_t            port_id;  /**< Device port identifier. */
@@ -78,6 +92,10 @@ struct txgbe_rx_queue {
        uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
        uint8_t             rx_deferred_start; /**< not in global dev start. */
        uint64_t            offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
+       /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
+       struct rte_mbuf fake_mbuf;
+       /** hold packets to return to application */
+       struct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST * 2];
 };
 
 /**
@@ -94,9 +112,16 @@ struct txgbe_tx_queue {
        uint8_t             hthresh;       /**< Host threshold register. */
        uint8_t             wthresh;       /**< Write-back threshold reg. */
        uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
+       const struct txgbe_txq_ops *ops;       /**< txq ops */
        uint8_t             tx_deferred_start; /**< not in global dev start. */
 };
 
+struct txgbe_txq_ops {
+       void (*release_mbufs)(struct txgbe_tx_queue *txq);
+       void (*free_swring)(struct txgbe_tx_queue *txq);
+       void (*reset)(struct txgbe_tx_queue *txq);
+};
+
 /* Takes an ethdev and a queue and sets up the tx function to be used based on
  * the queue parameters. Used in tx_queue_setup by primary process and then
  * in dev_init by secondary process when attaching to an existing ethdev.
-- 
2.18.4
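
Once applied, the new stop/release ops are reached through the generic
ethdev API rather than called directly. A short usage sketch (the port and
queue ids are placeholders, not values from this patch):

#include <rte_ethdev.h>

/* Stop and restart one Rx queue; the ethdev layer dispatches to the
 * PMD's rx_queue_stop/rx_queue_start ops wired up by this patch. */
static int restart_rx_queue(uint16_t port_id, uint16_t queue_id)
{
        int ret;

        ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
        if (ret != 0)
                return ret;

        return rte_eth_dev_rx_queue_start(port_id, queue_id);
}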