1. Add API to start and stop an individual RX/TX queue, plus per-queue
deferred-start flags in rte_eth_rxconf/rte_eth_txconf, and implement
them in the IXGBE PMD;
2. Enable hardware loopback in VMDQ mode;
3. Move the off_t declaration in eal_memory.c to the start of
rte_mem_virt2phy() so that declarations precede statements;

Signed-off-by: Ouyang Changchun <changchun.ouyang at intel.com>
---
 lib/librte_eal/linuxapp/eal/eal_memory.c |   2 +-
 lib/librte_ether/rte_ethdev.c            | 104 ++++++++++++++
 lib/librte_ether/rte_ethdev.h            |  80 +++++++++++
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c      |   4 +
 lib/librte_pmd_ixgbe/ixgbe_ethdev.h      |   8 ++
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c        | 233 ++++++++++++++++++++++++++-----
 lib/librte_pmd_ixgbe/ixgbe_rxtx.h        |   6 +
 7 files changed, 400 insertions(+), 37 deletions(-)

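As a reviewer aid, below is a minimal usage sketch of the new per-queue
start/stop API together with the deferred-start flag added to
rte_eth_rxconf. The port/queue indices, descriptor count, threshold value
and the mp mempool are illustrative assumptions, not part of this patch:

    #include <rte_mempool.h>
    #include <rte_ethdev.h>

    /* Set up RX queue 0 of a port with deferred start, then drive the
     * queue explicitly. Assumes the port is already configured and mp
     * is a valid mempool. */
    static int
    deferred_rxq_example(uint8_t port_id, struct rte_mempool *mp)
    {
            struct rte_eth_rxconf rx_conf = {
                    .rx_free_thresh = 32,
                    .start_rx_per_q = 1, /* skip this queue in rte_eth_dev_start() */
            };
            int ret;

            ret = rte_eth_rx_queue_setup(port_id, 0, 128,
                            rte_eth_dev_socket_id(port_id), &rx_conf, mp);
            if (ret != 0)
                    return ret;

            ret = rte_eth_dev_start(port_id); /* RX queue 0 stays stopped */
            if (ret != 0)
                    return ret;

            /* New API: start the queue on demand, stop it again later */
            ret = rte_eth_dev_rx_queue_start(port_id, 0);
            if (ret != 0)
                    return ret;
            return rte_eth_dev_rx_queue_stop(port_id, 0);
    }

Hardware loopback in VMDQ mode is requested analogously, by setting
dev_conf.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1 before calling
rte_eth_dev_configure().
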
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 69ad63e..dd10e15 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -134,6 +134,7 @@ rte_mem_virt2phy(const void *virtaddr)
        uint64_t page, physaddr;
        unsigned long virt_pfn;
        int page_size;
+       off_t offset;

        /* standard page size */
        page_size = getpagesize();
@@ -145,7 +146,6 @@ rte_mem_virt2phy(const void *virtaddr)
                return RTE_BAD_PHYS_ADDR;
        }

-       off_t offset;
        virt_pfn = (unsigned long)virtaddr / page_size;
        offset = sizeof(uint64_t) * virt_pfn;
        if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index ec411db..7faeeff 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -293,6 +293,106 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
        return (0);
 }

+int
+rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
+{
+       struct rte_eth_dev *dev;
+
+       /* This function is only safe when called from the primary process
+        * in a multi-process setup. */
+       PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-EINVAL);
+       }
+
+       dev = &rte_eth_devices[port_id];
+       if (rx_queue_id >= dev->data->nb_rx_queues) {
+               PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+               return (-EINVAL);
+       }
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
+
+       return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
+}
+
+int
+rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
+{
+       struct rte_eth_dev *dev;
+
+       /* This function is only safe when called from the primary process
+        * in a multi-process setup. */
+       PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-EINVAL);
+       }
+
+       dev = &rte_eth_devices[port_id];
+       if (rx_queue_id >= dev->data->nb_rx_queues) {
+               PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+               return (-EINVAL);
+       }
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
+
+       return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
+}
+
+int
+rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
+{
+       struct rte_eth_dev *dev;
+
+       /* This function is only safe when called from the primary process
+        * in a multi-process setup. */
+       PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-EINVAL);
+       }
+
+       dev = &rte_eth_devices[port_id];
+       if (tx_queue_id >= dev->data->nb_tx_queues) {
+               PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+               return (-EINVAL);
+       }
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
+
+       return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
+}
+
+int
+rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
+{
+       struct rte_eth_dev *dev;
+
+       /* This function is only safe when called from the primary process
+        * in a multi-process setup. */
+       PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-EINVAL);
+       }
+
+       dev = &rte_eth_devices[port_id];
+       if (tx_queue_id >= dev->data->nb_tx_queues) {
+               PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+               return (-EINVAL);
+       }
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
+
+       return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
+}
+
 static int
 rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
 {
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index cd4bec6..f2a8dc5 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -480,6 +480,7 @@ struct rte_eth_vmdq_rx_conf {
        enum rte_eth_nb_pools nb_queue_pools; /**< VMDq only mode, 8 or 64 pools */
        uint8_t enable_default_pool; /**< If non-zero, use a default pool */
        uint8_t default_pool; /**< The default pool, if applicable */
+       uint8_t enable_loop_back; /**< Enable VT loop back */
        uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */
        struct {
                uint16_t vlan_id; /**< The vlan id of the received frame */
@@ -501,6 +502,7 @@ struct rte_eth_rxconf {
        struct rte_eth_thresh rx_thresh; /**< RX ring threshold registers. */
        uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
        uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
+       uint8_t start_rx_per_q; /**< If not 0, defer queue start to rte_eth_dev_rx_queue_start(). */
 };

 #define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
@@ -521,6 +523,7 @@ struct rte_eth_txconf {
        uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
        uint16_t tx_free_thresh; /**< Drives the freeing of TX buffers. */
        uint32_t txq_flags; /**< Set flags for the Tx queue */
+       uint8_t start_tx_per_q; /**< If not 0, defer queue start to rte_eth_dev_tx_queue_start(). */
 };

 /**
@@ -934,6 +937,14 @@ typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
                                    struct rte_eth_dev_info *dev_info);
 /**< @internal Get specific informations of an Ethernet device. */

+typedef int (*eth_queue_start_t)(struct rte_eth_dev *dev,
+                                   uint16_t queue_id);
+/**< @internal Start an RX or TX queue of an Ethernet device. */
+
+typedef int (*eth_queue_stop_t)(struct rte_eth_dev *dev,
+                                   uint16_t queue_id);
+/**< @internal Stop an RX or TX queue of an Ethernet device. */
+
 typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
                                    uint16_t rx_queue_id,
                                    uint16_t nb_rx_desc,
@@ -1237,6 +1248,10 @@ struct eth_dev_ops {
        vlan_tpid_set_t            vlan_tpid_set;      /**< Outer VLAN TPID Setup. */
        vlan_strip_queue_set_t     vlan_strip_queue_set; /**< VLAN Stripping on queue. */
        vlan_offload_set_t         vlan_offload_set; /**< Set VLAN Offload. */
+       eth_queue_start_t          rx_queue_start;/**< Start RX for a queue.*/
+       eth_queue_stop_t           rx_queue_stop;/**< Stop RX for a queue.*/
+       eth_queue_start_t          tx_queue_start;/**< Start TX for a queue.*/
+       eth_queue_stop_t           tx_queue_stop;/**< Stop TX for a queue.*/
        eth_rx_queue_setup_t       rx_queue_setup;/**< Set up device RX queue.*/
        eth_queue_release_t        rx_queue_release;/**< Release RX queue.*/
        eth_rx_queue_count_t       rx_queue_count; /**< Get Rx queue count. */
@@ -1733,6 +1748,70 @@ extern int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
  */
 extern int rte_eth_dev_socket_id(uint8_t port_id);

+/**
+ * Start the specified RX queue of a port.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param rx_queue_id
+ *   The index of the RX queue to start.
+ *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @return
+ *   - 0: Success, the receive queue is started.
+ *   - -EINVAL: The port_id or the queue_id is out of range.
+ *   - -ENOTSUP: The function is not supported in the PMD driver.
+ */
+extern int rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id);
+
+/**
+ * Stop the specified RX queue of a port.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param rx_queue_id
+ *   The index of the RX queue to stop.
+ *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @return
+ *   - 0: Success, the receive queue is stopped.
+ *   - -EINVAL: The port_id or the queue_id is out of range.
+ *   - -ENOTSUP: The function is not supported in the PMD driver.
+ */
+extern int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
+
+/**
+ * Start the specified TX queue of a port.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param tx_queue_id
+ *   The index of the TX queue to start.
+ *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @return
+ *   - 0: Success, the transmit queue is started.
+ *   - -EINVAL: The port_id or the queue_id is out of range.
+ *   - -ENOTSUP: The function is not supported in the PMD driver.
+ */
+extern int rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id);
+
+/**
+ * Stop the specified TX queue of a port.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param tx_queue_id
+ *   The index of the TX queue to stop.
+ *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @return
+ *   - 0: Success, the transmit queue is stopped.
+ *   - -EINVAL: The port_id or the queue_id is out of range.
+ *   - -ENOTSUP: The function is not supported in the PMD driver.
+ */
+extern int rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id);
+

 /**
  * Start an Ethernet device.
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 49ff0d1..62a6d77 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -275,6 +275,10 @@ static struct eth_dev_ops ixgbe_eth_dev_ops = {
        .vlan_tpid_set        = ixgbe_vlan_tpid_set,
        .vlan_offload_set     = ixgbe_vlan_offload_set,
        .vlan_strip_queue_set = ixgbe_vlan_strip_queue_set,
+       .rx_queue_start       = ixgbe_dev_rx_queue_start,
+       .rx_queue_stop        = ixgbe_dev_rx_queue_stop,
+       .tx_queue_start       = ixgbe_dev_tx_queue_start,
+       .tx_queue_stop        = ixgbe_dev_tx_queue_stop,
        .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
        .rx_queue_release     = ixgbe_dev_rx_queue_release,
        .rx_queue_count       = ixgbe_dev_rx_queue_count,
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
index 7c6139b..ae52c8e 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
@@ -245,6 +245,14 @@ void ixgbe_dev_tx_init(struct rte_eth_dev *dev);

 void ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);

+int ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+
+int ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+int ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
 int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);

 void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 55414b9..2a98051 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1588,7 +1588,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
  * descriptors should meet the following condition:
  *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
  */
-#define IXGBE_MIN_RING_DESC 64
+#define IXGBE_MIN_RING_DESC 32
 #define IXGBE_MAX_RING_DESC 4096

 /*
@@ -1836,6 +1836,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->port_id = dev->data->port_id;
        txq->txq_flags = tx_conf->txq_flags;
        txq->ops = &def_txq_ops;
+       txq->start_tx_per_q = tx_conf->start_tx_per_q;

        /*
         * Modification to set VFTDT for virtual function if vf is detected
@@ -2078,6 +2079,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
                                                        0 : ETHER_CRC_LEN);
        rxq->drop_en = rx_conf->rx_drop_en;
+       rxq->start_rx_per_q = rx_conf->start_rx_per_q;

        /*
         * Allocate RX ring hardware descriptors. A memzone large enough to
@@ -3025,6 +3027,15 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)

        }

+       /* PFDTXGSWC (PF DMA Tx General Switch Control): enable VMDQ loopback */
+       if (cfg->enable_loop_back) {
+               IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
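+               /* Enable Tx switch loopback for every pool (one bit per pool) */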
+               for (i = 0; i < RTE_IXGBE_VMTXSW_REGISTER_COUNT; i++) {
+                       IXGBE_WRITE_REG(hw, IXGBE_VMTXSW(i), UINT32_MAX);
+               }
+       }
+
        IXGBE_WRITE_FLUSH(hw);
 }

@@ -3234,7 +3244,6 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        uint32_t rxcsum;
        uint16_t buf_size;
        uint16_t i;
-       int ret;

        PMD_INIT_FUNC_TRACE();
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3289,11 +3298,6 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];

-               /* Allocate buffers for descriptor rings */
-               ret = ixgbe_alloc_rx_queue_mbufs(rxq);
-               if (ret)
-                       return ret;
-
                /*
                 * Reset crc_len in case it was changed after queue setup by a
                 * call to configure.
@@ -3500,10 +3504,8 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
        struct igb_rx_queue *rxq;
        uint32_t txdctl;
        uint32_t dmatxctl;
-       uint32_t rxdctl;
        uint32_t rxctrl;
        uint16_t i;
-       int poll_ms;

        PMD_INIT_FUNC_TRACE();
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3526,55 +3528,216 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
-               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
-               txdctl |= IXGBE_TXDCTL_ENABLE;
-               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
-
-               /* Wait until TX Enable ready */
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       poll_ms = 10;
-                       do {
-                               rte_delay_ms(1);
-                               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
-                       } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
-                       if (!poll_ms)
-                               PMD_INIT_LOG(ERR, "Could not enable "
-                                            "Tx Queue %d\n", i);
+               if (!txq->start_tx_per_q) {
+                       ixgbe_dev_tx_queue_start(dev, i);
                }
        }
+
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
+               if (!rxq->start_rx_per_q) {
+                       ixgbe_dev_rx_queue_start(dev, i);
+               }
+       }
+
+       /* Enable Receive engine */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               rxctrl |= IXGBE_RXCTRL_DMBYPS;
+       rxctrl |= IXGBE_RXCTRL_RXEN;
+       hw->mac.ops.enable_rx_dma(hw, rxctrl);
+
+       /* If loopback mode is enabled for 82599, set up the link accordingly */
+       if (hw->mac.type == ixgbe_mac_82599EB &&
+                       dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+               ixgbe_setup_loopback_link_82599(hw);
+
+}
+
+/*
+ * Start Receive Units for specified queue.
+ */
+int
+ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_rx_queue *rxq;
+       uint32_t rxdctl;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (rx_queue_id < dev->data->nb_rx_queues) {
+               rxq = dev->data->rx_queues[rx_queue_id];
+
+               /* Allocate buffers for descriptor rings */
+               if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+                       PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d\n", rx_queue_id);
+                       return -1;
+               }
                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
                rxdctl |= IXGBE_RXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);

                /* Wait until RX Enable ready */
-               poll_ms = 10;
+               poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
                do {
                        rte_delay_ms(1);
                        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
                } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
                if (!poll_ms)
                        PMD_INIT_LOG(ERR, "Could not enable "
-                                    "Rx Queue %d\n", i);
+                                    "Rx Queue %d\n", rx_queue_id);
                rte_wmb();
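+               /* Rewind head; tail exposes the full ring to hardware */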
+               IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
-       }
+       } else
+               return -1;

-       /* Enable Receive engine */
-       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
-       if (hw->mac.type == ixgbe_mac_82598EB)
-               rxctrl |= IXGBE_RXCTRL_DMBYPS;
-       rxctrl |= IXGBE_RXCTRL_RXEN;
-       hw->mac.ops.enable_rx_dma(hw, rxctrl);
+       return 0;
+}

-       /* If loopback mode is enabled for 82599, set up the link accordingly */
-       if (hw->mac.type == ixgbe_mac_82599EB &&
-                       dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
-               ixgbe_setup_loopback_link_82599(hw);
+/*
+ * Stop Receive Units for specified queue.
+ */
+int
+ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_rx_queue *rxq;
+       uint32_t rxdctl;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (rx_queue_id < dev->data->nb_rx_queues) {
+               rxq = dev->data->rx_queues[rx_queue_id];

+               rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+               rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+               /* Wait until RX Enable ready */
+               poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+               do {
+                       rte_delay_ms(1);
+                       rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+               } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+               if (!poll_ms)
+                       PMD_INIT_LOG(ERR, "Could not disable "
+                                    "Rx Queue %d\n", rx_queue_id);
+
+               rte_delay_us(RTE_IXGBE_WAIT_100_US);
+
+               ixgbe_rx_queue_release_mbufs(rxq);
+               ixgbe_reset_rx_queue(rxq);
+       } else
+               return -1;
+
+       return 0;
+}
+
+
+/*
+ * Start Transmit Units for specified queue.
+ */
+int
+ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_tx_queue *txq;
+       uint32_t txdctl;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (tx_queue_id < dev->data->nb_tx_queues) {
+               txq = dev->data->tx_queues[tx_queue_id];
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+               txdctl |= IXGBE_TXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+               /* Wait until TX Enable ready */
+               if (hw->mac.type == ixgbe_mac_82599EB) {
+                       poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+                       do {
+                               rte_delay_ms(1);
+                               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+                       } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+                       if (!poll_ms)
+                               PMD_INIT_LOG(ERR, "Could not enable "
+                                            "Tx Queue %d\n", tx_queue_id);
+               }
+               rte_wmb();
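+               /* Reset head and tail so transmission restarts from descriptor 0 */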
+               IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+       } else
+               return -1;
+
+       return 0;
 }

+/*
+ * Stop Transmit Units for specified queue.
+ */
+int
+ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_tx_queue *txq;
+       uint32_t txdctl;
+       uint32_t txtdh, txtdt;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (tx_queue_id < dev->data->nb_tx_queues) {
+               txq = dev->data->tx_queues[tx_queue_id];
+
+               /* Wait until TX queue is empty */
+               if (hw->mac.type == ixgbe_mac_82599EB) {
+                       poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+                       do {
+                               rte_delay_us(RTE_IXGBE_WAIT_100_US);
+                               txtdh = IXGBE_READ_REG(hw, IXGBE_TDH(txq->reg_idx));
+                               txtdt = IXGBE_READ_REG(hw, IXGBE_TDT(txq->reg_idx));
+                       } while (--poll_ms && (txtdh != txtdt));
+                       if (!poll_ms)
+                               PMD_INIT_LOG(ERR, "Tx Queue %d is not empty when stopping.\n",
+                                       tx_queue_id);
+               }
+
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+               txdctl &= ~IXGBE_TXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+
+               /* Wait until TX Enable ready */
+               if (hw->mac.type == ixgbe_mac_82599EB) {
+                       poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+                       do {
+                               rte_delay_ms(1);
+                               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+                       } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
+                       if (!poll_ms)
+                               PMD_INIT_LOG(ERR, "Could not disable "
+                                            "Tx Queue %d\n", tx_queue_id);
+               }
+
+               if (txq->ops != NULL) {
+                       txq->ops->release_mbufs(txq);
+                       txq->ops->reset(txq);
+               }
+       } else
+               return -1;
+
+       return 0;
+}

 /*
  * [VF] Initializes Receive Unit.
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index 446eeb7..f9d0177 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -67,6 +67,10 @@
 #define rte_packet_prefetch(p)  do {} while(0)
 #endif

+#define RTE_IXGBE_REGISTER_POLL_WAIT_10_MS  10
+#define RTE_IXGBE_WAIT_100_US               100
+#define RTE_IXGBE_VMTXSW_REGISTER_COUNT     2
+
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -129,6 +133,7 @@ struct igb_rx_queue {
        uint8_t             port_id;  /**< Device port identifier. */
        uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
        uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
+       uint8_t             start_rx_per_q; /**< If not 0, queue start is deferred. */
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
        /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
        struct rte_mbuf fake_mbuf;
@@ -193,6 +198,7 @@ struct igb_tx_queue {
        /** Hardware context0 history. */
        struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
        struct ixgbe_txq_ops *ops;          /**< txq ops */
+       uint8_t             start_tx_per_q; /**< If not 0, queue start is deferred. */
 };

 struct ixgbe_txq_ops {
-- 
1.9.0
