In the current implementation, rte_eth_[rx|tx]_queue_setup always fails
if the device has already been started (rte_eth_dev_start).

This does not satisfy the use case where an application wants to defer
the setup of some queues while traffic keeps running on the queues that
are already set up.

Example:
rte_eth_dev_configure(nb_rxq = 2, nb_txq = 2)
rte_eth_rx_queue_setup(idx = 0 ...)
rte_eth_tx_queue_setup(idx = 0 ...)
rte_eth_dev_start(...) /* [rx|tx]_burst is ready to start on queue 0 */
rte_eth_rx_queue_setup(idx = 1 ...) /* fails */

This is not a general hardware limitation: for NICs such as i40e and
ixgbe it is not necessary to stop the whole device before configuring a
fresh queue or reconfiguring an existing queue while traffic is running
on it.

The patch adds new eth_dev_ops, [rx|tx]_queue_hot_[setup|release], and
changes the internal logic of rte_eth_[rx|tx]_queue_setup so that an
application is allowed to set up a queue after device start, provided
the driver implements these ops.
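
For illustration, with this patch the deferred flow above is expected to
look roughly like the sketch below from the application side; the
descriptor count of 512, the zeroed conf, and the port_id and mb_pool
arguments are placeholders, not values mandated by the patch:

#include <rte_ethdev.h>

/* Minimal sketch of deferred queue setup; port_id and mb_pool are
 * assumed to be provided by the application. */
static int
deferred_queue_setup(uint16_t port_id, struct rte_mempool *mb_pool)
{
        struct rte_eth_conf conf = { 0 };
        int socket_id = rte_eth_dev_socket_id(port_id);

        rte_eth_dev_configure(port_id, 2, 2, &conf);  /* 2 RX / 2 TX queues */
        rte_eth_rx_queue_setup(port_id, 0, 512, socket_id, NULL, mb_pool);
        rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, NULL);
        rte_eth_dev_start(port_id);  /* [rx|tx]_burst may start on queue 0 */

        /* Later, with traffic still running on queue 0, queue 1 is set up.
         * This succeeds only if the PMD implements the new hot setup ops;
         * otherwise -ENOTSUP is returned. */
        rte_eth_rx_queue_setup(port_id, 1, 512, socket_id, NULL, mb_pool);
        return rte_eth_tx_queue_setup(port_id, 1, 512, socket_id, NULL);
}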

The typical implementation of [rx|tx]_queue_hot_setup could (see the
sketch below):
1. do the same thing as [rx|tx]_queue_setup;
2. perform the per-queue start logic from [rx|tx]_queue_start.

The typical implementation of [rx|tx]_queue_hot_release could:
1. perform the per-queue stop logic from [rx|tx]_queue_stop;
2. do the same thing as [rx|tx]_queue_release.
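
For illustration only, such a hot setup callback could be a thin wrapper
around the driver's existing callbacks. The foo_* names below are
hypothetical placeholders, not functions from any existing PMD; the
signature follows eth_rx_queue_setup_t, the type used by the new
rx_queue_hot_setup op:

#include <rte_ethdev.h>

/* Existing callbacks of a hypothetical "foo" PMD (placeholder names). */
static int foo_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                              uint16_t nb_desc, unsigned int socket_id,
                              const struct rte_eth_rxconf *rx_conf,
                              struct rte_mempool *mp);
static int foo_rx_queue_start(struct rte_eth_dev *dev, uint16_t queue_idx);

/* Possible rx_queue_hot_setup: normal setup followed by per-queue start. */
static int
foo_rx_queue_hot_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                       uint16_t nb_desc, unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        int ret;

        /* 1. do the same thing as rx_queue_setup */
        ret = foo_rx_queue_setup(dev, queue_idx, nb_desc, socket_id,
                                 rx_conf, mp);
        if (ret != 0)
                return ret;

        /* 2. per-queue start logic, as in rx_queue_start */
        return foo_rx_queue_start(dev, queue_idx);
}

rx_queue_hot_release would mirror this in reverse (per-queue stop first,
then the usual release of the queue resources), and the TX queue ops
follow the same pattern.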

Signed-off-by: Qi Zhang <qi.z.zh...@intel.com>
---
 lib/librte_ether/rte_ethdev.c | 54 ++++++++++++++++++++++++-------------------
 lib/librte_ether/rte_ethdev.h |  8 +++++++
 2 files changed, 38 insertions(+), 24 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index a524af740..b4f534dbb 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1100,6 +1100,8 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf local_conf;
        void **rxq;
+       eth_rx_queue_setup_t rxq_setup;
+       eth_queue_release_t rxq_release;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1109,14 +1111,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
                return -EINVAL;
        }
 
-       if (dev->data->dev_started) {
-               RTE_PMD_DEBUG_TRACE(
-                   "port %d must be stopped to allow configuration\n", port_id);
-               return -EBUSY;
-       }
-
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
 
        /*
         * Check the size of the mbuf data buffer.
@@ -1158,11 +1153,19 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
                return -EINVAL;
        }
 
+       if (dev->data->dev_started) {
+               rxq_setup = dev->dev_ops->rx_queue_hot_setup;
+               rxq_release = dev->dev_ops->rx_queue_hot_release;
+       } else {
+               rxq_setup = dev->dev_ops->rx_queue_setup;
+               rxq_release = dev->dev_ops->rx_queue_release;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*rxq_setup, -ENOTSUP);
        rxq = dev->data->rx_queues;
        if (rxq[rx_queue_id]) {
-               RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
-                                       -ENOTSUP);
-               (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
+               RTE_FUNC_PTR_OR_ERR_RET(*rxq_release, -ENOTSUP);
+               (*rxq_release)(rxq[rx_queue_id]);
                rxq[rx_queue_id] = NULL;
        }
 
@@ -1179,8 +1182,8 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
                                                    &local_conf.offloads);
        }
 
-       ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
-                                             socket_id, &local_conf, mp);
+       ret = (*rxq_setup)(dev, rx_queue_id, nb_rx_desc,
+                               socket_id, &local_conf, mp);
        if (!ret) {
                if (!dev->data->min_rx_buf_size ||
                    dev->data->min_rx_buf_size > mbp_buf_size)
@@ -1248,6 +1251,8 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf local_conf;
        void **txq;
+       eth_tx_queue_setup_t txq_setup;
+       eth_queue_release_t txq_release;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1257,14 +1262,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
                return -EINVAL;
        }
 
-       if (dev->data->dev_started) {
-               RTE_PMD_DEBUG_TRACE(
-                   "port %d must be stopped to allow configuration\n", port_id);
-               return -EBUSY;
-       }
-
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
 
        rte_eth_dev_info_get(port_id, &dev_info);
 
@@ -1280,11 +1278,19 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
                return -EINVAL;
        }
 
+       if (dev->data->dev_started) {
+               txq_setup = dev->dev_ops->tx_queue_hot_setup;
+               txq_release = dev->dev_ops->tx_queue_hot_release;
+       } else {
+               txq_setup = dev->dev_ops->tx_queue_setup;
+               txq_release = dev->dev_ops->tx_queue_release;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*txq_setup, -ENOTSUP);
        txq = dev->data->tx_queues;
        if (txq[tx_queue_id]) {
-               RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
-                                       -ENOTSUP);
-               (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
+               RTE_FUNC_PTR_OR_ERR_RET(*txq_release, -ENOTSUP);
+               (*txq_release)(txq[tx_queue_id]);
                txq[tx_queue_id] = NULL;
        }
 
@@ -1306,8 +1312,8 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
                                          &local_conf.offloads);
        }
 
-       return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
-                                              socket_id, &local_conf);
+       return (*txq_setup)(dev, tx_queue_id, nb_tx_desc, socket_id,
+                               &local_conf);
 }
 
 void
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index b10e2a92d..e78883e19 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -1534,7 +1534,11 @@ struct eth_dev_ops {
        eth_queue_start_t          tx_queue_start;/**< Start TX for a queue. */
        eth_queue_stop_t           tx_queue_stop; /**< Stop TX for a queue. */
        eth_rx_queue_setup_t       rx_queue_setup;/**< Set up device RX queue. */
+       eth_rx_queue_setup_t       rx_queue_hot_setup;
+       /**< Set up RX queue when device already started. */
        eth_queue_release_t        rx_queue_release; /**< Release RX queue. */
+       eth_queue_release_t        rx_queue_hot_release;
+       /**< Release RX queue when device already started. */
        eth_rx_queue_count_t       rx_queue_count;
        /**< Get the number of used RX descriptors. */
        eth_rx_descriptor_done_t   rx_descriptor_done; /**< Check rxd DD bit. */
@@ -1545,7 +1549,11 @@ struct eth_dev_ops {
        eth_rx_enable_intr_t       rx_queue_intr_enable;  /**< Enable Rx queue interrupt. */
        eth_rx_disable_intr_t      rx_queue_intr_disable; /**< Disable Rx queue interrupt. */
        eth_tx_queue_setup_t       tx_queue_setup;/**< Set up device TX queue. */
+       eth_tx_queue_setup_t       tx_queue_hot_setup;
+       /**< Set up TX queue when device already started. */
        eth_queue_release_t        tx_queue_release; /**< Release TX queue. */
+       eth_queue_release_t        tx_queue_hot_release;
+       /**< Release TX queue when device already started. */
        eth_tx_done_cleanup_t      tx_done_cleanup;/**< Free tx ring mbufs */
 
        eth_dev_led_on_t           dev_led_on;    /**< Turn on LED. */
-- 
2.13.6
