Rename the field start_rx_per_q to rx_enable_queue in struct rte_eth_rxconf, and do the same for TX (start_tx_per_q to tx_enable_queue in struct rte_eth_txconf).
This patch also updates the descriptions of the rx_enable_queue and
tx_enable_queue fields.

Signed-off-by: Ouyang Changchun <changchun.ouyang at intel.com>
---
 lib/librte_ether/rte_ethdev.h | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 50df654..a452810 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -604,7 +604,16 @@ struct rte_eth_rxconf {
 	struct rte_eth_thresh rx_thresh; /**< RX ring threshold registers. */
 	uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
 	uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
-	uint8_t start_rx_per_q; /**< start rx per queue. */
+	/** If rx_enable_queue is true, rte_eth_dev_rx_queue_start must be
+	invoked after rte_eth_dev_start to start RX for the queue, and
+	rte_eth_dev_rx_queue_start, rather than rte_eth_dev_start, is
+	responsible for allocating mbufs from the mempool and setting up
+	the DMA physical addresses. This is useful when the buffer
+	addresses are not yet known when rte_eth_dev_start is invoked but
+	become available later, e.g. in the vhost zero-copy case, where
+	the buffer addresses used for DMA setup are available only after
+	the VM has started. */
+	uint8_t rx_enable_queue;
 };
 
 #define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
@@ -625,7 +634,10 @@ struct rte_eth_txconf {
 	uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
 	uint16_t tx_free_thresh; /**< Drives the freeing of TX buffers. */
 	uint32_t txq_flags; /**< Set flags for the Tx queue */
-	uint8_t start_tx_per_q; /**< start tx per queue. */
+	/** If tx_enable_queue is true, rte_eth_dev_tx_queue_start must be
+	invoked after rte_eth_dev_start to start TX for the queue.
+	Refer to rx_enable_queue for the use case. */
+	uint8_t tx_enable_queue;
 };
 
 /**
-- 
1.8.4.2
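
For context (not part of the patch), here is a minimal sketch of the
intended call sequence with rx_enable_queue set. The port id, queue id,
descriptor count and mempool are placeholder assumptions, and error
handling is reduced to early returns:

#include <rte_ethdev.h>

static int
setup_deferred_rx(uint8_t port_id, uint16_t queue_id, struct rte_mempool *mp)
{
	/* Request deferred start: rte_eth_dev_start() will skip this queue
	 * and leave its ring unpopulated. Remaining fields are zeroed. */
	struct rte_eth_rxconf rxconf = { .rx_enable_queue = 1 };

	if (rte_eth_rx_queue_setup(port_id, queue_id, 128,
				   rte_eth_dev_socket_id(port_id),
				   &rxconf, mp) != 0)
		return -1;

	if (rte_eth_dev_start(port_id) != 0)
		return -1;

	/*
	 * Later, once buffer addresses are known (e.g. after the VM has
	 * started in the vhost zero-copy case), allocate mbufs and program
	 * the DMA addresses by starting the queue explicitly.
	 */
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}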