From: Wenzhuo Lu <wenzhuo...@intel.com>

Define a lock mode for RX/TX queues. When resetting the device, the
resetting thread takes the lock of each RX/TX queue to make sure RX/TX
is stopped.
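For example, once lock_mode is set, the resetting thread can take every
queue's lock before touching the hardware; while it holds a lock, the
_lock RX/TX burst wrappers introduced below fail their trylock and
return 0 packets. A minimal sketch of that reset path (the per-queue
rx_lock/tx_lock fields live in the driver's queue structs, and
vf_get_rxq(), vf_get_txq() and do_hw_reset() are hypothetical helpers,
not part of this patch):

	/* Sketch only: assumes the driver's RX/TX queue structs carry a
	 * rte_spinlock_t (rx_lock/tx_lock), as the GENERATE_*_LOCK
	 * macros in this patch require. */
	static void
	vf_reset_locked(struct rte_eth_dev *dev)
	{
		uint16_t i;

		/* Fence out the data path: while we hold a queue's lock,
		 * its _lock burst wrapper fails rte_spinlock_trylock()
		 * and returns 0 packets. */
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			rte_spinlock_lock(&vf_get_rxq(dev, i)->rx_lock);
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			rte_spinlock_lock(&vf_get_txq(dev, i)->tx_lock);

		do_hw_reset(dev); /* device-specific reset, hypothetical */

		for (i = 0; i < dev->data->nb_tx_queues; i++)
			rte_spinlock_unlock(&vf_get_txq(dev, i)->tx_lock);
		for (i = 0; i < dev->data->nb_rx_queues; i++)
			rte_spinlock_unlock(&vf_get_rxq(dev, i)->rx_lock);
	}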
Use the RTE_NEXT_ABI macro for this ABI change, as its impact is too
wide: 7 APIs and 1 global variable are affected.

Signed-off-by: Wenzhuo Lu <wenzhuo.lu at intel.com>
Signed-off-by: Zhe Tao <zhe.tao at intel.com>
---
 lib/librte_ether/rte_ethdev.h | 62 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 74e895f..4efb5e9 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -354,7 +354,12 @@ struct rte_eth_rxmode {
 		jumbo_frame      : 1, /**< Jumbo Frame Receipt enable. */
 		hw_strip_crc     : 1, /**< Enable CRC stripping by hardware. */
 		enable_scatter   : 1, /**< Enable scatter packets rx handler */
+#ifndef RTE_NEXT_ABI
 		enable_lro       : 1; /**< Enable LRO */
+#else
+		enable_lro       : 1, /**< Enable LRO */
+		lock_mode        : 1; /**< Using lock path */
+#endif
 };
 
 /**
@@ -634,11 +639,68 @@ struct rte_eth_txmode {
 		/**< If set, reject sending out tagged pkts */
 		hw_vlan_reject_untagged : 1,
 		/**< If set, reject sending out untagged pkts */
+#ifndef RTE_NEXT_ABI
 		hw_vlan_insert_pvid : 1;
 		/**< If set, enable port based VLAN insertion */
+#else
+		hw_vlan_insert_pvid : 1,
+		/**< If set, enable port based VLAN insertion */
+		lock_mode : 1;
+		/**< If set, using lock path */
+#endif
 };
 
 /**
+ * The macros for the RX/TX lock mode functions
+ */
+#ifdef RTE_NEXT_ABI
+#define RX_LOCK_FUNCTION(dev, func) \
+	(dev->data->dev_conf.rxmode.lock_mode ? \
+	func ## _lock : func)
+
+#define TX_LOCK_FUNCTION(dev, func) \
+	(dev->data->dev_conf.txmode.lock_mode ? \
+	func ## _lock : func)
+#else
+#define RX_LOCK_FUNCTION(dev, func) func
+
+#define TX_LOCK_FUNCTION(dev, func) func
+#endif
+
+/* Add the lock RX/TX function for VF reset */
+#define GENERATE_RX_LOCK(func, nic) \
+uint16_t func ## _lock(void *rx_queue, \
+		       struct rte_mbuf **rx_pkts, \
+		       uint16_t nb_pkts) \
+{ \
+	struct nic ## _rx_queue *rxq = rx_queue; \
+	uint16_t nb_rx = 0; \
+ \
+	if (rte_spinlock_trylock(&rxq->rx_lock)) { \
+		nb_rx = func(rx_queue, rx_pkts, nb_pkts); \
+		rte_spinlock_unlock(&rxq->rx_lock); \
+	} \
+ \
+	return nb_rx; \
+}
+
+#define GENERATE_TX_LOCK(func, nic) \
+uint16_t func ## _lock(void *tx_queue, \
+		       struct rte_mbuf **tx_pkts, \
+		       uint16_t nb_pkts) \
+{ \
+	struct nic ## _tx_queue *txq = tx_queue; \
+	uint16_t nb_tx = 0; \
+ \
+	if (rte_spinlock_trylock(&txq->tx_lock)) { \
+		nb_tx = func(tx_queue, tx_pkts, nb_pkts); \
+		rte_spinlock_unlock(&txq->tx_lock); \
+	} \
+ \
+	return nb_tx; \
+}
+
+/**
  * A structure used to configure an RX ring of an Ethernet port.
  */
 struct rte_eth_rxconf {
--
2.1.4
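For illustration, a PMD would consume these macros roughly as follows
(a sketch, not part of this patch: ixgbe is only an example, and the
rx_lock field would be added to the driver's queue struct by a separate
driver patch in this series):

	/* In the driver, e.g. drivers/net/ixgbe/ixgbe_rxtx.c, assuming
	 * struct ixgbe_rx_queue has gained an rte_spinlock_t rx_lock. */

	/* Expands to uint16_t ixgbe_recv_pkts_lock(void *, struct
	 * rte_mbuf **, uint16_t), which polls the ring only when the
	 * per-queue trylock succeeds and otherwise returns 0. */
	GENERATE_RX_LOCK(ixgbe_recv_pkts, ixgbe)

	/* When choosing the burst function, pick the _lock variant
	 * only if the application configured lock_mode. */
	dev->rx_pkt_burst = RX_LOCK_FUNCTION(dev, ixgbe_recv_pkts);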