Added multi-process support for the axgbe PMD. To achieve this, the TX and RX burst-function selection is separated out into dedicated helper functions inside the axgbe driver, which a secondary process calls when it attaches to an already-configured NIC.
Signed-off-by: Jesna K E <jesna....@amd.com> --- .mailmap | 1 + doc/guides/nics/features/axgbe.ini | 1 + doc/guides/rel_notes/release_23_03.rst | 2 + drivers/net/axgbe/axgbe_ethdev.c | 62 ++++++++++++++++++++------ drivers/net/axgbe/axgbe_rxtx.c | 11 ----- drivers/net/axgbe/axgbe_rxtx.h | 5 +++ 6 files changed, 57 insertions(+), 25 deletions(-) diff --git a/.mailmap b/.mailmap index 75884b6fe2..a153c9ab93 100644 --- a/.mailmap +++ b/.mailmap @@ -581,6 +581,7 @@ Jerome Jutteau <jerome.jutt...@outscale.com> Jerry Hao OS <jerry...@os.amperecomputing.com> Jerry Lilijun <jerry.lili...@huawei.com> Jerry Zhang <jerry.zh...@intel.com> +Jesna K E <jesna....@amd.com> Jesper Wramberg <jesper.wramb...@gmail.com> Jesse Brandeburg <jesse.brandeb...@intel.com> Jesse Bruni <jesse.br...@intel.com> diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini index 821bb682d4..5e2d6498e5 100644 --- a/doc/guides/nics/features/axgbe.ini +++ b/doc/guides/nics/features/axgbe.ini @@ -18,6 +18,7 @@ L3 checksum offload = Y L4 checksum offload = Y Basic stats = Y FW version = Y +Multiprocess aware = Y Linux = Y x86-32 = Y x86-64 = Y diff --git a/doc/guides/rel_notes/release_23_03.rst b/doc/guides/rel_notes/release_23_03.rst index b8c5b68d6c..92ec1e4b88 100644 --- a/doc/guides/rel_notes/release_23_03.rst +++ b/doc/guides/rel_notes/release_23_03.rst @@ -55,6 +55,8 @@ New Features Also, make sure to start the actual text at the margin. 
=======================================================

+* **Added multi-process support for axgbe PMD.**
+

Removed Items
-------------

diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index b071e4e460..48714eebe6 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -353,8 +353,6 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 {
 	struct axgbe_port *pdata = dev->data->dev_private;
 	int ret;
-	struct rte_eth_dev_data *dev_data = dev->data;
-	uint16_t max_pkt_len;
 
 	dev->dev_ops = &axgbe_eth_dev_ops;
 
@@ -388,17 +386,8 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 	rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
 	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
 
-	max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
-	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
-	    max_pkt_len > pdata->rx_buf_size)
-		dev_data->scattered_rx = 1;
-
-	/* Scatter Rx handling */
-	if (dev_data->scattered_rx)
-		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
-	else
-		dev->rx_pkt_burst = &axgbe_recv_pkts;
-
+	axgbe_set_rx_function(dev);
+	axgbe_set_tx_function(dev);
 	return 0;
 }
 
@@ -2145,6 +2134,45 @@ get_pci_rc_devid(void)
 	return (uint16_t)device_id;
 }
 
+/* Used in dev_start by primary process and then
+ * in dev_init by secondary process when attaching to an existing ethdev.
+ */
+void
+axgbe_set_tx_function(struct rte_eth_dev *dev)
+{
+	struct axgbe_port *pdata = dev->data->dev_private;
+
+	/* Multi-segment Tx must take priority; do not let the vector or
+	 * scalar paths below overwrite it.
+	 */
+	if (pdata->multi_segs_tx) {
+		dev->tx_pkt_burst = &axgbe_xmit_pkts_seg;
+		return;
+	}
+#ifdef RTE_ARCH_X86
+	struct axgbe_tx_queue *txq = dev->data->tx_queues[0];
+	if (!txq->vector_disable &&
+			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
+		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
+	else
+		dev->tx_pkt_burst = &axgbe_xmit_pkts;
+#else
+	dev->tx_pkt_burst = &axgbe_xmit_pkts;
+#endif
+}
+
+void
+axgbe_set_rx_function(struct rte_eth_dev *dev)
+{
+	struct rte_eth_dev_data *dev_data = dev->data;
+	uint16_t max_pkt_len;
+	struct axgbe_port *pdata;
+
+	pdata = dev->data->dev_private;
+	max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
+			max_pkt_len > pdata->rx_buf_size)
+		dev_data->scattered_rx = 1;
+	/* Scatter Rx handling */
+	if (dev_data->scattered_rx)
+		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
+	else
+		dev->rx_pkt_burst = &axgbe_recv_pkts;
+}
+
 /*
  * It returns 0 on success.
  */
@@ -2163,12 +2191,18 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
 	eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;
 
+	eth_dev->tx_pkt_burst = &axgbe_xmit_pkts;
+	eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
+
 	/*
 	 * For secondary processes, we don't initialise any further as primary
 	 * has already done this work.
*/ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + axgbe_set_tx_function(eth_dev); + axgbe_set_rx_function(eth_dev); return 0; + } eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS; diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c index 7cff79e030..9b283bd9d0 100644 --- a/drivers/net/axgbe/axgbe_rxtx.c +++ b/drivers/net/axgbe/axgbe_rxtx.c @@ -629,17 +629,6 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, RTE_ETH_TX_OFFLOAD_MULTI_SEGS)) pdata->multi_segs_tx = true; - if (pdata->multi_segs_tx) - dev->tx_pkt_burst = &axgbe_xmit_pkts_seg; - else if (txq->vector_disable || - rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) - dev->tx_pkt_burst = &axgbe_xmit_pkts; - else -#ifdef RTE_ARCH_X86 - dev->tx_pkt_burst = &axgbe_xmit_pkts_vec; -#else - dev->tx_pkt_burst = &axgbe_xmit_pkts; -#endif return 0; } diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h index eeef908ceb..1dceb76606 100644 --- a/drivers/net/axgbe/axgbe_rxtx.h +++ b/drivers/net/axgbe/axgbe_rxtx.h @@ -158,6 +158,11 @@ struct axgbe_tx_queue { * RX/TX function prototypes */ +/* Used in dev_start by primary process and then + * in dev_init by secondary process when attaching to an existing ethdev. + */ +void axgbe_set_tx_function(struct rte_eth_dev *dev); +void axgbe_set_rx_function(struct rte_eth_dev *dev); void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx); int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, -- 2.25.1