From: Igor Romanov <igor.roma...@oktetlabs.ru>

Make software index of an Rx queue and ethdev index separate. When an
ethdev RxQ is accessed in ethdev callbacks, an explicit ethdev queue
index is used.
This is a preparation for introducing non-ethdev Rx queues.

Signed-off-by: Igor Romanov <igor.roma...@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <andrew.rybche...@oktetlabs.ru>
Reviewed-by: Andy Moreton <amore...@xilinx.com>
Reviewed-by: Ivan Malov <ivan.ma...@oktetlabs.ru>
---
 drivers/net/sfc/sfc.h        |   2 +
 drivers/net/sfc/sfc_dp.h     |   4 +
 drivers/net/sfc/sfc_ethdev.c |  69 ++++++++------
 drivers/net/sfc/sfc_ev.c     |   2 +-
 drivers/net/sfc/sfc_ev.h     |  22 ++++-
 drivers/net/sfc/sfc_flow.c   |  22 +++--
 drivers/net/sfc/sfc_rx.c     | 179 +++++++++++++++++++++----------
 drivers/net/sfc/sfc_rx.h     |  10 +-
 8 files changed, 215 insertions(+), 95 deletions(-)

diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index b48a818adb..ebe705020d 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -29,6 +29,7 @@
 #include "sfc_filter.h"
 #include "sfc_sriov.h"
 #include "sfc_mae.h"
+#include "sfc_dp.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -168,6 +169,7 @@ struct sfc_rss {
 struct sfc_adapter_shared {
         unsigned int            rxq_count;
         struct sfc_rxq_info     *rxq_info;
+        unsigned int            ethdev_rxq_count;
 
         unsigned int            txq_count;
         struct sfc_txq_info     *txq_info;
diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h
index 4bed137806..76065483d4 100644
--- a/drivers/net/sfc/sfc_dp.h
+++ b/drivers/net/sfc/sfc_dp.h
@@ -96,6 +96,10 @@ struct sfc_dp {
 /** List of datapath variants */
 TAILQ_HEAD(sfc_dp_list, sfc_dp);
 
+typedef unsigned int sfc_sw_index_t;
+typedef int32_t sfc_ethdev_qid_t;
+#define SFC_ETHDEV_QID_INVALID ((sfc_ethdev_qid_t)(-1))
+
 /* Check if available HW/FW capabilities are sufficient for the datapath */
 static inline bool
 sfc_dp_match_hw_fw_caps(const struct sfc_dp *dp, unsigned int avail_caps)
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index c50ecea0b9..2651c41288 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -463,26 +463,31 @@ sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
 }
 
 static int
-sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
                    uint16_t nb_rx_desc, unsigned int socket_id,
                    const struct rte_eth_rxconf *rx_conf,
                    struct rte_mempool *mb_pool)
 {
         struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+        sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+        struct sfc_rxq_info *rxq_info;
+        sfc_sw_index_t sw_index;
         int rc;
 
         sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
-                     rx_queue_id, nb_rx_desc, socket_id);
+                     ethdev_qid, nb_rx_desc, socket_id);
 
         sfc_adapter_lock(sa);
 
-        rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
+        sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+        rc = sfc_rx_qinit(sa, sw_index, nb_rx_desc, socket_id,
                           rx_conf, mb_pool);
         if (rc != 0)
                 goto fail_rx_qinit;
 
-        dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;
+        rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+        dev->data->rx_queues[ethdev_qid] = rxq_info->dp;
 
         sfc_adapter_unlock(sa);
 
@@ -500,7 +505,7 @@ sfc_rx_queue_release(void *queue)
         struct sfc_dp_rxq *dp_rxq = queue;
         struct sfc_rxq *rxq;
         struct sfc_adapter *sa;
-        unsigned int sw_index;
+        sfc_sw_index_t sw_index;
 
         if (dp_rxq == NULL)
                 return;
@@ -1182,15 +1187,14 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev,
  * use any process-local pointers from the adapter data.
  */
 static void
-sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
                       struct rte_eth_rxq_info *qinfo)
 {
         struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+        sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
         struct sfc_rxq_info *rxq_info;
 
-        SFC_ASSERT(rx_queue_id < sas->rxq_count);
-
-        rxq_info = &sas->rxq_info[rx_queue_id];
+        rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
 
         qinfo->mp = rxq_info->refill_mb_pool;
         qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
@@ -1232,14 +1236,14 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
  * use any process-local pointers from the adapter data.
  */
 static uint32_t
-sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
         const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
         struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+        sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
         struct sfc_rxq_info *rxq_info;
 
-        SFC_ASSERT(rx_queue_id < sas->rxq_count);
-        rxq_info = &sas->rxq_info[rx_queue_id];
+        rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
 
         if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
                 return 0;
@@ -1293,13 +1297,16 @@ sfc_tx_descriptor_status(void *queue, uint16_t offset)
 }
 
 static int
-sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
         struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+        sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+        struct sfc_rxq_info *rxq_info;
+        sfc_sw_index_t sw_index;
         int rc;
 
-        sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+        sfc_log_init(sa, "RxQ=%u", ethdev_qid);
 
         sfc_adapter_lock(sa);
 
@@ -1307,14 +1314,16 @@ sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
         if (sa->state != SFC_ADAPTER_STARTED)
                 goto fail_not_started;
 
-        if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
+        rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+        if (rxq_info->state != SFC_RXQ_INITIALIZED)
                 goto fail_not_setup;
 
-        rc = sfc_rx_qstart(sa, rx_queue_id);
+        sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+        rc = sfc_rx_qstart(sa, sw_index);
         if (rc != 0)
                 goto fail_rx_qstart;
 
-        sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+        rxq_info->deferred_started = B_TRUE;
 
         sfc_adapter_unlock(sa);
 
@@ -1329,17 +1338,23 @@ sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 }
 
 static int
-sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
         struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
         struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+        sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
+        struct sfc_rxq_info *rxq_info;
+        sfc_sw_index_t sw_index;
 
-        sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+        sfc_log_init(sa, "RxQ=%u", ethdev_qid);
 
         sfc_adapter_lock(sa);
 
-        sfc_rx_qstop(sa, rx_queue_id);
-        sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+        sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
+        sfc_rx_qstop(sa, sw_index);
+
+        rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
+        rxq_info->deferred_started = B_FALSE;
 
         sfc_adapter_unlock(sa);
 
@@ -1766,27 +1781,27 @@ sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
 }
 
 static int
-sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
         const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
         struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+        sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
         struct sfc_rxq_info *rxq_info;
 
-        SFC_ASSERT(queue_id < sas->rxq_count);
-        rxq_info = &sas->rxq_info[queue_id];
+        rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
 
         return sap->dp_rx->intr_enable(rxq_info->dp);
 }
 
 static int
-sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
 {
         const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
         struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+        sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
         struct sfc_rxq_info *rxq_info;
 
-        SFC_ASSERT(queue_id < sas->rxq_count);
-        rxq_info = &sas->rxq_info[queue_id];
+        rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
 
         return sap->dp_rx->intr_disable(rxq_info->dp);
 }
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index b4953ac647..2262994112 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -582,7 +582,7 @@ sfc_ev_qpoll(struct sfc_evq *evq)
                 int rc;
 
                 if (evq->dp_rxq != NULL) {
-                        unsigned int rxq_sw_index;
+                        sfc_sw_index_t rxq_sw_index;
 
                         rxq_sw_index = evq->dp_rxq->dpq.queue_id;
 
diff --git a/drivers/net/sfc/sfc_ev.h b/drivers/net/sfc/sfc_ev.h
index d796865b7f..5a9f85c2d9 100644
--- a/drivers/net/sfc/sfc_ev.h
+++ b/drivers/net/sfc/sfc_ev.h
@@ -69,9 +69,25 @@ struct sfc_evq {
  * Tx event queues follow Rx event queues.
  */
 
-static inline unsigned int
-sfc_evq_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
-                              unsigned int rxq_sw_index)
+static inline sfc_ethdev_qid_t
+sfc_ethdev_rx_qid_by_rxq_sw_index(__rte_unused struct sfc_adapter_shared *sas,
+                                  sfc_sw_index_t rxq_sw_index)
+{
+        /* Only ethdev queues are present for now */
+        return rxq_sw_index;
+}
+
+static inline sfc_sw_index_t
+sfc_rxq_sw_index_by_ethdev_rx_qid(__rte_unused struct sfc_adapter_shared *sas,
+                                  sfc_ethdev_qid_t ethdev_qid)
+{
+        /* Only ethdev queues are present for now */
+        return ethdev_qid;
+}
+
+static inline sfc_sw_index_t
+sfc_evq_sw_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
+                                 sfc_sw_index_t rxq_sw_index)
 {
         return 1 + rxq_sw_index;
 }
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 0bfd284c9e..2db8af1759 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -1400,10 +1400,10 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
         struct sfc_rxq *rxq;
         struct sfc_rxq_info *rxq_info;
 
-        if (queue->index >= sfc_sa2shared(sa)->rxq_count)
+        if (queue->index >= sfc_sa2shared(sa)->ethdev_rxq_count)
                 return -EINVAL;
 
-        rxq = &sa->rxq_ctrl[queue->index];
+        rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, queue->index);
         spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
 
         rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
@@ -1420,7 +1420,7 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
 {
         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
         struct sfc_rss *rss = &sas->rss;
-        unsigned int rxq_sw_index;
+        sfc_ethdev_qid_t ethdev_qid;
         struct sfc_rxq *rxq;
         unsigned int rxq_hw_index_min;
         unsigned int rxq_hw_index_max;
@@ -1434,18 +1434,19 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
         if (action_rss->queue_num == 0)
                 return -EINVAL;
 
-        rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
-        rxq = &sa->rxq_ctrl[rxq_sw_index];
+        ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
+        rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
         rxq_hw_index_min = rxq->hw_index;
         rxq_hw_index_max = 0;
 
         for (i = 0; i < action_rss->queue_num; ++i) {
-                rxq_sw_index = action_rss->queue[i];
+                ethdev_qid = action_rss->queue[i];
 
-                if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
+                if ((unsigned int)ethdev_qid >=
+                    sfc_sa2shared(sa)->ethdev_rxq_count)
                         return -EINVAL;
 
-                rxq = &sa->rxq_ctrl[rxq_sw_index];
+                rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
 
                 if (rxq->hw_index < rxq_hw_index_min)
                         rxq_hw_index_min = rxq->hw_index;
@@ -1509,9 +1510,10 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
 
         for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
                 unsigned int nb_queues = action_rss->queue_num;
-                unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
-                struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
+                struct sfc_rxq *rxq;
 
+                ethdev_qid = action_rss->queue[i % nb_queues];
+                rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
                 sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
         }
 
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 461afc5168..597785ae02 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -654,14 +654,17 @@ struct sfc_dp_rx sfc_efx_rx = {
 };
 
 static void
-sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qflush(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
+        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+        sfc_ethdev_qid_t ethdev_qid;
         struct sfc_rxq_info *rxq_info;
         struct sfc_rxq *rxq;
         unsigned int retry_count;
         unsigned int wait_count;
         int rc;
 
+        ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
         rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
         SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
 
@@ -698,13 +701,16 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
                          (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
 
                 if (rxq_info->state & SFC_RXQ_FLUSHING)
-                        sfc_err(sa, "RxQ %u flush timed out", sw_index);
+                        sfc_err(sa, "RxQ %d (internal %u) flush timed out",
+                                ethdev_qid, sw_index);
 
                 if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
-                        sfc_err(sa, "RxQ %u flush failed", sw_index);
+                        sfc_err(sa, "RxQ %d (internal %u) flush failed",
+                                ethdev_qid, sw_index);
 
                 if (rxq_info->state & SFC_RXQ_FLUSHED)
-                        sfc_notice(sa, "RxQ %u flushed", sw_index);
+                        sfc_notice(sa, "RxQ %d (internal %u) flushed",
+                                   ethdev_qid, sw_index);
         }
 
         sa->priv.dp_rx->qpurge(rxq_info->dp);
@@ -764,17 +770,20 @@ sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
 }
 
 int
-sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
+        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+        sfc_ethdev_qid_t ethdev_qid;
         struct sfc_rxq_info *rxq_info;
         struct sfc_rxq *rxq;
         struct sfc_evq *evq;
         efx_rx_prefix_layout_t pinfo;
         int rc;
 
-        sfc_log_init(sa, "sw_index=%u", sw_index);
-        SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
+        ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+        sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);
 
         rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
         SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);
@@ -782,7 +791,7 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
         rxq = &sa->rxq_ctrl[sw_index];
         evq = rxq->evq;
 
-        rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
+        rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index));
         if (rc != 0)
                 goto fail_ev_qstart;
 
@@ -833,15 +842,16 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 
         rxq_info->state |= SFC_RXQ_STARTED;
 
-        if (sw_index == 0 && !sfc_sa2shared(sa)->isolated) {
+        if (ethdev_qid == 0 && !sfc_sa2shared(sa)->isolated) {
                 rc = sfc_rx_default_rxq_set_filter(sa, rxq);
                 if (rc != 0)
                         goto fail_mac_filter_default_rxq_set;
         }
 
         /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
-        sa->eth_dev->data->rx_queue_state[sw_index] =
-                RTE_ETH_QUEUE_STATE_STARTED;
+        if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+                sa->eth_dev->data->rx_queue_state[ethdev_qid] =
+                        RTE_ETH_QUEUE_STATE_STARTED;
 
         return 0;
 
@@ -864,14 +874,17 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
 }
 
 void
-sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
+        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+        sfc_ethdev_qid_t ethdev_qid;
         struct sfc_rxq_info *rxq_info;
         struct sfc_rxq *rxq;
 
-        sfc_log_init(sa, "sw_index=%u", sw_index);
-        SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
+        ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+        sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);
 
         rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
 
@@ -880,13 +893,14 @@ sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
         SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
 
         /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
-        sa->eth_dev->data->rx_queue_state[sw_index] =
-                RTE_ETH_QUEUE_STATE_STOPPED;
+        if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+                sa->eth_dev->data->rx_queue_state[ethdev_qid] =
+                        RTE_ETH_QUEUE_STATE_STOPPED;
 
         rxq = &sa->rxq_ctrl[sw_index];
         sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);
 
-        if (sw_index == 0)
+        if (ethdev_qid == 0)
                 efx_mac_filter_default_rxq_clear(sa->nic);
 
         sfc_rx_qflush(sa, sw_index);
@@ -1056,11 +1070,13 @@ sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
 }
 
 int
-sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
              uint16_t nb_rx_desc, unsigned int socket_id,
              const struct rte_eth_rxconf *rx_conf,
              struct rte_mempool *mb_pool)
 {
+        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+        sfc_ethdev_qid_t ethdev_qid;
         const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
         struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
         int rc;
@@ -1092,16 +1108,22 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
         SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
         SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
 
-        offloads = rx_conf->offloads |
-                sa->eth_dev->data->dev_conf.rxmode.offloads;
+        ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+        offloads = rx_conf->offloads;
+        /* Add device level Rx offloads if the queue is an ethdev Rx queue */
+        if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+                offloads |= sa->eth_dev->data->dev_conf.rxmode.offloads;
+
         rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
         if (rc != 0)
                 goto fail_bad_conf;
 
         buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
         if (buf_size == 0) {
-                sfc_err(sa, "RxQ %u mbuf pool object size is too small",
-                        sw_index);
+                sfc_err(sa,
+                        "RxQ %d (internal %u) mbuf pool object size is too small",
+                        ethdev_qid, sw_index);
                 rc = EINVAL;
                 goto fail_bad_conf;
         }
@@ -1111,11 +1133,13 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
                                  (offloads & DEV_RX_OFFLOAD_SCATTER),
                                  encp->enc_rx_scatter_max,
                                  &error)) {
-                sfc_err(sa, "RxQ %u MTU check failed: %s", sw_index, error);
-                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
+                sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
+                        ethdev_qid, sw_index, error);
+                sfc_err(sa,
+                        "RxQ %d (internal %u) calculated Rx buffer size is %u vs "
                         "PDU size %u plus Rx prefix %u bytes",
-                        sw_index, buf_size, (unsigned int)sa->port.pdu,
-                        encp->enc_rx_prefix_size);
+                        ethdev_qid, sw_index, buf_size,
+                        (unsigned int)sa->port.pdu, encp->enc_rx_prefix_size);
                 rc = EINVAL;
                 goto fail_bad_conf;
         }
@@ -1193,7 +1217,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
         info.flags = rxq_info->rxq_flags;
         info.rxq_entries = rxq_info->entries;
         info.rxq_hw_ring = rxq->mem.esm_base;
-        info.evq_hw_index = sfc_evq_index_by_rxq_sw_index(sa, sw_index);
+        info.evq_hw_index = sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index);
         info.evq_entries = evq_entries;
         info.evq_hw_ring = evq->mem.esm_base;
         info.hw_index = rxq->hw_index;
@@ -1231,13 +1255,18 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
 }
 
 void
-sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
+        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+        sfc_ethdev_qid_t ethdev_qid;
         struct sfc_rxq_info *rxq_info;
         struct sfc_rxq *rxq;
 
         SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
-        sa->eth_dev->data->rx_queues[sw_index] = NULL;
+        ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
+
+        if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
+                sa->eth_dev->data->rx_queues[ethdev_qid] = NULL;
 
         rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
 
@@ -1479,14 +1508,41 @@ sfc_rx_rss_config(struct sfc_adapter *sa)
         return rc;
 }
 
+struct sfc_rxq_info *
+sfc_rxq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
+                           sfc_ethdev_qid_t ethdev_qid)
+{
+        sfc_sw_index_t sw_index;
+
+        SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
+        SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
+
+        sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
+        return &sas->rxq_info[sw_index];
+}
+
+struct sfc_rxq *
+sfc_rxq_ctrl_by_ethdev_qid(struct sfc_adapter *sa, sfc_ethdev_qid_t ethdev_qid)
+{
+        struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
+        sfc_sw_index_t sw_index;
+
+        SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
+        SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
+
+        sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
+        return &sa->rxq_ctrl[sw_index];
+}
+
 int
 sfc_rx_start(struct sfc_adapter *sa)
 {
         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-        unsigned int sw_index;
+        sfc_sw_index_t sw_index;
         int rc;
 
-        sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
+        sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
+                     sas->rxq_count);
 
         rc = efx_rx_init(sa->nic);
         if (rc != 0)
@@ -1524,9 +1580,10 @@ void
 sfc_rx_stop(struct sfc_adapter *sa)
 {
         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-        unsigned int sw_index;
+        sfc_sw_index_t sw_index;
 
-        sfc_log_init(sa, "rxq_count=%u", sas->rxq_count);
+        sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
+                     sas->rxq_count);
 
         sw_index = sas->rxq_count;
         while (sw_index-- > 0) {
@@ -1538,7 +1595,7 @@ sfc_rx_stop(struct sfc_adapter *sa)
 }
 
 static int
-sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+sfc_rx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
 {
         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
         struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
@@ -1606,17 +1663,29 @@ static void
 sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
 {
         struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
-        int sw_index;
+        sfc_sw_index_t sw_index;
+        sfc_ethdev_qid_t ethdev_qid;
 
-        SFC_ASSERT(nb_rx_queues <= sas->rxq_count);
+        SFC_ASSERT(nb_rx_queues <= sas->ethdev_rxq_count);
 
-        sw_index = sas->rxq_count;
-        while (--sw_index >= (int)nb_rx_queues) {
-                if (sas->rxq_info[sw_index].state & SFC_RXQ_INITIALIZED)
+        /*
+         * Finalize only ethdev queues since other ones are finalized only
+         * on device close and they may require additional deinitializaton.
+         */
+        ethdev_qid = sas->ethdev_rxq_count;
+        while (--ethdev_qid >= (int)nb_rx_queues) {
+                struct sfc_rxq_info *rxq_info;
+
+                rxq_info = sfc_rxq_info_by_ethdev_qid(sas, ethdev_qid);
+                if (rxq_info->state & SFC_RXQ_INITIALIZED) {
+                        sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
+                                                                ethdev_qid);
                         sfc_rx_qfini(sa, sw_index);
+                }
+        }
 
-        sas->rxq_count = nb_rx_queues;
+        sas->ethdev_rxq_count = nb_rx_queues;
 }
 
 /**
@@ -1637,7 +1706,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
         int rc;
 
         sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
-                     nb_rx_queues, sas->rxq_count);
+                     nb_rx_queues, sas->ethdev_rxq_count);
 
         rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
         if (rc != 0)
@@ -1666,7 +1735,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
                 struct sfc_rxq_info *new_rxq_info;
                 struct sfc_rxq *new_rxq_ctrl;
 
-                if (nb_rx_queues < sas->rxq_count)
+                if (nb_rx_queues < sas->ethdev_rxq_count)
                         sfc_rx_fini_queues(sa, nb_rx_queues);
 
                 rc = ENOMEM;
@@ -1685,30 +1754,38 @@ sfc_rx_configure(struct sfc_adapter *sa)
                 sas->rxq_info = new_rxq_info;
                 sa->rxq_ctrl = new_rxq_ctrl;
                 if (nb_rx_queues > sas->rxq_count) {
-                        memset(&sas->rxq_info[sas->rxq_count], 0,
-                               (nb_rx_queues - sas->rxq_count) *
+                        unsigned int rxq_count = sas->rxq_count;
+
+                        memset(&sas->rxq_info[rxq_count], 0,
+                               (nb_rx_queues - rxq_count) *
                                sizeof(sas->rxq_info[0]));
-                        memset(&sa->rxq_ctrl[sas->rxq_count], 0,
-                               (nb_rx_queues - sas->rxq_count) *
+                        memset(&sa->rxq_ctrl[rxq_count], 0,
+                               (nb_rx_queues - rxq_count) *
                                sizeof(sa->rxq_ctrl[0]));
                 }
         }
 
-        while (sas->rxq_count < nb_rx_queues) {
-                rc = sfc_rx_qinit_info(sa, sas->rxq_count);
+        while (sas->ethdev_rxq_count < nb_rx_queues) {
+                sfc_sw_index_t sw_index;
+
+                sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
+                                                        sas->ethdev_rxq_count);
+                rc = sfc_rx_qinit_info(sa, sw_index);
                 if (rc != 0)
                         goto fail_rx_qinit_info;
 
-                sas->rxq_count++;
+                sas->ethdev_rxq_count++;
         }
 
+        sas->rxq_count = sas->ethdev_rxq_count;
+
 configure_rss:
         rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
-                        MIN(sas->rxq_count, EFX_MAXRSS) : 0;
+                        MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
 
         if (rss->channels > 0) {
                 struct rte_eth_rss_conf *adv_conf_rss;
-                unsigned int sw_index;
+                sfc_sw_index_t sw_index;
 
                 for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
                         rss->tbl[sw_index] = sw_index % rss->channels;
 
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 2730454fd6..96c7dc415d 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -119,6 +119,10 @@ struct sfc_rxq_info {
 };
 
 struct sfc_rxq_info *sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
+struct sfc_rxq_info *sfc_rxq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
+                                                sfc_ethdev_qid_t ethdev_qid);
+struct sfc_rxq *sfc_rxq_ctrl_by_ethdev_qid(struct sfc_adapter *sa,
+                                           sfc_ethdev_qid_t ethdev_qid);
 
 int sfc_rx_configure(struct sfc_adapter *sa);
 void sfc_rx_close(struct sfc_adapter *sa);
@@ -129,9 +133,9 @@ int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
                  uint16_t nb_rx_desc, unsigned int socket_id,
                  const struct rte_eth_rxconf *rx_conf,
                  struct rte_mempool *mb_pool);
-void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
-int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
-void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index);
+int sfc_rx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index);
+void sfc_rx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index);
 
 uint64_t sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa);
 uint64_t sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa);
-- 
2.30.2
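
For readers unfamiliar with the two index spaces, the standalone sketch
below (not part of the patch) illustrates the translation the new
helpers perform. While only ethdev Rx queues exist, the mapping is an
identity, as the patch's sfc_rxq_sw_index_by_ethdev_rx_qid() and
sfc_ethdev_rx_qid_by_rxq_sw_index() show; the shortened function names
in the sketch are illustrative stand-ins, not driver code.

/* Illustrative only: mirrors the typedefs added to sfc_dp.h. */
#include <stdint.h>
#include <stdio.h>

typedef unsigned int sfc_sw_index_t;    /* driver-internal queue index */
typedef int32_t sfc_ethdev_qid_t;       /* ethdev-visible queue id */
#define SFC_ETHDEV_QID_INVALID ((sfc_ethdev_qid_t)(-1))

/* Only ethdev queues are present for now, so both directions are identity. */
static sfc_ethdev_qid_t
ethdev_qid_by_sw_index(sfc_sw_index_t sw_index)
{
        return sw_index;
}

static sfc_sw_index_t
sw_index_by_ethdev_qid(sfc_ethdev_qid_t ethdev_qid)
{
        return ethdev_qid;
}

int
main(void)
{
        /* An ethdev callback receives an ethdev queue id... */
        sfc_ethdev_qid_t ethdev_qid = 2;
        /* ...and translates it before indexing rxq_info[]/rxq_ctrl[]. */
        sfc_sw_index_t sw_index = sw_index_by_ethdev_qid(ethdev_qid);

        printf("ethdev qid %d -> internal index %u\n", ethdev_qid, sw_index);
        printf("internal index %u -> ethdev qid %d\n",
               sw_index, ethdev_qid_by_sw_index(sw_index));
        /* A queue with no ethdev counterpart would report
         * SFC_ETHDEV_QID_INVALID instead of a valid ethdev qid. */
        return 0;
}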