The number of Tx descriptors is no longer used as the HW Tx ring size; it simply defines the maximum fill level.
Signed-off-by: Andrew Rybchenko <arybche...@solarflare.com> Reviewed-by: Andy Moreton <amore...@solarflare.com> --- drivers/net/sfc/sfc_dp_tx.h | 8 ++++++++ drivers/net/sfc/sfc_ef10_tx.c | 29 ++++++++++++++++++++++++++--- drivers/net/sfc/sfc_ethdev.c | 3 +++ drivers/net/sfc/sfc_tx.c | 5 ++++- 4 files changed, 41 insertions(+), 4 deletions(-) diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h index 4485b2f..a384a53 100644 --- a/drivers/net/sfc/sfc_dp_tx.h +++ b/drivers/net/sfc/sfc_dp_tx.h @@ -80,6 +80,13 @@ struct sfc_dp_tx_qcreate_info { }; /** + * Get Tx datapath specific device info. + * + * @param dev_info Device info to be adjusted + */ +typedef void (sfc_dp_tx_get_dev_info_t)(struct rte_eth_dev_info *dev_info); + +/** * Get size of transmit and event queue rings by the number of Tx * descriptors. * @@ -162,6 +169,7 @@ struct sfc_dp_tx { #define SFC_DP_TX_FEAT_MULTI_PROCESS 0x8 #define SFC_DP_TX_FEAT_MULTI_POOL 0x10 #define SFC_DP_TX_FEAT_REFCNT 0x20 + sfc_dp_tx_get_dev_info_t *get_dev_info; sfc_dp_tx_qsize_up_rings_t *qsize_up_rings; sfc_dp_tx_qcreate_t *qcreate; sfc_dp_tx_qdestroy_t *qdestroy; diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c index 99fe87e..02df39c 100644 --- a/drivers/net/sfc/sfc_ef10_tx.c +++ b/drivers/net/sfc/sfc_ef10_tx.c @@ -481,6 +481,17 @@ sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, return pktp - &tx_pkts[0]; } +static sfc_dp_tx_get_dev_info_t sfc_ef10_get_dev_info; +static void +sfc_ef10_get_dev_info(struct rte_eth_dev_info *dev_info) +{ + /* + * Number of descriptors just defines maximum number of pushed + * descriptors (fill level). 
+ */ + dev_info->tx_desc_lim.nb_min = 1; + dev_info->tx_desc_lim.nb_align = 1; +} static sfc_dp_tx_qsize_up_rings_t sfc_ef10_tx_qsize_up_rings; static int @@ -489,9 +500,19 @@ sfc_ef10_tx_qsize_up_rings(uint16_t nb_tx_desc, unsigned int *evq_entries, unsigned int *txq_max_fill_level) { - *txq_entries = nb_tx_desc; - *evq_entries = nb_tx_desc; - *txq_max_fill_level = SFC_EF10_TXQ_LIMIT(*txq_entries); + /* + * rte_ethdev API guarantees that the number meets min, max and + * alignment requirements. + */ + if (nb_tx_desc <= EFX_TXQ_MINNDESCS) + *txq_entries = EFX_TXQ_MINNDESCS; + else + *txq_entries = rte_align32pow2(nb_tx_desc); + + *evq_entries = *txq_entries; + + *txq_max_fill_level = RTE_MIN(nb_tx_desc, + SFC_EF10_TXQ_LIMIT(*evq_entries)); return 0; } @@ -637,6 +658,7 @@ struct sfc_dp_tx sfc_ef10_tx = { SFC_DP_TX_FEAT_MULTI_POOL | SFC_DP_TX_FEAT_REFCNT | SFC_DP_TX_FEAT_MULTI_PROCESS, + .get_dev_info = sfc_ef10_get_dev_info, .qsize_up_rings = sfc_ef10_tx_qsize_up_rings, .qcreate = sfc_ef10_tx_qcreate, .qdestroy = sfc_ef10_tx_qdestroy, @@ -654,6 +676,7 @@ struct sfc_dp_tx sfc_ef10_simple_tx = { .type = SFC_DP_TX, }, .features = SFC_DP_TX_FEAT_MULTI_PROCESS, + .get_dev_info = sfc_ef10_get_dev_info, .qsize_up_rings = sfc_ef10_tx_qsize_up_rings, .qcreate = sfc_ef10_tx_qcreate, .qdestroy = sfc_ef10_tx_qdestroy, diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c index 6ff4595..7d75f55 100644 --- a/drivers/net/sfc/sfc_ethdev.c +++ b/drivers/net/sfc/sfc_ethdev.c @@ -171,6 +171,7 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) */ dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS; + /* Initialize to hardware limits */ dev_info->tx_desc_lim.nb_max = sa->txq_max_entries; dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS; /* @@ -181,6 +182,8 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) if (sa->dp_rx->get_dev_info != NULL) sa->dp_rx->get_dev_info(dev_info); + if (sa->dp_tx->get_dev_info != 
NULL) + sa->dp_tx->get_dev_info(dev_info); } static const uint32_t * diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c index f8ee976..f3f447b 100644 --- a/drivers/net/sfc/sfc_tx.c +++ b/drivers/net/sfc/sfc_tx.c @@ -160,6 +160,10 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, &txq_max_fill_level); if (rc != 0) goto fail_size_up_rings; + SFC_ASSERT(txq_entries >= EFX_TXQ_MINNDESCS); + SFC_ASSERT(txq_entries <= sa->txq_max_entries); + SFC_ASSERT(txq_entries >= nb_tx_desc); + SFC_ASSERT(txq_max_fill_level <= nb_tx_desc); rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf); if (rc != 0) @@ -168,7 +172,6 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index, SFC_ASSERT(sw_index < sa->txq_count); txq_info = &sa->txq_info[sw_index]; - SFC_ASSERT(txq_entries <= sa->txq_max_entries); txq_info->entries = txq_entries; rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index, -- 2.7.4