>
> >At queue configure stage always allocate space for maximum possible
> >number (RTE_MAX_QUEUES_PER_PORT) of queue pointers.
> >That will allow 'fast' inline functions (eth_rx_burst, etc.) to refer
> >pointer to internal queue data without extra checking of current
> >number
> >of configured queues.
> >That would help in future to hide rte_eth_dev and related structures.
> >
> >Signed-off-by: Konstantin Ananyev <konstantin.anan...@intel.com>
> >---
> > lib/ethdev/rte_ethdev.c | 36 +++++++++---------------------------
> > 1 file changed, 9 insertions(+), 27 deletions(-)
> >
> >diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
> >index daf5ca9242..424bc260fa 100644
> >--- a/lib/ethdev/rte_ethdev.c
> >+++ b/lib/ethdev/rte_ethdev.c
> >@@ -898,7 +898,8 @@ eth_dev_rx_queue_config(struct rte_eth_dev
> >*dev, uint16_t nb_queues)
> >
> > if (dev->data->rx_queues == NULL && nb_queues != 0) { /*
> >first time configuration */
> > dev->data->rx_queues = rte_zmalloc("ethdev-
> >>rx_queues",
> >- sizeof(dev->data->rx_queues[0]) *
> >nb_queues,
> >+ sizeof(dev->data->rx_queues[0]) *
> >+ RTE_MAX_QUEUES_PER_PORT,
> > RTE_CACHE_LINE_SIZE);
> > if (dev->data->rx_queues == NULL) {
> > dev->data->nb_rx_queues = 0;
>
> We could get rid of this zmalloc by declaring rx_queues as array of
> pointers, it would make code much simpler.
> I believe the original code dates back to "Initial" release.
Yep we can, and yes it will simplify this piece of code.
The main reason I decided not to do this change now -
it would change the layout of the rte_eth_dev_data structure.
In this series I tried to minimize (or avoid) changes in rte_eth_dev and
rte_eth_dev_data
as much as possible, to avoid any unforeseen performance and functional impacts.
If we manage to make rte_eth_dev and rte_eth_dev_data private, we can in the
future
consider that change, and other changes to the rte_eth_dev and rte_eth_dev_data layouts,
without worrying about ABI breakage.
>
>
> >@@ -909,21 +910,11 @@ eth_dev_rx_queue_config(struct
> >rte_eth_dev *dev, uint16_t nb_queues)
> >
> > rxq = dev->data->rx_queues;
> >
> >- for (i = nb_queues; i < old_nb_queues; i++)
> >+ for (i = nb_queues; i < old_nb_queues; i++) {
> > (*dev->dev_ops->rx_queue_release)(rxq[i]);
> >- rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
> >- RTE_CACHE_LINE_SIZE);
> >- if (rxq == NULL)
> >- return -(ENOMEM);
> >- if (nb_queues > old_nb_queues) {
> >- uint16_t new_qs = nb_queues -
> >old_nb_queues;
> >-
> >- memset(rxq + old_nb_queues, 0,
> >- sizeof(rxq[0]) * new_qs);
> >+ rxq[i] = NULL;
> > }
> >
> >- dev->data->rx_queues = rxq;
> >-
> > } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
> > RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops-
> >>rx_queue_release, -ENOTSUP);
> >
> >@@ -1138,8 +1129,9 @@ eth_dev_tx_queue_config(struct
> >rte_eth_dev *dev, uint16_t nb_queues)
> >
> > if (dev->data->tx_queues == NULL && nb_queues != 0) { /*
> >first time configuration */
> > dev->data->tx_queues = rte_zmalloc("ethdev-
> >>tx_queues",
> >- sizeof(dev->data-
> >>tx_queues[0]) * nb_queues,
> >-
> >RTE_CACHE_LINE_SIZE);
> >+ sizeof(dev->data->tx_queues[0]) *
> >+ RTE_MAX_QUEUES_PER_PORT,
> >+ RTE_CACHE_LINE_SIZE);
> > if (dev->data->tx_queues == NULL) {
> > dev->data->nb_tx_queues = 0;
> > return -(ENOMEM);
> >@@ -1149,21 +1141,11 @@ eth_dev_tx_queue_config(struct
> >rte_eth_dev *dev, uint16_t nb_queues)
> >
> > txq = dev->data->tx_queues;
> >
> >- for (i = nb_queues; i < old_nb_queues; i++)
> >+ for (i = nb_queues; i < old_nb_queues; i++) {
> > (*dev->dev_ops->tx_queue_release)(txq[i]);
> >- txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
> >- RTE_CACHE_LINE_SIZE);
> >- if (txq == NULL)
> >- return -ENOMEM;
> >- if (nb_queues > old_nb_queues) {
> >- uint16_t new_qs = nb_queues -
> >old_nb_queues;
> >-
> >- memset(txq + old_nb_queues, 0,
> >- sizeof(txq[0]) * new_qs);
> >+ txq[i] = NULL;
> > }
> >
> >- dev->data->tx_queues = txq;
> >-
> > } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
> > RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops-
> >>tx_queue_release, -ENOTSUP);
> >
> >--
> >2.26.3