Before this patch, the GQ queue format only supported a static ring size provided by the device. Using the maximum and minimum ring sizes provided in the modify_ring_size adminq command, this patch enables changing the ring size within those bounds for the GQ queue format.
Note that the ring size must be a power of two; any other value will be rejected. Signed-off-by: Joshua Washington <joshw...@google.com> Reviewed-by: Rushil Gupta <rush...@google.com> Reviewed-by: Harshitha Ramamurthy <hramamur...@google.com> --- drivers/net/gve/base/gve_adminq.c | 6 ++---- drivers/net/gve/gve_ethdev.c | 12 ++++-------- drivers/net/gve/gve_rx.c | 10 ++++++---- drivers/net/gve/gve_tx.c | 10 ++++++---- 4 files changed, 18 insertions(+), 20 deletions(-) diff --git a/drivers/net/gve/base/gve_adminq.c b/drivers/net/gve/base/gve_adminq.c index 72c05c8237..09c6bff026 100644 --- a/drivers/net/gve/base/gve_adminq.c +++ b/drivers/net/gve/base/gve_adminq.c @@ -540,6 +540,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) cpu_to_be64(txq->qres_mz->iova), .tx_ring_addr = cpu_to_be64(txq->tx_ring_phys_addr), .ntfy_id = cpu_to_be32(txq->ntfy_id), + .tx_ring_size = cpu_to_be16(txq->nb_tx_desc), }; if (gve_is_gqi(priv)) { @@ -548,8 +549,6 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id); } else { - cmd.create_tx_queue.tx_ring_size = - cpu_to_be16(txq->nb_tx_desc); cmd.create_tx_queue.tx_comp_ring_addr = cpu_to_be64(txq->compl_ring_phys_addr); cmd.create_tx_queue.tx_comp_ring_size = @@ -584,6 +583,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) .queue_id = cpu_to_be32(queue_index), .ntfy_id = cpu_to_be32(rxq->ntfy_id), .queue_resources_addr = cpu_to_be64(rxq->qres_mz->iova), + .rx_ring_size = cpu_to_be16(rxq->nb_rx_desc), }; if (gve_is_gqi(priv)) { @@ -598,8 +598,6 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rxq->rx_buf_len); } else { - cmd.create_rx_queue.rx_ring_size = - cpu_to_be16(rxq->nb_rx_desc); cmd.create_rx_queue.rx_desc_ring_addr = 
cpu_to_be64(rxq->compl_ring_phys_addr); cmd.create_rx_queue.rx_data_ring_addr = diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c index 603644735d..2a674e748f 100644 --- a/drivers/net/gve/gve_ethdev.c +++ b/drivers/net/gve/gve_ethdev.c @@ -510,19 +510,15 @@ gve_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->default_rxportconf.ring_size = priv->default_rx_desc_cnt; dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { - .nb_max = gve_is_gqi(priv) ? - priv->default_rx_desc_cnt : - GVE_MAX_QUEUE_SIZE_DQO, - .nb_min = priv->default_rx_desc_cnt, + .nb_max = priv->max_rx_desc_cnt, + .nb_min = priv->min_rx_desc_cnt, .nb_align = 1, }; dev_info->default_txportconf.ring_size = priv->default_tx_desc_cnt; dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { - .nb_max = gve_is_gqi(priv) ? - priv->default_tx_desc_cnt : - GVE_MAX_QUEUE_SIZE_DQO, - .nb_min = priv->default_tx_desc_cnt, + .nb_max = priv->max_tx_desc_cnt, + .nb_min = priv->min_tx_desc_cnt, .nb_align = 1, }; diff --git a/drivers/net/gve/gve_rx.c b/drivers/net/gve/gve_rx.c index 43cb368be9..89b6ef384a 100644 --- a/drivers/net/gve/gve_rx.c +++ b/drivers/net/gve/gve_rx.c @@ -304,11 +304,12 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint32_t mbuf_len; int err = 0; - if (nb_desc != hw->default_rx_desc_cnt) { - PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.", - hw->default_rx_desc_cnt); + /* Ring size is required to be a power of two. */ + if (!rte_is_power_of_2(nb_desc)) { + PMD_DRV_LOG(ERR, "Invalid ring size %u. GVE ring size must be a power of 2.\n", + nb_desc); + return -EINVAL; } - nb_desc = hw->default_rx_desc_cnt; /* Free memory if needed. 
*/ if (dev->data->rx_queues[queue_id]) { @@ -388,6 +389,7 @@ gve_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, rxq->qpl = gve_setup_queue_page_list(hw, queue_id, true, nb_desc); if (!rxq->qpl) { + err = -ENOMEM; PMD_DRV_LOG(ERR, "Failed to alloc rx qpl for queue %hu.", queue_id); diff --git a/drivers/net/gve/gve_tx.c b/drivers/net/gve/gve_tx.c index 8c255bd0f2..b1ea36e509 100644 --- a/drivers/net/gve/gve_tx.c +++ b/drivers/net/gve/gve_tx.c @@ -559,11 +559,12 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc, uint16_t free_thresh; int err = 0; - if (nb_desc != hw->default_tx_desc_cnt) { - PMD_DRV_LOG(WARNING, "gve doesn't support nb_desc config, use hw nb_desc %u.", - hw->default_tx_desc_cnt); + /* Ring size is required to be a power of two. */ + if (!rte_is_power_of_2(nb_desc)) { + PMD_DRV_LOG(ERR, "Invalid ring size %u. GVE ring size must be a power of 2.\n", + nb_desc); + return -EINVAL; } - nb_desc = hw->default_tx_desc_cnt; /* Free memory if needed. */ if (dev->data->tx_queues[queue_id]) { @@ -633,6 +634,7 @@ gve_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, uint16_t nb_desc, txq->qpl = gve_setup_queue_page_list(hw, queue_id, false, hw->tx_pages_per_qpl); if (!txq->qpl) { + err = -ENOMEM; PMD_DRV_LOG(ERR, "Failed to alloc tx qpl for queue %hu.", queue_id); goto err_iov_ring; -- 2.45.2.803.g4e1b14247a-goog