The MPRQ stride size should only be checked against the mbuf size when
no devarg configuration has been provided. The headroom check introduced
recently removed this condition inadvertently.
Restore this condition and perform the check only when
mprq_log_stride_size is not set.

Fixes: 8e7925aa77 ("net/mlx5: fix MPRQ stride size to accommodate the headroom")

Signed-off-by: Alexander Kozyrev <akozy...@nvidia.com>
---
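Note for reviewers: below is a minimal, self-contained sketch of the
intended selection logic after this fix. The constants and the
choose_log_stride_size() helper are hypothetical stand-ins for the code
in mlx5_mprq_prepare(), not the driver itself:

    #include <stdint.h>
    #include <stdio.h>

    #define ARG_UNSET (-1) /* stands in for MLX5_ARG_UNSET */
    #define LOG_MIN   6    /* hypothetical supported range */
    #define LOG_MAX   13
    #define LOG_DEF   11   /* stands in for MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE */

    /* Smallest n such that (1 << n) >= v, like the driver's log2above(). */
    static uint32_t log2above(uint32_t v)
    {
        uint32_t n = 0;

        while ((1u << n) < v)
            n++;
        return n;
    }

    static uint32_t
    choose_log_stride_size(int32_t devarg, uint32_t min_mbuf_size)
    {
        if (devarg != ARG_UNSET) {
            /* Devarg provided: only validate the requested size. */
            if ((uint32_t)devarg > LOG_MAX || (uint32_t)devarg < LOG_MIN)
                return LOG_DEF;
            return (uint32_t)devarg;
        }
        /* No devarg: make the stride fit the mbuf size by default. */
        if (min_mbuf_size <= (1u << LOG_MAX))
            return log2above(min_mbuf_size);
        return LOG_DEF;
    }

    int main(void)
    {
        /* No devarg: stride adjusted to fit a 2176-byte mbuf -> 12. */
        printf("%u\n", choose_log_stride_size(ARG_UNSET, 2176));
        /* Out-of-range devarg: falls back to the default -> 11. */
        printf("%u\n", choose_log_stride_size(15, 0));
        return 0;
    }
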
 drivers/net/mlx5/mlx5.c     |  2 +-
 drivers/net/mlx5/mlx5_rxq.c | 25 ++++++++++++++-----------
 2 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 997df595d0..d10b5c8510 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2616,7 +2616,7 @@ mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist,
        config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
        config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
        config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
-       config->mprq.log_stride_size = MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE;
+       config->mprq.log_stride_size = MLX5_ARG_UNSET;
        config->log_hp_size = MLX5_ARG_UNSET;
        config->std_delay_drop = 0;
        config->hp_delay_drop = 0;
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 8ef7860e16..fd2d8c0a3d 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1605,18 +1605,19 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                *actual_log_stride_num = config->mprq.log_stride_num;
        }
        /* Checks if chosen size of stride is in supported range. */
-       if (config->mprq.log_stride_size > log_max_stride_size ||
-           config->mprq.log_stride_size < log_min_stride_size) {
-               *actual_log_stride_size = log_def_stride_size;
-               DRV_LOG(WARNING,
-                       "Port %u Rx queue %u size of a stride for Multi-Packet 
RQ is out of range, setting default value (%u)",
-                       dev->data->port_id, idx,
-                       RTE_BIT32(log_def_stride_size));
+       if (config->mprq.log_stride_size != (uint32_t)MLX5_ARG_UNSET) {
+               if (config->mprq.log_stride_size > log_max_stride_size ||
+                       config->mprq.log_stride_size < log_min_stride_size) {
+                       *actual_log_stride_size = log_def_stride_size;
+                       DRV_LOG(WARNING,
+                               "Port %u Rx queue %u size of a stride for 
Multi-Packet RQ is out of range, setting default value (%u)",
+                               dev->data->port_id, idx,
+                               RTE_BIT32(log_def_stride_size));
+               } else {
+                       *actual_log_stride_size = config->mprq.log_stride_size;
+               }
        } else {
-               *actual_log_stride_size = config->mprq.log_stride_size;
-       }
-       /* Make the stride fit the mbuf size by default. */
-       if (*actual_log_stride_size == MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE) {
+               /* Make the stride fit the mbuf size by default. */
                if (min_mbuf_size <= RTE_BIT32(log_max_stride_size)) {
                        DRV_LOG(WARNING,
                                "Port %u Rx queue %u size of a stride for 
Multi-Packet RQ is adjusted to match the mbuf size (%u)",
@@ -1675,6 +1676,8 @@ mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        " min_stride_sz = %u, max_stride_sz = %u).\n"
                        "Rx segment is %senabled. External mempool is %sused.",
                        dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
+                       config->mprq.log_stride_size == (uint32_t)MLX5_ARG_UNSET ?
+                       RTE_BIT32(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE) :
                        RTE_BIT32(config->mprq.log_stride_size),
                        RTE_BIT32(config->mprq.log_stride_num),
                        config->mprq.min_rxqs_num,
-- 
2.18.2
