The buffer split feature for receiving packets was added to the mlx5
PMD. An Rx queue can now receive data into buffers belonging to
different mempools, so the memory of all involved mempools must be
registered for DMA operations to allow the hardware to store the
received data.
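
For illustration, a minimal sketch of the per-segment registration
idea. The rx_queue_sketch layout below is a simplified stand-in, not
the real mlx5 Rx queue structure; only mlx5_mr_update_mp() and the
rxseg[]/mprq_mp fields are taken from the diff that follows, and the
sketch assumes the mlx5 internal headers are available:

  /*
   * Sketch only: every mempool a buffer-split Rx queue can pull
   * buffers from must be DMA-registered before traffic starts.
   * rx_queue_sketch is a simplified stand-in for the driver's
   * Rx queue data; it is not part of the mlx5 PMD.
   */
  struct rx_queue_sketch {
          struct mlx5_mr_ctrl mr_ctrl;    /* per-queue MR lookup cache */
          uint32_t rxseg_n;               /* number of split segments */
          struct {
                  struct rte_mempool *mp; /* pool feeding this segment */
          } rxseg[8];                     /* 8 is arbitrary for the sketch */
  };

  static void
  register_rxq_mempools(struct rte_eth_dev *dev, struct rx_queue_sketch *rxq)
  {
          uint32_t s;

          /* Register the pool backing each configured segment. */
          for (s = 0; s < rxq->rxseg_n; s++)
                  mlx5_mr_update_mp(dev, &rxq->mr_ctrl, rxq->rxseg[s].mp);
  }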

Signed-off-by: Viacheslav Ovsiienko <viachesl...@nvidia.com>
Acked-by: Matan Azrad <ma...@nvidia.com>
---
 drivers/net/mlx5/mlx5_mr.c      |  3 +++
 drivers/net/mlx5/mlx5_trigger.c | 20 ++++++++++++--------
 2 files changed, 15 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index dbcf0aa..c308ecc 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -536,6 +536,9 @@ struct mr_update_mp_data {
                .ret = 0,
        };
 
+       DRV_LOG(DEBUG, "Port %u Rx queue registering mp %s "
+                      "having %u chunks.", dev->data->port_id,
+                      mp->name, mp->nb_mem_chunks);
        rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
        if (data.ret < 0 && rte_errno == ENXIO) {
                /* Mempool may have externally allocated memory. */
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 7735f02..19f2d66 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -145,18 +145,22 @@
                dev->data->port_id, priv->sh->device_attr.max_sge);
        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
-               struct rte_mempool *mp;
 
                if (!rxq_ctrl)
                        continue;
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-                       /* Pre-register Rx mempool. */
-                       mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-                            rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
-                       DRV_LOG(DEBUG, "Port %u Rx queue %u registering mp %s"
-                               " having %u chunks.", dev->data->port_id,
-                               rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
-                       mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+                       /* Pre-register Rx mempools. */
+                       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+                               mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
+                                                 rxq_ctrl->rxq.mprq_mp);
+                       } else {
+                               uint32_t s;
+
+                               for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
+                                       mlx5_mr_update_mp
+                                               (dev, &rxq_ctrl->rxq.mr_ctrl,
+                                               rxq_ctrl->rxq.rxseg[s].mp);
+                       }
                        ret = rxq_alloc_elts(rxq_ctrl);
                        if (ret)
                                goto error;
-- 
1.8.3.1
