Add the host port option for sharing steering objects between
multiple ports of the same physical NIC.

A guest port configured with the RTE_FLOW_PORT_FLAG_SHARE_INDIRECT
flag uses the indirect objects of the port specified in host_port_id
instead of allocating its own. The host port cannot be closed while
guest ports still reference it.

Signed-off-by: Viacheslav Ovsiienko <viachesl...@nvidia.com>
Acked-by: Ori Kam <or...@nvidia.com>
---
 drivers/net/mlx5/mlx5.c         |  6 +++
 drivers/net/mlx5/mlx5.h         |  2 +
 drivers/net/mlx5/mlx5_flow_hw.c | 78 +++++++++++++++++++++++++++++++--
 drivers/net/mlx5/mlx5_hws_cnt.c | 12 +++++
 4 files changed, 94 insertions(+), 4 deletions(-)
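
Usage note (not part of the commit): a minimal sketch of how an
application could use the new option through the generic
rte_flow_configure() API. Port ids, the queue size, and the counter
number below are illustrative assumptions, not values from the patch.

	#include <rte_debug.h>
	#include <rte_flow.h>

	struct rte_flow_queue_attr qattr = { .size = 64 };
	const struct rte_flow_queue_attr *qlist[] = { &qattr };
	struct rte_flow_error error;

	/* Host port 0 allocates and owns the shared indirect objects. */
	struct rte_flow_port_attr host_attr = {
		.nb_counters = 1 << 16,
	};
	if (rte_flow_configure(0, &host_attr, 1, qlist, &error))
		rte_exit(EXIT_FAILURE, "host port configure failed\n");

	/*
	 * Guest port 1 shares the host port objects; its own object
	 * numbers must stay zero, the PMD rejects non-zero values.
	 */
	struct rte_flow_port_attr guest_attr = {
		.flags = RTE_FLOW_PORT_FLAG_SHARE_INDIRECT,
		.host_port_id = 0,
	};
	if (rte_flow_configure(1, &guest_attr, 1, qlist, &error))
		rte_exit(EXIT_FAILURE, "guest port configure failed\n");

Guest ports have to be released before the host port: as long as
shared_refcnt is non-zero, closing the host port fails with EBUSY.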

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index b8643cebdd..2eca2cceef 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -2013,6 +2013,12 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        }
        if (!priv->sh)
                return 0;
+       if (priv->shared_refcnt) {
+               DRV_LOG(ERR, "port %u is shared host in use (%u)",
+                       dev->data->port_id, priv->shared_refcnt);
+               rte_errno = EBUSY;
+               return -EBUSY;
+       }
        DRV_LOG(DEBUG, "port %u closing device \"%s\"",
                dev->data->port_id,
                ((priv->sh->cdev->ctx != NULL) ?
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 16b33e1548..525bdd47f7 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1780,6 +1780,8 @@ struct mlx5_priv {
        struct mlx5_flow_hw_ctrl_rx *hw_ctrl_rx;
        /**< HW steering templates used to create control flow rules. */
 #endif
+       struct rte_eth_dev *shared_host; /* Host device for HW steering. */
+       uint16_t shared_refcnt; /* HW steering host reference counter. */
 };
 
 #define PORT_ID(priv) ((priv)->dev_data->port_id)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index aacde224f2..3b9789aa53 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -5,6 +5,8 @@
 #include <rte_flow.h>
 
 #include <mlx5_malloc.h>
+
+#include "mlx5.h"
 #include "mlx5_defs.h"
 #include "mlx5_flow.h"
 #include "mlx5_rx.h"
@@ -6303,6 +6305,12 @@ flow_hw_ct_pool_create(struct rte_eth_dev *dev,
        int reg_id;
        uint32_t flags;
 
+       if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+               DRV_LOG(ERR, "Connection tracking is not supported "
+                            "in cross vHCA sharing mode");
+               rte_errno = ENOTSUP;
+               return NULL;
+       }
        pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
        if (!pool) {
                rte_errno = ENOMEM;
@@ -6787,6 +6795,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
                  struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
+       struct mlx5_priv *host_priv = NULL;
        struct mlx5dr_context *dr_ctx = NULL;
        struct mlx5dr_context_attr dr_ctx_attr = {0};
        struct mlx5_hw_q *hw_q;
@@ -6801,7 +6810,8 @@ flow_hw_configure(struct rte_eth_dev *dev,
                .free = mlx5_free,
                .type = "mlx5_hw_action_construct_data",
        };
-       /* Adds one queue to be used by PMD.
+       /*
+        * Adds one queue to be used by PMD.
         * The last queue will be used by the PMD.
         */
        uint16_t nb_q_updated = 0;
@@ -6920,6 +6930,57 @@ flow_hw_configure(struct rte_eth_dev *dev,
        dr_ctx_attr.queues = nb_q_updated;
        /* Queue size should all be the same. Take the first one. */
        dr_ctx_attr.queue_size = _queue_attr[0]->size;
+       if (port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+               struct rte_eth_dev *host_dev = NULL;
+               uint16_t port_id;
+
+               MLX5_ASSERT(rte_eth_dev_is_valid_port(port_attr->host_port_id));
+               if (is_proxy) {
+                       DRV_LOG(ERR, "cross vHCA shared mode not supported "
+                                    " for E-Switch confgiurations");
+                       rte_errno = ENOTSUP;
+                       goto err;
+               }
+               MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
+                       if (port_id == port_attr->host_port_id) {
+                               host_dev = &rte_eth_devices[port_id];
+                               break;
+                       }
+               }
+               if (!host_dev || host_dev == dev ||
+                   !host_dev->data || !host_dev->data->dev_private) {
+                       DRV_LOG(ERR, "Invalid cross vHCA host port %u",
+                               port_attr->host_port_id);
+                       rte_errno = EINVAL;
+                       goto err;
+               }
+               host_priv = host_dev->data->dev_private;
+               if (host_priv->sh->cdev->ctx == priv->sh->cdev->ctx) {
+                       DRV_LOG(ERR, "Sibling ports %u and %u do not "
+                                    "require cross vHCA sharing mode",
+                               dev->data->port_id, port_attr->host_port_id);
+                       rte_errno = EINVAL;
+                       goto err;
+               }
+               if (host_priv->shared_host) {
+                       DRV_LOG(ERR, "Host port %u is not the sharing base",
+                               port_attr->host_port_id);
+                       rte_errno = EINVAL;
+                       goto err;
+               }
+               if (port_attr->nb_counters ||
+                   port_attr->nb_aging_objects ||
+                   port_attr->nb_meters ||
+                   port_attr->nb_conn_tracks) {
+                       DRV_LOG(ERR,
+                               "Object numbers on guest port must be zeros");
+                       rte_errno = EINVAL;
+                       goto err;
+               }
+               dr_ctx_attr.shared_ibv_ctx = host_priv->sh->cdev->ctx;
+               priv->shared_host = host_dev;
+               __atomic_fetch_add(&host_priv->shared_refcnt, 1, 
__ATOMIC_RELAXED);
+       }
        dr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);
        /* rte_errno has been updated by HWS layer. */
        if (!dr_ctx)
@@ -6935,7 +6996,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
                goto err;
        }
        /* Initialize meter library*/
-       if (port_attr->nb_meters)
+       if (port_attr->nb_meters || (host_priv && host_priv->hws_mpool))
                if (mlx5_flow_meter_init(dev, port_attr->nb_meters, 1, 1, nb_q_updated))
                        goto err;
        /* Add global actions. */
@@ -6972,7 +7033,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
                        goto err;
                }
        }
-       if (port_attr->nb_conn_tracks) {
+       if (port_attr->nb_conn_tracks || (host_priv && host_priv->hws_ctpool)) {
                mem_size = sizeof(struct mlx5_aso_sq) * nb_q_updated +
                           sizeof(*priv->ct_mng);
                priv->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, mem_size,
@@ -6986,7 +7047,7 @@ flow_hw_configure(struct rte_eth_dev *dev,
                        goto err;
                priv->sh->ct_aso_en = 1;
        }
-       if (port_attr->nb_counters) {
+       if (port_attr->nb_counters || (host_priv && host_priv->hws_cpool)) {
                priv->hws_cpool = mlx5_hws_cnt_pool_create(dev, port_attr,
                                                           nb_queue);
                if (priv->hws_cpool == NULL)
@@ -7055,6 +7116,10 @@ flow_hw_configure(struct rte_eth_dev *dev,
        }
        if (_queue_attr)
                mlx5_free(_queue_attr);
+       if (priv->shared_host) {
+               __atomic_fetch_sub(&host_priv->shared_refcnt, 1, 
__ATOMIC_RELAXED);
+               priv->shared_host = NULL;
+       }
        /* Do not overwrite the internal errno information. */
        if (ret)
                return ret;
@@ -7133,6 +7198,11 @@ flow_hw_resource_release(struct rte_eth_dev *dev)
        mlx5_free(priv->hw_q);
        priv->hw_q = NULL;
        claim_zero(mlx5dr_context_close(priv->dr_ctx));
+       if (priv->shared_host) {
+               struct mlx5_priv *host_priv = priv->shared_host->data->dev_private;
+               __atomic_fetch_sub(&host_priv->shared_refcnt, 1, __ATOMIC_RELAXED);
+               priv->shared_host = NULL;
+       }
        priv->dr_ctx = NULL;
        priv->nb_queue = 0;
 }
diff --git a/drivers/net/mlx5/mlx5_hws_cnt.c b/drivers/net/mlx5/mlx5_hws_cnt.c
index 05cc954903..797844439f 100644
--- a/drivers/net/mlx5/mlx5_hws_cnt.c
+++ b/drivers/net/mlx5/mlx5_hws_cnt.c
@@ -619,6 +619,12 @@ mlx5_hws_cnt_pool_create(struct rte_eth_dev *dev,
        int ret = 0;
        size_t sz;
 
+       if (pattr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+               DRV_LOG(ERR, "Counters are not supported "
+                            "in cross vHCA sharing mode");
+               rte_errno = ENOTSUP;
+               return NULL;
+       }
        /* init cnt service if not. */
        if (priv->sh->cnt_svc == NULL) {
                ret = mlx5_hws_cnt_svc_init(priv->sh);
@@ -1190,6 +1196,12 @@ mlx5_hws_age_pool_init(struct rte_eth_dev *dev,
 
        strict_queue = !!(attr->flags & RTE_FLOW_PORT_FLAG_STRICT_QUEUE);
        MLX5_ASSERT(priv->hws_cpool);
+       if (attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) {
+               DRV_LOG(ERR, "Aging sn not supported "
+                            "in cross vHCA sharing mode");
+               rte_errno = ENOTSUP;
+               return -ENOTSUP;
+       }
        nb_alloc_cnts = mlx5_hws_cnt_pool_get_size(priv->hws_cpool);
        if (strict_queue) {
                rsize = mlx5_hws_aged_out_q_ring_size_get(nb_alloc_cnts,
-- 
2.18.1
