Use common device context structure as a sh field.

The common device context (struct mlx5_dev_ctx) is now created once per
device in mlx5_os_net_probe(): the context is opened with DevX first
and, when DevX is not supported, with Verbs, and the Protection Domain
is allocated as part of this preparation. The prepared dev_ctx is passed
down through the probe functions to mlx5_dev_spawn() and stored in the
shared device context as sh->dev_ctx.

All direct uses of sh->ctx and sh->pd are replaced with sh->dev_ctx->ctx
and sh->dev_ctx->pd, the mlx5_os_open_device() and mlx5_os_get_pdn()
helpers are removed, and the spawn data no longer provides phys_dev and
numa_node. The Tx doorbell mapping devarg parser (mlx5_parse_db_map_arg)
is exported from the common library so that the Verbs open path can
configure MLX5_SHUT_UP_BF before opening the device.
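For illustration only, a minimal standalone sketch (simplified,
hypothetical stand-in types, not the driver's real definitions) of the
ownership change: the device context is created once at probe time and
the shared context only references it through the new dev_ctx field.

  /*
   * Illustrative sketch only (simplified, hypothetical types): the shared
   * context keeps a pointer to one common device context instead of
   * owning separate ctx/pd/numa_node fields.
   */
  #include <stdio.h>
  #include <stdlib.h>

  struct dev_ctx {                 /* stands in for struct mlx5_dev_ctx */
          void *ctx;               /* device context (Verbs or DevX) */
          void *pd;                /* Protection Domain */
          int numa_node;
  };

  struct shared_ctx {              /* stands in for struct mlx5_dev_ctx_shared */
          struct dev_ctx *dev_ctx; /* replaces the old ctx/pd/numa_node fields */
  };

  int main(void)
  {
          struct dev_ctx *dc = calloc(1, sizeof(*dc)); /* created once at probe */
          struct shared_ctx sh = { .dev_ctx = dc };    /* stored at spawn time */

          if (dc == NULL)
                  return 1;
          /* Former sh->ctx / sh->pd accesses now go through dev_ctx. */
          printf("ctx=%p pd=%p numa=%d\n",
                 sh.dev_ctx->ctx, sh.dev_ctx->pd, sh.dev_ctx->numa_node);
          free(dc);
          return 0;
  }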

Signed-off-by: Michael Baum <michae...@nvidia.com>
---
 drivers/common/mlx5/mlx5_common.c            |   2 +-
 drivers/common/mlx5/mlx5_common.h            |   6 +-
 drivers/common/mlx5/version.map              |   2 +-
 drivers/common/mlx5/windows/mlx5_common_os.c |   2 +-
 drivers/net/mlx5/linux/mlx5_ethdev_os.c      |   8 +-
 drivers/net/mlx5/linux/mlx5_mp_os.c          |   9 +-
 drivers/net/mlx5/linux/mlx5_os.c             | 432 ++++++++++---------
 drivers/net/mlx5/linux/mlx5_verbs.c          |  55 +--
 drivers/net/mlx5/mlx5.c                      | 103 +++--
 drivers/net/mlx5/mlx5.h                      |  12 +-
 drivers/net/mlx5/mlx5_devx.c                 |  34 +-
 drivers/net/mlx5/mlx5_flow.c                 |   6 +-
 drivers/net/mlx5/mlx5_flow_aso.c             |  24 +-
 drivers/net/mlx5/mlx5_flow_dv.c              |  51 +--
 drivers/net/mlx5/mlx5_flow_verbs.c           |   4 +-
 drivers/net/mlx5/mlx5_mr.c                   |  14 +-
 drivers/net/mlx5/mlx5_txpp.c                 |  17 +-
 drivers/net/mlx5/windows/mlx5_ethdev_os.c    |  14 +-
 drivers/net/mlx5/windows/mlx5_os.c           | 113 ++---
 19 files changed, 453 insertions(+), 455 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c
index be3d0f2627..ffd2c2c129 100644
--- a/drivers/common/mlx5/mlx5_common.c
+++ b/drivers/common/mlx5/mlx5_common.c
@@ -152,7 +152,7 @@ mlx5_common_args_check(const char *key, const char *val, void *opaque)
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
+int
 mlx5_parse_db_map_arg(struct rte_devargs *devargs, int *dbnc)
 {
        struct rte_kvargs *kvlist;
diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h
index 10061f364f..c4e86c3175 100644
--- a/drivers/common/mlx5/mlx5_common.h
+++ b/drivers/common/mlx5/mlx5_common.h
@@ -459,14 +459,16 @@ __rte_internal
 bool
 mlx5_dev_is_pci(const struct rte_device *dev);
 
+__rte_internal
+int
+mlx5_parse_db_map_arg(struct rte_devargs *devargs, int *dbnc);
+
 /* mlx5_common_os.c */
 
 int mlx5_os_devx_open_device(struct mlx5_dev_ctx *dev_ctx,
                             struct rte_device *dev, int dbnc,
                             uint32_t classes);
 int mlx5_os_pd_create(struct mlx5_dev_ctx *dev_ctx);
-__rte_internal
-struct devx_device_bdf *mlx5_os_get_devx_device(struct rte_device *dev);
 
 
 #endif /* RTE_PMD_MLX5_COMMON_H_ */
diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map
index 18856c198e..a1a8bae5bd 100644
--- a/drivers/common/mlx5/version.map
+++ b/drivers/common/mlx5/version.map
@@ -9,6 +9,7 @@ INTERNAL {
 
        mlx5_common_init;
 
+       mlx5_parse_db_map_arg; # WINDOWS_NO_EXPORT
        mlx5_dev_ctx_release;
        mlx5_dev_ctx_prepare;
 
@@ -145,7 +146,6 @@ INTERNAL {
        mlx5_os_dealloc_pd;
        mlx5_os_dereg_mr;
        mlx5_os_get_ibv_dev; # WINDOWS_NO_EXPORT
-       mlx5_os_get_devx_device;
        mlx5_os_reg_mr;
        mlx5_os_umem_dereg;
        mlx5_os_umem_reg;
diff --git a/drivers/common/mlx5/windows/mlx5_common_os.c b/drivers/common/mlx5/windows/mlx5_common_os.c
index 12819383c1..5d178b0452 100644
--- a/drivers/common/mlx5/windows/mlx5_common_os.c
+++ b/drivers/common/mlx5/windows/mlx5_common_os.c
@@ -144,7 +144,7 @@ mlx5_match_devx_devices_to_addr(struct devx_device_bdf *devx_bdf,
  * @return
  *   A device match on success, NULL otherwise and rte_errno is set.
  */
-struct devx_device_bdf *
+static struct devx_device_bdf *
 mlx5_os_get_devx_device(struct rte_device *dev)
 {
        int n;
diff --git a/drivers/net/mlx5/linux/mlx5_ethdev_os.c b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
index f34133e2c6..b4bbf841cc 100644
--- a/drivers/net/mlx5/linux/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/linux/mlx5_ethdev_os.c
@@ -324,7 +324,7 @@ int
 mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct ibv_context *ctx = priv->sh->ctx;
+       struct ibv_context *ctx = priv->sh->dev_ctx->ctx;
        struct ibv_values_ex values;
        int err = 0;
 
@@ -778,7 +778,7 @@ mlx5_dev_interrupt_handler(void *cb_arg)
                struct rte_eth_dev *dev;
                uint32_t tmp;
 
-               if (mlx5_glue->get_async_event(sh->ctx, &event))
+               if (mlx5_glue->get_async_event(sh->dev_ctx->ctx, &event))
                        break;
                /* Retrieve and check IB port index. */
                tmp = (uint32_t)event.element.port_num;
@@ -987,10 +987,10 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
 int
 mlx5_is_removed(struct rte_eth_dev *dev)
 {
-       struct ibv_device_attr device_attr;
+       struct ibv_device_attr dev_attr;
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       if (mlx5_glue->query_device(priv->sh->ctx, &device_attr) == EIO)
+       if (mlx5_glue->query_device(priv->sh->dev_ctx->ctx, &dev_attr) == EIO)
                return 1;
        return 0;
 }
diff --git a/drivers/net/mlx5/linux/mlx5_mp_os.c b/drivers/net/mlx5/linux/mlx5_mp_os.c
index 3a4aa766f8..53e372694c 100644
--- a/drivers/net/mlx5/linux/mlx5_mp_os.c
+++ b/drivers/net/mlx5/linux/mlx5_mp_os.c
@@ -29,6 +29,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
                (const struct mlx5_mp_param *)mp_msg->param;
        struct rte_eth_dev *dev;
        struct mlx5_priv *priv;
+       struct mlx5_dev_ctx *dev_ctx;
        struct mr_cache_entry entry;
        uint32_t lkey;
        int ret;
@@ -41,10 +42,11 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
        }
        dev = &rte_eth_devices[param->port_id];
        priv = dev->data->dev_private;
+       dev_ctx = priv->sh->dev_ctx;
        switch (param->type) {
        case MLX5_MP_REQ_CREATE_MR:
                mp_init_msg(&priv->mp_id, &mp_res, param->type);
-               lkey = mlx5_mr_create_primary(priv->sh->pd,
+               lkey = mlx5_mr_create_primary(dev_ctx->pd,
                                              &priv->sh->share_cache,
                                              &entry, param->args.addr,
                                              priv->config.mr_ext_memseg_en);
@@ -55,7 +57,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer)
        case MLX5_MP_REQ_VERBS_CMD_FD:
                mp_init_msg(&priv->mp_id, &mp_res, param->type);
                mp_res.num_fds = 1;
-               mp_res.fds[0] = ((struct ibv_context *)priv->sh->ctx)->cmd_fd;
+               mp_res.fds[0] = ((struct ibv_context *)dev_ctx->ctx)->cmd_fd;
                res->result = 0;
                ret = rte_mp_reply(&mp_res, peer);
                break;
@@ -202,7 +204,8 @@ mp_req_on_rxtx(struct rte_eth_dev *dev, enum mlx5_mp_req_type type)
        mp_init_msg(&priv->mp_id, &mp_req, type);
        if (type == MLX5_MP_REQ_START_RXTX) {
                mp_req.num_fds = 1;
-               mp_req.fds[0] = ((struct ibv_context *)priv->sh->ctx)->cmd_fd;
+               mp_req.fds[0] =
+                       ((struct ibv_context *)priv->sh->dev_ctx->ctx)->cmd_fd;
        }
        ret = rte_mp_request_sync(&mp_req, &mp_rep, &ts);
        if (ret) {
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index b4670fad6e..e2a7c3d09c 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -214,7 +214,7 @@ mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
 static void *
 mlx5_alloc_verbs_buf(size_t size, void *data)
 {
-       struct mlx5_dev_ctx_shared *sh = data;
+       struct mlx5_dev_ctx *dev_ctx = data;
        void *ret;
        size_t alignment = rte_mem_page_size();
        if (alignment == (size_t)-1) {
@@ -224,7 +224,7 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
        }
 
        MLX5_ASSERT(data != NULL);
-       ret = mlx5_malloc(0, size, alignment, sh->numa_node);
+       ret = mlx5_malloc(0, size, alignment, dev_ctx->numa_node);
        if (!ret && size)
                rte_errno = ENOMEM;
        return ret;
@@ -290,7 +290,7 @@ __mlx5_discovery_misc5_cap(struct mlx5_priv *priv)
                         metadata_reg_c_0, 0xffff);
        }
 #endif
-       matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->ctx,
+       matcher = mlx5_glue->dv_create_flow_matcher(priv->sh->dev_ctx->ctx,
                                                    &dv_attr, tbl);
        if (matcher) {
                priv->sh->misc5_cap = 1;
@@ -389,7 +389,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
        void *domain;
 
        /* Reference counter is zero, we should initialize structures. */
-       domain = mlx5_glue->dr_create_domain(sh->ctx,
+       domain = mlx5_glue->dr_create_domain(sh->dev_ctx->ctx,
                                             MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
        if (!domain) {
                DRV_LOG(ERR, "ingress mlx5dv_dr_create_domain failed");
@@ -397,7 +397,7 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
                goto error;
        }
        sh->rx_domain = domain;
-       domain = mlx5_glue->dr_create_domain(sh->ctx,
+       domain = mlx5_glue->dr_create_domain(sh->dev_ctx->ctx,
                                             MLX5DV_DR_DOMAIN_TYPE_NIC_TX);
        if (!domain) {
                DRV_LOG(ERR, "egress mlx5dv_dr_create_domain failed");
@@ -407,8 +407,8 @@ mlx5_alloc_shared_dr(struct mlx5_priv *priv)
        sh->tx_domain = domain;
 #ifdef HAVE_MLX5DV_DR_ESWITCH
        if (priv->config.dv_esw_en) {
-               domain  = mlx5_glue->dr_create_domain
-                       (sh->ctx, MLX5DV_DR_DOMAIN_TYPE_FDB);
+               domain = mlx5_glue->dr_create_domain(sh->dev_ctx->ctx,
+                                                    MLX5DV_DR_DOMAIN_TYPE_FDB);
                if (!domain) {
                        DRV_LOG(ERR, "FDB mlx5dv_dr_create_domain failed");
                        err = errno;
@@ -816,7 +816,7 @@ static void
 mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       void *ctx = priv->sh->ctx;
+       void *ctx = priv->sh->dev_ctx->ctx;
 
        priv->q_counters = mlx5_devx_cmd_queue_counter_alloc(ctx);
        if (!priv->q_counters) {
@@ -833,7 +833,7 @@ mlx5_queue_counter_id_prepare(struct rte_eth_dev *dev)
                                                    .wq_type = IBV_WQT_RQ,
                                                    .max_wr = 1,
                                                    .max_sge = 1,
-                                                   .pd = priv->sh->pd,
+                                                   .pd = priv->sh->dev_ctx->pd,
                                                    .cq = cq,
                                                });
                        if (wq) {
@@ -934,6 +934,8 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
  *
  * @param dpdk_dev
  *   Backing DPDK device.
+ * @param dev_ctx
+ *   Pointer to the device context structure.
  * @param spawn
  *   Verbs device parameters (name, port, switch_info) to spawn.
  * @param config
@@ -950,6 +952,7 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
  */
 static struct rte_eth_dev *
 mlx5_dev_spawn(struct rte_device *dpdk_dev,
+              struct mlx5_dev_ctx *dev_ctx,
               struct mlx5_dev_spawn_data *spawn,
               struct mlx5_dev_config *config,
               struct rte_eth_devargs *eth_da)
@@ -1073,10 +1076,9 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                        config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
        }
        mlx5_malloc_mem_select(config->sys_mem_en);
-       sh = mlx5_alloc_shared_dev_ctx(spawn, config);
+       sh = mlx5_alloc_shared_dev_ctx(spawn, dev_ctx, config);
        if (!sh)
                return NULL;
-       config->devx = sh->devx;
 #ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
        config->dest_tir = 1;
 #endif
@@ -1093,7 +1095,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
        dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
 #endif
-       mlx5_glue->dv_query_device(sh->ctx, &dv_attr);
+       mlx5_glue->dv_query_device(sh->dev_ctx->ctx, &dv_attr);
        if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
                if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
                        DRV_LOG(DEBUG, "enhanced MPW is supported");
@@ -1170,7 +1172,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 #endif
        config->mpls_en = mpls_en;
        /* Check port status. */
-       err = mlx5_glue->query_port(sh->ctx, spawn->phys_port, &port_attr);
+       err = mlx5_glue->query_port(sh->dev_ctx->ctx, spawn->phys_port,
+                                   &port_attr);
        if (err) {
                DRV_LOG(ERR, "port query failed: %s", strerror(err));
                goto error;
@@ -1220,7 +1223,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
         * register is defined by mask.
         */
        if (switch_info->representor || switch_info->master) {
-               err = mlx5_glue->devx_port_query(sh->ctx,
+               err = mlx5_glue->devx_port_query(sh->dev_ctx->ctx,
                                                 spawn->phys_port,
                                                 &vport_info);
                if (err) {
@@ -1377,7 +1380,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                config->mps == MLX5_MPW ? "legacy " : "",
                config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
        if (config->devx) {
-               err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
+               err = mlx5_devx_cmd_query_hca_attr(sh->dev_ctx->ctx,
+                                                  &config->hca_attr);
                if (err) {
                        err = -err;
                        goto error;
@@ -1600,7 +1604,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 
                err = config->hca_attr.access_register_user ?
                        mlx5_devx_cmd_register_read
-                               (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
+                               (sh->dev_ctx->ctx, MLX5_REGISTER_ID_MTUTC, 0,
                                reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
                if (!err) {
                        uint32_t ts_mode;
@@ -1741,12 +1745,12 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        if (!priv->mtr_profile_tbl)
                goto error;
        /* Hint libmlx5 to use PMD allocator for data plane resources */
-       mlx5_glue->dv_set_context_attr(sh->ctx,
+       mlx5_glue->dv_set_context_attr(sh->dev_ctx->ctx,
                        MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
                        (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
                                .alloc = &mlx5_alloc_verbs_buf,
                                .free = &mlx5_free_verbs_buf,
-                               .data = sh,
+                               .data = dev_ctx,
                        }));
        /* Bring Ethernet device up. */
        DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
@@ -1923,9 +1927,10 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                        eth_dev->data->dev_private = NULL;
        }
        if (eth_dev != NULL) {
-               /* mac_addrs must not be freed alone because part of
+               /*
+                * mac_addrs must not be freed alone because part of
                 * dev_private
-                **/
+                */
                eth_dev->data->mac_addrs = NULL;
                rte_eth_dev_release_port(eth_dev);
        }
@@ -2144,6 +2149,8 @@ mlx5_os_config_default(struct mlx5_dev_config *config)
  *
  * @param[in] pci_dev
  *   PCI device information.
+ * @param dev_ctx
+ *   Pointer to the device context structure.
  * @param[in] req_eth_da
  *   Requested ethdev device argument.
  * @param[in] owner_id
@@ -2154,8 +2161,9 @@ mlx5_os_config_default(struct mlx5_dev_config *config)
  */
 static int
 mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
+                    struct mlx5_dev_ctx *dev_ctx,
                     struct rte_eth_devargs *req_eth_da,
-                    uint16_t owner_id)
+                    uint16_t owner_id, uint8_t devx)
 {
        struct ibv_device **ibv_list;
        /*
@@ -2181,13 +2189,14 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
         *   < 0 - no bonding device (single one)
         *  >= 0 - bonding device (value is slave PF index)
         */
-       int bd = -1;
+       int bd;
        struct mlx5_dev_spawn_data *list = NULL;
        struct mlx5_dev_config dev_config;
        unsigned int dev_config_vf;
        struct rte_eth_devargs eth_da = *req_eth_da;
        struct rte_pci_addr owner_pci = pci_dev->addr; /* Owner PF. */
        struct mlx5_bond_info bond_info;
+       const char *ibdev_name = mlx5_os_get_ctx_device_name(dev_ctx->ctx);
        int ret = -1;
 
        errno = 0;
@@ -2206,38 +2215,22 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
        int nl_rdma = mlx5_nl_init(NETLINK_RDMA);
        unsigned int i;
 
-       while (ret-- > 0) {
-               struct rte_pci_addr pci_addr;
+       bd = mlx5_device_bond_pci_match(ibdev_name, &owner_pci, nl_rdma,
+                                       owner_id, &bond_info);
+       if (bd >= 0) {
+               /* Amend owner pci address if owner PF ID specified. */
+               if (eth_da.nb_representor_ports)
+                       owner_pci.function += owner_id;
+               DRV_LOG(INFO,
+                       "PCI information matches for slave %d bonding device 
\"%s\".",
+                       bd, ibdev_name);
+               nd++;
+       } else {
+               while (ret-- > 0) {
+                       struct rte_pci_addr pci_addr;
 
-               DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
-               bd = mlx5_device_bond_pci_match(ibv_list[ret]->name, &owner_pci,
-                                               nl_rdma, owner_id, &bond_info);
-               if (bd >= 0) {
-                       /*
-                        * Bonding device detected. Only one match is allowed,
-                        * the bonding is supported over multi-port IB device,
-                        * there should be no matches on representor PCI
-                        * functions or non VF LAG bonding devices with
-                        * specified address.
-                        */
-                       if (nd) {
-                               DRV_LOG(ERR,
-                                       "multiple PCI match on bonding device"
-                                       "\"%s\" found", ibv_list[ret]->name);
-                               rte_errno = ENOENT;
-                               ret = -rte_errno;
-                               goto exit;
-                       }
-                       /* Amend owner pci address if owner PF ID specified. */
-                       if (eth_da.nb_representor_ports)
-                               owner_pci.function += owner_id;
-                       DRV_LOG(INFO,
-                               "PCI information matches for slave %d bonding device \"%s\"",
-                               bd, ibv_list[ret]->name);
-                       ibv_match[nd++] = ibv_list[ret];
-                       break;
-               } else {
-                       /* Bonding device not found. */
+                       DRV_LOG(DEBUG, "checking device \"%s\"",
+                               ibv_list[ret]->name);
                        if (mlx5_get_pci_addr(ibv_list[ret]->ibdev_path,
                                              &pci_addr))
                                continue;
@@ -2246,22 +2239,26 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
                            owner_pci.devid != pci_addr.devid ||
                            owner_pci.function != pci_addr.function)
                                continue;
-                       DRV_LOG(INFO, "PCI information matches for device \"%s\"",
+                       DRV_LOG(INFO,
+                               "PCI information matches for device \"%s\"",
                                ibv_list[ret]->name);
                        ibv_match[nd++] = ibv_list[ret];
                }
        }
        ibv_match[nd] = NULL;
-       if (!nd) {
-               /* No device matches, just complain and bail out. */
-               DRV_LOG(WARNING,
-                       "no Verbs device matches PCI device " PCI_PRI_FMT ","
-                       " are kernel drivers loaded?",
-                       owner_pci.domain, owner_pci.bus,
-                       owner_pci.devid, owner_pci.function);
-               rte_errno = ENOENT;
-               ret = -rte_errno;
-               goto exit;
+       if (bd >= 0 && nd > 1) {
+               /*
+                * Bonding device detected. Only one match is allowed, the
+                * bonding is supported over multi-port IB device, there should
+                * be no matches on representor PCI functions or non VF LAG
+                * bonding devices with specified address.
+                */
+               DRV_LOG(ERR,
+                       "Multiple PCI match on bonding device \"%s\" found.",
+                       ibdev_name);
+               rte_errno = ENOENT;
+               ret = -rte_errno;
+               goto exit;
        }
        if (nd == 1) {
                /*
@@ -2270,11 +2267,11 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
                 * number and check the representors existence.
                 */
                if (nl_rdma >= 0)
-                       np = mlx5_nl_portnum(nl_rdma, ibv_match[0]->name);
+                       np = mlx5_nl_portnum(nl_rdma, ibdev_name);
                if (!np)
                        DRV_LOG(WARNING,
                                "Cannot get IB device \"%s\" ports number.",
-                               ibv_match[0]->name);
+                               ibdev_name);
                if (bd >= 0 && !np) {
                        DRV_LOG(ERR, "Cannot get ports for bonding device.");
                        rte_errno = ENOENT;
@@ -2306,15 +2303,12 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
                        list[ns].bond_info = &bond_info;
                        list[ns].max_port = np;
                        list[ns].phys_port = i;
-                       list[ns].phys_dev = ibv_match[0];
-                       list[ns].phys_dev_name = ibv_match[0]->name;
+                       list[ns].phys_dev_name = ibdev_name;
                        list[ns].eth_dev = NULL;
                        list[ns].pci_dev = pci_dev;
                        list[ns].pf_bond = bd;
-                       list[ns].ifindex = mlx5_nl_ifindex
-                               (nl_rdma,
-                               mlx5_os_get_dev_device_name
-                                               (list[ns].phys_dev), i);
+                       list[ns].ifindex = mlx5_nl_ifindex(nl_rdma,
+                                                          ibdev_name, i);
                        if (!list[ns].ifindex) {
                                /*
                                 * No network interface index found for the
@@ -2403,17 +2397,15 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
                        list[ns].bond_info = NULL;
                        list[ns].max_port = 1;
                        list[ns].phys_port = 1;
-                       list[ns].phys_dev = ibv_match[i];
-                       list[ns].phys_dev_name = ibv_match[i]->name;
+                       list[ns].phys_dev_name = ibdev_name;
                        list[ns].eth_dev = NULL;
                        list[ns].pci_dev = pci_dev;
                        list[ns].pf_bond = -1;
                        list[ns].ifindex = 0;
                        if (nl_rdma >= 0)
-                               list[ns].ifindex = mlx5_nl_ifindex
-                               (nl_rdma,
-                               mlx5_os_get_dev_device_name
-                                               (list[ns].phys_dev), 1);
+                               list[ns].ifindex = mlx5_nl_ifindex(nl_rdma,
+                                                                  ibdev_name,
+                                                                  1);
                        if (!list[ns].ifindex) {
                                char ifname[IF_NAMESIZE];
 
@@ -2477,7 +2469,7 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
                                 * May be SRIOV is not enabled or there is no
                                 * representors.
                                 */
-                               DRV_LOG(INFO, "no E-Switch support detected");
+                               DRV_LOG(INFO, "No E-Switch support detected.");
                                ns++;
                                break;
                        }
@@ -2546,12 +2538,11 @@ mlx5_os_pci_probe_pf(struct rte_pci_device *pci_dev,
 
                /* Default configuration. */
                mlx5_os_config_default(&dev_config);
+               dev_config.devx = devx;
                dev_config.vf = dev_config_vf;
                dev_config.allow_duplicate_pattern = 1;
-               list[i].numa_node = pci_dev->device.numa_node;
-               list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
-                                                &list[i],
-                                                &dev_config,
+               list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device, dev_ctx,
+                                                &list[i], &dev_config,
                                                 &eth_da);
                if (!list[i].eth_dev) {
                        if (rte_errno != EBUSY && rte_errno != EEXIST)
@@ -2671,7 +2662,8 @@ mlx5_os_parse_eth_devargs(struct rte_device *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-mlx5_os_pci_probe(struct rte_pci_device *pci_dev)
+mlx5_os_pci_probe(struct rte_pci_device *pci_dev, struct mlx5_dev_ctx *dev_ctx,
+                 uint8_t devx)
 {
        struct rte_eth_devargs eth_da = { .nb_ports = 0 };
        int ret = 0;
@@ -2684,8 +2676,8 @@ mlx5_os_pci_probe(struct rte_pci_device *pci_dev)
        if (eth_da.nb_ports > 0) {
                /* Iterate all port if devargs pf is range: "pf[0-1]vf[...]". */
                for (p = 0; p < eth_da.nb_ports; p++) {
-                       ret = mlx5_os_pci_probe_pf(pci_dev, &eth_da,
-                                                  eth_da.ports[p]);
+                       ret = mlx5_os_pci_probe_pf(pci_dev, dev_ctx, &eth_da,
+                                                  eth_da.ports[p], devx);
                        if (ret)
                                break;
                }
@@ -2698,14 +2690,15 @@ mlx5_os_pci_probe(struct rte_pci_device *pci_dev)
                        mlx5_net_remove(&pci_dev->device);
                }
        } else {
-               ret = mlx5_os_pci_probe_pf(pci_dev, &eth_da, 0);
+               ret = mlx5_os_pci_probe_pf(pci_dev, dev_ctx, &eth_da, 0, devx);
        }
        return ret;
 }
 
 /* Probe a single SF device on auxiliary bus, no representor support. */
 static int
-mlx5_os_auxiliary_probe(struct rte_device *dev)
+mlx5_os_auxiliary_probe(struct rte_device *dev, struct mlx5_dev_ctx *dev_ctx,
+                       uint8_t devx)
 {
        struct rte_eth_devargs eth_da = { .nb_ports = 0 };
        struct mlx5_dev_config config;
@@ -2721,22 +2714,19 @@ mlx5_os_auxiliary_probe(struct rte_device *dev)
        /* Set default config data. */
        mlx5_os_config_default(&config);
        config.sf = 1;
+       config.devx = devx;
        /* Init spawn data. */
        spawn.max_port = 1;
        spawn.phys_port = 1;
-       spawn.phys_dev = mlx5_os_get_ibv_dev(dev);
-       if (spawn.phys_dev == NULL)
-               return -rte_errno;
-       spawn.phys_dev_name = mlx5_os_get_dev_device_name(spawn.phys_dev);
+       spawn.phys_dev_name = mlx5_os_get_ctx_device_name(dev_ctx->ctx);
        ret = mlx5_auxiliary_get_ifindex(dev->name);
        if (ret < 0) {
                DRV_LOG(ERR, "failed to get ethdev ifindex: %s", dev->name);
                return ret;
        }
        spawn.ifindex = ret;
-       spawn.numa_node = dev->numa_node;
        /* Spawn device. */
-       eth_dev = mlx5_dev_spawn(dev, &spawn, &config, &eth_da);
+       eth_dev = mlx5_dev_spawn(dev, dev_ctx, &spawn, &config, &eth_da);
        if (eth_dev == NULL)
                return -rte_errno;
        /* Post create. */
@@ -2750,38 +2740,8 @@ mlx5_os_auxiliary_probe(struct rte_device *dev)
        return 0;
 }
 
-/**
- * Net class driver callback to probe a device.
- *
- * This function probe PCI bus device(s) or a single SF on auxiliary bus.
- *
- * @param[in] dev
- *   Pointer to the generic device.
- *
- * @return
- *   0 on success, the function cannot fail.
- */
-int
-mlx5_os_net_probe(struct rte_device *dev)
-{
-       int ret;
-
-       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-               mlx5_pmd_socket_init();
-       ret = mlx5_init_once();
-       if (ret) {
-               DRV_LOG(ERR, "unable to init PMD global data: %s",
-                       strerror(rte_errno));
-               return -rte_errno;
-       }
-       if (mlx5_dev_is_pci(dev))
-               return mlx5_os_pci_probe(RTE_DEV_TO_PCI(dev));
-       else
-               return mlx5_os_auxiliary_probe(dev);
-}
-
 static int
-mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
+mlx5_config_doorbell_mapping_env(int dbnc)
 {
        char *env;
        int value;
@@ -2790,11 +2750,11 @@ mlx5_config_doorbell_mapping_env(const struct mlx5_dev_config *config)
        /* Get environment variable to store. */
        env = getenv(MLX5_SHUT_UP_BF);
        value = env ? !!strcmp(env, "0") : MLX5_ARG_UNSET;
-       if (config->dbnc == MLX5_ARG_UNSET)
+       if (dbnc == MLX5_ARG_UNSET)
                setenv(MLX5_SHUT_UP_BF, MLX5_SHUT_UP_BF_DEFAULT, 1);
        else
                setenv(MLX5_SHUT_UP_BF,
-                      config->dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
+                      dbnc == MLX5_TXDB_NCACHED ? "1" : "0", 1);
        return value;
 }
 
@@ -2810,104 +2770,163 @@ mlx5_restore_doorbell_mapping_env(int value)
 }
 
 /**
- * Extract pdn of PD object using DV API.
+ * Function API to open IB device using Verbs.
+ *
+ * This function calls the Linux glue APIs to open a device.
  *
- * @param[in] pd
- *   Pointer to the verbs PD object.
- * @param[out] pdn
- *   Pointer to the PD object number variable.
+ * @param dev_ctx
+ *   Pointer to the device context structure.
+ * @param dev
+ *   Pointer to the generic device.
+ * @param dbnc
+ *   Tx doorbell mapping devarg used to configure MLX5_SHUT_UP_BF.
  *
  * @return
- *   0 on success, error value otherwise.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-int
-mlx5_os_get_pdn(void *pd, uint32_t *pdn)
+static int
+mlx5_verbs_open_device(struct mlx5_dev_ctx *dev_ctx, struct rte_device *dev,
+                      int dbnc)
 {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
-       struct mlx5dv_obj obj;
-       struct mlx5dv_pd pd_info;
-       int ret = 0;
+       struct ibv_device *ibv;
+       struct ibv_context *ctx = NULL;
+       int dbmap_env;
 
-       obj.pd.in = pd;
-       obj.pd.out = &pd_info;
-       ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
-       if (ret) {
-               DRV_LOG(DEBUG, "Fail to get PD object info");
+       ibv = mlx5_os_get_ibv_dev(dev);
+       if (!ibv)
+               return -rte_errno;
+       DRV_LOG(INFO, "Dev information matches for device \"%s\".", ibv->name);
+       /*
+        * Configure environment variable "MLX5_SHUT_UP_BF" before the device
+        * creation. The rdma_core library checks the variable at device
+        * creation and stores the result internally.
+        */
+       dbmap_env = mlx5_config_doorbell_mapping_env(dbnc);
+       /* Try to open IB device with Verbs. */
+       errno = 0;
+       ctx = mlx5_glue->open_device(ibv);
+       /*
+        * The environment variable is not needed anymore, all device creation
+        * attempts are completed.
+        */
+       mlx5_restore_doorbell_mapping_env(dbmap_env);
+       if (!ctx) {
+               DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
+               rte_errno = errno ? errno : ENODEV;
+               return -rte_errno;
+       }
+       /* Hint libmlx5 to use PMD allocator for data plane resources */
+       mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+                           (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
+                                   .alloc = &mlx5_alloc_verbs_buf,
+                                   .free = &mlx5_free_verbs_buf,
+                                   .data = dev_ctx,
+                           }));
+       dev_ctx->ctx = ctx;
+       return 0;
+}
+
+/**
+ * Initialize the device context and allocate all its resources.
+ *
+ * @param dev_ctx
+ *   Pointer to the device context structure.
+ * @param dev
+ *   Pointer to the generic device.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_verbs_dev_ctx_prepare(struct mlx5_dev_ctx *dev_ctx, struct rte_device *dev)
+{
+       int dbnc = MLX5_ARG_UNSET;
+       int ret;
+
+       /*
+        * Parse Tx doorbell mapping parameter. It helps to configure
+        * environment variable "MLX5_SHUT_UP_BF" before the device creation.
+        */
+       ret = mlx5_parse_db_map_arg(dev->devargs, &dbnc);
+       if (ret < 0)
+               return ret;
+       /* Open device using Verbs. */
+       ret = mlx5_verbs_open_device(dev_ctx, dev, dbnc);
+       if (ret < 0)
                return ret;
+       /* Allocate Protection Domain object. */
+       dev_ctx->pd = mlx5_glue->alloc_pd(dev_ctx->ctx);
+       if (dev_ctx->pd == NULL) {
+               DRV_LOG(ERR, "Failed to allocate PD.");
+               rte_errno = errno ? errno : ENOMEM;
+               claim_zero(mlx5_glue->close_device(dev_ctx->ctx));
+               dev_ctx->ctx = NULL;
+               return -rte_errno;
        }
-       *pdn = pd_info.pdn;
        return 0;
-#else
-       (void)pd;
-       (void)pdn;
-       return -ENOTSUP;
-#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
 }
 
+
 /**
- * Function API to open IB device.
+ * Net class driver callback to probe a device.
  *
- * This function calls the Linux glue APIs to open a device.
+ * This function probes PCI bus device(s) or a single SF on auxiliary bus.
  *
- * @param[in] spawn
- *   Pointer to the IB device attributes (name, port, etc).
- * @param[out] config
- *   Pointer to device configuration structure.
- * @param[out] sh
- *   Pointer to shared context structure.
+ * @param[in] dev
+ *   Pointer to the generic device.
  *
  * @return
- *   0 on success, a positive error value otherwise.
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
-                    const struct mlx5_dev_config *config,
-                    struct mlx5_dev_ctx_shared *sh)
+mlx5_os_net_probe(struct rte_device *dev)
 {
-       int dbmap_env;
-       int err = 0;
+       struct mlx5_dev_ctx *dev_ctx;
+       uint8_t devx = 0;
+       int ret;
 
-       pthread_mutex_init(&sh->txpp.mutex, NULL);
+       dev_ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_dev_ctx),
+                             RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+       if (dev_ctx == NULL) {
+               DRV_LOG(ERR, "Device context allocation failure.");
+               rte_errno = ENOMEM;
+               return -rte_errno;
+       }
        /*
-        * Configure environment variable "MLX5_BF_SHUT_UP"
-        * before the device creation. The rdma_core library
-        * checks the variable at device creation and
-        * stores the result internally.
+        * Initialize the device context and allocate all its resources.
+        * Try to do it with DV first, then usual Verbs.
         */
-       dbmap_env = mlx5_config_doorbell_mapping_env(config);
-       /* Try to open IB device with DV first, then usual Verbs. */
-       errno = 0;
-       sh->ctx = mlx5_glue->dv_open_device(spawn->phys_dev);
-       if (sh->ctx) {
-               sh->devx = 1;
-               DRV_LOG(DEBUG, "DevX is supported");
-               /* The device is created, no need for environment. */
-               mlx5_restore_doorbell_mapping_env(dbmap_env);
+       ret = mlx5_dev_ctx_prepare(dev_ctx, dev, MLX5_CLASS_ETH);
+       if (ret < 0) {
+               goto error;
+       } else if (dev_ctx->ctx) {
+               devx = 1;
+               DRV_LOG(DEBUG, "DevX is supported.");
        } else {
-               /* The environment variable is still configured. */
-               sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
-               err = errno ? errno : ENODEV;
-               /*
-                * The environment variable is not needed anymore,
-                * all device creation attempts are completed.
-                */
-               mlx5_restore_doorbell_mapping_env(dbmap_env);
-               if (!sh->ctx)
-                       return err;
-               DRV_LOG(DEBUG, "DevX is NOT supported");
-               err = 0;
-       }
-       if (!err && sh->ctx) {
-               /* Hint libmlx5 to use PMD allocator for data plane resources */
-               mlx5_glue->dv_set_context_attr(sh->ctx,
-                       MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
-                       (void *)((uintptr_t)&(struct mlx5dv_ctx_allocators){
-                               .alloc = &mlx5_alloc_verbs_buf,
-                               .free = &mlx5_free_verbs_buf,
-                               .data = sh,
-                       }));
+               ret = mlx5_verbs_dev_ctx_prepare(dev_ctx, dev);
+               if (ret < 0)
+                       goto error;
+               DRV_LOG(DEBUG, "DevX is NOT supported.");
        }
-       return err;
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               mlx5_pmd_socket_init();
+       ret = mlx5_init_once();
+       if (ret) {
+               DRV_LOG(ERR, "unable to init PMD global data: %s",
+                       strerror(rte_errno));
+               goto error;
+       }
+       if (mlx5_dev_is_pci(dev))
+               ret = mlx5_os_pci_probe(RTE_DEV_TO_PCI(dev), dev_ctx, devx);
+       else
+               ret = mlx5_os_auxiliary_probe(dev, dev_ctx, devx);
+       if (ret)
+               goto error;
+       return ret;
+error:
+       mlx5_dev_ctx_release(dev_ctx);
+       mlx5_free(dev_ctx);
+       return ret;
 }
 
 /**
@@ -2921,18 +2940,18 @@ mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
 void
 mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
 {
+       struct ibv_context *ctx = sh->dev_ctx->ctx;
        int ret;
        int flags;
 
        sh->intr_handle.fd = -1;
-       flags = fcntl(((struct ibv_context *)sh->ctx)->async_fd, F_GETFL);
-       ret = fcntl(((struct ibv_context *)sh->ctx)->async_fd,
-                   F_SETFL, flags | O_NONBLOCK);
+       flags = fcntl(ctx->async_fd, F_GETFL);
+       ret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
        if (ret) {
                DRV_LOG(INFO, "failed to change file descriptor async event"
                        " queue");
        } else {
-               sh->intr_handle.fd = ((struct ibv_context *)sh->ctx)->async_fd;
+               sh->intr_handle.fd = ctx->async_fd;
                sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
                if (rte_intr_callback_register(&sh->intr_handle,
                                        mlx5_dev_interrupt_handler, sh)) {
@@ -2943,8 +2962,7 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
        if (sh->devx) {
 #ifdef HAVE_IBV_DEVX_ASYNC
                sh->intr_handle_devx.fd = -1;
-               sh->devx_comp =
-                       (void *)mlx5_glue->devx_create_cmd_comp(sh->ctx);
+               sh->devx_comp = (void *)mlx5_glue->devx_create_cmd_comp(ctx);
                struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
                if (!devx_comp) {
                        DRV_LOG(INFO, "failed to allocate devx_comp.");
diff --git a/drivers/net/mlx5/linux/mlx5_verbs.c b/drivers/net/mlx5/linux/mlx5_verbs.c
index d4fa202ac4..7c266981cd 100644
--- a/drivers/net/mlx5/linux/mlx5_verbs.c
+++ b/drivers/net/mlx5/linux/mlx5_verbs.c
@@ -249,9 +249,9 @@ mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
                cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
        }
 #endif
-       return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
-                                                             &cq_attr.ibv,
-                                                             &cq_attr.mlx5));
+       return mlx5_glue->cq_ex_to_cq
+                       (mlx5_glue->dv_create_cq(priv->sh->dev_ctx->ctx,
+                                                &cq_attr.ibv, &cq_attr.mlx5));
 }
 
 /**
@@ -288,7 +288,7 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
                .max_wr = wqe_n >> rxq_data->sges_n,
                /* Max number of scatter/gather elements in a WR. */
                .max_sge = 1 << rxq_data->sges_n,
-               .pd = priv->sh->pd,
+               .pd = priv->sh->dev_ctx->pd,
                .cq = rxq_obj->ibv_cq,
                .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
                .create_flags = (rxq_data->vlan_strip ?
@@ -323,10 +323,11 @@ mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
                        .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
                };
        }
-       rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
-                                             &wq_attr.mlx5);
+       rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->dev_ctx->ctx,
+                                             &wq_attr.ibv, &wq_attr.mlx5);
 #else
-       rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
+       rxq_obj->wq = mlx5_glue->create_wq(priv->sh->dev_ctx->ctx,
+                                          &wq_attr.ibv);
 #endif
        if (rxq_obj->wq) {
                /*
@@ -378,8 +379,8 @@ mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
        MLX5_ASSERT(tmpl);
        tmpl->rxq_ctrl = rxq_ctrl;
        if (rxq_ctrl->irq) {
-               tmpl->ibv_channel =
-                               mlx5_glue->create_comp_channel(priv->sh->ctx);
+               tmpl->ibv_channel = mlx5_glue->create_comp_channel
+                                                      (priv->sh->dev_ctx->ctx);
                if (!tmpl->ibv_channel) {
                        DRV_LOG(ERR, "Port %u: comp channel creation failure.",
                                dev->data->port_id);
@@ -542,12 +543,13 @@ mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
        /* Finalise indirection table. */
        for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
                wq[i] = wq[j];
-       ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,
-                                       &(struct ibv_rwq_ind_table_init_attr){
+       ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
+                                       (priv->sh->dev_ctx->ctx,
+                                        &(struct ibv_rwq_ind_table_init_attr){
                                                .log_ind_tbl_size = log_n,
                                                .ind_tbl = wq,
                                                .comp_mask = 0,
-                                       });
+                                        });
        if (!ind_tbl->ind_table) {
                rte_errno = errno;
                return -rte_errno;
@@ -609,7 +611,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
        }
 #endif
        qp = mlx5_glue->dv_create_qp
-                       (priv->sh->ctx,
+                       (priv->sh->dev_ctx->ctx,
                         &(struct ibv_qp_init_attr_ex){
                                .qp_type = IBV_QPT_RAW_PACKET,
                                .comp_mask =
@@ -625,12 +627,12 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                                        .rx_hash_fields_mask = hash_fields,
                                },
                                .rwq_ind_tbl = ind_tbl->ind_table,
-                               .pd = priv->sh->pd,
+                               .pd = priv->sh->dev_ctx->pd,
                          },
                          &qp_init_attr);
 #else
        qp = mlx5_glue->create_qp_ex
-                       (priv->sh->ctx,
+                       (priv->sh->dev_ctx->ctx,
                         &(struct ibv_qp_init_attr_ex){
                                .qp_type = IBV_QPT_RAW_PACKET,
                                .comp_mask =
@@ -646,7 +648,7 @@ mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                                        .rx_hash_fields_mask = hash_fields,
                                },
                                .rwq_ind_tbl = ind_tbl->ind_table,
-                               .pd = priv->sh->pd,
+                               .pd = priv->sh->dev_ctx->pd,
                         });
 #endif
        if (!qp) {
@@ -715,7 +717,7 @@ static int
 mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct ibv_context *ctx = priv->sh->ctx;
+       struct ibv_context *ctx = priv->sh->dev_ctx->ctx;
        struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
 
        if (rxq)
@@ -739,7 +741,7 @@ mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
                                                    .wq_type = IBV_WQT_RQ,
                                                    .max_wr = 1,
                                                    .max_sge = 1,
-                                                   .pd = priv->sh->pd,
+                                                   .pd = priv->sh->dev_ctx->pd,
                                                    .cq = rxq->ibv_cq,
                                              });
        if (!rxq->wq) {
@@ -779,7 +781,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
                goto error;
        rxq = priv->drop_queue.rxq;
        ind_tbl = mlx5_glue->create_rwq_ind_table
-                               (priv->sh->ctx,
+                               (priv->sh->dev_ctx->ctx,
                                 &(struct ibv_rwq_ind_table_init_attr){
                                        .log_ind_tbl_size = 0,
                                        .ind_tbl = (struct ibv_wq **)&rxq->wq,
@@ -792,7 +794,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
                rte_errno = errno;
                goto error;
        }
-       hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
+       hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->dev_ctx->ctx,
                 &(struct ibv_qp_init_attr_ex){
                        .qp_type = IBV_QPT_RAW_PACKET,
                        .comp_mask = IBV_QP_INIT_ATTR_PD |
@@ -805,7 +807,7 @@ mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
                                .rx_hash_fields_mask = 0,
                                },
                        .rwq_ind_tbl = ind_tbl,
-                       .pd = priv->sh->pd
+                       .pd = priv->sh->dev_ctx->pd
                 });
        if (!hrxq->qp) {
                DRV_LOG(DEBUG, "Port %u cannot allocate QP for drop queue.",
@@ -893,7 +895,7 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
        qp_attr.qp_type = IBV_QPT_RAW_PACKET,
        /* Do *NOT* enable this, completions events are managed per Tx burst. */
        qp_attr.sq_sig_all = 0;
-       qp_attr.pd = priv->sh->pd;
+       qp_attr.pd = priv->sh->dev_ctx->pd;
        qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
        if (txq_data->inlen_send)
                qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
@@ -901,7 +903,7 @@ mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
                qp_attr.max_tso_header = txq_ctrl->max_tso_header;
                qp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
        }
-       qp_obj = mlx5_glue->create_qp_ex(priv->sh->ctx, &qp_attr);
+       qp_obj = mlx5_glue->create_qp_ex(priv->sh->dev_ctx->ctx, &qp_attr);
        if (qp_obj == NULL) {
                DRV_LOG(ERR, "Port %u Tx queue %u QP creation failure.",
                        dev->data->port_id, idx);
@@ -947,7 +949,8 @@ mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
        }
        cqe_n = desc / MLX5_TX_COMP_THRESH +
                1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
-       txq_obj->cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
+       txq_obj->cq = mlx5_glue->create_cq(priv->sh->dev_ctx->ctx, cqe_n,
+                                          NULL, NULL, 0);
        if (txq_obj->cq == NULL) {
                DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
                        dev->data->port_id, idx);
@@ -1070,7 +1073,7 @@ mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
 #if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
-       struct ibv_context *ctx = sh->ctx;
+       struct ibv_context *ctx = sh->dev_ctx->ctx;
        struct mlx5dv_qp_init_attr qp_init_attr = {0};
        struct {
                struct ibv_cq_init_attr_ex ibv;
@@ -1114,7 +1117,7 @@ mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
                                &(struct ibv_qp_init_attr_ex){
                                        .qp_type = IBV_QPT_RAW_PACKET,
                                        .comp_mask = IBV_QP_INIT_ATTR_PD,
-                                       .pd = sh->pd,
+                                       .pd = sh->dev_ctx->pd,
                                        .send_cq = sh->self_lb.ibv_cq,
                                        .recv_cq = sh->self_lb.ibv_cq,
                                        .cap.max_recv_wr = 1,
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 08c9a6ec6f..f5f325d35a 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -910,7 +910,8 @@ mlx5_flex_parser_ecpri_alloc(struct rte_eth_dev *dev)
         * start after the common header that with the length of a DW(u32).
         */
        node.sample[1].flow_match_sample_field_base_offset = sizeof(uint32_t);
-       prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->ctx, &node);
+       prf->obj = mlx5_devx_cmd_create_flex_parser(priv->sh->dev_ctx->ctx,
+                                                   &node);
        if (!prf->obj) {
                DRV_LOG(ERR, "Failed to create flex parser node object.");
                return (rte_errno == 0) ? -ENODEV : -rte_errno;
@@ -967,6 +968,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
        uint32_t uar_mapping, retry;
        int err = 0;
        void *base_addr;
+       void *ctx = sh->dev_ctx->ctx;
 
        for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
 #ifdef MLX5DV_UAR_ALLOC_TYPE_NC
@@ -985,7 +987,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
                 */
                uar_mapping = 0;
 #endif
-               sh->tx_uar = mlx5_glue->devx_alloc_uar(sh->ctx, uar_mapping);
+               sh->tx_uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
 #ifdef MLX5DV_UAR_ALLOC_TYPE_NC
                if (!sh->tx_uar &&
                    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
@@ -1004,7 +1006,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
                        DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (BF)");
                        uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
                        sh->tx_uar = mlx5_glue->devx_alloc_uar
-                                                       (sh->ctx, uar_mapping);
+                                                            (ctx, uar_mapping);
                } else if (!sh->tx_uar &&
                           uar_mapping == MLX5DV_UAR_ALLOC_TYPE_NC) {
                        if (config->dbnc == MLX5_TXDB_NCACHED)
@@ -1017,7 +1019,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
                        DRV_LOG(DEBUG, "Failed to allocate Tx DevX UAR (NC)");
                        uar_mapping = MLX5DV_UAR_ALLOC_TYPE_BF;
                        sh->tx_uar = mlx5_glue->devx_alloc_uar
-                                                       (sh->ctx, uar_mapping);
+                                                            (ctx, uar_mapping);
                }
 #endif
                if (!sh->tx_uar) {
@@ -1044,8 +1046,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
        }
        for (retry = 0; retry < MLX5_ALLOC_UAR_RETRY; ++retry) {
                uar_mapping = 0;
-               sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
-                                                       (sh->ctx, uar_mapping);
+               sh->devx_rx_uar = mlx5_glue->devx_alloc_uar(ctx, uar_mapping);
 #ifdef MLX5DV_UAR_ALLOC_TYPE_NC
                if (!sh->devx_rx_uar &&
                    uar_mapping == MLX5DV_UAR_ALLOC_TYPE_BF) {
@@ -1057,7 +1058,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
                        DRV_LOG(DEBUG, "Failed to allocate Rx DevX UAR (BF)");
                        uar_mapping = MLX5DV_UAR_ALLOC_TYPE_NC;
                        sh->devx_rx_uar = mlx5_glue->devx_alloc_uar
-                                                       (sh->ctx, uar_mapping);
+                                                            (ctx, uar_mapping);
                }
 #endif
                if (!sh->devx_rx_uar) {
@@ -1098,6 +1099,8 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
  *
  * @param[in] spawn
  *   Pointer to the device attributes (name, port, etc).
+ * @param dev_ctx
+ *   Pointer to the device context structure.
  * @param[in] config
  *   Pointer to device configuration structure.
  *
@@ -1107,6 +1110,7 @@ mlx5_alloc_rxtx_uars(struct mlx5_dev_ctx_shared *sh,
  */
 struct mlx5_dev_ctx_shared *
 mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
+                         struct mlx5_dev_ctx *dev_ctx,
                          const struct mlx5_dev_config *config)
 {
        struct mlx5_dev_ctx_shared *sh;
@@ -1137,13 +1141,13 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                rte_errno  = ENOMEM;
                goto exit;
        }
-       sh->numa_node = spawn->numa_node;
+       sh->devx = config->devx;
+       sh->numa_node = dev_ctx->numa_node;
        if (spawn->bond_info)
                sh->bond = *spawn->bond_info;
-       err = mlx5_os_open_device(spawn, config, sh);
-       if (!sh->ctx)
-               goto error;
-       err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
+       pthread_mutex_init(&sh->txpp.mutex, NULL);
+       sh->dev_ctx = dev_ctx;
+       err = mlx5_os_get_dev_attr(sh->dev_ctx->ctx, &sh->device_attr);
        if (err) {
                DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
                goto error;
@@ -1151,39 +1155,27 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
        sh->refcnt = 1;
        sh->max_port = spawn->max_port;
        sh->reclaim_mode = config->reclaim_mode;
-       strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx),
+       strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->dev_ctx->ctx),
                sizeof(sh->ibdev_name) - 1);
-       strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx),
+       strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->dev_ctx->ctx),
                sizeof(sh->ibdev_path) - 1);
        /*
-        * Setting port_id to max unallowed value means
-        * there is no interrupt subhandler installed for
-        * the given port index i.
+        * Setting port_id to max unallowed value means there is no interrupt
+        * subhandler installed for the given port index i.
         */
        for (i = 0; i < sh->max_port; i++) {
                sh->port[i].ih_port_id = RTE_MAX_ETHPORTS;
                sh->port[i].devx_ih_port_id = RTE_MAX_ETHPORTS;
        }
-       sh->pd = mlx5_os_alloc_pd(sh->ctx);
-       if (sh->pd == NULL) {
-               DRV_LOG(ERR, "PD allocation failure");
-               err = ENOMEM;
-               goto error;
-       }
        if (sh->devx) {
-               err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
-               if (err) {
-                       DRV_LOG(ERR, "Fail to extract pdn from PD");
-                       goto error;
-               }
-               sh->td = mlx5_devx_cmd_create_td(sh->ctx);
+               sh->td = mlx5_devx_cmd_create_td(sh->dev_ctx->ctx);
                if (!sh->td) {
                        DRV_LOG(ERR, "TD allocation failure");
                        err = ENOMEM;
                        goto error;
                }
                tis_attr.transport_domain = sh->td->id;
-               sh->tis = mlx5_devx_cmd_create_tis(sh->ctx, &tis_attr);
+               sh->tis = mlx5_devx_cmd_create_tis(sh->dev_ctx->ctx, &tis_attr);
                if (!sh->tis) {
                        DRV_LOG(ERR, "TIS allocation failure");
                        err = ENOMEM;
@@ -1263,10 +1255,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                mlx5_glue->devx_free_uar(sh->devx_rx_uar);
        if (sh->tx_uar)
                mlx5_glue->devx_free_uar(sh->tx_uar);
-       if (sh->pd)
-               claim_zero(mlx5_os_dealloc_pd(sh->pd));
-       if (sh->ctx)
-               claim_zero(mlx5_glue->close_device(sh->ctx));
        mlx5_free(sh);
        MLX5_ASSERT(err > 0);
        rte_errno = err;
@@ -1278,7 +1266,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
  * all allocated resources and close handles.
  *
  * @param[in] sh
- *   Pointer to mlx5_dev_ctx_shared object to free
+ *   Pointer to mlx5_dev_ctx_shared object to free.
  */
 void
 mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
@@ -1318,7 +1306,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
        /*
         *  Ensure there is no async event handler installed.
         *  Only primary process handles async device events.
-        **/
+        */
        mlx5_flow_counters_mng_close(sh);
        if (sh->aso_age_mng) {
                mlx5_flow_aso_age_mng_close(sh);
@@ -1336,16 +1324,12 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_glue->devx_free_uar(sh->tx_uar);
                sh->tx_uar = NULL;
        }
-       if (sh->pd)
-               claim_zero(mlx5_os_dealloc_pd(sh->pd));
        if (sh->tis)
                claim_zero(mlx5_devx_cmd_destroy(sh->tis));
        if (sh->td)
                claim_zero(mlx5_devx_cmd_destroy(sh->td));
        if (sh->devx_rx_uar)
                mlx5_glue->devx_free_uar(sh->devx_rx_uar);
-       if (sh->ctx)
-               claim_zero(mlx5_glue->close_device(sh->ctx));
        MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
        pthread_mutex_destroy(&sh->txpp.mutex);
        mlx5_free(sh);
@@ -1548,10 +1532,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        }
        if (!priv->sh)
                return 0;
-       DRV_LOG(DEBUG, "port %u closing device \"%s\"",
-               dev->data->port_id,
-               ((priv->sh->ctx != NULL) ?
-               mlx5_os_get_ctx_device_name(priv->sh->ctx) : ""));
+       DRV_LOG(DEBUG, "port %u closing device \"%s\"", dev->data->port_id,
+               ((priv->sh->dev_ctx->ctx != NULL) ? priv->sh->ibdev_name : ""));
        /*
         * If default mreg copy action is removed at the stop stage,
         * the search will return none and nothing will be done anymore.
@@ -2374,6 +2356,33 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
        return port_id;
 }
 
+/**
+ * Finds the device context that matches the device.
+ * Multiple ethdevs per PCI device exist only with representors.
+ * In that case, it is enough to take any one of the ports, since they all
+ * share the same device context.
+ *
+ * @param dev
+ *   Pointer to the device.
+ *
+ * @return
+ *   Pointer to the device context if found, NULL otherwise.
+ */
+static struct mlx5_dev_ctx *
+mlx5_get_dev_ctx(struct rte_device *dev)
+{
+       struct mlx5_priv *priv;
+       uint16_t port_id;
+
+       port_id = rte_eth_find_next_of(0, dev);
+       if (port_id == RTE_MAX_ETHPORTS)
+               return NULL;
+       priv = rte_eth_devices[port_id].data->dev_private;
+       if (priv == NULL)
+               return NULL;
+       return priv->sh->dev_ctx;
+}
+
 /**
  * Callback to remove a device.
  *
@@ -2388,6 +2397,7 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
 int
 mlx5_net_remove(struct rte_device *dev)
 {
+       struct mlx5_dev_ctx *dev_ctx = mlx5_get_dev_ctx(dev);
        uint16_t port_id;
        int ret = 0;
 
@@ -2401,6 +2411,11 @@ mlx5_net_remove(struct rte_device *dev)
                else
                        ret |= rte_eth_dev_close(port_id);
        }
+
+       if (dev_ctx) {
+               mlx5_dev_ctx_release(dev_ctx);
+               mlx5_free(dev_ctx);
+       }
        return ret == 0 ? 0 : -EIO;
 }
 
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 9a8e34535c..1e52b9ac9a 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1140,9 +1140,7 @@ struct mlx5_dev_ctx_shared {
        uint32_t reclaim_mode:1; /* Reclaim memory. */
        uint32_t max_port; /* Maximal IB device port index. */
        struct mlx5_bond_info bond; /* Bonding information. */
-       void *ctx; /* Verbs/DV/DevX context. */
-       void *pd; /* Protection Domain. */
-       uint32_t pdn; /* Protection Domain number. */
+       struct mlx5_dev_ctx *dev_ctx; /* Device context. */
        uint32_t tdn; /* Transport Domain number. */
        char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
        char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
@@ -1497,7 +1495,8 @@ void mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh);
 int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
 struct mlx5_dev_ctx_shared *
 mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
-                          const struct mlx5_dev_config *config);
+                         struct mlx5_dev_ctx *dev_ctx,
+                         const struct mlx5_dev_config *config);
 void mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh);
 void mlx5_free_table_hash_list(struct mlx5_priv *priv);
 int mlx5_alloc_table_hash_list(struct mlx5_priv *priv);
@@ -1766,13 +1765,10 @@ int mlx5_flow_meter_flush(struct rte_eth_dev *dev,
 void mlx5_flow_meter_rxq_flush(struct rte_eth_dev *dev);
 
 /* mlx5_os.c */
+
 struct rte_pci_driver;
 int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);
 void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
-int mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
-                        const struct mlx5_dev_config *config,
-                        struct mlx5_dev_ctx_shared *sh);
-int mlx5_os_get_pdn(void *pd, uint32_t *pdn);
 int mlx5_os_net_probe(struct rte_device *dev);
 void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
 void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);
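
Note (not part of the diff): with the mlx5.h hunk above, the shared object no
longer stores ctx/pd/pdn itself and reaches them through sh->dev_ctx instead.
A minimal sketch of the common device context as this patch uses it; the field
names are only inferred from the dereferences in these net hunks
(sh->dev_ctx->ctx, ->pd, ->pdn, ->numa_node), and the authoritative definition
is the one added to drivers/common/mlx5 earlier in this series:

    struct mlx5_dev_ctx {
            void *ctx;       /* Verbs/DevX context, replaces sh->ctx. */
            void *pd;        /* Protection Domain, replaces sh->pd. */
            uint32_t pdn;    /* PD object number, replaces sh->pdn. */
            int numa_node;   /* NUMA node of the probing device. */
            /* Other common fields omitted here. */
    };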
diff --git a/drivers/net/mlx5/mlx5_devx.c b/drivers/net/mlx5/mlx5_devx.c
index a1db53577a..3cafd46837 100644
--- a/drivers/net/mlx5/mlx5_devx.c
+++ b/drivers/net/mlx5/mlx5_devx.c
@@ -276,12 +276,12 @@ mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
        rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
                                                MLX5_WQ_END_PAD_MODE_ALIGN :
                                                MLX5_WQ_END_PAD_MODE_NONE;
-       rq_attr.wq_attr.pd = priv->sh->pdn;
+       rq_attr.wq_attr.pd = priv->sh->dev_ctx->pdn;
        rq_attr.counter_set_id = priv->counter_set_id;
        /* Create RQ using DevX API. */
-       return mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,
-                                  wqe_size, log_desc_n, &rq_attr,
-                                  rxq_ctrl->socket);
+       return mlx5_devx_rq_create(priv->sh->dev_ctx->ctx,
+                                  &rxq_ctrl->obj->rq_obj, wqe_size, log_desc_n,
+                                  &rq_attr, rxq_ctrl->socket);
 }
 
 /**
@@ -365,8 +365,8 @@ mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
        cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
        log_cqe_n = log2above(cqe_n);
        /* Create CQ using DevX API. */
-       ret = mlx5_devx_cq_create(sh->ctx, &rxq_ctrl->obj->cq_obj, log_cqe_n,
-                                 &cq_attr, sh->numa_node);
+       ret = mlx5_devx_cq_create(sh->dev_ctx->ctx, &rxq_ctrl->obj->cq_obj,
+                                 log_cqe_n, &cq_attr, sh->numa_node);
        if (ret)
                return ret;
        cq_obj = &rxq_ctrl->obj->cq_obj;
@@ -442,7 +442,7 @@ mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
                        attr.wq_attr.log_hairpin_data_sz -
                        MLX5_HAIRPIN_QUEUE_STRIDE;
        attr.counter_set_id = priv->counter_set_id;
-       tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
+       tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->dev_ctx->ctx, &attr,
                                           rxq_ctrl->socket);
        if (!tmpl->rq) {
                DRV_LOG(ERR,
@@ -486,8 +486,7 @@ mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
                          MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;
 
                tmpl->devx_channel = mlx5_os_devx_create_event_channel
-                                                               (priv->sh->ctx,
-                                                                devx_ev_flag);
+                                        (priv->sh->dev_ctx->ctx, devx_ev_flag);
                if (!tmpl->devx_channel) {
                        rte_errno = errno;
                        DRV_LOG(ERR, "Failed to create event channel %d.",
@@ -602,7 +601,8 @@ mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
                                                        ind_tbl->queues_n);
        if (!rqt_attr)
                return -rte_errno;
-       ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
+       ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->dev_ctx->ctx,
+                                               rqt_attr);
        mlx5_free(rqt_attr);
        if (!ind_tbl->rqt) {
                DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
@@ -770,7 +770,7 @@ mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
 
        mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
                               hrxq->ind_table, tunnel, &tir_attr);
-       hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
+       hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->dev_ctx->ctx, &tir_attr);
        if (!hrxq->tir) {
                DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
                        dev->data->port_id);
@@ -936,7 +936,7 @@ mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
                        attr.wq_attr.log_hairpin_data_sz -
                        MLX5_HAIRPIN_QUEUE_STRIDE;
        attr.tis_num = priv->sh->tis->id;
-       tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
+       tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->dev_ctx->ctx, &attr);
        if (!tmpl->sq) {
                DRV_LOG(ERR,
                        "Port %u tx hairpin queue %u can't create SQ object.",
@@ -994,15 +994,15 @@ mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
                .tis_lst_sz = 1,
                .tis_num = priv->sh->tis->id,
                .wq_attr = (struct mlx5_devx_wq_attr){
-                       .pd = priv->sh->pdn,
+                       .pd = priv->sh->dev_ctx->pdn,
                        .uar_page =
                                 mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
                },
                .ts_format = mlx5_ts_format_conv(priv->sh->sq_ts_format),
        };
        /* Create Send Queue object with DevX. */
-       return mlx5_devx_sq_create(priv->sh->ctx, &txq_obj->sq_obj, log_desc_n,
-                                  &sq_attr, priv->sh->numa_node);
+       return mlx5_devx_sq_create(priv->sh->dev_ctx->ctx, &txq_obj->sq_obj,
+                                  log_desc_n, &sq_attr, priv->sh->numa_node);
 }
 #endif
 
@@ -1058,8 +1058,8 @@ mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
                return 0;
        }
        /* Create completion queue object with DevX. */
-       ret = mlx5_devx_cq_create(sh->ctx, &txq_obj->cq_obj, log_desc_n,
-                                 &cq_attr, priv->sh->numa_node);
+       ret = mlx5_devx_cq_create(sh->dev_ctx->ctx, &txq_obj->cq_obj,
+                                 log_desc_n, &cq_attr, sh->numa_node);
        if (ret) {
                DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
                        dev->data->port_id, idx);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 4762fa0f5f..b97790cf38 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -7604,7 +7604,7 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
        }
        mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
        size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
-       mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
+       mem_mng->umem = mlx5_os_umem_reg(sh->dev_ctx->ctx, mem, size,
                                                 IBV_ACCESS_LOCAL_WRITE);
        if (!mem_mng->umem) {
                rte_errno = errno;
@@ -7615,10 +7615,10 @@ mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
        mkey_attr.addr = (uintptr_t)mem;
        mkey_attr.size = size;
        mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
-       mkey_attr.pd = sh->pdn;
+       mkey_attr.pd = sh->dev_ctx->pdn;
        mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
        mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
-       mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
+       mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->dev_ctx->ctx, &mkey_attr);
        if (!mem_mng->dm) {
                mlx5_os_umem_dereg(mem_mng->umem);
                rte_errno = errno;
diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c
index e11327a11b..6b90d0d7c1 100644
--- a/drivers/net/mlx5/mlx5_flow_aso.c
+++ b/drivers/net/mlx5/mlx5_flow_aso.c
@@ -103,7 +103,7 @@ mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,
                DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
                return -1;
        }
-       ret = sh->share_cache.reg_mr_cb(sh->pd, mr->addr, length, mr);
+       ret = sh->share_cache.reg_mr_cb(sh->dev_ctx->pd, mr->addr, length, mr);
        if (ret) {
                DRV_LOG(ERR, "Failed to create direct Mkey.");
                mlx5_free(mr->addr);
@@ -309,24 +309,27 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
                    enum mlx5_access_aso_opc_mod aso_opc_mod)
 {
        uint32_t sq_desc_n = 1 << MLX5_ASO_QUEUE_LOG_DESC;
+       struct mlx5_dev_ctx *dev_ctx = sh->dev_ctx;
 
        switch (aso_opc_mod) {
        case ASO_OPC_MOD_FLOW_HIT:
                if (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
                                    sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
                        return -1;
-               if (mlx5_aso_sq_create(sh->ctx, &sh->aso_age_mng->aso_sq, 0,
-                                 sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
-                                 sh->sq_ts_format)) {
+               if (mlx5_aso_sq_create(dev_ctx->ctx, &sh->aso_age_mng->aso_sq,
+                                      0, sh->tx_uar, dev_ctx->pdn,
+                                      MLX5_ASO_QUEUE_LOG_DESC,
+                                      sh->sq_ts_format)) {
                        mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
                        return -1;
                }
                mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
                break;
        case ASO_OPC_MOD_POLICER:
-               if (mlx5_aso_sq_create(sh->ctx, &sh->mtrmng->pools_mng.sq, 0,
-                                 sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
-                                 sh->sq_ts_format))
+               if (mlx5_aso_sq_create(dev_ctx->ctx, &sh->mtrmng->pools_mng.sq,
+                                      0, sh->tx_uar, dev_ctx->pdn,
+                                      MLX5_ASO_QUEUE_LOG_DESC,
+                                      sh->sq_ts_format))
                        return -1;
                mlx5_aso_mtr_init_sq(&sh->mtrmng->pools_mng.sq);
                break;
@@ -335,9 +338,10 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh,
                if (mlx5_aso_reg_mr(sh, 64 * sq_desc_n,
                                    &sh->ct_mng->aso_sq.mr, 0))
                        return -1;
-               if (mlx5_aso_sq_create(sh->ctx, &sh->ct_mng->aso_sq, 0,
-                               sh->tx_uar, sh->pdn, MLX5_ASO_QUEUE_LOG_DESC,
-                               sh->sq_ts_format)) {
+               if (mlx5_aso_sq_create(dev_ctx->ctx, &sh->ct_mng->aso_sq, 0,
+                                      sh->tx_uar, dev_ctx->pdn,
+                                      MLX5_ASO_QUEUE_LOG_DESC,
+                                      sh->sq_ts_format)) {
                        mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
                        return -1;
                }
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 5bb6d89a3f..6a336ac128 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -3684,8 +3684,8 @@ flow_dv_encap_decap_create_cb(void *tool_ctx, void *cb_ctx)
        }
        *resource = *ctx_resource;
        resource->idx = idx;
-       ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->ctx, domain,
-                                                             resource,
+       ret = mlx5_flow_os_create_flow_action_packet_reformat(sh->dev_ctx->ctx,
+                                                             domain, resource,
                                                             &resource->action);
        if (ret) {
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
@@ -5485,7 +5485,7 @@ flow_dv_modify_create_cb(void *tool_ctx, void *cb_ctx)
        else
                ns = sh->rx_domain;
        ret = mlx5_flow_os_create_flow_action_modify_header
-                                       (sh->ctx, ns, entry,
+                                       (sh->dev_ctx->ctx, ns, entry,
                                         data_len, &entry->action);
        if (ret) {
                mlx5_ipool_free(sh->mdh_ipools[ref->actions_num - 1], idx);
@@ -6096,6 +6096,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
+       struct mlx5_dev_ctx *dev_ctx = priv->sh->dev_ctx;
        struct mlx5_flow_counter_pool *pool;
        struct mlx5_counters tmp_tq;
        struct mlx5_devx_obj *dcs = NULL;
@@ -6107,7 +6108,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
 
        if (fallback) {
                /* bulk_bitmap must be 0 for single counter allocation. */
-               dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
+               dcs = mlx5_devx_cmd_flow_counter_alloc(dev_ctx->ctx, 0);
                if (!dcs)
                        return NULL;
                pool = flow_dv_find_pool_by_id(cmng, dcs->id);
@@ -6125,7 +6126,7 @@ flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
                *cnt_free = cnt;
                return pool;
        }
-       dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+       dcs = mlx5_devx_cmd_flow_counter_alloc(dev_ctx->ctx, 0x4);
        if (!dcs) {
                rte_errno = ENODATA;
                return NULL;
@@ -6477,16 +6478,17 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
                             struct mlx5_aso_mtr **mtr_free)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct mlx5_aso_mtr_pools_mng *pools_mng =
-                               &priv->sh->mtrmng->pools_mng;
+       struct mlx5_dev_ctx *dev_ctx = priv->sh->dev_ctx;
+       struct mlx5_aso_mtr_pools_mng *pools_mng = &priv->sh->mtrmng->pools_mng;
        struct mlx5_aso_mtr_pool *pool = NULL;
        struct mlx5_devx_obj *dcs = NULL;
        uint32_t i;
        uint32_t log_obj_size;
 
        log_obj_size = rte_log2_u32(MLX5_ASO_MTRS_PER_POOL >> 1);
-       dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(priv->sh->ctx,
-                       priv->sh->pdn, log_obj_size);
+       dcs = mlx5_devx_cmd_create_flow_meter_aso_obj(dev_ctx->ctx,
+                                                     dev_ctx->pdn,
+                                                     log_obj_size);
        if (!dcs) {
                rte_errno = ENODATA;
                return NULL;
@@ -6508,8 +6510,7 @@ flow_dv_mtr_pool_create(struct rte_eth_dev *dev,
        pools_mng->n_valid++;
        for (i = 1; i < MLX5_ASO_MTRS_PER_POOL; ++i) {
                pool->mtrs[i].offset = i;
-               LIST_INSERT_HEAD(&pools_mng->meters,
-                                               &pool->mtrs[i], next);
+               LIST_INSERT_HEAD(&pools_mng->meters, &pool->mtrs[i], next);
        }
        pool->mtrs[0].offset = 0;
        *mtr_free = &pool->mtrs[0];
@@ -9181,7 +9182,7 @@ flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
                }
        } else {
                /* Create a GENEVE TLV object and resource. */
-               obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
+               obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->dev_ctx->ctx,
                                geneve_opt_v->option_class,
                                geneve_opt_v->option_type,
                                geneve_opt_v->option_len);
@@ -10539,7 +10540,8 @@ flow_dv_matcher_create_cb(void *tool_ctx, void *cb_ctx)
        dv_attr.priority = ref->priority;
        if (tbl->is_egress)
                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
-       ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
+       ret = mlx5_flow_os_create_flow_matcher(sh->dev_ctx->ctx, &dv_attr,
+                                              tbl->tbl.obj,
                                               &resource->matcher_object);
        if (ret) {
                mlx5_free(resource);
@@ -11958,8 +11960,8 @@ flow_dv_age_pool_create(struct rte_eth_dev *dev,
        struct mlx5_devx_obj *obj = NULL;
        uint32_t i;
 
-       obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
-                                                   priv->sh->pdn);
+       obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->dev_ctx->ctx,
+                                                   priv->sh->dev_ctx->pdn);
        if (!obj) {
                rte_errno = ENODATA;
                DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
@@ -12371,13 +12373,15 @@ flow_dv_ct_pool_create(struct rte_eth_dev *dev,
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+       struct mlx5_dev_ctx *dev_ctx = priv->sh->dev_ctx;
        struct mlx5_aso_ct_pool *pool = NULL;
        struct mlx5_devx_obj *obj = NULL;
        uint32_t i;
        uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
 
-       obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
-                                               priv->sh->pdn, log_obj_size);
+       obj = mlx5_devx_cmd_create_conn_track_offload_obj(dev_ctx->ctx,
+                                                         dev_ctx->pdn,
+                                                         log_obj_size);
        if (!obj) {
                rte_errno = ENODATA;
                DRV_LOG(ERR, "Failed to create conn_track_offload_obj using 
DevX.");
@@ -17123,8 +17127,7 @@ flow_dv_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
                        break;
                case MLX5_FLOW_FATE_QUEUE:
                        sub_policy = mtr_policy->sub_policys[domain][0];
-                       __flow_dv_destroy_sub_policy_rules(dev,
-                                                          sub_policy);
+                       __flow_dv_destroy_sub_policy_rules(dev, sub_policy);
                        break;
                default:
                        /*Other actions without queue and do nothing*/
@@ -17173,8 +17176,8 @@ mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
                goto err;
        dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
        __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
-       ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
-                                              &matcher);
+       ret = mlx5_flow_os_create_flow_matcher(sh->dev_ctx->ctx, &dv_attr,
+                                              tbl->obj, &matcher);
        if (ret)
                goto err;
        __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
@@ -17242,7 +17245,7 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
                                        0, 0, 0, NULL);
        if (!tbl)
                goto err;
-       dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
+       dcs = mlx5_devx_cmd_flow_counter_alloc(sh->dev_ctx->ctx, 0x4);
        if (!dcs)
                goto err;
        ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
@@ -17251,8 +17254,8 @@ mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
                goto err;
        dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
        __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
-       ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
-                                              &matcher);
+       ret = mlx5_flow_os_create_flow_matcher(sh->dev_ctx->ctx, &dv_attr,
+                                              tbl->obj, &matcher);
        if (ret)
                goto err;
        __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index b93fd4d2c9..2c132a8c16 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -198,7 +198,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
 {
 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct ibv_context *ctx = priv->sh->ctx;
+       struct ibv_context *ctx = priv->sh->dev_ctx->ctx;
        struct ibv_counter_set_init_attr init = {
                         .counter_set_id = counter->shared_info.id};
 
@@ -210,7 +210,7 @@ flow_verbs_counter_create(struct rte_eth_dev *dev,
        return 0;
 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct ibv_context *ctx = priv->sh->ctx;
+       struct ibv_context *ctx = priv->sh->dev_ctx->ctx;
        struct ibv_counters_init_attr init = {0};
        struct ibv_counter_attach_attr attach;
        int ret;
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 44afda731f..b7297f22fe 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -84,7 +84,7 @@ mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
        struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
        struct mlx5_priv *priv = rxq_ctrl->priv;
 
-       return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
+       return mlx5_mr_addr2mr_bh(priv->sh->dev_ctx->pd, &priv->mp_id,
                                  &priv->sh->share_cache, mr_ctrl, addr,
                                  priv->config.mr_ext_memseg_en);
 }
@@ -108,7 +108,7 @@ mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
        struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
        struct mlx5_priv *priv = txq_ctrl->priv;
 
-       return mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
+       return mlx5_mr_addr2mr_bh(priv->sh->dev_ctx->pd, &priv->mp_id,
                                  &priv->sh->share_cache, mr_ctrl, addr,
                                  priv->config.mr_ext_memseg_en);
 }
@@ -177,7 +177,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
                return;
        DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
                dev->data->port_id, mem_idx, mp->name);
-       mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
+       mr = mlx5_create_mr_ext(sh->dev_ctx->pd, addr, len, mp->socket_id,
                                sh->share_cache.reg_mr_cb);
        if (!mr) {
                DRV_LOG(WARNING,
@@ -193,7 +193,7 @@ mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
        mlx5_mr_insert_cache(&sh->share_cache, mr);
        rte_rwlock_write_unlock(&sh->share_cache.rwlock);
        /* Insert to the local cache table */
-       mlx5_mr_addr2mr_bh(sh->pd, &priv->mp_id, &sh->share_cache,
+       mlx5_mr_addr2mr_bh(sh->dev_ctx->pd, &priv->mp_id, &sh->share_cache,
                           mr_ctrl, addr, priv->config.mr_ext_memseg_en);
 }
 
@@ -253,8 +253,8 @@ mlx5_net_dma_map(struct rte_device *rte_dev, void *addr,
        }
        priv = dev->data->dev_private;
        sh = priv->sh;
-       mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
-                               sh->share_cache.reg_mr_cb);
+       mr = mlx5_create_mr_ext(sh->dev_ctx->pd, (uintptr_t)addr, len,
+                               SOCKET_ID_ANY, sh->share_cache.reg_mr_cb);
        if (!mr) {
                DRV_LOG(WARNING,
                        "port %u unable to dma map", dev->data->port_id);
@@ -409,7 +409,7 @@ mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
        if (data->ret < 0)
                return;
        /* Register address of the chunk and update local caches. */
-       lkey = mlx5_mr_addr2mr_bh(priv->sh->pd, &priv->mp_id,
+       lkey = mlx5_mr_addr2mr_bh(priv->sh->dev_ctx->pd, &priv->mp_id,
                                  &priv->sh->share_cache, data->mr_ctrl,
                                  (uintptr_t)memhdr->addr,
                                  priv->config.mr_ext_memseg_en);
diff --git a/drivers/net/mlx5/mlx5_txpp.c b/drivers/net/mlx5/mlx5_txpp.c
index 4f6da9f2d1..ff1c3d204c 100644
--- a/drivers/net/mlx5/mlx5_txpp.c
+++ b/drivers/net/mlx5/mlx5_txpp.c
@@ -49,7 +49,7 @@ static int
 mlx5_txpp_create_event_channel(struct mlx5_dev_ctx_shared *sh)
 {
        MLX5_ASSERT(!sh->txpp.echan);
-       sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->ctx,
+       sh->txpp.echan = mlx5_os_devx_create_event_channel(sh->dev_ctx->ctx,
                        MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA);
        if (!sh->txpp.echan) {
                rte_errno = errno;
@@ -104,7 +104,7 @@ mlx5_txpp_alloc_pp_index(struct mlx5_dev_ctx_shared *sh)
        MLX5_SET(set_pp_rate_limit_context, &pp, rate_mode,
                 sh->txpp.test ? MLX5_DATA_RATE : MLX5_WQE_RATE);
        sh->txpp.pp = mlx5_glue->dv_alloc_pp
-                               (sh->ctx, sizeof(pp), &pp,
+                               (sh->dev_ctx->ctx, sizeof(pp), &pp,
                                 MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
        if (sh->txpp.pp == NULL) {
                DRV_LOG(ERR, "Failed to allocate packet pacing index.");
@@ -232,7 +232,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
                .tis_lst_sz = 1,
                .tis_num = sh->tis->id,
                .wq_attr = (struct mlx5_devx_wq_attr){
-                       .pd = sh->pdn,
+                       .pd = sh->dev_ctx->pdn,
                        .uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
                },
                .ts_format = mlx5_ts_format_conv(sh->sq_ts_format),
@@ -245,7 +245,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
        int ret;
 
        /* Create completion queue object for Rearm Queue. */
-       ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+       ret = mlx5_devx_cq_create(sh->dev_ctx->ctx, &wq->cq_obj,
                                  log2above(MLX5_TXPP_REARM_CQ_SIZE), &cq_attr,
                                  sh->numa_node);
        if (ret) {
@@ -259,7 +259,7 @@ mlx5_txpp_create_rearm_queue(struct mlx5_dev_ctx_shared *sh)
        /* Create send queue object for Rearm Queue. */
        sq_attr.cqn = wq->cq_obj.cq->id;
        /* There should be no WQE leftovers in the cyclic queue. */
-       ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj,
+       ret = mlx5_devx_sq_create(sh->dev_ctx->ctx, &wq->sq_obj,
                                  log2above(MLX5_TXPP_REARM_SQ_SIZE), &sq_attr,
                                  sh->numa_node);
        if (ret) {
@@ -409,7 +409,7 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
        sh->txpp.ts_p = 0;
        sh->txpp.ts_n = 0;
        /* Create completion queue object for Clock Queue. */
-       ret = mlx5_devx_cq_create(sh->ctx, &wq->cq_obj,
+       ret = mlx5_devx_cq_create(sh->dev_ctx->ctx, &wq->cq_obj,
                                  log2above(MLX5_TXPP_CLKQ_SIZE), &cq_attr,
                                  sh->numa_node);
        if (ret) {
@@ -444,9 +444,10 @@ mlx5_txpp_create_clock_queue(struct mlx5_dev_ctx_shared *sh)
        sq_attr.packet_pacing_rate_limit_index = sh->txpp.pp_id;
        sq_attr.wq_attr.cd_slave = 1;
        sq_attr.wq_attr.uar_page = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
-       sq_attr.wq_attr.pd = sh->pdn;
+       sq_attr.wq_attr.pd = sh->dev_ctx->pdn;
        sq_attr.ts_format = mlx5_ts_format_conv(sh->sq_ts_format);
-       ret = mlx5_devx_sq_create(sh->ctx, &wq->sq_obj, log2above(wq->sq_size),
+       ret = mlx5_devx_sq_create(sh->dev_ctx->ctx, &wq->sq_obj,
+                                 log2above(wq->sq_size),
                                  &sq_attr, sh->numa_node);
        if (ret) {
                rte_errno = errno;
diff --git a/drivers/net/mlx5/windows/mlx5_ethdev_os.c b/drivers/net/mlx5/windows/mlx5_ethdev_os.c
index c709dd19be..352dfa9331 100644
--- a/drivers/net/mlx5/windows/mlx5_ethdev_os.c
+++ b/drivers/net/mlx5/windows/mlx5_ethdev_os.c
@@ -38,7 +38,7 @@ mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[RTE_ETHER_ADDR_LEN])
                return -rte_errno;
        }
        priv = dev->data->dev_private;
-       context_obj = (mlx5_context_st *)priv->sh->ctx;
+       context_obj = (mlx5_context_st *)priv->sh->dev_ctx->ctx;
        memcpy(mac, context_obj->mlx5_dev.eth_mac, RTE_ETHER_ADDR_LEN);
        return 0;
 }
@@ -66,7 +66,7 @@ mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[MLX5_NAMESIZE])
                return -rte_errno;
        }
        priv = dev->data->dev_private;
-       context_obj = (mlx5_context_st *)priv->sh->ctx;
+       context_obj = (mlx5_context_st *)priv->sh->dev_ctx->ctx;
        strncpy(*ifname, context_obj->mlx5_dev.name, MLX5_NAMESIZE);
        return 0;
 }
@@ -93,7 +93,7 @@ mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
                return -rte_errno;
        }
        priv = dev->data->dev_private;
-       context_obj = (mlx5_context_st *)priv->sh->ctx;
+       context_obj = (mlx5_context_st *)priv->sh->dev_ctx->ctx;
        *mtu = context_obj->mlx5_dev.mtu_bytes;
        return 0;
 }
@@ -253,7 +253,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                return -rte_errno;
        }
        priv = dev->data->dev_private;
-       context_obj = (mlx5_context_st *)priv->sh->ctx;
+       context_obj = (mlx5_context_st *)priv->sh->dev_ctx->ctx;
        dev_link.link_speed = context_obj->mlx5_dev.link_speed / (1000 * 1000);
        dev_link.link_status =
              (context_obj->mlx5_dev.link_state == 1 && !mlx5_is_removed(dev))
@@ -359,7 +359,8 @@ mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
        int err;
        struct mlx5_devx_clock mlx5_clock;
        struct mlx5_priv *priv = dev->data->dev_private;
-       mlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->ctx;
+       mlx5_context_st *context_obj =
+                       (mlx5_context_st *)priv->sh->dev_ctx->ctx;
 
        err = mlx5_glue->query_rt_values(context_obj, &mlx5_clock);
        if (err != 0) {
@@ -383,7 +384,8 @@ int
 mlx5_is_removed(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       mlx5_context_st *context_obj = (mlx5_context_st *)priv->sh->ctx;
+       mlx5_context_st *context_obj =
+                       (mlx5_context_st *)priv->sh->dev_ctx->ctx;
 
        if (*context_obj->shutdown_event_obj.p_flag)
                return 1;
diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c
index 2f5c29662e..f6a7fbaca1 100644
--- a/drivers/net/mlx5/windows/mlx5_os.c
+++ b/drivers/net/mlx5/windows/mlx5_os.c
@@ -240,50 +240,6 @@ mlx5_os_set_nonblock_channel_fd(int fd)
        return -ENOTSUP;
 }
 
-/**
- * Function API open device under Windows
- *
- * This function calls the Windows glue APIs to open a device.
- *
- * @param[in] spawn
- *   Pointer to the device attributes (name, port, etc).
- * @param[out] config
- *   Pointer to device configuration structure.
- * @param[out] sh
- *   Pointer to shared context structure.
- *
- * @return
- *   0 on success, a positive error value otherwise.
- */
-int
-mlx5_os_open_device(const struct mlx5_dev_spawn_data *spawn,
-                const struct mlx5_dev_config *config,
-                struct mlx5_dev_ctx_shared *sh)
-{
-       RTE_SET_USED(config);
-       int err = 0;
-       struct mlx5_context *mlx5_ctx;
-
-       pthread_mutex_init(&sh->txpp.mutex, NULL);
-       /* Set numa node from pci probe */
-       sh->numa_node = spawn->pci_dev->device.numa_node;
-
-       /* Try to open device with DevX */
-       rte_errno = 0;
-       sh->ctx = mlx5_glue->open_device(spawn->phys_dev);
-       if (!sh->ctx) {
-               DRV_LOG(ERR, "open_device failed");
-               err = errno;
-               return err;
-       }
-       sh->devx = 1;
-       mlx5_ctx = (struct mlx5_context *)sh->ctx;
-       err = mlx5_glue->query_device(spawn->phys_dev, &mlx5_ctx->mlx5_dev);
-       if (err)
-               DRV_LOG(ERR, "Failed to query device context fields.");
-       return err;
-}
-
 /**
  * DV flow counter mode detect and config.
  *
@@ -328,6 +284,8 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
  *
  * @param dpdk_dev
  *   Backing DPDK device.
+ * @param dev_ctx
+ *   Pointer to the device context structure.
  * @param spawn
  *   Verbs device parameters (name, port, switch_info) to spawn.
  * @param config
@@ -341,6 +299,7 @@ mlx5_flow_counter_mode_config(struct rte_eth_dev *dev __rte_unused)
  */
 static struct rte_eth_dev *
 mlx5_dev_spawn(struct rte_device *dpdk_dev,
+              struct mlx5_dev_ctx *dev_ctx,
               struct mlx5_dev_spawn_data *spawn,
               struct mlx5_dev_config *config)
 {
@@ -378,21 +337,20 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                goto error;
        }
        mlx5_malloc_mem_select(config->sys_mem_en);
-       sh = mlx5_alloc_shared_dev_ctx(spawn, config);
+       sh = mlx5_alloc_shared_dev_ctx(spawn, dev_ctx, config);
        if (!sh)
                return NULL;
-       config->devx = sh->devx;
        /* Initialize the shutdown event in mlx5_dev_spawn to
         * support mlx5_is_removed for Windows.
         */
-       err = mlx5_glue->devx_init_showdown_event(sh->ctx);
+       err = mlx5_glue->devx_init_showdown_event(sh->dev_ctx->ctx);
        if (err) {
                DRV_LOG(ERR, "failed to init showdown event: %s",
                        strerror(errno));
                goto error;
        }
        DRV_LOG(DEBUG, "MPW isn't supported");
-       mlx5_os_get_dev_attr(sh->ctx, &device_attr);
+       mlx5_os_get_dev_attr(sh->dev_ctx->ctx, &device_attr);
        config->swp = 0;
        config->ind_table_max_size =
                sh->device_attr.max_rwq_indirection_table_size;
@@ -485,7 +443,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
                config->cqe_comp = 0;
        }
        if (config->devx) {
-               err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config->hca_attr);
+               err = mlx5_devx_cmd_query_hca_attr(sh->dev_ctx->ctx,
+                                                  &config->hca_attr);
                if (err) {
                        err = -err;
                        goto error;
@@ -508,7 +467,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
 
                err = config->hca_attr.access_register_user ?
                        mlx5_devx_cmd_register_read
-                               (sh->ctx, MLX5_REGISTER_ID_MTUTC, 0,
+                               (sh->dev_ctx->ctx, MLX5_REGISTER_ID_MTUTC, 0,
                                reg, MLX5_ST_SZ_DW(register_mtutc)) : ENOTSUP;
                if (!err) {
                        uint32_t ts_mode;
@@ -701,7 +660,7 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
        if (eth_dev != NULL) {
                /* mac_addrs must not be freed alone because part of
                 * dev_private
-                **/
+                */
                eth_dev->data->mac_addrs = NULL;
                rte_eth_dev_release_port(eth_dev);
        }
@@ -919,15 +878,13 @@ int
 mlx5_os_net_probe(struct rte_device *dev)
 {
        struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev);
+       struct mlx5_dev_ctx *dev_ctx;
        struct mlx5_dev_spawn_data spawn = { .pf_bond = -1 };
-       struct devx_device_bdf *devx_bdf_match = mlx5_os_get_devx_device(dev);
        struct mlx5_dev_config dev_config;
        unsigned int dev_config_vf;
        int ret;
        uint32_t restore;
 
-       if (devx_bdf_match == NULL)
-               return -rte_errno;
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                DRV_LOG(ERR, "Secondary process is not supported on Windows.");
                return -ENOTSUP;
@@ -938,11 +895,20 @@ mlx5_os_net_probe(struct rte_device *dev)
                        strerror(rte_errno));
                return -rte_errno;
        }
+       dev_ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_dev_ctx),
+                             RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+       if (dev_ctx == NULL) {
+               DRV_LOG(ERR, "Device context allocation failure.");
+               rte_errno = ENOMEM;
+               return -rte_errno;
+       }
+       ret = mlx5_dev_ctx_prepare(dev_ctx, dev, MLX5_CLASS_ETH);
+       if (ret < 0)
+               goto error;
        memset(&spawn.info, 0, sizeof(spawn.info));
        spawn.max_port = 1;
        spawn.phys_port = 1;
-       spawn.phys_dev = devx_bdf_match;
-       spawn.phys_dev_name = mlx5_os_get_dev_device_name(devx_bdf_match);
+       spawn.phys_dev_name = mlx5_os_get_ctx_device_name(dev_ctx->ctx);
        spawn.eth_dev = NULL;
        spawn.pci_dev = pci_dev;
        spawn.ifindex = -1; /* Spawn will assign */
@@ -972,6 +938,7 @@ mlx5_os_net_probe(struct rte_device *dev)
        /* Default configuration. */
        memset(&dev_config, 0, sizeof(struct mlx5_dev_config));
        dev_config.vf = dev_config_vf;
+       dev_config.devx = 1;
        dev_config.mps = 0;
        dev_config.dbnc = MLX5_ARG_UNSET;
        dev_config.rx_vec_en = 1;
@@ -987,16 +954,21 @@ mlx5_os_net_probe(struct rte_device *dev)
        dev_config.dv_flow_en = 1;
        dev_config.decap_en = 0;
        dev_config.log_hp_size = MLX5_ARG_UNSET;
-       spawn.numa_node = pci_dev->device.numa_node;
-       spawn.eth_dev = mlx5_dev_spawn(dev, &spawn, &dev_config);
-       if (!spawn.eth_dev)
-               return -rte_errno;
+       spawn.eth_dev = mlx5_dev_spawn(dev, dev_ctx, &spawn, &dev_config);
+       if (!spawn.eth_dev) {
+               ret = -rte_errno;
+               goto error;
+       }
        restore = spawn.eth_dev->data->dev_flags;
        rte_eth_copy_pci_info(spawn.eth_dev, pci_dev);
        /* Restore non-PCI flags cleared by the above call. */
        spawn.eth_dev->data->dev_flags |= restore;
        rte_eth_dev_probing_finish(spawn.eth_dev);
        return 0;
+error:
+       mlx5_dev_ctx_release(dev_ctx);
+       mlx5_free(dev_ctx);
+       return ret;
 }
 
 /**
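
Note (not part of the diff): a condensed view of the device context lifecycle
after this patch, kept to the calls visible in the hunks above and in mlx5.c
(error handling and spawn details trimmed, so this is an illustration rather
than the exact code):

    /* Probe (this file): one common device context per rte_device. */
    dev_ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_dev_ctx),
                          RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
    if (dev_ctx == NULL)
            return -rte_errno;   /* rte_errno set to ENOMEM */
    if (mlx5_dev_ctx_prepare(dev_ctx, dev, MLX5_CLASS_ETH) < 0)
            goto error;          /* release + free, as in the hunk above */
    /* Every spawned port references it through priv->sh->dev_ctx. */
    spawn.eth_dev = mlx5_dev_spawn(dev, dev_ctx, &spawn, &dev_config);

    /* Remove (mlx5_net_remove() in mlx5.c): the context is fetched through
     * any of the device's ports before they are closed, because the lookup
     * goes via an ethdev's private data, and it is released only after all
     * ports are gone. */
    dev_ctx = mlx5_get_dev_ctx(dev);
    /* ... close every port of the device ... */
    if (dev_ctx) {
            mlx5_dev_ctx_release(dev_ctx);
            mlx5_free(dev_ctx);
    }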
@@ -1016,25 +988,4 @@ mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
        *dereg_mr_cb = mlx5_os_dereg_mr;
 }
 
-/**
- * Extract pdn of PD object using DevX
- *
- * @param[in] pd
- *   Pointer to the DevX PD object.
- * @param[out] pdn
- *   Pointer to the PD object number variable.
- *
- * @return
- *   0 on success, error value otherwise.
- */
-int
-mlx5_os_get_pdn(void *pd, uint32_t *pdn)
-{
-       if (!pd)
-               return -EINVAL;
-
-       *pdn = ((struct mlx5_pd *)pd)->pdn;
-       return 0;
-}
-
 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};
-- 
2.25.1
