Use the common device context structure (mlx5_dev_ctx) as a field of the crypto priv structure. Signed-off-by: Michael Baum <michae...@nvidia.com> --- drivers/crypto/mlx5/mlx5_crypto.c | 114 ++++++++++---------------- drivers/crypto/mlx5/mlx5_crypto.h | 4 +- drivers/crypto/mlx5/mlx5_crypto_dek.c | 5 +- 3 files changed, 49 insertions(+), 74 deletions(-)
diff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c index b3d5200ca3..7cb5bb5445 100644 --- a/drivers/crypto/mlx5/mlx5_crypto.c +++ b/drivers/crypto/mlx5/mlx5_crypto.c @@ -347,7 +347,8 @@ mlx5_crypto_addr2mr(struct mlx5_crypto_priv *priv, uintptr_t addr, if (likely(lkey != UINT32_MAX)) return lkey; /* Take slower bottom-half on miss. */ - return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr, + return mlx5_mr_addr2mr_bh(priv->dev_ctx->pd, 0, &priv->mr_scache, + mr_ctrl, addr, !!(ol_flags & EXT_ATTACHED_MBUF)); } @@ -621,7 +622,7 @@ mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv, struct mlx5_umr_wqe *umr; uint32_t i; struct mlx5_devx_mkey_attr attr = { - .pd = priv->pdn, + .pd = priv->dev_ctx->pdn, .umr_en = 1, .crypto_en = 1, .set_remote_rw = 1, @@ -631,7 +632,8 @@ mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv, for (umr = (struct mlx5_umr_wqe *)qp->umem_buf, i = 0; i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) { attr.klm_array = (struct mlx5_klm *)&umr->kseg[0]; - qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->ctx, &attr); + qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->dev_ctx->ctx, + &attr); if (!qp->mkey[i]) { DRV_LOG(ERR, "Failed to allocate indirect mkey."); return -1; @@ -670,7 +672,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, rte_errno = ENOMEM; return -rte_errno; } - if (mlx5_devx_cq_create(priv->ctx, &qp->cq_obj, log_nb_desc, + if (mlx5_devx_cq_create(priv->dev_ctx->ctx, &qp->cq_obj, log_nb_desc, &cq_attr, socket_id) != 0) { DRV_LOG(ERR, "Failed to create CQ."); goto error; @@ -681,7 +683,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, rte_errno = ENOMEM; goto error; } - qp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx, + qp->umem_obj = mlx5_glue->devx_umem_reg(priv->dev_ctx->ctx, (void *)(uintptr_t)qp->umem_buf, umem_size, IBV_ACCESS_LOCAL_WRITE); @@ -697,7 +699,7 @@ 
mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, goto error; } qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen; - attr.pd = priv->pdn; + attr.pd = priv->dev_ctx->pdn; attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar); attr.cqn = qp->cq_obj.cq->id; attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE)); @@ -708,7 +710,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, attr.wq_umem_offset = 0; attr.dbr_umem_id = qp->umem_obj->umem_id; attr.dbr_address = RTE_BIT64(log_nb_desc) * priv->wqe_set_size; - qp->qp_obj = mlx5_devx_cmd_create_qp(priv->ctx, &attr); + qp->qp_obj = mlx5_devx_cmd_create_qp(priv->dev_ctx->ctx, &attr); if (qp->qp_obj == NULL) { DRV_LOG(ERR, "Failed to create QP(%u).", rte_errno); goto error; @@ -782,58 +784,20 @@ static struct rte_cryptodev_ops mlx5_crypto_ops = { static void mlx5_crypto_hw_global_release(struct mlx5_crypto_priv *priv) { - if (priv->pd != NULL) { - claim_zero(mlx5_glue->dealloc_pd(priv->pd)); - priv->pd = NULL; - } if (priv->uar != NULL) { mlx5_glue->devx_free_uar(priv->uar); priv->uar = NULL; } } -static int -mlx5_crypto_pd_create(struct mlx5_crypto_priv *priv) -{ -#ifdef HAVE_IBV_FLOW_DV_SUPPORT - struct mlx5dv_obj obj; - struct mlx5dv_pd pd_info; - int ret; - - priv->pd = mlx5_glue->alloc_pd(priv->ctx); - if (priv->pd == NULL) { - DRV_LOG(ERR, "Failed to allocate PD."); - return errno ? 
-errno : -ENOMEM; - } - obj.pd.in = priv->pd; - obj.pd.out = &pd_info; - ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD); - if (ret != 0) { - DRV_LOG(ERR, "Fail to get PD object info."); - mlx5_glue->dealloc_pd(priv->pd); - priv->pd = NULL; - return -errno; - } - priv->pdn = pd_info.pdn; - return 0; -#else - (void)priv; - DRV_LOG(ERR, "Cannot get pdn - no DV support."); - return -ENOTSUP; -#endif /* HAVE_IBV_FLOW_DV_SUPPORT */ -} - static int mlx5_crypto_hw_global_prepare(struct mlx5_crypto_priv *priv) { - if (mlx5_crypto_pd_create(priv) != 0) - return -1; - priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1); + priv->uar = mlx5_devx_alloc_uar(priv->dev_ctx->ctx, -1); if (priv->uar) priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar); if (priv->uar == NULL || priv->uar_addr == NULL) { rte_errno = errno; - claim_zero(mlx5_glue->dealloc_pd(priv->pd)); DRV_LOG(ERR, "Failed to allocate UAR."); return -1; } @@ -966,7 +930,8 @@ mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, /* Iterate all the existing mlx5 devices. 
*/ TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next) mlx5_free_mr_by_addr(&priv->mr_scache, - priv->ctx->device->name, + mlx5_os_get_ctx_device_name + (priv->dev_ctx->ctx), addr, len); pthread_mutex_unlock(&priv_list_lock); break; @@ -979,9 +944,8 @@ mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, static int mlx5_crypto_dev_probe(struct rte_device *dev) { - struct ibv_device *ibv; struct rte_cryptodev *crypto_dev; - struct ibv_context *ctx; + struct mlx5_dev_ctx *dev_ctx; struct mlx5_devx_obj *login; struct mlx5_crypto_priv *priv; struct mlx5_crypto_devarg_params devarg_prms = { 0 }; @@ -993,6 +957,7 @@ mlx5_crypto_dev_probe(struct rte_device *dev) .max_nb_queue_pairs = RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, }; + const char *ibdev_name; uint16_t rdmw_wqe_size; int ret; @@ -1001,57 +966,66 @@ mlx5_crypto_dev_probe(struct rte_device *dev) rte_errno = ENOTSUP; return -rte_errno; } - ibv = mlx5_os_get_ibv_dev(dev); - if (ibv == NULL) + dev_ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_dev_ctx), + RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); + if (dev_ctx == NULL) { + DRV_LOG(ERR, "Device context allocation failure."); + rte_errno = ENOMEM; return -rte_errno; - ctx = mlx5_glue->dv_open_device(ibv); - if (ctx == NULL) { - DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name); + } + ret = mlx5_dev_ctx_prepare(dev_ctx, dev, MLX5_CLASS_CRYPTO); + if (ret < 0) { + DRV_LOG(ERR, "Failed to create device context."); + mlx5_free(dev_ctx); rte_errno = ENODEV; return -rte_errno; } - if (mlx5_devx_cmd_query_hca_attr(ctx, &attr) != 0 || + ibdev_name = mlx5_os_get_ctx_device_name(dev_ctx->ctx); + if (mlx5_devx_cmd_query_hca_attr(dev_ctx->ctx, &attr) != 0 || attr.crypto == 0 || attr.aes_xts == 0) { DRV_LOG(ERR, "Not enough capabilities to support crypto " "operations, maybe old FW/OFED version?"); - claim_zero(mlx5_glue->close_device(ctx)); + mlx5_dev_ctx_release(dev_ctx); + mlx5_free(dev_ctx); rte_errno = ENOTSUP; return -ENOTSUP; } ret = 
mlx5_crypto_parse_devargs(dev->devargs, &devarg_prms); if (ret) { DRV_LOG(ERR, "Failed to parse devargs."); - claim_zero(mlx5_glue->close_device(ctx)); + mlx5_dev_ctx_release(dev_ctx); + mlx5_free(dev_ctx); return -rte_errno; } - login = mlx5_devx_cmd_create_crypto_login_obj(ctx, + login = mlx5_devx_cmd_create_crypto_login_obj(dev_ctx->ctx, &devarg_prms.login_attr); if (login == NULL) { DRV_LOG(ERR, "Failed to configure login."); - claim_zero(mlx5_glue->close_device(ctx)); + mlx5_dev_ctx_release(dev_ctx); + mlx5_free(dev_ctx); return -rte_errno; } - crypto_dev = rte_cryptodev_pmd_create(ibv->name, dev, - &init_params); + crypto_dev = rte_cryptodev_pmd_create(ibdev_name, dev, &init_params); if (crypto_dev == NULL) { - DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name); - claim_zero(mlx5_glue->close_device(ctx)); + DRV_LOG(ERR, "Failed to create device \"%s\".", ibdev_name); + mlx5_dev_ctx_release(dev_ctx); + mlx5_free(dev_ctx); return -ENODEV; } - DRV_LOG(INFO, - "Crypto device %s was created successfully.", ibv->name); + DRV_LOG(INFO, "Crypto device %s was created successfully.", ibdev_name); crypto_dev->dev_ops = &mlx5_crypto_ops; crypto_dev->dequeue_burst = mlx5_crypto_dequeue_burst; crypto_dev->enqueue_burst = mlx5_crypto_enqueue_burst; crypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS; crypto_dev->driver_id = mlx5_crypto_driver_id; priv = crypto_dev->data->dev_private; - priv->ctx = ctx; + priv->dev_ctx = dev_ctx; priv->login_obj = login; priv->crypto_dev = crypto_dev; if (mlx5_crypto_hw_global_prepare(priv) != 0) { rte_cryptodev_pmd_destroy(priv->crypto_dev); - claim_zero(mlx5_glue->close_device(priv->ctx)); + mlx5_dev_ctx_release(priv->dev_ctx); + mlx5_free(priv->dev_ctx); return -1; } if (mlx5_mr_btree_init(&priv->mr_scache.cache, @@ -1059,7 +1033,8 @@ mlx5_crypto_dev_probe(struct rte_device *dev) DRV_LOG(ERR, "Failed to allocate shared cache MR memory."); mlx5_crypto_hw_global_release(priv); rte_cryptodev_pmd_destroy(priv->crypto_dev); - 
claim_zero(mlx5_glue->close_device(priv->ctx)); + mlx5_dev_ctx_release(priv->dev_ctx); + mlx5_free(priv->dev_ctx); rte_errno = ENOMEM; return -rte_errno; } @@ -1109,7 +1084,8 @@ mlx5_crypto_dev_remove(struct rte_device *dev) mlx5_crypto_hw_global_release(priv); rte_cryptodev_pmd_destroy(priv->crypto_dev); claim_zero(mlx5_devx_cmd_destroy(priv->login_obj)); - claim_zero(mlx5_glue->close_device(priv->ctx)); + mlx5_dev_ctx_release(priv->dev_ctx); + mlx5_free(priv->dev_ctx); } return 0; } diff --git a/drivers/crypto/mlx5/mlx5_crypto.h b/drivers/crypto/mlx5/mlx5_crypto.h index d49b0001f0..7ae05f0b00 100644 --- a/drivers/crypto/mlx5/mlx5_crypto.h +++ b/drivers/crypto/mlx5/mlx5_crypto.h @@ -19,13 +19,11 @@ struct mlx5_crypto_priv { TAILQ_ENTRY(mlx5_crypto_priv) next; - struct ibv_context *ctx; /* Device context. */ + struct mlx5_dev_ctx *dev_ctx; /* Device context. */ struct rte_cryptodev *crypto_dev; void *uar; /* User Access Region. */ volatile uint64_t *uar_addr; - uint32_t pdn; /* Protection Domain number. */ uint32_t max_segs_num; /* Maximum supported data segs. */ - struct ibv_pd *pd; struct mlx5_hlist *dek_hlist; /* Dek hash list. */ struct rte_cryptodev_config dev_config; struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. 
*/ diff --git a/drivers/crypto/mlx5/mlx5_crypto_dek.c b/drivers/crypto/mlx5/mlx5_crypto_dek.c index 67b1fa3819..91c06fffbb 100644 --- a/drivers/crypto/mlx5/mlx5_crypto_dek.c +++ b/drivers/crypto/mlx5/mlx5_crypto_dek.c @@ -94,7 +94,7 @@ mlx5_crypto_dek_create_cb(void *tool_ctx __rte_unused, void *cb_ctx) struct mlx5_crypto_dek *dek = rte_zmalloc(__func__, sizeof(*dek), RTE_CACHE_LINE_SIZE); struct mlx5_devx_dek_attr dek_attr = { - .pd = ctx->priv->pdn, + .pd = ctx->priv->dev_ctx->pdn, .key_purpose = MLX5_CRYPTO_KEY_PURPOSE_AES_XTS, .has_keytag = 1, }; @@ -117,7 +117,8 @@ mlx5_crypto_dek_create_cb(void *tool_ctx __rte_unused, void *cb_ctx) return NULL; } memcpy(&dek_attr.key, cipher_ctx->key.data, cipher_ctx->key.length); - dek->obj = mlx5_devx_cmd_create_dek_obj(ctx->priv->ctx, &dek_attr); + dek->obj = mlx5_devx_cmd_create_dek_obj(ctx->priv->dev_ctx->ctx, + &dek_attr); if (dek->obj == NULL) { rte_free(dek); return NULL; -- 2.25.1