Add structures for ML event adapter. Implement ML event adapter queue-pair add and delete functions.
Signed-off-by: Srikanth Yalavarthi <syalavar...@marvell.com> --- drivers/event/cnxk/cn10k_eventdev.c | 103 ++++++++++++++++++++++++++++ drivers/event/cnxk/cnxk_eventdev.h | 4 ++ drivers/ml/cnxk/cnxk_ml_ops.h | 12 ++++ 3 files changed, 119 insertions(+) diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c index 09eff569052..201972cec9e 100644 --- a/drivers/event/cnxk/cn10k_eventdev.c +++ b/drivers/event/cnxk/cn10k_eventdev.c @@ -1033,6 +1033,107 @@ cn10k_ml_adapter_caps_get(const struct rte_eventdev *event_dev, const struct rte return 0; } +static int +ml_adapter_qp_free(struct cnxk_ml_qp *qp) +{ + rte_mempool_free(qp->mla.req_mp); + qp->mla.enabled = false; + + return 0; +} + +static int +ml_adapter_qp_setup(const struct rte_ml_dev *mldev, struct cnxk_ml_qp *qp) +{ + char name[RTE_MEMPOOL_NAMESIZE]; + uint32_t cache_size, nb_req; + unsigned int req_size; + + snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_mla_req_%u_%u", mldev->data->dev_id, qp->id); + req_size = sizeof(struct cn10k_ml_req); + cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE; + nb_req = cache_size * rte_lcore_count(); + qp->mla.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0, NULL, NULL, NULL, + NULL, rte_socket_id(), 0); + if (qp->mla.req_mp == NULL) + return -ENOMEM; + + qp->mla.enabled = true; + + return 0; +} + +static int +cn10k_ml_adapter_qp_del(const struct rte_eventdev *event_dev, const struct rte_ml_dev *mldev, + int32_t queue_pair_id) +{ + struct cnxk_ml_qp *qp; + + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); + CNXK_VALID_DEV_OR_ERR_RET(mldev->device, "ml_cn10k", EINVAL); + + if (queue_pair_id == -1) { + uint16_t qp_id; + + for (qp_id = 0; qp_id < mldev->data->nb_queue_pairs; qp_id++) { + qp = mldev->data->queue_pairs[qp_id]; + if (qp->mla.enabled) + ml_adapter_qp_free(qp); + } + } else { + qp = mldev->data->queue_pairs[queue_pair_id]; + if (qp->mla.enabled) + ml_adapter_qp_free(qp); + } + + return 0; +} + +static int 
+cn10k_ml_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rte_ml_dev *mldev, + int32_t queue_pair_id, const struct rte_event *event) +{ + struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev); + uint32_t adptr_xae_cnt = 0; + struct cnxk_ml_qp *qp; + int ret = 0; + + PLT_SET_USED(event); + + CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL); + CNXK_VALID_DEV_OR_ERR_RET(mldev->device, "ml_cn10k", EINVAL); + + sso_evdev->is_mla_internal_port = 1; + cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev); + + if (queue_pair_id == -1) { + uint16_t qp_id; + + for (qp_id = 0; qp_id < mldev->data->nb_queue_pairs; qp_id++) { + qp = mldev->data->queue_pairs[qp_id]; + ret = ml_adapter_qp_setup(mldev, qp); + if (ret != 0) { + cn10k_ml_adapter_qp_del(event_dev, mldev, -1); + return ret; + } + adptr_xae_cnt += qp->mla.req_mp->size; + } + } else { + qp = mldev->data->queue_pairs[queue_pair_id]; + ret = ml_adapter_qp_setup(mldev, qp); + if (ret != 0) + return ret; + + adptr_xae_cnt = qp->mla.req_mp->size; + } + + /* Update ML adapter XAE count */ + sso_evdev->adptr_xae_cnt += adptr_xae_cnt; + cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev); + + return ret; +} + static struct eventdev_ops cn10k_sso_dev_ops = { .dev_infos_get = cn10k_sso_info_get, .dev_configure = cn10k_sso_dev_configure, @@ -1075,6 +1176,8 @@ static struct eventdev_ops cn10k_sso_dev_ops = { .crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits, .ml_adapter_caps_get = cn10k_ml_adapter_caps_get, + .ml_adapter_queue_pair_add = cn10k_ml_adapter_qp_add, + .ml_adapter_queue_pair_del = cn10k_ml_adapter_qp_del, .xstats_get = cnxk_sso_xstats_get, .xstats_reset = cnxk_sso_xstats_reset, diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h index d42d1afa1a1..bc51e952c9a 100644 --- a/drivers/event/cnxk/cnxk_eventdev.h +++ b/drivers/event/cnxk/cnxk_eventdev.h @@ -124,6 +124,10 @@ struct cnxk_sso_evdev { uint32_t 
gw_mode; uint16_t stash_cnt; struct cnxk_sso_stash *stash_parse_data; + /* Crypto adapter */ + uint8_t is_ca_internal_port; + /* ML adapter */ + uint8_t is_mla_internal_port; } __rte_cache_aligned; /* Event port a.k.a GWS */ diff --git a/drivers/ml/cnxk/cnxk_ml_ops.h b/drivers/ml/cnxk/cnxk_ml_ops.h index 7b49793a574..81f91df2a80 100644 --- a/drivers/ml/cnxk/cnxk_ml_ops.h +++ b/drivers/ml/cnxk/cnxk_ml_ops.h @@ -5,6 +5,7 @@ #ifndef _CNXK_ML_OPS_H_ #define _CNXK_ML_OPS_H_ +#include <rte_mempool.h> #include <rte_mldev.h> #include <rte_mldev_core.h> @@ -56,6 +57,14 @@ struct cnxk_ml_queue { uint64_t wait_cycles; }; +struct cnxk_ml_adapter_info { + /**< Set if queue pair is added to ML adapter */ + bool enabled; + + /**< ML in-flight request mempool */ + struct rte_mempool *req_mp; +}; + /* Queue-pair structure */ struct cnxk_ml_qp { /* ID */ @@ -69,6 +78,9 @@ struct cnxk_ml_qp { /* Statistics per queue-pair */ struct rte_ml_dev_stats stats; + + /**< ML adapter related info */ + struct cnxk_ml_adapter_info mla; }; extern struct rte_ml_dev_ops cnxk_ml_ops; -- 2.42.0