On Wed, Jul 27, 2022 at 12:46 PM Shijith Thotton <sthot...@marvell.com> wrote:
>
> Moved the common crypto adapter ops to the file specific to eventdev
> adapters.
>
> Signed-off-by: Shijith Thotton <sthot...@marvell.com>


Applied to dpdk-next-net-eventdev/for-main. Thanks
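
For context, the relocated qp_add/qp_del functions are the PMD-level hooks
behind the public event crypto adapter API, so application-side usage is
unchanged by this move. A minimal sketch of that path (hypothetical adapter
ID and helper name; assumes the eventdev and cryptodev are already
configured, and that OP_NEW mode with no event argument suits the
deployment):

	#include <rte_event_crypto_adapter.h>

	#define CA_ID 0 /* hypothetical adapter instance ID */

	static int
	bind_cryptodev_to_eventdev(uint8_t evdev_id, uint8_t cdev_id,
				   struct rte_event_port_conf *port_conf)
	{
		int ret;

		/* Create an adapter instance on the given event device. */
		ret = rte_event_crypto_adapter_create(CA_ID, evdev_id, port_conf,
						      RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
		if (ret < 0)
			return ret;

		/*
		 * queue_pair_id == -1 adds every configured queue pair; the
		 * same -1 convention is handled by cnxk_crypto_adapter_qp_add()
		 * in the patch below.
		 */
		ret = rte_event_crypto_adapter_queue_pair_add(CA_ID, cdev_id,
							      -1, NULL);
		if (ret < 0)
			return ret;

		return rte_event_crypto_adapter_start(CA_ID);
	}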


> ---
>  drivers/event/cnxk/cnxk_eventdev.c       | 121 -----------------------
>  drivers/event/cnxk/cnxk_eventdev.h       |  10 +-
>  drivers/event/cnxk/cnxk_eventdev_adptr.c | 115 +++++++++++++++++++++
>  3 files changed, 118 insertions(+), 128 deletions(-)
>
> diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
> index 97dcf7b66e..b7b93778c6 100644
> --- a/drivers/event/cnxk/cnxk_eventdev.c
> +++ b/drivers/event/cnxk/cnxk_eventdev.c
> @@ -2,129 +2,8 @@
>   * Copyright(C) 2021 Marvell.
>   */
>
> -#include "cnxk_cryptodev_ops.h"
>  #include "cnxk_eventdev.h"
>
> -static int
> -crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
> -                       struct cnxk_cpt_qp *qp)
> -{
> -       char name[RTE_MEMPOOL_NAMESIZE];
> -       uint32_t cache_size, nb_req;
> -       unsigned int req_size;
> -       uint32_t nb_desc_min;
> -
> -       /*
> -        * Update CPT FC threshold. Decrement by hardware burst size to allow
> -        * simultaneous enqueue from all available cores.
> -        */
> -       if (roc_model_is_cn10k())
> -               nb_desc_min = rte_lcore_count() * 32;
> -       else
> -               nb_desc_min = rte_lcore_count() * 2;
> -
> -       if (qp->lmtline.fc_thresh < nb_desc_min) {
> -               plt_err("CPT queue depth not sufficient to allow enqueueing 
> from %d cores",
> -                       rte_lcore_count());
> -               return -ENOSPC;
> -       }
> -
> -       qp->lmtline.fc_thresh -= nb_desc_min;
> -
> -       snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
> -                cdev->data->dev_id, qp->lf.lf_id);
> -       req_size = sizeof(struct cpt_inflight_req);
> -       cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
> -       nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
> -       qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
> -                                          0, NULL, NULL, NULL, NULL,
> -                                          rte_socket_id(), 0);
> -       if (qp->ca.req_mp == NULL)
> -               return -ENOMEM;
> -
> -       qp->ca.enabled = true;
> -
> -       return 0;
> -}
> -
> -int
> -cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> -                          const struct rte_cryptodev *cdev,
> -                          int32_t queue_pair_id)
> -{
> -       struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
> -       uint32_t adptr_xae_cnt = 0;
> -       struct cnxk_cpt_qp *qp;
> -       int ret;
> -
> -       if (queue_pair_id == -1) {
> -               uint16_t qp_id;
> -
> -               for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> -                       qp = cdev->data->queue_pairs[qp_id];
> -                       ret = crypto_adapter_qp_setup(cdev, qp);
> -                       if (ret) {
> -                               cnxk_crypto_adapter_qp_del(cdev, -1);
> -                               return ret;
> -                       }
> -                       adptr_xae_cnt += qp->ca.req_mp->size;
> -               }
> -       } else {
> -               qp = cdev->data->queue_pairs[queue_pair_id];
> -               ret = crypto_adapter_qp_setup(cdev, qp);
> -               if (ret)
> -                       return ret;
> -               adptr_xae_cnt = qp->ca.req_mp->size;
> -       }
> -
> -       /* Update crypto adapter XAE count */
> -       sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
> -       cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
> -
> -       return 0;
> -}
> -
> -static int
> -crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
> -{
> -       int ret;
> -
> -       rte_mempool_free(qp->ca.req_mp);
> -       qp->ca.enabled = false;
> -
> -       ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
> -       if (ret < 0) {
> -               plt_err("Could not reset lmtline for queue pair %d",
> -                       qp->lf.lf_id);
> -               return ret;
> -       }
> -
> -       return 0;
> -}
> -
> -int
> -cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
> -                          int32_t queue_pair_id)
> -{
> -       struct cnxk_cpt_qp *qp;
> -
> -       if (queue_pair_id == -1) {
> -               uint16_t qp_id;
> -
> -               for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> -                       qp = cdev->data->queue_pairs[qp_id];
> -                       if (qp->ca.enabled)
> -                               crypto_adapter_qp_free(qp);
> -               }
> -       } else {
> -               qp = cdev->data->queue_pairs[queue_pair_id];
> -               if (qp->ca.enabled)
> -                       crypto_adapter_qp_free(qp);
> -       }
> -
> -       return 0;
> -}
> -
>  void
>  cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
>                   struct rte_event_dev_info *dev_info)
> diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
> index bfd0c5627e..c9a0686b4d 100644
> --- a/drivers/event/cnxk/cnxk_eventdev.h
> +++ b/drivers/event/cnxk/cnxk_eventdev.h
> @@ -287,13 +287,6 @@ int cnxk_sso_xstats_reset(struct rte_eventdev *event_dev,
>                           int16_t queue_port_id, const uint32_t ids[],
>                           uint32_t n);
>
> -/* Crypto adapter APIs. */
> -int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> -                              const struct rte_cryptodev *cdev,
> -                              int32_t queue_pair_id);
> -int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
> -                              int32_t queue_pair_id);
> -
>  /* CN9K */
>  void cn9k_sso_set_rsrc(void *arg);
>
> @@ -318,5 +311,8 @@ int cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
>  int cnxk_sso_tx_adapter_start(uint8_t id, const struct rte_eventdev *event_dev);
>  int cnxk_sso_tx_adapter_stop(uint8_t id, const struct rte_eventdev *event_dev);
>  int cnxk_sso_tx_adapter_free(uint8_t id, const struct rte_eventdev *event_dev);
> +int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
> +                              const struct rte_cryptodev *cdev, int32_t queue_pair_id);
> +int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id);
>
>  #endif /* __CNXK_EVENTDEV_H__ */
> diff --git a/drivers/event/cnxk/cnxk_eventdev_adptr.c b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> index 1f2e1b4b5d..3f46e79ba8 100644
> --- a/drivers/event/cnxk/cnxk_eventdev_adptr.c
> +++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
> @@ -2,6 +2,7 @@
>   * Copyright(C) 2021 Marvell.
>   */
>
> +#include "cnxk_cryptodev_ops.h"
>  #include "cnxk_ethdev.h"
>  #include "cnxk_eventdev.h"
>
> @@ -628,3 +629,117 @@ cnxk_sso_tx_adapter_free(uint8_t id __rte_unused,
>
>         return 0;
>  }
> +
> +static int
> +crypto_adapter_qp_setup(const struct rte_cryptodev *cdev, struct cnxk_cpt_qp *qp)
> +{
> +       char name[RTE_MEMPOOL_NAMESIZE];
> +       uint32_t cache_size, nb_req;
> +       unsigned int req_size;
> +       uint32_t nb_desc_min;
> +
> +       /*
> +        * Update CPT FC threshold. Decrement by hardware burst size to allow
> +        * simultaneous enqueue from all available cores.
> +        */
> +       if (roc_model_is_cn10k())
> +               nb_desc_min = rte_lcore_count() * 32;
> +       else
> +               nb_desc_min = rte_lcore_count() * 2;
> +
> +       if (qp->lmtline.fc_thresh < nb_desc_min) {
> +               plt_err("CPT queue depth not sufficient to allow enqueueing 
> from %d cores",
> +                       rte_lcore_count());
> +               return -ENOSPC;
> +       }
> +
> +       qp->lmtline.fc_thresh -= nb_desc_min;
> +
> +       snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u", cdev->data->dev_id, qp->lf.lf_id);
> +       req_size = sizeof(struct cpt_inflight_req);
> +       cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
> +       nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
> +       qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size, 0, NULL, NULL, NULL,
> +                                          NULL, rte_socket_id(), 0);
> +       if (qp->ca.req_mp == NULL)
> +               return -ENOMEM;
> +
> +       qp->ca.enabled = true;
> +
> +       return 0;
> +}
> +
> +int
> +cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev, const struct rte_cryptodev *cdev,
> +                          int32_t queue_pair_id)
> +{
> +       struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
> +       uint32_t adptr_xae_cnt = 0;
> +       struct cnxk_cpt_qp *qp;
> +       int ret;
> +
> +       if (queue_pair_id == -1) {
> +               uint16_t qp_id;
> +
> +               for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> +                       qp = cdev->data->queue_pairs[qp_id];
> +                       ret = crypto_adapter_qp_setup(cdev, qp);
> +                       if (ret) {
> +                               cnxk_crypto_adapter_qp_del(cdev, -1);
> +                               return ret;
> +                       }
> +                       adptr_xae_cnt += qp->ca.req_mp->size;
> +               }
> +       } else {
> +               qp = cdev->data->queue_pairs[queue_pair_id];
> +               ret = crypto_adapter_qp_setup(cdev, qp);
> +               if (ret)
> +                       return ret;
> +               adptr_xae_cnt = qp->ca.req_mp->size;
> +       }
> +
> +       /* Update crypto adapter XAE count */
> +       sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
> +       cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
> +
> +       return 0;
> +}
> +
> +static int
> +crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
> +{
> +       int ret;
> +
> +       rte_mempool_free(qp->ca.req_mp);
> +       qp->ca.enabled = false;
> +
> +       ret = roc_cpt_lmtline_init(qp->lf.roc_cpt, &qp->lmtline, qp->lf.lf_id);
> +       if (ret < 0) {
> +               plt_err("Could not reset lmtline for queue pair %d", 
> qp->lf.lf_id);
> +               return ret;
> +       }
> +
> +       return 0;
> +}
> +
> +int
> +cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev, int32_t queue_pair_id)
> +{
> +       struct cnxk_cpt_qp *qp;
> +
> +       if (queue_pair_id == -1) {
> +               uint16_t qp_id;
> +
> +               for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
> +                       qp = cdev->data->queue_pairs[qp_id];
> +                       if (qp->ca.enabled)
> +                               crypto_adapter_qp_free(qp);
> +               }
> +       } else {
> +               qp = cdev->data->queue_pairs[queue_pair_id];
> +               if (qp->ca.enabled)
> +                       crypto_adapter_qp_free(qp);
> +       }
> +
> +       return 0;
> +}
> --
> 2.25.1
>