Hi Abhinandan,

>> > Subject: [PATCH v3] app/eventdev: add crypto producer mode
>> >
>> > In crypto producer mode, the producer core enqueues software-generated
>> > crypto ops to the cryptodev, and the worker core dequeues crypto
>> > completion events from the eventdev. The event crypto metadata used for
>> > the above processing is pre-populated in each crypto session.
>> >
>> > Parameter --prod_type_cryptodev can be used to enable crypto producer
>> > mode. Parameter --crypto_adptr_mode can be set to select the crypto
>> > adapter mode, 0 for OP_NEW and 1 for OP_FORWARD.
>> >
>> > This mode can be used to measure the performance of the crypto adapter.
>> >
>> > Example:
>> >   ./dpdk-test-eventdev -l 0-2 -w <EVENTDEV> -w <CRYPTODEV> -- \
>> >   --prod_type_cryptodev --crypto_adptr_mode 1 --test=perf_atq \
>> >   --stlist=a --wlcores 1 --plcores 2
>> >
>> > Signed-off-by: Shijith Thotton <sthot...@marvell.com>
>> > ---
>> > v3:
>> > * Reduce dereference inside loop.
>> >
>> > v2:
>> > * Fix RHEL compilation warning.
>> >
>> >  app/test-eventdev/evt_common.h       |   3 +
>> >  app/test-eventdev/evt_main.c         |  13 +-
>> >  app/test-eventdev/evt_options.c      |  27 ++
>> >  app/test-eventdev/evt_options.h      |  12 +
>> >  app/test-eventdev/evt_test.h         |   6 +
>> >  app/test-eventdev/test_perf_atq.c    |  51 ++++
>> >  app/test-eventdev/test_perf_common.c | 410 ++++++++++++++++++++++++++-
>> >  app/test-eventdev/test_perf_common.h |  16 ++
>> >  app/test-eventdev/test_perf_queue.c  |  52 ++++
>> >  doc/guides/tools/testeventdev.rst    |  13 +
>> >  10 files changed, 596 insertions(+), 7 deletions(-)
>> >
>> > diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h
>> > index f466434459..2f301a7e79 100644
>> > --- a/app/test-eventdev/evt_common.h
>> > +++ b/app/test-eventdev/evt_common.h
>> > @@ -7,6 +7,7 @@
>> >
>> >  #include <rte_common.h>
>> >  #include <rte_debug.h>
>> > +#include <rte_event_crypto_adapter.h>
>> >  #include <rte_eventdev.h>
>> >  #include <rte_service.h>

<SNIP>

>> >
>> > +  if (opt->verbose_level > 1)
>> > +          printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
>> > +                 __func__, rte_lcore_id(), p->queue_id, p->cdev_id,
>> > +                 p->cdev_qp_id);
>> > +
>> > +  len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
>> > +
>> > +  while (count < nb_pkts && t->done == false) {
>> > +          m = rte_pktmbuf_alloc(pool);
>> > +          if (m == NULL)
>> > +                  continue;
>> > +
>> > +          rte_pktmbuf_append(m, len);
>> > +          op = rte_crypto_op_alloc(t->crypto_adptr.op_pool,
>NULL check for op is missing
 
Not added, as this is the fast path and the check was skipped to avoid a performance drop.
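
For reference, the check being discussed would look roughly like this (just a
sketch against the loop above, not part of the patch):

        op = rte_crypto_op_alloc(t->crypto_adptr.op_pool,
                                 RTE_CRYPTO_OP_TYPE_SYMMETRIC);
        if (unlikely(op == NULL)) {
                /* Sketch only: drop the mbuf and retry allocation. */
                rte_pktmbuf_free(m);
                continue;
        }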

>> > +                                   RTE_CRYPTO_OP_TYPE_SYMMETRIC);
>> > +          sym_op = op->sym;
>> > +          sym_op->m_src = m;
>> > +          sym_op->cipher.data.offset = 0;
>> > +          sym_op->cipher.data.length = len;
>> > +          rte_crypto_op_attach_sym_session(
>> > +                  op, crypto_sess[flow_counter++ % nb_flows]);
>> > +          while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) !=
>> > +                 1) {
>> > +                  if (t->done)
>> > +                          break;
>> > +                  rte_pause();
>> > +          }
>> > +          count++;
>> > +  }
>> > +}
>> > +
>> > +static inline void
>> > +crypto_adapter_enq_op_fwd(struct prod_data *p)
>> > +{
>> > +  struct rte_cryptodev_sym_session **crypto_sess = p->crypto_sess;
>> > +  const uint8_t dev_id = p->dev_id;
>> > +  const uint8_t port = p->port_id;
>> > +  struct test_perf *t = p->t;
>> > +  const uint32_t nb_flows = t->nb_flows;
>> > +  const uint64_t nb_pkts = t->nb_pkts;
>> > +  struct rte_mempool *pool = t->pool;
>> > +  struct evt_options *opt = t->opt;
>> > +  struct rte_crypto_sym_op *sym_op;
>> > +  uint32_t flow_counter = 0;
>> > +  struct rte_crypto_op *op;
>> > +  struct rte_event ev;
>> > +  struct rte_mbuf *m;
>> > +  uint64_t count = 0;
>> > +  uint16_t len;
>> > +
>> > +  if (opt->verbose_level > 1)
>> > +          printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
>> > +                 __func__, rte_lcore_id(), port, p->queue_id, p->cdev_id,
>> > +                 p->cdev_qp_id);
>> > +
>> > +  ev.event = 0;
>> > +  ev.op = RTE_EVENT_OP_NEW;
>So, this is a new event and the adapter treats it as OP_FWD?
>> > +  ev.queue_id = p->queue_id;
>> > +  ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
>> > +  ev.event_type = RTE_EVENT_TYPE_CPU;
>> > +  len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;
>> > +
>> > +  while (count < nb_pkts && t->done == false) {
>> > +          m = rte_pktmbuf_alloc(pool);
>> > +          if (m == NULL)
>> > +                  continue;
>> > +
>> > +          rte_pktmbuf_append(m, len);
>> > +          op = rte_crypto_op_alloc(t->crypto_adptr.op_pool,
>> > +                                   RTE_CRYPTO_OP_TYPE_SYMMETRIC);
>> > +          sym_op = op->sym;
>> > +          sym_op->m_src = m;
>> > +          sym_op->cipher.data.offset = 0;
>> > +          sym_op->cipher.data.length = len;
>> > +          rte_crypto_op_attach_sym_session(
>> > +                  op, crypto_sess[flow_counter++ % nb_flows]);
>> > +          ev.event_ptr = op;
>> > +          while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) !=
>> > +                 1) {
>If the adapter OP_FWD is supported by rte_event_enqueue_burst(), then even
>that path has to be tested. Please add it.
 
Added in the patch to avoid the software adapter.
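
For reference, a non-internal-port OP_FORWARD enqueue path would look roughly
like the below (a sketch only, assuming the producer port is linked to the
queue serviced by the adapter; names follow the surrounding code):

        /*
         * Sketch: when RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD is
         * not set, enqueue the event to the event device instead of calling
         * rte_event_crypto_adapter_enqueue(); the adapter service forwards
         * the crypto op to the cryptodev.
         */
        ev.event_ptr = op;
        while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
                if (t->done)
                        break;
                rte_pause();
        }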

>> > +                  if (t->done)
>> > +                          break;
>> > +                  rte_pause();
>> > +          }
>> > +          count++;
>> > +  }
>> > +}
>> > +
>> > +static inline int
>> > +perf_event_crypto_producer(void *arg)
>> > +{
>> > +  struct prod_data *p = arg;
>> > +  struct evt_options *opt = p->t->opt;
>> > +
>> > +  if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
>> > +          crypto_adapter_enq_op_new(p);
>> > +  else
>> > +          crypto_adapter_enq_op_fwd(p);
>> > +
>> > +  return 0;
>> > +}
>> > +
>> >  static int
>> >  perf_producer_wrapper(void *arg)
>> >  {
>> > @@ -298,6 +419,8 @@ perf_producer_wrapper(void *arg)
>> >    else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
>> >                    t->opt->timdev_use_burst)
>> >            return perf_event_timer_producer_burst(arg);
>> > +  else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
>> > +          return perf_event_crypto_producer(arg);
>> >    return 0;
>> >  }
>> >
>> > @@ -405,8 +528,10 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
>> >                    if (remaining <= 0) {
>> >                            t->result = EVT_TEST_SUCCESS;
>> >                            if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
>> > -                              opt->prod_type ==
>> > -                                      EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
>> > +                              opt->prod_type ==
>> > +                                      EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
>> > +                              opt->prod_type ==
>> > +                                      EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
>> >                                    t->done = true;
>> >                                    break;
>> >                            }
>> > @@ -415,7 +540,8 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
>> >
>> >            if (new_cycles - dead_lock_cycles > dead_lock_sample &&
>> >                (opt->prod_type == EVT_PROD_TYPE_SYNT ||
>> > -               opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
>> > +               opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
>> > +               opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
>> >                    remaining = t->outstand_pkts - processed_pkts(t);
>> >                    if (dead_lock_remaining == remaining) {
>> >                            rte_event_dev_dump(opt->dev_id, stdout);
>> > @@ -537,6 +663,96 @@ perf_event_timer_adapter_setup(struct test_perf *t)
>> >    return 0;
>> >  }
>> >
>> > +static int
>> > +perf_event_crypto_adapter_setup(struct test_perf *t,
>> > +                          struct rte_event_port_conf port_conf)
>> > +{
>> > +  struct evt_options *opt = t->opt;
>> > +  uint8_t cdev_id, cdev_count;
>> > +  int ret;
>> > +
>> > +  t->crypto_adptr.id = 0;
>> > +  ret = rte_event_crypto_adapter_create(t->crypto_adptr.id, opt->dev_id,
>> > +                                        &port_conf, 0);
>> > +  if (ret)
>> > +          return ret;
>> > +
>> > +  cdev_count = rte_cryptodev_count();
>> > +  for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
>> > +          uint32_t cap;
>> > +
>> > +          ret = rte_event_crypto_adapter_caps_get(opt->dev_id, cdev_id,
>> > +                                                  &cap);
>> > +          if (ret) {
>> > +                  evt_err("Failed to get crypto adapter capabilities");
>> > +                  return ret;
>> > +          }
>> > +
>> > +          if (((opt->crypto_adptr_mode ==
>> > +                RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
>> > +               !(cap &
>> > +                 RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
>> > +              ((opt->crypto_adptr_mode ==
>> > +                RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
>> > +               !(cap &
>> > +                 RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
>> > +                  evt_err("crypto adapter %s mode unsupported\n",
>> > +                          opt->crypto_adptr_mode ? "OP_FORWARD" :
>> > +                                                   "OP_NEW");
>> > +                  return -EINVAL;
>> > +          }
>> > +
>> > +          if (!(cap &
>> > +                RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
>> > +                  evt_err("Storing crypto session not supported");
>> > +                  return -EINVAL;
>> > +          }
>> > +
>> > +          if (cap &
>> > +              RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
>> > +                  struct rte_event response_info;
>> > +
>> > +                  response_info.event = 0;
>It is good that you are covering even this case. Shouldn't you fill the event
>with valid values?
 
Done
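
For reference, filling it would look roughly like the per-session response
metadata set up later in this patch (a sketch; the queue_id shown is a
hypothetical placeholder):

        struct rte_event response_info;

        response_info.event = 0;
        response_info.op = RTE_EVENT_OP_NEW;
        response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
        response_info.event_type = RTE_EVENT_TYPE_CPU;
        response_info.queue_id = 0; /* hypothetical placeholder */

        ret = rte_event_crypto_adapter_queue_pair_add(
                t->crypto_adptr.id, cdev_id, -1, &response_info);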

>> > +                  ret = rte_event_crypto_adapter_queue_pair_add(
>> > +                          t->crypto_adptr.id, cdev_id, -1,
>> > +                          &response_info);
>> > +          } else {
>> > +                  ret = rte_event_crypto_adapter_queue_pair_add(
>> > +                          t->crypto_adptr.id, cdev_id, -1, NULL);
>> > +          }
>New line
 
Done

>> > +          if (ret)
>> > +                  return ret;
>> > +  }
>> > +
>> > +  return 0;
>> > +}
>> > +
>> > +static struct rte_cryptodev_sym_session *
>> > +cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
>> > +{
>> > +  struct rte_crypto_sym_xform cipher_xform;
>> > +  struct rte_cryptodev_sym_session *sess;
>> > +
>> > +  cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
>> > +  cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
>> > +  cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
>> > +  cipher_xform.next = NULL;
>> > +
>> > +  sess = rte_cryptodev_sym_session_create(t->crypto_adptr.sess_pool);
>> > +  if (sess == NULL) {
>> > +          evt_err("Failed to create sym session");
>> > +          return NULL;
>> > +  }
>> > +
>> > +  if (rte_cryptodev_sym_session_init(p->cdev_id, sess, &cipher_xform,
>> > +                                     t->crypto_adptr.sess_priv_pool)) {
>> > +          evt_err("Failed to init session");
>> > +          return NULL;
>> > +  }
>> > +
>> > +  return sess;
>> > +}
>> > +
>> >  int
>> >  perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
>> >                            uint8_t stride, uint8_t nb_queues,
>> > @@ -598,6 +814,55 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
>> >            ret = perf_event_timer_adapter_setup(t);
>> >            if (ret)
>> >                    return ret;
>> > +  } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
>> > +          uint8_t cdev_id = 0;
>> > +          uint16_t qp_id = 0;
>> > +
>> > +          prod = 0;
>> > +          for (; port < perf_nb_event_ports(opt); port++) {
>> > +                  struct rte_cryptodev_sym_session *crypto_sess;
>> > +                  union rte_event_crypto_metadata m_data;
>> > +                  struct prod_data *p = &t->prod[port];
>> > +                  uint32_t flow_id;
>> > +
>> > +                  if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
>> > +                          cdev_id++;
>> > +                          qp_id = 0;
>> > +                  }
>> > +
>> > +                  p->dev_id = opt->dev_id;
>> > +                  p->port_id = port;
>> > +                  p->queue_id = prod * stride;
>> > +                  p->cdev_id = cdev_id;
>> > +                  p->cdev_qp_id = qp_id;
>> > +                  p->crypto_sess = rte_zmalloc_socket(
>> > +                          NULL, sizeof(crypto_sess) * t->nb_flows,
>> > +                          RTE_CACHE_LINE_SIZE, opt->socket_id);
>> > +
>> > +                  m_data.request_info.cdev_id = p->cdev_id;
>> > +                  m_data.request_info.queue_pair_id = p->cdev_qp_id;
>> > +                  m_data.response_info.op = RTE_EVENT_OP_NEW;
>> > +                  m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
>> > +                  m_data.response_info.event_type = RTE_EVENT_TYPE_CPU;
>> > +                  m_data.response_info.queue_id = p->queue_id;
>> > +                  for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
>> > +                          crypto_sess = cryptodev_sym_sess_create(p, t);
>> > +                          if (crypto_sess == NULL)
>> > +                                  return -ENOMEM;
>> > +
>> > +                          m_data.response_info.flow_id = flow_id;
>> > +                          rte_cryptodev_sym_session_set_user_data(
>> > +                                  crypto_sess, &m_data, sizeof(m_data));
>> > +                          p->crypto_sess[flow_id] = crypto_sess;
>> > +                  }
>New line
 
Done

<SNIP>
 
Please ack v4 if there are no more comments.

I tried adding support for the software adapter implementation, but I am getting
a crash in the sw_event PMD after some packets. I have posted the respective
changes here:
https://patchwork.dpdk.org/project/dpdk/patch/0677cbafa5145f1b9f64dd007594e033f2d9ab8a.1644337310.git.sthot...@marvell.com/
Please take it forward.

The command used to test is:
dpdk-test-eventdev -l 0-8 -s 0xf0 --vdev=event_sw0  --vdev="crypto_null" --    \
      --prod_type_cryptodev --crypto_adptr_mode 1 --test=perf_queue --stlist=a \
      --wlcores 1 --plcores 2
