Hi Akhil,

> -----Original Message-----
> From: Akhil Goyal <gak...@marvell.com>
> Sent: Monday, October 11, 2021 1:43 PM
> To: dev@dpdk.org
> Cc: tho...@monjalon.net; david.march...@redhat.com;
> hemant.agra...@nxp.com; ano...@marvell.com; De Lara Guarch, Pablo
> <pablo.de.lara.gua...@intel.com>; Trahe, Fiona <fiona.tr...@intel.com>;
> Doherty, Declan <declan.dohe...@intel.com>; ma...@nvidia.com;
> g.si...@nxp.com; Zhang, Roy Fan <roy.fan.zh...@intel.com>;
> jianjay.z...@huawei.com; asoma...@amd.com; ruifeng.w...@arm.com;
> Ananyev, Konstantin <konstantin.anan...@intel.com>; Nicolau, Radu
> <radu.nico...@intel.com>; ajit.khapa...@broadcom.com;
> rnagadhee...@marvell.com; adwiv...@marvell.com; Power, Ciara
> <ciara.po...@intel.com>; Akhil Goyal <gak...@marvell.com>
> Subject: [PATCH v2 4/5] cryptodev: update fast path APIs to use new flat
> array
>
> Rework fast-path cryptodev functions to use rte_crypto_fp_ops[].
> While it is an API/ABI breakage, this change is intended to be
> transparent for both users (no changes in user app is required) and
> PMD developers (no changes in PMD is required).
>
> Signed-off-by: Akhil Goyal <gak...@marvell.com>
> ---
>  lib/cryptodev/rte_cryptodev.h | 27 +++++++++++++++++----------
>  1 file changed, 17 insertions(+), 10 deletions(-)
>
> diff --git a/lib/cryptodev/rte_cryptodev.h b/lib/cryptodev/rte_cryptodev.h
> index ce0dca72be..739ad529e5 100644
> --- a/lib/cryptodev/rte_cryptodev.h
> +++ b/lib/cryptodev/rte_cryptodev.h
> @@ -1832,13 +1832,18 @@ static inline uint16_t
>  rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
>  		struct rte_crypto_op **ops, uint16_t nb_ops)
>  {
> -	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> +	struct rte_crypto_fp_ops *fp_ops;

We may need to use const for fp_ops, since we only call the function
pointers in it.
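For clarity, a minimal sketch of what I mean for the dequeue path, based on
the code in this patch (illustration only, not compile-tested):

	const struct rte_crypto_fp_ops *fp_ops;
	void *qp;

	/* fp_ops is only read here and its function pointers invoked,
	 * so a pointer-to-const should be enough; the existing explicit
	 * cast in the callback path already drops the qualifier. */
	fp_ops = &rte_crypto_fp_ops[dev_id];
	qp = fp_ops->qp.data[qp_id];

	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);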
> +	void *qp;
>
>  	rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
> -	nb_ops = (*dev->dequeue_burst)
> -			(dev->data->queue_pairs[qp_id], ops, nb_ops);
> +
> +	fp_ops = &rte_crypto_fp_ops[dev_id];
> +	qp = fp_ops->qp.data[qp_id];
> +
> +	nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
> +
>  #ifdef RTE_CRYPTO_CALLBACKS
> -	if (unlikely(dev->deq_cbs != NULL)) {
> +	if (unlikely(fp_ops->qp.deq_cb != NULL)) {
>  		struct rte_cryptodev_cb_rcu *list;
>  		struct rte_cryptodev_cb *cb;
>
> @@ -1848,7 +1853,7 @@ rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
>  		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
>  		 * not required.
>  		 */
> -		list = &dev->deq_cbs[qp_id];
> +		list = (struct rte_cryptodev_cb_rcu *)&fp_ops->qp.deq_cb[qp_id];
>  		rte_rcu_qsbr_thread_online(list->qsbr, 0);
>  		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
>
> @@ -1899,10 +1904,13 @@ static inline uint16_t
>  rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
>  		struct rte_crypto_op **ops, uint16_t nb_ops)
>  {
> -	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> +	struct rte_crypto_fp_ops *fp_ops;

Same as above.

> +	void *qp;
>
> +	fp_ops = &rte_crypto_fp_ops[dev_id];
> +	qp = fp_ops->qp.data[qp_id];
>  #ifdef RTE_CRYPTO_CALLBACKS
> -	if (unlikely(dev->enq_cbs != NULL)) {
> +	if (unlikely(fp_ops->qp.enq_cb != NULL)) {
>  		struct rte_cryptodev_cb_rcu *list;
>  		struct rte_cryptodev_cb *cb;
>
> @@ -1912,7 +1920,7 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
>  		 * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
>  		 * not required.
>  		 */
> -		list = &dev->enq_cbs[qp_id];
> +		list = (struct rte_cryptodev_cb_rcu *)&fp_ops->qp.enq_cb[qp_id];
>  		rte_rcu_qsbr_thread_online(list->qsbr, 0);
>  		cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
>
> @@ -1927,8 +1935,7 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
>  #endif
>
>  	rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
> -	return (*dev->enqueue_burst)(
> -			dev->data->queue_pairs[qp_id], ops, nb_ops);
> +	return fp_ops->enqueue_burst(qp, ops, nb_ops);
>  }
>
>
> --
> 2.25.1

Other than the minor comments above,

Acked-by: Fan Zhang <roy.fan.zh...@intel.com>