>
> > -----Original Message-----
> > From: Ananyev, Konstantin <konstantin.anan...@intel.com>
> > Sent: Tuesday, October 27, 2020 6:18 PM
> > To: Gujjar, Abhinandan S <abhinandan.guj...@intel.com>; dev@dpdk.org;
> > Doherty, Declan <declan.dohe...@intel.com>; akhil.go...@nxp.com;
> > honnappa.nagaraha...@arm.com
> > Cc: Vangati, Narender <narender.vang...@intel.com>; jer...@marvell.com
> > Subject: RE: [v4 1/3] cryptodev: support enqueue callback functions
> >
> > >
> > > This patch adds APIs to add/remove callback functions. The callback
> > > function will be called for each burst of crypto ops received on a
> > > given crypto device queue pair.
> > >
> > > Signed-off-by: Abhinandan Gujjar <abhinandan.guj...@intel.com>
> > > ---
> > >  config/rte_config.h                            |   1 +
> > >  lib/librte_cryptodev/meson.build               |   2 +-
> > >  lib/librte_cryptodev/rte_cryptodev.c           | 230 +++++++++++++++++++++++++
> > >  lib/librte_cryptodev/rte_cryptodev.h           | 158 ++++++++++++++++-
> > >  lib/librte_cryptodev/rte_cryptodev_version.map |   2 +
> > >  5 files changed, 391 insertions(+), 2 deletions(-)
> > >
> > > diff --git a/config/rte_config.h b/config/rte_config.h
> > > index 03d90d7..e999d93 100644
> > > --- a/config/rte_config.h
> > > +++ b/config/rte_config.h
> > > @@ -61,6 +61,7 @@
> > >  /* cryptodev defines */
> > >  #define RTE_CRYPTO_MAX_DEVS 64
> > >  #define RTE_CRYPTODEV_NAME_LEN 64
> > > +#define RTE_CRYPTO_CALLBACKS 1
> > >
> > >  /* compressdev defines */
> > >  #define RTE_COMPRESS_MAX_DEVS 64
> > > diff --git a/lib/librte_cryptodev/meson.build b/lib/librte_cryptodev/meson.build
> > > index c4c6b3b..8c5493f 100644
> > > --- a/lib/librte_cryptodev/meson.build
> > > +++ b/lib/librte_cryptodev/meson.build
> > > @@ -9,4 +9,4 @@ headers = files('rte_cryptodev.h',
> > >      'rte_crypto.h',
> > >      'rte_crypto_sym.h',
> > >      'rte_crypto_asym.h')
> > > -deps += ['kvargs', 'mbuf']
> > > +deps += ['kvargs', 'mbuf', 'rcu']
> > > diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
> > > index 3d95ac6..0880d9b 100644
> > > --- a/lib/librte_cryptodev/rte_cryptodev.c
> > > +++ b/lib/librte_cryptodev/rte_cryptodev.c
> > > @@ -448,6 +448,91 @@ struct rte_cryptodev_sym_session_pool_private_data {
> > >      return 0;
> > >  }
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +/* spinlock for crypto device enq callbacks */
> > > +static rte_spinlock_t rte_cryptodev_callback_lock = RTE_SPINLOCK_INITIALIZER;
> > > +
> > > +static void
> > > +cryptodev_cb_cleanup(struct rte_cryptodev *dev)
> > > +{
> > > +    struct rte_cryptodev_cb **prev_cb, *curr_cb;
> > > +    struct rte_cryptodev_enq_cb_rcu *list;
> > > +    uint16_t qp_id;
> > > +
> > > +    if (dev->enq_cbs == NULL)
> > > +        return;
> > > +
> > > +    for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> > > +        list = &dev->enq_cbs[qp_id];
> > > +        prev_cb = &list->next;
> > > +
> > > +        while (*prev_cb != NULL) {
> > > +            curr_cb = *prev_cb;
> > > +            /* Remove the user cb from the callback list. */
> > > +            __atomic_store_n(prev_cb, curr_cb->next,
> > > +                __ATOMIC_RELAXED);
> > > +            rte_rcu_qsbr_synchronize(list->qsbr,
> > > +                RTE_QSBR_THRID_INVALID);
> >
> > You call this function (cb_cleanup) only at dev_configure().
> > At that moment DP threads can't do enqueue/dequeue anyway.
> > So you can safely skip all this synchronization code here and just do:
> >
> > cb = list->next;
> > while (cb != NULL) {
> >     next = cb->next;
> >     rte_free(cb);
> >     cb = next;
> > }
>
> Ok
>
> > > +            rte_free(curr_cb);
> >
> > One thing that makes it sort of a grey area:
> > we do free() for the cb itself, but user-provided data will be sort of 'lost',
> > as it is not referenced from our cb struct anymore...
> > I see two options here - first, just document explicitly that callbacks
> > wouldn't survive cryptodev_configure() and it is the user's responsibility
> > to remove all installed callbacks before doing dev_configure() to avoid
> > possible memory leakage.
>
> Ok. I will update the documentation for this and send a new patch set.
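
Just to make the expectation concrete for the docs - a rough, untested sketch
of the application-side flow I have in mind (the app_* names below are purely
illustrative, nothing from the patch itself):

#include <stdlib.h>
#include <rte_cryptodev.h>

/* Sketch only: callbacks (and whatever cb_arg points to) do not survive
 * reconfiguration, so the application removes them first, releases its own
 * cb_arg data, and only then calls rte_cryptodev_configure() again.
 */
static struct rte_cryptodev_cb *app_cb;	/* returned by add_enq_callback() */
static void *app_cb_arg;		/* user data passed as cb_arg */

static int
app_reconfigure(uint8_t dev_id, uint16_t qp_id,
		struct rte_cryptodev_config *conf)
{
	if (app_cb != NULL) {
		/* Waits for the DP thread via RCU before freeing the cb. */
		rte_cryptodev_remove_enq_callback(dev_id, qp_id, app_cb);
		app_cb = NULL;
	}

	/* Nothing references the user data any more, safe to release it. */
	free(app_cb_arg);
	app_cb_arg = NULL;

	return rte_cryptodev_configure(dev_id, conf);
}
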
Ok, please keep my ack on your new version.

> > Another option - add a user-provided cleanup() function pointer into
> > struct rte_cryptodev_cb and call it here if provided:
> >
> > struct rte_cryptodev_cb {
> >     struct rte_cryptodev_cb *next;
> >     /**< Pointer to next callback */
> >     rte_cryptodev_callback_fn fn;
> >     /**< Pointer to callback function */
> >     void *arg;
> >     /**< Pointer to argument */
> >     void (*cleanup)(void *);
> > };
> >
> > And here:
> >
> > if (curr_cb->cleanup != NULL)
> >     curr_cb->cleanup(curr_cb->arg);
> >
> > rte_free(curr_cb);
> >
> > Rest of the code - LGTM.
> > So with that addressed:
> > Acked-by: Konstantin Ananyev <konstantin.anan...@intel.com>
> >
> > > +        }
> > > +
> > > +        rte_free(list->qsbr);
> > > +    }
> > > +
> > > +    rte_free(dev->enq_cbs);
> > > +    dev->enq_cbs = NULL;
> > > +}
> > > +
> > > +static int
> > > +cryptodev_cb_init(struct rte_cryptodev *dev)
> > > +{
> > > +    struct rte_cryptodev_enq_cb_rcu *list;
> > > +    struct rte_rcu_qsbr *qsbr;
> > > +    uint16_t qp_id;
> > > +    size_t size;
> > > +
> > > +    /* Max thread set to 1, as one DP thread accessing a queue-pair */
> > > +    const uint32_t max_threads = 1;
> > > +
> > > +    dev->enq_cbs = rte_zmalloc(NULL,
> > > +        sizeof(struct rte_cryptodev_enq_cb_rcu) *
> > > +        dev->data->nb_queue_pairs, 0);
> > > +    if (dev->enq_cbs == NULL) {
> > > +        CDEV_LOG_ERR("Failed to allocate memory for callbacks");
> > > +        rte_errno = ENOMEM;
> > > +        return -1;
> > > +    }
> > > +
> > > +    /* Create RCU QSBR variable */
> > > +    size = rte_rcu_qsbr_get_memsize(max_threads);
> > > +
> > > +    for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
> > > +        list = &dev->enq_cbs[qp_id];
> > > +        qsbr = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
> > > +        if (qsbr == NULL) {
> > > +            CDEV_LOG_ERR("Failed to allocate memory for RCU on "
> > > +                "queue_pair_id=%d", qp_id);
> > > +            goto cb_init_err;
> > > +        }
> > > +
> > > +        if (rte_rcu_qsbr_init(qsbr, max_threads)) {
> > > +            CDEV_LOG_ERR("Failed to initialize for RCU on "
> > > +                "queue_pair_id=%d", qp_id);
> > > +            goto cb_init_err;
> > > +        }
> > > +
> > > +        list->qsbr = qsbr;
> > > +    }
> > > +
> > > +    return 0;
> > > +
> > > +cb_init_err:
> > > +    rte_errno = ENOMEM;
> > > +    cryptodev_cb_cleanup(dev);
> > > +    return -1;
> > > +
> > > +}
> > > +#endif
> > >
> > >  const char *
> > >  rte_cryptodev_get_feature_name(uint64_t flag)
> > > @@ -927,6 +1012,11 @@ struct rte_cryptodev *
> > >
> > >      RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +    rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > +    cryptodev_cb_cleanup(dev);
> > > +    rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > +#endif
> > >      /* Setup new number of queue pairs and reconfigure device. */
> > >      diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs,
> > >              config->socket_id);
> > > @@ -936,6 +1026,15 @@ struct rte_cryptodev *
> > >          return diag;
> > >      }
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +    rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > +    diag = cryptodev_cb_init(dev);
> > > +    rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > +    if (diag) {
> > > +        CDEV_LOG_ERR("Callback init failed for dev_id=%d", dev_id);
> > > +        return -ENOMEM;
> > > +    }
> > > +#endif
> > >      rte_cryptodev_trace_configure(dev_id, config);
> > >      return (*dev->dev_ops->dev_configure)(dev, config);
> > >  }
> > > @@ -1136,6 +1235,137 @@ struct rte_cryptodev *
> > >              socket_id);
> > >  }
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +struct rte_cryptodev_cb *
> > > +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> > > +                   uint16_t qp_id,
> > > +                   rte_cryptodev_callback_fn cb_fn,
> > > +                   void *cb_arg)
> > > +{
> > > +    struct rte_cryptodev *dev;
> > > +    struct rte_cryptodev_enq_cb_rcu *list;
> > > +    struct rte_cryptodev_cb *cb, *tail;
> > > +
> > > +    if (!cb_fn)
> > > +        return NULL;
> > > +
> > > +    if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> > > +        CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> > > +        return NULL;
> > > +    }
> > > +
> > > +    dev = &rte_crypto_devices[dev_id];
> > > +    if (qp_id >= dev->data->nb_queue_pairs) {
> > > +        CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> > > +        return NULL;
> > > +    }
> > > +
> > > +    cb = rte_zmalloc(NULL, sizeof(*cb), 0);
> > > +    if (cb == NULL) {
> > > +        CDEV_LOG_ERR("Failed to allocate memory for callback on "
> > > +            "dev=%d, queue_pair_id=%d", dev_id, qp_id);
> > > +        rte_errno = ENOMEM;
> > > +        return NULL;
> > > +    }
> > > +
> > > +    rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > +
> > > +    cb->fn = cb_fn;
> > > +    cb->arg = cb_arg;
> > > +
> > > +    /* Add the callbacks in fifo order. */
> > > +    list = &dev->enq_cbs[qp_id];
> > > +    tail = list->next;
> > > +
> > > +    if (tail) {
> > > +        while (tail->next)
> > > +            tail = tail->next;
> > > +        /* Stores to cb->fn and cb->param should complete before
> > > +         * cb is visible to data plane.
> > > +         */
> > > +        __atomic_store_n(&tail->next, cb, __ATOMIC_RELEASE);
> > > +    } else {
> > > +        /* Stores to cb->fn and cb->param should complete before
> > > +         * cb is visible to data plane.
> > > +         */
> > > +        __atomic_store_n(&list->next, cb, __ATOMIC_RELEASE);
> > > +    }
> > > +
> > > +    rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > +
> > > +    return cb;
> > > +}
> > > +
> > > +int
> > > +rte_cryptodev_remove_enq_callback(uint8_t dev_id,
> > > +                  uint16_t qp_id,
> > > +                  struct rte_cryptodev_cb *cb)
> > > +{
> > > +    struct rte_cryptodev *dev;
> > > +    struct rte_cryptodev_cb **prev_cb, *curr_cb;
> > > +    struct rte_cryptodev_enq_cb_rcu *list;
> > > +    int ret;
> > > +
> > > +    ret = -EINVAL;
> > > +
> > > +    if (!cb) {
> > > +        CDEV_LOG_ERR("cb is NULL");
> > > +        return ret;
> > > +    }
> > > +
> > > +    if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
> > > +        CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
> > > +        return ret;
> > > +    }
> > > +
> > > +    dev = &rte_crypto_devices[dev_id];
> > > +    if (qp_id >= dev->data->nb_queue_pairs) {
> > > +        CDEV_LOG_ERR("Invalid queue_pair_id=%d", qp_id);
> > > +        return ret;
> > > +    }
> > > +
> > > +    rte_spinlock_lock(&rte_cryptodev_callback_lock);
> > > +    if (dev->enq_cbs == NULL) {
> > > +        CDEV_LOG_ERR("Callback not initialized");
> > > +        goto cb_err;
> > > +    }
> > > +
> > > +    list = &dev->enq_cbs[qp_id];
> > > +    if (list == NULL) {
> > > +        CDEV_LOG_ERR("Callback list is NULL");
> > > +        goto cb_err;
> > > +    }
> > > +
> > > +    if (list->qsbr == NULL) {
> > > +        CDEV_LOG_ERR("Rcu qsbr is NULL");
> > > +        goto cb_err;
> > > +    }
> > > +
> > > +    prev_cb = &list->next;
> > > +    for (; *prev_cb != NULL; prev_cb = &curr_cb->next) {
> > > +        curr_cb = *prev_cb;
> > > +        if (curr_cb == cb) {
> > > +            /* Remove the user cb from the callback list. */
> > > +            __atomic_store_n(prev_cb, curr_cb->next,
> > > +                __ATOMIC_RELAXED);
> > > +            ret = 0;
> > > +            break;
> > > +        }
> > > +    }
> > > +
> > > +    if (!ret) {
> > > +        /* Call sync with invalid thread id as this is part of
> > > +         * control plane API
> > > +         */
> > > +        rte_rcu_qsbr_synchronize(list->qsbr, RTE_QSBR_THRID_INVALID);
> > > +        rte_free(cb);
> > > +    }
> > > +
> > > +cb_err:
> > > +    rte_spinlock_unlock(&rte_cryptodev_callback_lock);
> > > +    return ret;
> > > +}
> > > +#endif
> > >
> > >  int
> > >  rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
> > > diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
> > > index 0935fd5..1b7d7ef 100644
> > > --- a/lib/librte_cryptodev/rte_cryptodev.h
> > > +++ b/lib/librte_cryptodev/rte_cryptodev.h
> > > @@ -23,6 +23,7 @@
> > >  #include "rte_dev.h"
> > >  #include <rte_common.h>
> > >  #include <rte_config.h>
> > > +#include <rte_rcu_qsbr.h>
> > >
> > >  #include "rte_cryptodev_trace_fp.h"
> > >
> > > @@ -522,6 +523,34 @@ struct rte_cryptodev_qp_conf {
> > >      /**< The mempool for creating sess private data in sessionless mode */
> > >  };
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +/**
> > > + * Function type used for pre processing crypto ops when enqueue burst is
> > > + * called.
> > > + *
> > > + * The callback function is called on enqueue burst immediately
> > > + * before the crypto ops are put onto the hardware queue for processing.
> > > + *
> > > + * @param dev_id      The identifier of the device.
> > > + * @param qp_id       The index of the queue pair in which ops are
> > > + *                    to be enqueued for processing. The value
> > > + *                    must be in the range [0, nb_queue_pairs - 1]
> > > + *                    previously supplied to
> > > + *                    *rte_cryptodev_configure*.
> > > + * @param ops         The address of an array of *nb_ops* pointers
> > > + *                    to *rte_crypto_op* structures which contain
> > > + *                    the crypto operations to be processed.
> > > + * @param nb_ops      The number of operations to process.
> > > + * @param user_param  The arbitrary user parameter passed in by the
> > > + *                    application when the callback was originally
> > > + *                    registered.
> > > + * @return            The number of ops to be enqueued to the
> > > + *                    crypto device.
> > > + */
> > > +typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
> > > +        struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
> > > +#endif
> > > +
> > >  /**
> > >   * Typedef for application callback function to be registered by application
> > >   * software for notification of device events
> > > @@ -822,7 +851,6 @@ struct rte_cryptodev_config {
> > >          enum rte_cryptodev_event_type event,
> > >          rte_cryptodev_cb_fn cb_fn, void *cb_arg);
> > >
> > > -
> > >  typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
> > >          struct rte_crypto_op **ops, uint16_t nb_ops);
> > >  /**< Dequeue processed packets from queue pair of a device. */
> > > @@ -839,6 +867,33 @@ typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
> > >  /** Structure to keep track of registered callbacks */
> > >  TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +/**
> > > + * @internal
> > > + * Structure used to hold information about the callbacks to be called for a
> > > + * queue pair on enqueue.
> > > + */
> > > +struct rte_cryptodev_cb {
> > > +    struct rte_cryptodev_cb *next;
> > > +    /** < Pointer to next callback */
> > > +    rte_cryptodev_callback_fn fn;
> > > +    /** < Pointer to callback function */
> > > +    void *arg;
> > > +    /** < Pointer to argument */
> > > +};
> > > +
> > > +/**
> > > + * @internal
> > > + * Structure used to hold information about the RCU for a queue pair.
> > > + */
> > > +struct rte_cryptodev_enq_cb_rcu {
> > > +    struct rte_cryptodev_cb *next;
> > > +    /** < Pointer to next callback */
> > > +    struct rte_rcu_qsbr *qsbr;
> > > +    /** < RCU QSBR variable per queue pair */
> > > +};
> > > +#endif
> > > +
> > >  /** The data structure associated with each crypto device. */
> > >  struct rte_cryptodev {
> > >      dequeue_pkt_burst_t dequeue_burst;
> > > @@ -867,6 +922,10 @@ struct rte_cryptodev {
> > >      __extension__
> > >      uint8_t attached : 1;
> > >      /**< Flag indicating the device is attached */
> > > +
> > > +    struct rte_cryptodev_enq_cb_rcu *enq_cbs;
> > > +    /**< User application callback for pre enqueue processing */
> > > +
> > >  } __rte_cache_aligned;
> > >
> > >  void *
> > > @@ -989,6 +1048,31 @@ struct rte_cryptodev_data {
> > >  {
> > >      struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +    if (unlikely(dev->enq_cbs != NULL)) {
> > > +        struct rte_cryptodev_enq_cb_rcu *list;
> > > +        struct rte_cryptodev_cb *cb;
> > > +
> > > +        /* __ATOMIC_RELEASE memory order was used when the
> > > +         * call back was inserted into the list.
> > > +         * Since there is a clear dependency between loading
> > > +         * cb and cb->fn/cb->next, __ATOMIC_ACQUIRE memory order is
> > > +         * not required.
> > > +         */
> > > +        list = &dev->enq_cbs[qp_id];
> > > +        rte_rcu_qsbr_thread_online(list->qsbr, 0);
> > > +        cb = __atomic_load_n(&list->next, __ATOMIC_RELAXED);
> > > +
> > > +        while (cb != NULL) {
> > > +            nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
> > > +                cb->arg);
> > > +            cb = cb->next;
> > > +        };
> > > +
> > > +        rte_rcu_qsbr_thread_offline(list->qsbr, 0);
> > > +    }
> > > +#endif
> > > +
> > >      rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
> > >      return (*dev->enqueue_burst)(
> > >              dev->data->queue_pairs[qp_id], ops, nb_ops);
> > > @@ -1730,6 +1814,78 @@ struct rte_crypto_raw_dp_ctx {
> > >  rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx,
> > >          uint32_t n);
> > >
> > > +#ifdef RTE_CRYPTO_CALLBACKS
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice
> > > + *
> > > + * Add a user callback for a given crypto device and queue pair which will be
> > > + * called on crypto ops enqueue.
> > > + *
> > > + * This API configures a function to be called for each burst of crypto ops
> > > + * received on a given crypto device queue pair. The return value is a pointer
> > > + * that can be used later to remove the callback using
> > > + * rte_cryptodev_remove_enq_callback().
> > > + *
> > > + * Multiple functions are called in the order that they are added.
> > > + *
> > > + * @param dev_id      The identifier of the device.
> > > + * @param qp_id       The index of the queue pair in which ops are
> > > + *                    to be enqueued for processing. The value
> > > + *                    must be in the range [0, nb_queue_pairs - 1]
> > > + *                    previously supplied to
> > > + *                    *rte_cryptodev_configure*.
> > > + * @param cb_fn       The callback function
> > > + * @param cb_arg      A generic pointer parameter which will be passed
> > > + *                    to each invocation of the callback function on
> > > + *                    this crypto device and queue pair.
> > > + *
> > > + * @return
> > > + *   NULL on error.
> > > + *   On success, a pointer value which can later be used to remove the callback.
> > > + */
> > > +
> > > +__rte_experimental
> > > +struct rte_cryptodev_cb *
> > > +rte_cryptodev_add_enq_callback(uint8_t dev_id,
> > > +                   uint16_t qp_id,
> > > +                   rte_cryptodev_callback_fn cb_fn,
> > > +                   void *cb_arg);
> > > +
> > > +
> > > +/**
> > > + * @warning
> > > + * @b EXPERIMENTAL: this API may change without prior notice
> > > + *
> > > + * Remove a user callback function for given crypto device and queue pair.
> > > + *
> > > + * This function is used to removed callbacks that were added to a crypto
> > > + * device queue pair using rte_cryptodev_add_enq_callback().
> > > + *
> > > + *
> > > + *
> > > + * @param dev_id      The identifier of the device.
> > > + * @param qp_id       The index of the queue pair in which ops are
> > > + *                    to be enqueued for processing. The value
> > > + *                    must be in the range [0, nb_queue_pairs - 1]
> > > + *                    previously supplied to
> > > + *                    *rte_cryptodev_configure*.
> > > + * @param cb          Pointer to user supplied callback created via
> > > + *                    rte_cryptodev_add_enq_callback().
> > > + *
> > > + * @return
> > > + *   - 0: Success. Callback was removed.
> > > + *   - -EINVAL: The dev_id or the qp_id is out of range, or the callback
> > > + *              is NULL or not found for the crypto device queue pair.
> > > + */ > > > + > > > +__rte_experimental > > > +int rte_cryptodev_remove_enq_callback(uint8_t dev_id, > > > + uint16_t qp_id, > > > + struct rte_cryptodev_cb *cb); > > > + > > > +#endif > > > + > > > #ifdef __cplusplus > > > } > > > #endif > > > diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map > > > b/lib/librte_cryptodev/rte_cryptodev_version.map > > > index 7e4360f..5d8d6b0 100644 > > > --- a/lib/librte_cryptodev/rte_cryptodev_version.map > > > +++ b/lib/librte_cryptodev/rte_cryptodev_version.map > > > @@ -101,6 +101,7 @@ EXPERIMENTAL { > > > rte_cryptodev_get_qp_status; > > > > > > # added in 20.11 > > > + rte_cryptodev_add_enq_callback; > > > rte_cryptodev_configure_raw_dp_ctx; > > > rte_cryptodev_get_raw_dp_ctx_size; > > > rte_cryptodev_raw_dequeue; > > > @@ -109,4 +110,5 @@ EXPERIMENTAL { > > > rte_cryptodev_raw_enqueue; > > > rte_cryptodev_raw_enqueue_burst; > > > rte_cryptodev_raw_enqueue_done; > > > + rte_cryptodev_remove_enq_callback; > > > }; > > > -- > > > 1.9.1