> Initialize cryptodev of zsda and add capabilities. > > Signed-off-by: Hanxiao Li <li.hanx...@zte.com.cn> > --- > MAINTAINERS | 1 + > doc/guides/cryptodevs/features/zsda.ini | 44 ++ > doc/guides/cryptodevs/zsda.rst | 22 + > doc/guides/rel_notes/release_24_11.rst | 5 + > drivers/common/zsda/meson.build | 14 +- > drivers/crypto/zsda/zsda_sym_capabilities.h | 111 +++++ > drivers/crypto/zsda/zsda_sym_pmd.c | 434 ++++++++++++++++++++ > drivers/crypto/zsda/zsda_sym_pmd.h | 105 +++++ > 8 files changed, 735 insertions(+), 1 deletion(-) > create mode 100644 drivers/crypto/zsda/zsda_sym_capabilities.h > create mode 100644 drivers/crypto/zsda/zsda_sym_pmd.c > create mode 100644 drivers/crypto/zsda/zsda_sym_pmd.h > > diff --git a/MAINTAINERS b/MAINTAINERS > index cad80b66ef..fae64ae680 100644 > --- a/MAINTAINERS > +++ b/MAINTAINERS > @@ -1224,6 +1224,7 @@ F: doc/guides/cryptodevs/features/virtio.ini > ZTE Storage Data Accelerator(ZSDA) > M: Hanxiao Li <li.hanx...@zte.com.cn> > F: drivers/common/zsda/ > +F: drivers/compress/zsda/
Compress? This patch adds the crypto PMD, so should this not be drivers/crypto/zsda/?

>  F: doc/guides/cryptodevs/zsda.rst
>  F: doc/guides/cryptodevs/features/zsda.ini
>
> diff --git a/doc/guides/cryptodevs/features/zsda.ini b/doc/guides/cryptodevs/features/zsda.ini
> index 87c125b8b6..35cef1ed12 100644
> --- a/doc/guides/cryptodevs/features/zsda.ini
> +++ b/doc/guides/cryptodevs/features/zsda.ini
> @@ -5,3 +5,47 @@
>  ;
>  [Features]
>  HW Accelerated = Y
> +Symmetric crypto = Y
> +Protocol offload = Y
> +In Place SGL = Y
> +OOP SGL In SGL Out = Y
> +OOP SGL In LB Out = Y
> +OOP LB In SGL Out = Y
> +OOP LB In LB Out = Y
> +Digest encrypted = Y
> +Sym raw data path API = Y

I don't see code for the raw data path, nor for digest encrypted; protocol offload is not there either. An update to the feature list should go in the same patch that adds the corresponding feature flag in the code. I see a lot of mismatches in the flags supported, and I see that the scatter-gather code is also only added in a later patch.

> +
> +;
> +; Supported crypto algorithms of the 'zsda' crypto driver.
> +;
> +[Cipher]
> +AES XTS (128) = Y
> +AES XTS (256) = Y
> +SM4 XTS = Y
> +;
> +; Supported authentication algorithms of the 'zsda' crypto driver.
> +;
> +[Auth]
> +SHA1 = Y
> +SHA224 = Y
> +SHA256 = Y
> +SHA384 = Y
> +SHA512 = Y
> +SM3 = Y
> +
> +;
> +; Supported AEAD algorithms of the 'zsda' crypto driver.
> +;
> +[AEAD]
> +
> +
> +;
> +; Supported Asymmetric algorithms of the 'zsda' crypto driver.
> +;
> +[Asymmetric]
> +
> +;
> +; Supported Operating systems of the 'zsda' crypto driver.
> +;
> +[OS]
> +Linux = Y
> diff --git a/doc/guides/cryptodevs/zsda.rst b/doc/guides/cryptodevs/zsda.rst
> index 02e30d4fc7..24bf4844e3 100644
> --- a/doc/guides/cryptodevs/zsda.rst
> +++ b/doc/guides/cryptodevs/zsda.rst
> @@ -19,11 +19,33 @@ support for the following hardware accelerator devices:
>  Features
>  ~~~~~~~~
>
> +The ZSDA SYM PMD has support for:
> +
> +Cipher algorithms:
> +
> +* ``RTE_CRYPTO_CIPHER_AES_XTS``
> +* ``RTE_CRYPTO_CIPHER_SM4_XTS``
> +
> +Hash algorithms:
> +
> +* ``RTE_CRYPTO_AUTH_SHA1``
> +* ``RTE_CRYPTO_AUTH_SHA224``
> +* ``RTE_CRYPTO_AUTH_SHA256``
> +* ``RTE_CRYPTO_AUTH_SHA384``
> +* ``RTE_CRYPTO_AUTH_SHA512``
> +* ``RTE_CRYPTO_AUTH_SM3``
>
>
>  Limitations
>  ~~~~~~~~~~~
>
> +* Only supports the session-oriented API implementation (session-less APIs are not supported).
> +* No BSD support as BSD ZSDA kernel driver not available.
> +
> +* Queue-pairs are thread-safe on Intel CPUs but Queues are not (that is, within a single
> +  queue-pair all enqueues to the TX queue must be done from one thread and all dequeues
> +  from the RX queue must be done from one thread, but enqueues and dequeues may be done
> +  in different threads.)
>
>
>  .. _building_zsda:
> diff --git a/doc/guides/rel_notes/release_24_11.rst b/doc/guides/rel_notes/release_24_11.rst
> index 583e509a45..f5cad19ac2 100644
> --- a/doc/guides/rel_notes/release_24_11.rst
> +++ b/doc/guides/rel_notes/release_24_11.rst
> @@ -31,6 +31,11 @@ New Features
>
>      See the :doc:`../compressdevs/zsda` guide for more details on the new driver.
>
> +  * Added a new crypto driver for ZSDA devices to support
> +    different algorithm of encryption, decryption and hash.
> +
> +    See the :doc:`../cryptodevs/zsda` guide for more details on the new driver.
> +
>  .. This section should contain new features added in this release.
> Sample format: > > diff --git a/drivers/common/zsda/meson.build > b/drivers/common/zsda/meson.build > index 4d24951cdd..10f9920762 100644 > --- a/drivers/common/zsda/meson.build > +++ b/drivers/common/zsda/meson.build > @@ -7,7 +7,7 @@ if is_windows > subdir_done() > endif > > -deps += ['bus_pci', 'mbuf', 'compressdev'] > +deps += ['bus_pci', 'mbuf', 'compressdev', 'cryprodev'] 'cryprodev' typo??? > sources += files( > 'zsda_logs.c', > 'zsda_common.c', > @@ -24,3 +24,15 @@ if zsda_compress > sources += files(join_paths(zsda_compress_relpath, f)) > endforeach > endif > + > +zsda_crypto = true > +zsda_crypto_path = 'crypto/zsda' > +zsda_crypto_relpath = '../../' + zsda_crypto_path > +if zsda_crypto > + libcrypto = dependency('libcrypto', required: false, method: > 'pkg-config') > + foreach f: ['zsda_sym_pmd.c'] > + sources += files(join_paths(zsda_crypto_relpath, f)) > + endforeach > + ext_deps += libcrypto > + cflags += ['-DBUILD_ZSDA_SYM'] > +endif > diff --git a/drivers/crypto/zsda/zsda_sym_capabilities.h > b/drivers/crypto/zsda/zsda_sym_capabilities.h > new file mode 100644 > index 0000000000..d9e6dc4b40 > --- /dev/null > +++ b/drivers/crypto/zsda/zsda_sym_capabilities.h > @@ -0,0 +1,111 @@ > +/* SPDX-License-Identifier: BSD-3-Clause > + * Copyright(c) 2024 ZTE Corporation > + */ > + > +#ifndef _ZSDA_SYM_CAPABILITIES_H_ > +#define _ZSDA_SYM_CAPABILITIES_H_ > + > +static const struct rte_cryptodev_capabilities > zsda_crypto_sym_capabilities[] = { > + {/* SHA1 */ > + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, > + { .sym = {.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, > + { .auth = { > + .algo = RTE_CRYPTO_AUTH_SHA1, > + .block_size = 64, > + .key_size = {.min = 0, .max = 0, .increment = 0}, > + .digest_size = {.min = 20, .max = 20, .increment = 2}, > + .iv_size = {0} }, > + } }, > + } > + }, > + {/* SHA224 */ > + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, > + { .sym = { > + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, > + { .auth = { > + .algo = RTE_CRYPTO_AUTH_SHA224, > + .block_size = 64, > + .key_size = {.min = 0, .max = 0, .increment = 0}, > + .digest_size = {.min = 28, .max = 28, .increment = 0}, > + .iv_size = {0} }, > + } }, > + } > + }, > + {/* SHA256 */ > + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, > + { .sym = { > + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, > + { .auth = { > + .algo = RTE_CRYPTO_AUTH_SHA256, > + .block_size = 64, > + .key_size = {.min = 0, .max = 0, .increment = 0}, > + .digest_size = {.min = 32, .max = 32, .increment = 0}, > + .iv_size = {0} }, > + } }, > + } > + }, > + {/* SHA384 */ > + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, > + { .sym = { > + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, > + { .auth = { > + .algo = RTE_CRYPTO_AUTH_SHA384, > + .block_size = 128, > + .key_size = {.min = 0, .max = 0, .increment = 0}, > + .digest_size = {.min = 48, .max = 48, .increment = 0}, > + .iv_size = {0} }, > + } }, > + } > + }, > + {/* SHA512 */ > + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, > + { .sym = { > + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, > + { .auth = { > + .algo = RTE_CRYPTO_AUTH_SHA512, > + .block_size = 128, > + .key_size = {.min = 0, .max = 0, .increment = 0}, > + .digest_size = {.min = 64, .max = 64, .increment = 0}, > + .iv_size = {0} }, > + } }, > + } > + }, > + {/* SM3 */ > + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, > + { .sym = { > + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, > + { .auth = { > + .algo = RTE_CRYPTO_AUTH_SM3, > + .block_size = 64, > + .key_size = {.min = 0, .max = 0, .increment = 0}, > + .digest_size = {.min = 32, .max = 32, .increment = 0}, > + .iv_size = {0} }, > + } }, > + } > + }, 
> + {/* AES XTS */ > + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, > + { .sym = { > + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, > + { .cipher = { > + .algo = RTE_CRYPTO_CIPHER_AES_XTS, > + .block_size = 16, > + .key_size = {.min = 16, .max = 32, .increment = 16}, > + .iv_size = {.min = 16, .max = 16, .increment = 0} }, > + } }, > + } > + }, > + {/* SM4 XTS */ > + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, > + { .sym = { > + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, > + { .cipher = { > + .algo = RTE_CRYPTO_CIPHER_SM4_XTS, > + .block_size = 16, > + .key_size = {.min = 32, .max = 32, .increment = 0}, > + .iv_size = {.min = 16, .max = 16, .increment = 0} }, > + } }, > + } > + } > +}; > +#endif /* _ZSDA_SYM_CAPABILITIES_H_ */ > diff --git a/drivers/crypto/zsda/zsda_sym_pmd.c > b/drivers/crypto/zsda/zsda_sym_pmd.c > new file mode 100644 > index 0000000000..cbf34a46f9 > --- /dev/null > +++ b/drivers/crypto/zsda/zsda_sym_pmd.c > @@ -0,0 +1,434 @@ > +/* SPDX-License-Identifier: BSD-3-Clause > + * Copyright(c) 2024 ZTE Corporation > + */ > + > +#include <rte_cryptodev.h> > + > +#include "zsda_sym_pmd.h" > +#include "zsda_sym_capabilities.h" > + > +uint8_t zsda_sym_driver_id; > + > +static int > +zsda_sym_dev_config(__rte_unused struct rte_cryptodev *dev, > + __rte_unused struct rte_cryptodev_config *config) > +{ > + return ZSDA_SUCCESS; > +} > + > +static int > +zsda_sym_dev_start(struct rte_cryptodev *dev) > +{ > + struct zsda_sym_dev_private *sym_dev = dev->data->dev_private; > + int ret; > + > + ret = zsda_queue_start(sym_dev->zsda_pci_dev->pci_dev); > + > + if (ret) > + ZSDA_LOG(ERR, E_START_Q); > + return ret; > +} > + > +static void > +zsda_sym_dev_stop(struct rte_cryptodev *dev) > +{ > + struct zsda_sym_dev_private *sym_dev = dev->data->dev_private; > + > + zsda_queue_stop(sym_dev->zsda_pci_dev->pci_dev); > +} > + > +static int > +zsda_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) > +{ > + ZSDA_LOG(DEBUG, "Release sym qp %u on device %d", queue_pair_id, > + dev->data->dev_id); > + > + return zsda_queue_pair_release( > + (struct zsda_qp **)&(dev->data->queue_pairs[queue_pair_id])); > +} > + > +static int > +zsda_sym_dev_close(struct rte_cryptodev *dev) > +{ > + int ret = ZSDA_SUCCESS; > + uint16_t i; > + > + for (i = 0; i < dev->data->nb_queue_pairs; i++) > + ret |= zsda_sym_qp_release(dev, i); > + > + return ret; > +} > + > +static uint16_t > +zsda_sym_max_nb_qps(void) > +{ > + uint16_t encrypt = zsda_nb_qps.encrypt; > + uint16_t decrypt = zsda_nb_qps.decrypt; > + uint16_t hash = zsda_nb_qps.hash; > + uint16_t min = 0; > + > + if ((encrypt == MAX_QPS_ON_FUNCTION) || > + (decrypt == MAX_QPS_ON_FUNCTION) || > + (hash == MAX_QPS_ON_FUNCTION)) > + min = MAX_QPS_ON_FUNCTION; > + else { > + min = (encrypt < decrypt) ? encrypt : decrypt; > + min = (min < hash) ? 
min : hash; > + } > + > + if (min == 0) > + return MAX_QPS_ON_FUNCTION; > + return min; > +} > + > + > +static void > +zsda_sym_dev_info_get(struct rte_cryptodev *dev, > + struct rte_cryptodev_info *info) > +{ > + struct zsda_sym_dev_private *sym_priv = dev->data->dev_private; > + > + if (info != NULL) { > + info->max_nb_queue_pairs = zsda_sym_max_nb_qps(); > + info->feature_flags = dev->feature_flags; > + info->capabilities = sym_priv->zsda_dev_capabilities; > + info->driver_id = zsda_sym_driver_id; > + info->sym.max_nb_sessions = 0; > + } > +} > + > +static void > +zsda_sym_stats_get(struct rte_cryptodev *dev, struct rte_cryptodev_stats > *stats) > +{ > + struct zsda_common_stat comm = {0}; > + > + zsda_stats_get(dev->data->queue_pairs, dev->data->nb_queue_pairs, > + &comm); > + stats->enqueued_count = comm.enqueued_count; > + stats->dequeued_count = comm.dequeued_count; > + stats->enqueue_err_count = comm.enqueue_err_count; > + stats->dequeue_err_count = comm.dequeue_err_count; > +} > + > +static void > +zsda_sym_stats_reset(struct rte_cryptodev *dev) > +{ > + zsda_stats_reset(dev->data->queue_pairs, dev->data->nb_queue_pairs); > +} > + > +static int > +zsda_setup_encrypto_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t > qp_id, > + struct zsda_qp *qp, uint32_t nb_des, int socket_id) > +{ > + enum zsda_service_type type = ZSDA_SERVICE_SYMMETRIC_ENCRYPT; > + struct zsda_qp_config conf; > + int ret; > + struct zsda_qp_hw *qp_hw; > + > + qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type); > + conf.hw = qp_hw->data + qp_id; > + conf.service_type = type; > + conf.cookie_size = sizeof(struct zsda_op_cookie); > + conf.nb_descriptors = nb_des; > + conf.socket_id = socket_id; > + conf.service_str = "sym_encrypt"; > + > + ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, > &conf); > + qp->srv[type].rx_cb = zsda_crypto_callback; > + qp->srv[type].tx_cb = zsda_build_cipher_request; > + qp->srv[type].match = zsda_encry_match; > + > + return ret; > +} > + > +static int > +zsda_setup_decrypto_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t > qp_id, > + struct zsda_qp *qp, uint32_t nb_des, int socket_id) > +{ > + enum zsda_service_type type = ZSDA_SERVICE_SYMMETRIC_DECRYPT; > + struct zsda_qp_config conf; > + int ret; > + struct zsda_qp_hw *qp_hw; > + > + qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type); > + conf.hw = qp_hw->data + qp_id; > + conf.service_type = type; > + > + conf.cookie_size = sizeof(struct zsda_op_cookie); > + conf.nb_descriptors = nb_des; > + conf.socket_id = socket_id; > + conf.service_str = "sym_decrypt"; > + > + ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, > &conf); > + qp->srv[type].rx_cb = zsda_crypto_callback; > + qp->srv[type].tx_cb = zsda_build_cipher_request; > + qp->srv[type].match = zsda_decry_match; > + > + return ret; > +} > + > +static int > +zsda_setup_hash_queue(struct zsda_pci_device *zsda_pci_dev, uint16_t qp_id, > + struct zsda_qp *qp, uint32_t nb_des, int socket_id) > +{ > + enum zsda_service_type type = ZSDA_SERVICE_HASH_ENCODE; > + struct zsda_qp_config conf; > + int ret; > + struct zsda_qp_hw *qp_hw; > + > + qp_hw = zsda_qps_hw_per_service(zsda_pci_dev, type); > + conf.hw = qp_hw->data + qp_id; > + conf.service_type = type; > + conf.cookie_size = sizeof(struct zsda_op_cookie); > + conf.nb_descriptors = nb_des; > + conf.socket_id = socket_id; > + conf.service_str = "sym_hash"; > + > + ret = zsda_common_setup_qp(zsda_pci_dev->zsda_dev_id, &qp, qp_id, > &conf); > + qp->srv[type].rx_cb = zsda_crypto_callback; > + 
qp->srv[type].tx_cb = zsda_build_hash_request; > + qp->srv[type].match = zsda_hash_match; > + > + return ret; > +} > + > +static int > +zsda_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, > + const struct rte_cryptodev_qp_conf *qp_conf, > + int socket_id) > +{ > + int ret = ZSDA_SUCCESS; > + struct zsda_qp *qp_new; > + struct zsda_qp **qp_addr = > + (struct zsda_qp **)&(dev->data->queue_pairs[qp_id]); > + struct zsda_sym_dev_private *sym_priv = dev->data->dev_private; > + struct zsda_pci_device *zsda_pci_dev = sym_priv->zsda_pci_dev; > + uint16_t num_qps_encrypt = zsda_nb_qps.encrypt; > + uint16_t num_qps_decrypt = zsda_nb_qps.decrypt; > + uint16_t num_qps_hash = zsda_nb_qps.hash; > + uint32_t nb_des = qp_conf->nb_descriptors; > + > + nb_des = (nb_des == NB_DES) ? nb_des : NB_DES; > + > + if (*qp_addr != NULL) { > + ret = zsda_sym_qp_release(dev, qp_id); > + if (ret) > + return ret; > + } > + > + qp_new = rte_zmalloc_socket("zsda PMD qp metadata", sizeof(*qp_new), > + RTE_CACHE_LINE_SIZE, socket_id); > + if (qp_new == NULL) { > + ZSDA_LOG(ERR, "Failed to alloc mem for qp struct"); > + return -ENOMEM; > + } > + > + if (num_qps_encrypt == MAX_QPS_ON_FUNCTION) > + ret = zsda_setup_encrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des, > + socket_id); > + else if (num_qps_decrypt == MAX_QPS_ON_FUNCTION) > + ret = zsda_setup_decrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des, > + socket_id); > + else if (num_qps_hash == MAX_QPS_ON_FUNCTION) > + ret = zsda_setup_hash_queue(zsda_pci_dev, qp_id, qp_new, nb_des, > + socket_id); > + else { > + ret = zsda_setup_encrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des, > + socket_id); > + ret |= zsda_setup_decrypto_queue(zsda_pci_dev, qp_id, qp_new, nb_des, > + socket_id); > + ret |= zsda_setup_hash_queue(zsda_pci_dev, qp_id, qp_new, nb_des, > + socket_id); > + } > + > + if (ret) { > + rte_free(qp_new); > + return ret; > + } > + > + *qp_addr = qp_new; > + > + return ret; > +} > + > +static unsigned int > +zsda_sym_session_get_private_size(struct rte_cryptodev *dev __rte_unused) > +{ > + return RTE_ALIGN_CEIL(sizeof(struct zsda_sym_session), 8); > +} > + > +static int > +zsda_sym_session_configure(struct rte_cryptodev *dev __rte_unused, > + struct rte_crypto_sym_xform *xform, > + struct rte_cryptodev_sym_session *sess) > +{ > + void *sess_private_data; > + int ret; > + > + if (unlikely(sess == NULL)) { > + ZSDA_LOG(ERR, "Invalid session struct"); > + return -EINVAL; > + } > + > + sess_private_data = CRYPTODEV_GET_SYM_SESS_PRIV(sess); > + > + ret = zsda_crypto_set_session_parameters( > + sess_private_data, xform); > + > + if (ret != ZSDA_SUCCESS) > + ZSDA_LOG(ERR, "Failed configure session parameters"); > + > + return ret; > +} > + > +static void > +zsda_sym_session_clear(struct rte_cryptodev *dev __rte_unused, > + struct rte_cryptodev_sym_session *sess __rte_unused) > +{} > + > +static struct rte_cryptodev_ops crypto_zsda_ops = { > + > + .dev_configure = zsda_sym_dev_config, > + .dev_start = zsda_sym_dev_start, > + .dev_stop = zsda_sym_dev_stop, > + .dev_close = zsda_sym_dev_close, > + .dev_infos_get = zsda_sym_dev_info_get, > + > + .stats_get = zsda_sym_stats_get, > + .stats_reset = zsda_sym_stats_reset, > + .queue_pair_setup = zsda_sym_qp_setup, > + .queue_pair_release = zsda_sym_qp_release, > + > + .sym_session_get_size = zsda_sym_session_get_private_size, > + .sym_session_configure = zsda_sym_session_configure, > + .sym_session_clear = zsda_sym_session_clear, > +}; I asked multiple times to split the patches as per 
https://git.dpdk.org/dpdk/tree/doc/guides/contributing/new_driver.rst

In this patch you are:
- configuring the device
- setting capabilities
- configuring queues
- stats and device info
- session configuration
- scatter-gather flags are also added here; not sure if the feature is complete in this patch
- even the datapath is also added in this patch only

This is a v18 patchset and we still have issues that were highlighted in the initial versions. Please review other crypto/compress PMDs which were submitted and merged in the past before sending the next version. You can use these as references:
https://patches.dpdk.org/project/dpdk/list/?series=17483&state=%2A&archive=both
https://patches.dpdk.org/project/dpdk/list/?series=14865&state=%2A&archive=both

As these are some major changes to be done by today (RC2), I would be deferring the patchset to the next release.

> +
> +static uint16_t
> +zsda_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
> +			      uint16_t nb_ops)
> +{
> +	return zsda_enqueue_op_burst((struct zsda_qp *)qp, (void **)ops,
> +				     nb_ops);
> +}
> +
> +static uint16_t
> +zsda_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
> +			      uint16_t nb_ops)
> +{
> +	return zsda_dequeue_op_burst((struct zsda_qp *)qp, (void **)ops,
> +				     nb_ops);
> +}
> +
> +static const char zsda_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_ZSDA_SYM_PMD);
> +static const struct rte_driver cryptodev_zsda_sym_driver = {
> +	.name = zsda_sym_drv_name, .alias = zsda_sym_drv_name};
> +
> +int
> +zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev)
> +{
> +	struct zsda_device_info *dev_info =
> +		&zsda_devs[zsda_pci_dev->zsda_dev_id];
> +
> +	struct rte_cryptodev_pmd_init_params init_params = {
> +		.name = "",
> +		.socket_id = (int)rte_socket_id(),
> +		.private_data_size = sizeof(struct zsda_sym_dev_private)};
> +
> +	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
> +	char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
> +	struct rte_cryptodev *cryptodev;
> +	struct zsda_sym_dev_private *sym_priv;
> +	const struct rte_cryptodev_capabilities *capabilities;
> +	uint64_t capa_size;
> +
> +	init_params.max_nb_queue_pairs = zsda_sym_max_nb_qps();
> +	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s", zsda_pci_dev->name,
> +		 "sym_encrypt");
> +	ZSDA_LOG(DEBUG, "Creating ZSDA SYM device %s", name);
> +
> +	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> +		return ZSDA_SUCCESS;
> +
> +	dev_info->sym_rte_dev.driver = &cryptodev_zsda_sym_driver;
> +	dev_info->sym_rte_dev.numa_node = dev_info->pci_dev->device.numa_node;
> +	dev_info->sym_rte_dev.devargs = NULL;
> +
> +	cryptodev = rte_cryptodev_pmd_create(name, &(dev_info->sym_rte_dev),
> +					     &init_params);
> +
> +	if (cryptodev == NULL)
> +		return -ENODEV;
> +
> +	dev_info->sym_rte_dev.name = cryptodev->data->name;
> +	cryptodev->driver_id = zsda_sym_driver_id;
> +
> +	cryptodev->dev_ops = &crypto_zsda_ops;
> +
> +	cryptodev->enqueue_burst = zsda_sym_pmd_enqueue_op_burst;
> +	cryptodev->dequeue_burst = zsda_sym_pmd_dequeue_op_burst;
> +
> +	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
> +				   RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
> +				   RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
> +				   RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
> +				   RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
> +				   RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
> +				   RTE_CRYPTODEV_FF_HW_ACCELERATED;
> +
> +	sym_priv = cryptodev->data->dev_private;
> +	sym_priv->zsda_pci_dev = zsda_pci_dev;
> +	capabilities = zsda_crypto_sym_capabilities;
> +	capa_size = sizeof(zsda_crypto_sym_capabilities);
> +
> +	snprintf(capa_memz_name,
RTE_CRYPTODEV_NAME_MAX_LEN, > "ZSDA_SYM_CAPA"); > + > + sym_priv->capa_mz = rte_memzone_lookup(capa_memz_name); > + if (sym_priv->capa_mz == NULL) > + sym_priv->capa_mz = rte_memzone_reserve( > + capa_memz_name, capa_size, rte_socket_id(), 0); > + > + if (sym_priv->capa_mz == NULL) { > + ZSDA_LOG(ERR, E_MALLOC); > + goto error; > + } > + > + memcpy(sym_priv->capa_mz->addr, capabilities, capa_size); > + sym_priv->zsda_dev_capabilities = sym_priv->capa_mz->addr; > + > + zsda_pci_dev->sym_dev = sym_priv; > + > + return ZSDA_SUCCESS; > + > +error: > + > + rte_cryptodev_pmd_destroy(cryptodev); > + memset(&dev_info->sym_rte_dev, 0, sizeof(dev_info->sym_rte_dev)); > + > + return -EFAULT; > +} > + > +int > +zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev) > +{ > + struct rte_cryptodev *cryptodev; > + > + if (zsda_pci_dev == NULL) > + return -ENODEV; > + if (zsda_pci_dev->sym_dev == NULL) > + return ZSDA_SUCCESS; > + if (rte_eal_process_type() == RTE_PROC_PRIMARY) > + rte_memzone_free(zsda_pci_dev->sym_dev->capa_mz); > + > + cryptodev = rte_cryptodev_pmd_get_dev(zsda_pci_dev->zsda_dev_id); > + > + rte_cryptodev_pmd_destroy(cryptodev); > + zsda_devs[zsda_pci_dev->zsda_dev_id].sym_rte_dev.name = NULL; > + zsda_pci_dev->sym_dev = NULL; > + > + return ZSDA_SUCCESS; > +} > + > +static struct cryptodev_driver zsda_crypto_drv; > +RTE_PMD_REGISTER_CRYPTO_DRIVER(zsda_crypto_drv, > cryptodev_zsda_sym_driver, > + zsda_sym_driver_id); > diff --git a/drivers/crypto/zsda/zsda_sym_pmd.h > b/drivers/crypto/zsda/zsda_sym_pmd.h > new file mode 100644 > index 0000000000..0c0fd5a6a1 > --- /dev/null > +++ b/drivers/crypto/zsda/zsda_sym_pmd.h > @@ -0,0 +1,105 @@ > +/* SPDX-License-Identifier: BSD-3-Clause > + * Copyright(c) 2024 ZTE Corporation > + */ > + > +#ifndef _ZSDA_SYM_PMD_H_ > +#define _ZSDA_SYM_PMD_H_ > + > +#include "cryptodev_pmd.h" > + > +#include "zsda_logs.h" > +#include "zsda_common.h" > +#include "zsda_device.h" > +#include "zsda_qp.h" > + > +/** ZSDA Symmetric Crypto PMD driver name */ > +#define CRYPTODEV_NAME_ZSDA_SYM_PMD crypto_zsda > +#define ZSDA_CIPHER_KEY_MAX_LEN 64 > + > +extern uint8_t zsda_sym_driver_id; > + > +/** private data structure for a ZSDA device. > + * This ZSDA device is a device offering only symmetric crypto service, > + * there can be one of these on each zsda_pci_device (VF). 
> + */ > +struct zsda_sym_dev_private { > + struct zsda_pci_device *zsda_pci_dev; > + /**< The zsda pci device hosting the service */ > + > + const struct rte_cryptodev_capabilities *zsda_dev_capabilities; > + /* ZSDA device symmetric crypto capabilities */ > + const struct rte_memzone *capa_mz; > + /* Shared memzone for storing capabilities */ > +}; > + > +enum zsda_sym_chain_order { > + ZSDA_SYM_CHAIN_ONLY_CIPHER, > + ZSDA_SYM_CHAIN_ONLY_AUTH, > + ZSDA_SYM_CHAIN_CIPHER_AUTH, > + ZSDA_SYM_CHAIN_AUTH_CIPHER, > + ZSDA_SYM_CHAIN_NOT_SUPPORTED > +}; > +struct zsda_sym_session { > + enum zsda_sym_chain_order chain_order; > + > + /* Cipher Parameters */ > + struct { > + enum rte_crypto_cipher_operation op; > + enum rte_crypto_cipher_algorithm algo; > + struct { > + uint8_t data[ZSDA_CIPHER_KEY_MAX_LEN]; > + size_t length; > + } key_encry; > + struct { > + uint8_t data[ZSDA_CIPHER_KEY_MAX_LEN]; > + size_t length; > + } key_decry; > + struct { > + uint32_t offset; > + size_t length; > + } iv; > + > + uint32_t dataunit_len; > + uint8_t lbads; > + } cipher; > + > + struct { > + enum rte_crypto_auth_operation op; > + /* Auth operation */ > + enum rte_crypto_auth_algorithm algo; > + /* Auth algorithm */ > + uint16_t digest_length; > + } auth; > + > + bool cipher_first; > +}; > + > +__rte_weak int > +zsda_encry_match(const void *op_in); > + > +__rte_weak int > +zsda_decry_match(const void *op_in); > + > +__rte_weak int > +zsda_hash_match(const void *op_in); > + > +__rte_weak int > +zsda_build_cipher_request(void *op_in, const struct zsda_queue *queue, > + void **op_cookies, const uint16_t new_tail); > + > +__rte_weak int > +zsda_build_hash_request(void *op_in, const struct zsda_queue *queue, > + void **op_cookies, const uint16_t new_tail); > + > +__rte_weak int > +zsda_crypto_callback(void *cookie_in, struct zsda_cqe *cqe); > + > +__rte_weak int > +zsda_crypto_set_session_parameters(void *sess_priv, > + struct rte_crypto_sym_xform *xform); > + > +int zsda_sym_dev_create(struct zsda_pci_device *zsda_pci_dev); > + > +int zsda_sym_dev_destroy(struct zsda_pci_device *zsda_pci_dev); > + > +#endif /* _ZSDA_SYM_PMD_H_ */ > -- > 2.27.0
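To make the flag mismatch concrete: the zsda.rst limitations above say session-less APIs are not supported, yet zsda_sym_dev_create() advertises RTE_CRYPTODEV_FF_SYM_SESSIONLESS, and the .ini lists raw data path, digest encrypted and protocol offload with no code behind them in this patch. As a rough sketch only (adjust to what this patch actually implements), I would expect the device creation to advertise something closer to:

	/* Sketch: advertise only what is implemented in this patch; the SGL,
	 * session-less and other flags belong in the patches that add them.
	 */
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
				   RTE_CRYPTODEV_FF_HW_ACCELERATED;

with the matching zsda.ini entries added in those same patches.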