Acked-by: Arkadiusz Kusztal <arkadiuszx.kusz...@intel.com>

> -----Original Message-----
> From: Dooley, Brian <brian.doo...@intel.com>
> Sent: Friday, July 12, 2024 4:49 PM
> To: Ji, Kai <kai...@intel.com>
> Cc: dev@dpdk.org; sta...@dpdk.org; gak...@marvell.com; Kusztal, ArkadiuszX
> <arkadiuszx.kusz...@intel.com>; Dooley, Brian <brian.doo...@intel.com>
> Subject: [PATCH v1] crypto/qat: add fix for Gen4 WRITE
> 
> All generations of QAT use the same Gen1 raw datapath, but Gen4 needs a
> different WRITE function than the other generations. Separate the raw ctx
> configuration for Gen4 from the Gen1 code path.
> 
> Fixes: 85fec6fd9674 ("crypto/qat: unify raw data path functions")
> Cc: kai...@intel.com
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Brian Dooley <brian.doo...@intel.com>
> ---
>  drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 121 ++++++++++++++++++-
>  drivers/crypto/qat/dev/qat_crypto_pmd_gens.h |   6 +
>  2 files changed, 123 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
> index 5e808a60bf..6a5d6e78b9 100644
> --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
> +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
> @@ -9,6 +9,7 @@
>  #include "qat_asym.h"
>  #include "qat_crypto.h"
>  #include "qat_crypto_pmd_gens.h"
> +#include "adf_transport_access_macros_gen4vf.h"
> 
> 
>  static struct rte_cryptodev_capabilities qat_sym_crypto_legacy_caps_gen4[] = {
> @@ -233,6 +234,78 @@ qat_sym_build_op_aead_gen4(void *in_op, struct qat_sym_session *ctx,
>       return 0;
>  }
> 
> +int
> +qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n)
> +{
> +     struct qat_qp *qp = qp_data;
> +     struct qat_queue *tx_queue = &qp->tx_q;
> +     struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
> +
> +     if (unlikely(dp_ctx->cached_enqueue != n))
> +             return -1;
> +
> +     qp->enqueued += n;
> +     qp->stats.enqueued_count += n;
> +
> +     tx_queue->tail = dp_ctx->tail;
> +
> +     WRITE_CSR_RING_TAIL_GEN4VF(qp->mmap_bar_addr,
> +             tx_queue->hw_bundle_number,
> +             tx_queue->hw_queue_number, tx_queue->tail);
> +
> +     tx_queue->csr_tail = tx_queue->tail;
> +     dp_ctx->cached_enqueue = 0;
> +
> +     return 0;
> +}
> +
> +int
> +qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n)
> +{
> +     struct qat_qp *qp = qp_data;
> +     struct qat_queue *rx_queue = &qp->rx_q;
> +     struct qat_sym_dp_ctx *dp_ctx = (void *)drv_ctx;
> +
> +     if (unlikely(dp_ctx->cached_dequeue != n))
> +             return -1;
> +
> +     rx_queue->head = dp_ctx->head;
> +     rx_queue->nb_processed_responses += n;
> +     qp->dequeued += n;
> +     qp->stats.dequeued_count += n;
> +     if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
> +             uint32_t old_head, new_head;
> +             uint32_t max_head;
> +
> +             old_head = rx_queue->csr_head;
> +             new_head = rx_queue->head;
> +             max_head = qp->nb_descriptors * rx_queue->msg_size;
> +
> +             /* write out free descriptors */
> +             void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
> +
> +             if (new_head < old_head) {
> +                     memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
> +                                     max_head - old_head);
> +                     memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
> +                                     new_head);
> +             } else {
> +                     memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
> +                                     new_head - old_head);
> +             }
> +             rx_queue->nb_processed_responses = 0;
> +             rx_queue->csr_head = new_head;
> +
> +             /* write current head to CSR */
> +             WRITE_CSR_RING_HEAD_GEN4VF(qp->mmap_bar_addr,
> +                     rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
> +                     new_head);
> +     }
> +
> +     dp_ctx->cached_dequeue = 0;
> +     return 0;
> +}
> +
>  int
>  qat_sym_crypto_set_session_gen4(void *cdev, void *session)
>  {
> @@ -390,11 +463,51 @@ qat_sym_configure_raw_dp_ctx_gen4(void *_raw_dp_ctx, void *_ctx)
>  {
>       struct rte_crypto_raw_dp_ctx *raw_dp_ctx = _raw_dp_ctx;
>       struct qat_sym_session *ctx = _ctx;
> -     int ret;
> 
> -     ret = qat_sym_configure_raw_dp_ctx_gen1(_raw_dp_ctx, _ctx);
> -     if (ret < 0)
> -             return ret;
> +     raw_dp_ctx->enqueue_done = qat_sym_dp_enqueue_done_gen4;
> +     raw_dp_ctx->dequeue_burst = qat_sym_dp_dequeue_burst_gen1;
> +     raw_dp_ctx->dequeue = qat_sym_dp_dequeue_single_gen1;
> +     raw_dp_ctx->dequeue_done = qat_sym_dp_dequeue_done_gen4;
> +
> +     if ((ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
> +                     ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) &&
> +                     !ctx->is_gmac) {
> +             /* AES-GCM or AES-CCM */
> +             if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
> +                     ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
> +                     (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
> +                     && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
> +                     && ctx->qat_hash_alg ==
> +                             ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
> +                     raw_dp_ctx->enqueue_burst =
> +                                     qat_sym_dp_enqueue_aead_jobs_gen1;
> +                     raw_dp_ctx->enqueue =
> +                                     qat_sym_dp_enqueue_single_aead_gen1;
> +             } else {
> +                     raw_dp_ctx->enqueue_burst =
> +                                     qat_sym_dp_enqueue_chain_jobs_gen1;
> +                     raw_dp_ctx->enqueue =
> +                                     qat_sym_dp_enqueue_single_chain_gen1;
> +             }
> +     } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH || ctx->is_gmac) {
> +             raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_auth_jobs_gen1;
> +             raw_dp_ctx->enqueue = qat_sym_dp_enqueue_single_auth_gen1;
> +     } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
> +             if (ctx->qat_mode == ICP_QAT_HW_CIPHER_AEAD_MODE ||
> +                     ctx->qat_cipher_alg ==
> +                             ICP_QAT_HW_CIPHER_ALGO_CHACHA20_POLY1305) {
> +                     raw_dp_ctx->enqueue_burst =
> +                                     qat_sym_dp_enqueue_aead_jobs_gen1;
> +                     raw_dp_ctx->enqueue =
> +                                     qat_sym_dp_enqueue_single_aead_gen1;
> +             } else {
> +                     raw_dp_ctx->enqueue_burst =
> +                                     qat_sym_dp_enqueue_cipher_jobs_gen1;
> +                     raw_dp_ctx->enqueue =
> +                                     qat_sym_dp_enqueue_single_cipher_gen1;
> +             }
> +     } else
> +             return -1;
> 
>       if (ctx->is_single_pass && ctx->is_ucs) {
>               raw_dp_ctx->enqueue_burst = qat_sym_dp_enqueue_aead_jobs_gen4;
> diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
> index 1f5d2583c4..2c5816e696 100644
> --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
> +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h
> @@ -1040,6 +1040,12 @@ qat_sym_dp_enqueue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
>  int
>  qat_sym_dp_dequeue_done_gen1(void *qp_data, uint8_t *drv_ctx, uint32_t n);
> 
> +int
> +qat_sym_dp_enqueue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n);
> +
> +int
> +qat_sym_dp_dequeue_done_gen4(void *qp_data, uint8_t *drv_ctx, uint32_t n);
> +
>  int
>  qat_sym_configure_raw_dp_ctx_gen1(void *_raw_dp_ctx, void *_ctx);
> 
> --
> 2.25.1
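
A note for anyone reading the patch out of context: the new gen4 "done"
callbacks are not called by applications directly; they are installed into
struct rte_crypto_raw_dp_ctx and reached through the rte_cryptodev raw
datapath API. Below is a minimal, hypothetical sketch of that flow. It
assumes dev_id/qp_id identify an already configured QAT queue pair, that
"sess" is an initialized symmetric session, and the actual descriptor
staging is elided; raw_dp_commit is an illustrative helper name, not part
of the patch.

#include <errno.h>
#include <stdlib.h>
#include <rte_cryptodev.h>

/* Hypothetical helper: set up a raw DP context and commit a batch of
 * cached enqueues. */
static int
raw_dp_commit(uint8_t dev_id, uint16_t qp_id, void *sess, uint32_t n_enq)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_raw_dp_ctx *ctx;
	int size, ret;

	/* Total context size: the generic struct plus the driver-private
	 * area (the QAT dp ctx lives behind the generic one). */
	size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
	if (size < 0)
		return size;

	ctx = calloc(1, size);
	if (ctx == NULL)
		return -ENOMEM;

	/* With this patch, a Gen4 device installs
	 * qat_sym_dp_enqueue_done_gen4/qat_sym_dp_dequeue_done_gen4 here
	 * instead of the gen1 callbacks. */
	ret = rte_cryptodev_configure_raw_dp_ctx(dev_id, qp_id, ctx,
			RTE_CRYPTO_OP_WITH_SESSION, sess_ctx, 0);
	if (ret < 0)
		goto out;

	/* ... n_enq descriptors would be staged here via
	 * rte_cryptodev_raw_enqueue() ... */

	/* Commits the cached enqueues; on Gen4 this lands in
	 * qat_sym_dp_enqueue_done_gen4() above, which rings the tail CSR
	 * through WRITE_CSR_RING_TAIL_GEN4VF(). */
	ret = rte_cryptodev_raw_enqueue_done(ctx, n_enq);

out:
	free(ctx);
	return ret;
}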
