> This patch renames the sgl field to src_sgl to help differentiate
> between source and destination SGLs.
> 
> Signed-off-by: Hemant Agrawal <hemant.agra...@nxp.com>
> Acked-by: Akhil Goyal <gak...@marvell.com>
> ---
>  app/test/test_cryptodev.c                  |  6 ++---
>  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c   | 12 +++++-----
>  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c |  6 ++---
>  drivers/crypto/qat/qat_sym_hw_dp.c         | 27 +++++++++++++---------
>  lib/cryptodev/rte_crypto_sym.h             |  2 +-
>  lib/ipsec/misc.h                           |  4 ++--
>  6 files changed, 31 insertions(+), 26 deletions(-)
> 
> diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
> index 843d07ba37..ed63524edc 100644
> --- a/app/test/test_cryptodev.c
> +++ b/app/test/test_cryptodev.c
> @@ -221,7 +221,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
>       digest.va = NULL;
>       sgl.vec = data_vec;
>       vec.num = 1;
> -     vec.sgl = &sgl;
> +     vec.src_sgl = &sgl;
>       vec.iv = &cipher_iv;
>       vec.digest = &digest;
>       vec.aad = &aad_auth_iv;
> @@ -385,7 +385,7 @@ process_cpu_aead_op(uint8_t dev_id, struct rte_crypto_op *op)
> 
>       sgl.vec = vec;
>       sgl.num = n;
> -     symvec.sgl = &sgl;
> +     symvec.src_sgl = &sgl;
>       symvec.iv = &iv_ptr;
>       symvec.digest = &digest_ptr;
>       symvec.aad = &aad_ptr;
> @@ -431,7 +431,7 @@ process_cpu_crypt_auth_op(uint8_t dev_id, struct rte_crypto_op *op)
> 
>       sgl.vec = vec;
>       sgl.num = n;
> -     symvec.sgl = &sgl;
> +     symvec.src_sgl = &sgl;
>       symvec.iv = &iv_ptr;
>       symvec.digest = &digest_ptr;
>       symvec.status = &st;
> diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
> index 886e2a5aaa..5fbb9b79f8 100644
> --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
> +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
> @@ -535,7 +535,7 @@ aesni_gcm_sgl_encrypt(struct aesni_gcm_session *s,
>       processed = 0;
>       for (i = 0; i < vec->num; ++i) {
>               aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
> -                     &vec->sgl[i], vec->iv[i].va,
> +                     &vec->src_sgl[i], vec->iv[i].va,
>                       vec->aad[i].va);
>               vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
>                       gdata_ctx, vec->digest[i].va);
> @@ -554,7 +554,7 @@ aesni_gcm_sgl_decrypt(struct aesni_gcm_session *s,
>       processed = 0;
>       for (i = 0; i < vec->num; ++i) {
>               aesni_gcm_process_gcm_sgl_op(s, gdata_ctx,
> -                     &vec->sgl[i], vec->iv[i].va,
> +                     &vec->src_sgl[i], vec->iv[i].va,
>                       vec->aad[i].va);
>                vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
>                       gdata_ctx, vec->digest[i].va);
> @@ -572,13 +572,13 @@ aesni_gmac_sgl_generate(struct aesni_gcm_session *s,
> 
>       processed = 0;
>       for (i = 0; i < vec->num; ++i) {
> -             if (vec->sgl[i].num != 1) {
> +             if (vec->src_sgl[i].num != 1) {
>                       vec->status[i] = ENOTSUP;
>                       continue;
>               }
> 
>               aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
> -                     &vec->sgl[i], vec->iv[i].va);
> +                     &vec->src_sgl[i], vec->iv[i].va);
>               vec->status[i] = aesni_gcm_sgl_op_finalize_encryption(s,
>                       gdata_ctx, vec->digest[i].va);
>               processed += (vec->status[i] == 0);
> @@ -595,13 +595,13 @@ aesni_gmac_sgl_verify(struct aesni_gcm_session *s,
> 
>       processed = 0;
>       for (i = 0; i < vec->num; ++i) {
> -             if (vec->sgl[i].num != 1) {
> +             if (vec->src_sgl[i].num != 1) {
>                       vec->status[i] = ENOTSUP;
>                       continue;
>               }
> 
>               aesni_gcm_process_gmac_sgl_op(s, gdata_ctx,
> -                     &vec->sgl[i], vec->iv[i].va);
> +                     &vec->src_sgl[i], vec->iv[i].va);
>               vec->status[i] = aesni_gcm_sgl_op_finalize_decryption(s,
>                       gdata_ctx, vec->digest[i].va);
>               processed += (vec->status[i] == 0);
> diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
> index a01c826a3c..1b05099446 100644
> --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
> +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
> @@ -2002,14 +2002,14 @@ aesni_mb_cpu_crypto_process_bulk(struct rte_cryptodev *dev,
>       for (i = 0, j = 0, k = 0; i != vec->num; i++) {
> 
> 
> -             ret = check_crypto_sgl(sofs, vec->sgl + i);
> +             ret = check_crypto_sgl(sofs, vec->src_sgl + i);
>               if (ret != 0) {
>                       vec->status[i] = ret;
>                       continue;
>               }
> 
> -             buf = vec->sgl[i].vec[0].base;
> -             len = vec->sgl[i].vec[0].len;
> +             buf = vec->src_sgl[i].vec[0].base;
> +             len = vec->src_sgl[i].vec[0].len;
> 
>               job = IMB_GET_NEXT_JOB(mb_mgr);
>               if (job == NULL) {
> diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
> index ac9ac05363..4870ebf66a 100644
> --- a/drivers/crypto/qat/qat_sym_hw_dp.c
> +++ b/drivers/crypto/qat/qat_sym_hw_dp.c
> @@ -181,8 +181,9 @@ qat_sym_dp_enqueue_cipher_jobs(void *qp_data, uint8_t *drv_ctx,
>                       (uint8_t *)tx_queue->base_addr + tail);
>               rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
> 
> -             data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
> -                     vec->sgl[i].num);
> +             data_len = qat_sym_dp_parse_data_vec(qp, req,
> +                     vec->src_sgl[i].vec,
> +                     vec->src_sgl[i].num);
>               if (unlikely(data_len < 0))
>                       break;
>               req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
> @@ -302,8 +303,9 @@ qat_sym_dp_enqueue_auth_jobs(void *qp_data, uint8_t *drv_ctx,
>                       (uint8_t *)tx_queue->base_addr + tail);
>               rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
> 
> -             data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
> -                     vec->sgl[i].num);
> +             data_len = qat_sym_dp_parse_data_vec(qp, req,
> +                     vec->src_sgl[i].vec,
> +                     vec->src_sgl[i].num);
>               if (unlikely(data_len < 0))
>                       break;
>               req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
> @@ -484,14 +486,16 @@ qat_sym_dp_enqueue_chain_jobs(void *qp_data, uint8_t *drv_ctx,
>                       (uint8_t *)tx_queue->base_addr + tail);
>               rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
> 
> -             data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
> -                     vec->sgl[i].num);
> +             data_len = qat_sym_dp_parse_data_vec(qp, req,
> +                     vec->src_sgl[i].vec,
> +                     vec->src_sgl[i].num);
>               if (unlikely(data_len < 0))
>                       break;
>               req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
> -             if (unlikely(enqueue_one_chain_job(ctx, req, vec->sgl[i].vec,
> -                     vec->sgl[i].num, &vec->iv[i], &vec->digest[i],
> -                             &vec->auth_iv[i], ofs, (uint32_t)data_len)))
> +             if (unlikely(enqueue_one_chain_job(ctx, req,
> +                     vec->src_sgl[i].vec, vec->src_sgl[i].num,
> +                     &vec->iv[i], &vec->digest[i],
> +                     &vec->auth_iv[i], ofs, (uint32_t)data_len)))
>                       break;
> 
>               tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
> @@ -688,8 +692,9 @@ qat_sym_dp_enqueue_aead_jobs(void *qp_data, uint8_t *drv_ctx,
>                       (uint8_t *)tx_queue->base_addr + tail);
>               rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
> 
> -             data_len = qat_sym_dp_parse_data_vec(qp, req, vec->sgl[i].vec,
> -                     vec->sgl[i].num);
> +             data_len = qat_sym_dp_parse_data_vec(qp, req,
> +                     vec->src_sgl[i].vec,
> +                     vec->src_sgl[i].num);
>               if (unlikely(data_len < 0))
>                       break;
>               req->comn_mid.opaque_data = (uint64_t)(uintptr_t)user_data[i];
> diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
> index 58c0724743..dcc0bd5933 100644
> --- a/lib/cryptodev/rte_crypto_sym.h
> +++ b/lib/cryptodev/rte_crypto_sym.h
> @@ -69,7 +69,7 @@ struct rte_crypto_sym_vec {
>       /** number of operations to perform */
>       uint32_t num;
>       /** array of SGL vectors */
> -     struct rte_crypto_sgl *sgl;
> +     struct rte_crypto_sgl *src_sgl;
>       /** array of pointers to cipher IV */
>       struct rte_crypto_va_iova_ptr *iv;
>       /** array of pointers to digest */
> diff --git a/lib/ipsec/misc.h b/lib/ipsec/misc.h
> index 79b9a20762..58ff538141 100644
> --- a/lib/ipsec/misc.h
> +++ b/lib/ipsec/misc.h
> @@ -136,7 +136,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
>               /* not enough space in vec[] to hold all segments */
>               if (vcnt < 0) {
>                       /* fill the request structure */
> -                     symvec.sgl = &vecpkt[j];
> +                     symvec.src_sgl = &vecpkt[j];
>                       symvec.iv = &iv[j];
>                       symvec.digest = &dgst[j];
>                       symvec.aad = &aad[j];
> @@ -160,7 +160,7 @@ cpu_crypto_bulk(const struct rte_ipsec_session *ss,
>       }
> 
>       /* fill the request structure */
> -     symvec.sgl = &vecpkt[j];
> +     symvec.src_sgl = &vecpkt[j];
>       symvec.iv = &iv[j];
>       symvec.aad = &aad[j];
>       symvec.digest = &dgst[j];
> --

Acked-by: Konstantin Ananyev <konstantin.anan...@intel.com>

> 2.17.1
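
For anyone carrying out-of-tree callers over this rename: only the field
name in struct rte_crypto_sym_vec changes, so the update is a one-line
rename on the caller side. Below is a minimal sketch modeled on the
app/test usage in the patch; data_vec, cipher_iv, digest, aad and st are
assumed to be set up by the caller beforehand, as in
process_sym_raw_dp_op() above.

	#include <rte_crypto_sym.h>

	/* Sketch: describe one operation over a single-segment source
	 * buffer. The buffer segments and per-op pointers (data_vec,
	 * cipher_iv, digest, aad, st) are assumed to be filled in
	 * elsewhere by the caller. */
	struct rte_crypto_sgl sgl = {
		.vec = data_vec,	/* segments of the source data */
		.num = 1,		/* single segment */
	};
	struct rte_crypto_sym_vec symvec = {
		.num = 1,		/* one operation */
		.src_sgl = &sgl,	/* was: .sgl = &sgl before this patch */
		.iv = &cipher_iv,
		.digest = &digest,
		.aad = &aad,
		.status = &st,
	};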
