This patch fixes the cipher & auth offset and length values when converting an mbuf to a vector chain for the QAT build op request.
Fixes: a815a04cea05 ("crypto/qat: support symmetric build op request") Signed-off-by: Kai Ji <kai...@intel.com> --- drivers/crypto/qat/dev/qat_crypto_pmd_gens.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h index 50a9c5ad5b..9e01755fd9 100644 --- a/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h +++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gens.h @@ -330,7 +330,7 @@ qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op, } n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, auth_ofs, - auth_ofs + auth_len, in_sgl->vec, + auth_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); if (n_src < 0 || n_src > op->sym->m_src->nb_segs) { op->status = RTE_CRYPTO_OP_STATUS_ERROR; @@ -343,7 +343,7 @@ qat_sym_convert_op_to_vec_auth(struct rte_crypto_op *op, if (unlikely((op->sym->m_dst != NULL) && (op->sym->m_dst != op->sym->m_src))) { int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, auth_ofs, - auth_ofs + auth_len, out_sgl->vec, + auth_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); if ((n_dst < 0) || (n_dst > op->sym->m_dst->nb_segs)) { @@ -369,7 +369,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, struct rte_crypto_va_iova_ptr *digest) { union rte_crypto_sym_ofs ofs; - uint32_t min_ofs = 0, max_len = 0; + uint32_t max_len = 0; uint32_t cipher_len = 0, cipher_ofs = 0; uint32_t auth_len = 0, auth_ofs = 0; int is_oop = (op->sym->m_dst != NULL) && @@ -395,12 +395,12 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, ret = qat_cipher_is_len_in_bits(ctx, op); switch (ret) { case 1: - cipher_len = op->sym->aead.data.length >> 3; - cipher_ofs = op->sym->aead.data.offset >> 3; + cipher_len = op->sym->cipher.data.length >> 3; + cipher_ofs = op->sym->cipher.data.offset >> 3; break; case 0: - cipher_len = op->sym->aead.data.length; - cipher_ofs = op->sym->aead.data.offset; + cipher_len = op->sym->cipher.data.length; + cipher_ofs = 
op->sym->cipher.data.offset; break; default: QAT_DP_LOG(ERR, @@ -426,7 +426,6 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, return -EINVAL; } - min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs; max_len = RTE_MAX(cipher_ofs + cipher_len, auth_ofs + auth_len); /* digest in buffer check. Needed only for wireless algos */ @@ -463,7 +462,8 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, ctx->digest_length); } - n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, min_ofs, max_len, + /* Passing 0 as cipher & auth offsets are assigned into ofs later */ + n_src = rte_crypto_mbuf_to_vec(op->sym->m_src, 0, max_len, in_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); if (unlikely(n_src < 0 || n_src > op->sym->m_src->nb_segs)) { op->status = RTE_CRYPTO_OP_STATUS_ERROR; @@ -473,7 +473,7 @@ qat_sym_convert_op_to_vec_chain(struct rte_crypto_op *op, if (unlikely((op->sym->m_dst != NULL) && (op->sym->m_dst != op->sym->m_src))) { - int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, min_ofs, + int n_dst = rte_crypto_mbuf_to_vec(op->sym->m_dst, 0, max_len, out_sgl->vec, QAT_SYM_SGL_MAX_NUMBER); if (n_dst < 0 || n_dst > op->sym->m_dst->nb_segs) { -- 2.17.1