test_mixed_auth_cipher: ensure enough space is allocated in ibuf and obuf
for the mbuf-to-vec conversion.
test_kasumi_decryption: update the cipher length.
qat/dev: support SGL out-of-place (OOP) operation.

Fixes: 681f540da52b ("cryptodev: do not use AAD in wireless algorithms")
Cc: pablo.de.lara.gua...@intel.com

Fixes: e847fc512817 ("test/crypto: add encrypted digest case for AES-CTR-CMAC")
Cc: adamx.dybkow...@intel.com

Signed-off-by: Kai Ji <kai...@intel.com>
---
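Notes (not part of the commit message):

The gen1/gen3/gen4 raw datapath enqueue paths now pass the per-job
destination SGL to the request builder when the application supplies one,
instead of always requesting in-place operation. A minimal sketch of that
selection logic follows; fill_request() and set_job_data() are illustrative
stand-ins for the PMD-internal qat_sym_build_req_set_data() call sites, not
real QAT symbols.

  #include <stddef.h>
  #include <stdint.h>
  #include <rte_crypto_sym.h>

  /* Dummy request builder, standing in for the PMD-internal helper. */
  static int
  fill_request(struct rte_crypto_vec *src, uint32_t n_src,
          struct rte_crypto_vec *dst, uint32_t n_dst)
  {
          (void)src; (void)n_src; (void)dst; (void)n_dst;
          return 0;
  }

  static int
  set_job_data(struct rte_crypto_sym_vec *vec, uint32_t i)
  {
          /* Out of place: the app provided a destination SGL per job. */
          if (vec->dest_sgl != NULL)
                  return fill_request(vec->src_sgl[i].vec,
                          vec->src_sgl[i].num,
                          vec->dest_sgl[i].vec, vec->dest_sgl[i].num);

          /* In place: results are written back into the source SGL. */
          return fill_request(vec->src_sgl[i].vec, vec->src_sgl[i].num,
                  NULL, 0);
  }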
 app/test/test_cryptodev.c                    | 58 ++++++++++++++++----
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 ++++++++++++++++---
 4 files changed, 129 insertions(+), 26 deletions(-)
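The test_cryptodev.c change below, for the chained-mbuf (SGL) case, walks
the segment chain to find the IOVA immediately after the authenticated
region before checking the digest-encrypted condition. A minimal sketch of
that walk, assuming a standard rte_mbuf chain (the helper name
auth_end_iova_of() is illustrative):

  #include <rte_mbuf.h>

  static rte_iova_t
  auth_end_iova_of(struct rte_mbuf *m, uint32_t auth_ofs, uint32_t auth_len)
  {
          uint32_t remaining = auth_ofs + auth_len;

          /* Advance to the segment holding the end of the auth region. */
          while (remaining >= rte_pktmbuf_data_len(m) && m->next != NULL) {
                  remaining -= rte_pktmbuf_data_len(m);
                  m = m->next;
          }
          /* IOVA of the first byte after the authenticated data. */
          return rte_pktmbuf_iova_offset(m, remaining);
  }

If this address matches the digest IOVA and the auth region ends before the
cipher region does, max_len is grown to cover the digest so that the
mbuf-to-vec conversion includes it.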

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 814a0b401d..bfc6408eda 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -179,6 +179,10 @@ post_process_raw_dp_op(void *user_data,    uint32_t index __rte_unused,
                        RTE_CRYPTO_OP_STATUS_ERROR;
 }
 
+static struct crypto_testsuite_params testsuite_params = { NULL };
+struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
+static struct crypto_unittest_params unittest_params;
+
 void
 process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
                struct rte_crypto_op *op, uint8_t is_cipher, uint8_t is_auth,
@@ -193,6 +197,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
        struct rte_crypto_sgl sgl, dest_sgl;
        uint32_t max_len;
        union rte_cryptodev_session_ctx sess;
+       uint64_t auth_end_iova;
        uint32_t count = 0;
        struct rte_crypto_raw_dp_ctx *ctx;
        uint32_t cipher_offset = 0, cipher_len = 0, auth_offset = 0,
@@ -202,6 +207,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
        int ctx_service_size;
        int32_t status = 0;
        int enqueue_status, dequeue_status;
+       struct crypto_unittest_params *ut_params = &unittest_params;
+       int is_sgl = sop->m_src->nb_segs > 1;
+       int is_oop = 0;
 
        ctx_service_size = rte_cryptodev_get_raw_dp_ctx_size(dev_id);
        if (ctx_service_size < 0) {
@@ -240,6 +248,9 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 
        ofs.raw = 0;
 
+       if ((sop->m_dst != NULL) && (sop->m_dst != sop->m_src))
+               is_oop = 1;
+
        if (is_cipher && is_auth) {
                cipher_offset = sop->cipher.data.offset;
                cipher_len = sop->cipher.data.length;
@@ -267,6 +278,31 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
                digest.va = (void *)sop->auth.digest.data;
                digest.iova = sop->auth.digest.phys_addr;
 
+               if (is_sgl) {
+                       uint32_t remaining_off = auth_offset + auth_len;
+                       struct rte_mbuf *sgl_buf = sop->m_src;
+                       if (is_oop)
+                               sgl_buf = sop->m_dst;
+
+                       while (remaining_off >= rte_pktmbuf_data_len(sgl_buf)
+                                       && sgl_buf->next != NULL) {
+                               remaining_off -= rte_pktmbuf_data_len(sgl_buf);
+                               sgl_buf = sgl_buf->next;
+                       }
+
+                       auth_end_iova = (uint64_t)rte_pktmbuf_iova_offset(
+                               sgl_buf, remaining_off);
+               } else {
+                       auth_end_iova = rte_pktmbuf_iova(op->sym->m_src) +
+                                                        auth_offset + auth_len;
+               }
+               /* Then check if digest-encrypted conditions are met */
+               if ((auth_offset + auth_len < cipher_offset + cipher_len) &&
+                               (digest.iova == auth_end_iova) && is_sgl)
+                       max_len = RTE_MAX(max_len,
+                               auth_offset + auth_len +
+                               ut_params->auth_xform.auth.digest_length);
+
        } else if (is_cipher) {
                cipher_offset = sop->cipher.data.offset;
                cipher_len = sop->cipher.data.length;
@@ -327,7 +363,7 @@ process_sym_raw_dp_op(uint8_t dev_id, uint16_t qp_id,
 
        sgl.num = n;
        /* Out of place */
-       if (sop->m_dst != NULL) {
+       if (is_oop) {
                dest_sgl.vec = dest_data_vec;
                vec.dest_sgl = &dest_sgl;
                n = rte_crypto_mbuf_to_vec(sop->m_dst, 0, max_len,
@@ -503,10 +539,6 @@ process_crypto_request(uint8_t dev_id, struct rte_crypto_op *op)
        return op;
 }
 
-static struct crypto_testsuite_params testsuite_params = { NULL };
-struct crypto_testsuite_params *p_testsuite_params = &testsuite_params;
-static struct crypto_unittest_params unittest_params;
-
 static int
 testsuite_setup(void)
 {
@@ -4077,9 +4109,9 @@ test_kasumi_decryption(const struct kasumi_test_data *tdata)
 
        /* Create KASUMI operation */
        retval = create_wireless_algo_cipher_operation(tdata->cipher_iv.data,
-                                       tdata->cipher_iv.len,
-                                       tdata->ciphertext.len,
-                                       tdata->validCipherOffsetInBits.len);
+                       tdata->cipher_iv.len,
+                       RTE_ALIGN_CEIL(tdata->validCipherLenInBits.len, 8),
+                       tdata->validCipherOffsetInBits.len);
        if (retval < 0)
                return retval;
 
@@ -7310,6 +7342,7 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
        unsigned int plaintext_len;
        unsigned int ciphertext_pad_len;
        unsigned int ciphertext_len;
+       unsigned int data_len;
 
        struct rte_cryptodev_info dev_info;
        struct rte_crypto_op *op;
@@ -7370,21 +7403,22 @@ test_mixed_auth_cipher(const struct mixed_cipher_auth_test_data *tdata,
        plaintext_len = ceil_byte_length(tdata->plaintext.len_bits);
        ciphertext_pad_len = RTE_ALIGN_CEIL(ciphertext_len, 16);
        plaintext_pad_len = RTE_ALIGN_CEIL(plaintext_len, 16);
+       data_len = RTE_MAX(ciphertext_pad_len, plaintext_pad_len);
 
        if (verify) {
                ciphertext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-                               ciphertext_pad_len);
+                               data_len);
                memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len);
                if (op_mode == OUT_OF_PLACE)
-                       rte_pktmbuf_append(ut_params->obuf, ciphertext_pad_len);
+                       rte_pktmbuf_append(ut_params->obuf, data_len);
                debug_hexdump(stdout, "ciphertext:", ciphertext,
                                ciphertext_len);
        } else {
                plaintext = (uint8_t *)rte_pktmbuf_append(ut_params->ibuf,
-                               plaintext_pad_len);
+                               data_len);
                memcpy(plaintext, tdata->plaintext.data, plaintext_len);
                if (op_mode == OUT_OF_PLACE)
-                       rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len);
+                       rte_pktmbuf_append(ut_params->obuf, data_len);
                debug_hexdump(stdout, "plaintext:", plaintext, plaintext_len);
        }
 
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index 6494019050..c59c25fe8f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -467,8 +467,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-                       vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+               if (vec->dest_sgl) {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec, vec->src_sgl[i].num,
+                               vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+               } else {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec,
+                               vec->src_sgl[i].num, NULL, 0);
+               }
+
                if (unlikely(data_len < 0))
                        break;
 
@@ -564,8 +574,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-                       vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+               if (vec->dest_sgl) {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec, vec->src_sgl[i].num,
+                               vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+               } else {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec,
+                               vec->src_sgl[i].num, NULL, 0);
+               }
+
                if (unlikely(data_len < 0))
                        break;
                enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 108e07ee7f..1b6cf10589 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -295,8 +295,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-                       vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+               if (vec->dest_sgl) {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec, vec->src_sgl[i].num,
+                               vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+               } else {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec,
+                               vec->src_sgl[i].num, NULL, 0);
+               }
+
                if (unlikely(data_len < 0))
                        break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index abd36c7f1c..9894757657 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -529,9 +529,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_build_req_set_data(req, user_data[i],
-                               cookie, vec->src_sgl[i].vec,
+               if (vec->dest_sgl) {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec, vec->src_sgl[i].num,
+                               vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+               } else {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec,
                                vec->src_sgl[i].num, NULL, 0);
+               }
+
                if (unlikely(data_len < 0))
                        break;
                enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -628,8 +637,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-                       vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+               if (vec->dest_sgl) {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec, vec->src_sgl[i].num,
+                               vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+               } else {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec,
+                               vec->src_sgl[i].num, NULL, 0);
+               }
+
                if (unlikely(data_len < 0))
                        break;
                enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -728,8 +747,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-                       vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+               if (vec->dest_sgl) {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec, vec->src_sgl[i].num,
+                               vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+               } else {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec,
+                               vec->src_sgl[i].num, NULL, 0);
+               }
+
                if (unlikely(data_len < 0))
                        break;
 
@@ -833,8 +862,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
                        (uint8_t *)tx_queue->base_addr + tail);
                rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-               data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-                       vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+               if (vec->dest_sgl) {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec, vec->src_sgl[i].num,
+                               vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+               } else {
+                       data_len = qat_sym_build_req_set_data(req,
+                               user_data[i], cookie,
+                               vec->src_sgl[i].vec,
+                               vec->src_sgl[i].num, NULL, 0);
+               }
+
                if (unlikely(data_len < 0))
                        break;
 
-- 
2.17.1
