This patch adds scatter-gather list (SGL) out-of-place (OOP) support to the
QAT driver: when the caller provides a destination SGL, the raw datapath
enqueue functions pass it to the request builder instead of operating in
place.

Signed-off-by: Kai Ji <kai...@intel.com>
---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 28 ++++++++--
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c | 14 ++++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c    | 55 +++++++++++++++++---
 3 files changed, 83 insertions(+), 14 deletions(-)
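For context, a caller-side sketch (illustrative only, not part of this patch)
of how the new out-of-place path is selected. It assumes the standard
rte_crypto_sym_vec / rte_crypto_sgl definitions from rte_crypto_sym.h; the
helper name fill_sym_vec_oop() is hypothetical.

/*
 * Illustrative sketch only, not part of this patch. Assumes DPDK's
 * rte_crypto_sym_vec / rte_crypto_sgl layouts; fill_sym_vec_oop() is a
 * hypothetical helper name.
 */
#include <rte_crypto_sym.h>

static void
fill_sym_vec_oop(struct rte_crypto_sym_vec *vec,
	struct rte_crypto_sgl *src, struct rte_crypto_sgl *dst,
	uint32_t nb_jobs)
{
	vec->num = nb_jobs;
	/* One source SGL per job, consumed as vec->src_sgl[i] below. */
	vec->src_sgl = src;
	/*
	 * A non-NULL dest_sgl selects the new out-of-place branch in the
	 * enqueue functions, which passes dest_sgl[i].vec/num to
	 * qat_sym_build_req_set_data(); leaving it NULL keeps the existing
	 * in-place behaviour.
	 */
	vec->dest_sgl = dst;
}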
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
index 6494019050..c59c25fe8f 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c
@@ -467,8 +467,18 @@ qat_sym_dp_enqueue_aead_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -564,8 +574,18 @@ qat_sym_dp_enqueue_auth_jobs_gen3(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen3(ctx, cookie, req, &vec->digest[i],
diff --git a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
index 108e07ee7f..1b6cf10589 100644
--- a/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
+++ b/drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c
@@ -295,8 +295,18 @@ qat_sym_dp_enqueue_aead_jobs_gen4(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
diff --git a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
index abd36c7f1c..9894757657 100644
--- a/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
+++ b/drivers/crypto/qat/dev/qat_sym_pmd_gen1.c
@@ -529,9 +529,18 @@ qat_sym_dp_enqueue_cipher_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i],
-			cookie, vec->src_sgl[i].vec,
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
 				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_cipher_job_gen1(ctx, req, &vec->iv[i], ofs,
@@ -628,8 +637,18 @@ qat_sym_dp_enqueue_auth_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 		enqueue_one_auth_job_gen1(ctx, req, &vec->digest[i],
@@ -728,8 +747,18 @@ qat_sym_dp_enqueue_chain_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
 
@@ -833,8 +862,18 @@ qat_sym_dp_enqueue_aead_jobs_gen1(void *qp_data, uint8_t *drv_ctx,
 			(uint8_t *)tx_queue->base_addr + tail);
 		rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
 
-		data_len = qat_sym_build_req_set_data(req, user_data[i], cookie,
-			vec->src_sgl[i].vec, vec->src_sgl[i].num, NULL, 0);
+		if (vec->dest_sgl) {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec, vec->src_sgl[i].num,
+				vec->dest_sgl[i].vec, vec->dest_sgl[i].num);
+		} else {
+			data_len = qat_sym_build_req_set_data(req,
+				user_data[i], cookie,
+				vec->src_sgl[i].vec,
+				vec->src_sgl[i].num, NULL, 0);
+		}
+
 		if (unlikely(data_len < 0))
 			break;
-- 
2.17.1