From: Hemant Agrawal <hemant.agra...@nxp.com>

Add support for authentication-only, AEAD (AES-GCM) and chained
cipher-authentication operations in the CAAM JR PMD, including the
scatter-gather variants of each operation.

Signed-off-by: Gagandeep Singh <g.si...@nxp.com>
Signed-off-by: Hemant Agrawal <hemant.agra...@nxp.com>
---
 drivers/crypto/caam_jr/caam_jr.c | 710 ++++++++++++++++++++++++++++++-
 1 file changed, 707 insertions(+), 3 deletions(-)
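Note: the new cipher-then-auth session path expects a chained xform such
as the sketch below. This snippet is illustrative only, not part of the
patch; "aes_key", "hmac_key" and "IV_OFFSET" are placeholders and the
key/IV/digest lengths are examples, not PMD requirements:

	#include <rte_cryptodev.h>

	/* AES-128-CBC encrypt followed by SHA1-HMAC generate
	 * (encrypt-then-MAC), accepted by the new "Cipher then
	 * Authenticate" branch in caam_jr_set_session_parameters().
	 */
	struct rte_crypto_sym_xform auth_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = hmac_key, .length = 64 },
			.digest_length = 20,
		},
	};
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xform,	/* auth stage runs after cipher */
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = aes_key, .length = 16 },
			.iv = { .offset = IV_OFFSET, .length = 16 },
		},
	};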
diff --git a/drivers/crypto/caam_jr/caam_jr.c b/drivers/crypto/caam_jr/caam_jr.c
index 6d30c4f4d..d582b2fcb 100644
--- a/drivers/crypto/caam_jr/caam_jr.c
+++ b/drivers/crypto/caam_jr/caam_jr.c
@@ -21,7 +21,9 @@
 /* RTA header files */
 #include <hw/desc/common.h>
 #include <hw/desc/algo.h>
+#include <hw/desc/ipsec.h>
 #include <of.h>
+
 #include <caam_jr_hw_specific.h>
 #include <caam_jr.h>
 #include <caam_jr_pvt.h>
@@ -103,6 +105,71 @@ static inline int is_cipher_only(struct caam_jr_session *ses)
 			(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
 }
 
+static inline int is_auth_only(struct caam_jr_session *ses)
+{
+	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
+		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_aead(struct caam_jr_session *ses)
+{
+	return ((ses->cipher_alg == 0) &&
+		(ses->auth_alg == 0) &&
+		(ses->aead_alg != 0));
+}
+
+static inline int is_auth_cipher(struct caam_jr_session *ses)
+{
+	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_encode(struct caam_jr_session *ses)
+{
+	return ses->dir == DIR_ENC;
+}
+
+static inline int is_decode(struct caam_jr_session *ses)
+{
+	return ses->dir == DIR_DEC;
+}
+
+static inline void
+caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
+{
+	switch (ses->auth_alg) {
+	case RTE_CRYPTO_AUTH_NULL:
+		ses->digest_length = 0;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
+		alginfo_a->algmode = OP_ALG_AAI_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
+		alginfo_a->algmode = OP_ALG_AAI_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
+		alginfo_a->algmode = OP_ALG_AAI_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
+		alginfo_a->algmode = OP_ALG_AAI_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
+		alginfo_a->algmode = OP_ALG_AAI_HMAC;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
+		alginfo_a->algmode = OP_ALG_AAI_HMAC;
+		break;
+	default:
+		CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
+	}
+}
+
 static inline void
 caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
 {
@@ -126,13 +193,27 @@ caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
 	}
 }
 
+static inline void
+caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
+{
+	switch (ses->aead_alg) {
+	case RTE_CRYPTO_AEAD_AES_GCM:
+		alginfo->algtype = OP_ALG_ALGSEL_AES;
+		alginfo->algmode = OP_ALG_AAI_GCM;
+		break;
+	default:
+		CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
+	}
+}
+
 /* prepare command block of the session */
 static int
 caam_jr_prep_cdb(struct caam_jr_session *ses)
 {
-	struct alginfo alginfo_c = {0};
+	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
 	int32_t shared_desc_len = 0;
 	struct sec_cdb *cdb;
+	int err;
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
 	int swap = false;
 #else
@@ -171,6 +252,108 @@ caam_jr_prep_cdb(struct caam_jr_session *ses)
 					NULL,
 					ses->iv.length,
 					ses->dir);
+	} else if (is_auth_only(ses)) {
+		caam_auth_alg(ses, &alginfo_a);
+		if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+			CAAM_JR_ERR("not supported auth alg");
+			rte_free(cdb);
+			return -ENOTSUP;
+		}
+
+		alginfo_a.key = (size_t)ses->auth_key.data;
+		alginfo_a.keylen = ses->auth_key.length;
+		alginfo_a.key_enc_flags = 0;
+		alginfo_a.key_type = RTA_DATA_IMM;
+
+		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
+					swap, &alginfo_a,
+					!ses->dir,
+					ses->digest_length);
+	} else if (is_aead(ses)) {
+		caam_aead_alg(ses, &alginfo);
+		if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+			CAAM_JR_ERR("not supported aead alg");
+			rte_free(cdb);
+			return -ENOTSUP;
+		}
+		alginfo.key = (size_t)ses->aead_key.data;
+		alginfo.keylen = ses->aead_key.length;
+		alginfo.key_enc_flags = 0;
+		alginfo.key_type = RTA_DATA_IMM;
+
+		if (ses->dir == DIR_ENC)
+			shared_desc_len = cnstr_shdsc_gcm_encap(
+					cdb->sh_desc, true, swap,
+					&alginfo,
+					ses->iv.length,
+					ses->digest_length);
+		else
+			shared_desc_len = cnstr_shdsc_gcm_decap(
+					cdb->sh_desc, true, swap,
+					&alginfo,
+					ses->iv.length,
+					ses->digest_length);
+	} else {
+		caam_cipher_alg(ses, &alginfo_c);
+		if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+			CAAM_JR_ERR("not supported cipher alg");
+			rte_free(cdb);
+			return -ENOTSUP;
+		}
+
+		alginfo_c.key = (size_t)ses->cipher_key.data;
+		alginfo_c.keylen = ses->cipher_key.length;
+		alginfo_c.key_enc_flags = 0;
+		alginfo_c.key_type = RTA_DATA_IMM;
+
+		caam_auth_alg(ses, &alginfo_a);
+		if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+			CAAM_JR_ERR("not supported auth alg");
+			rte_free(cdb);
+			return -ENOTSUP;
+		}
+
+		alginfo_a.key = (size_t)ses->auth_key.data;
+		alginfo_a.keylen = ses->auth_key.length;
+		alginfo_a.key_enc_flags = 0;
+		alginfo_a.key_type = RTA_DATA_IMM;
+
+		/* Borrow the first two descriptor words to pass the key
+		 * lengths to rta_inline_query(), which reports in
+		 * sh_desc[2] which keys fit inline in the shared
+		 * descriptor; the scratch words are cleared below.
+		 */
+		cdb->sh_desc[0] = alginfo_c.keylen;
+		cdb->sh_desc[1] = alginfo_a.keylen;
+		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+				       MIN_JOB_DESC_SIZE,
+				       (unsigned int *)cdb->sh_desc,
+				       &cdb->sh_desc[2], 2);
+
+		if (err < 0) {
+			CAAM_JR_ERR("Crypto: Incorrect key lengths");
+			rte_free(cdb);
+			return err;
+		}
+		if (cdb->sh_desc[2] & 1)
+			alginfo_c.key_type = RTA_DATA_IMM;
+		else {
+			alginfo_c.key = (size_t)caam_jr_mem_vtop(
+					(void *)(size_t)alginfo_c.key);
+			alginfo_c.key_type = RTA_DATA_PTR;
+		}
+		if (cdb->sh_desc[2] & (1 << 1))
+			alginfo_a.key_type = RTA_DATA_IMM;
+		else {
+			alginfo_a.key = (size_t)caam_jr_mem_vtop(
+					(void *)(size_t)alginfo_a.key);
+			alginfo_a.key_type = RTA_DATA_PTR;
+		}
+		cdb->sh_desc[0] = 0;
+		cdb->sh_desc[1] = 0;
+		cdb->sh_desc[2] = 0;
+		/* Auth_only_len is set as 0 here and it will be
+		 * overwritten in the job descriptor for each packet.
+		 */
+		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+				true, swap, &alginfo_c, &alginfo_a,
+				ses->iv.length, 0,
+				ses->digest_length, ses->dir);
 	}
 
 	if (shared_desc_len < 0) {
@@ -422,6 +605,163 @@ caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
 	return num_rx;
 }
 
+/**
+ * packet looks like:
+ *  |<-------------- data_len -------------->|
+ *  |ip_header|ah_header|icv|payload|
+ *  ^
+ *  |
+ *  mbuf->pkt.data
+ */
+static inline struct caam_jr_op_ctx *
+build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct rte_mbuf *mbuf = sym->m_src;
+	struct caam_jr_op_ctx *ctx;
+	struct sec4_sg_entry *sg;
+	int length;
+	struct sec_cdb *cdb;
+	uint64_t sdesc_offset;
+	struct sec_job_descriptor_t *jobdescr;
+	uint8_t extra_segs;
+
+	if (is_decode(ses))
+		extra_segs = 2;
+	else
+		extra_segs = 1;
+
+	if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+		CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
+			       MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = caam_jr_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	ctx->op = op;
+
+	cdb = ses->cdb;
+	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+	SEC_JD_INIT(jobdescr);
+	SEC_JD_SET_SD(jobdescr,
+		      (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+		      cdb->sh_hdr.hi.field.idlen);
+
+	/* output */
+	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
+			   0, ses->digest_length);
+
+	/* input */
+	sg = &ctx->sg[0];
+	length = sym->auth.data.length;
+	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) +
+				sym->auth.data.offset);
+	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		sg++;
+		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+		sg->len = cpu_to_caam32(mbuf->data_len);
+		mbuf = mbuf->next;
+	}
+
+	if (is_decode(ses)) {
+		/* digest verification case */
+		sg++;
+		/* hash result or digest, save digest first */
+		rte_memcpy(ctx->digest, sym->auth.digest.data,
+			   ses->digest_length);
+#ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
+		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
+#endif
+		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
+		sg->len = cpu_to_caam32(ses->digest_length);
+		length += ses->digest_length;
+	} else {
+		length -= ses->digest_length;
+	}
+
+	/* last element */
+	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+	SEC_JD_SET_IN_PTR(jobdescr,
+			  (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
+			  length);
+	/* enabling sg list */
+	(jobdescr)->seq_in.command.word |= 0x01000000;
+
+	return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct caam_jr_op_ctx *ctx;
+	struct sec4_sg_entry *sg;
+	rte_iova_t start_addr;
+	struct sec_cdb *cdb;
+	uint64_t sdesc_offset;
+	struct sec_job_descriptor_t *jobdescr;
+
+	ctx = caam_jr_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	ctx->op = op;
+
+	cdb = ses->cdb;
+	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+	start_addr = rte_pktmbuf_iova(sym->m_src);
+
+	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+	SEC_JD_INIT(jobdescr);
+	SEC_JD_SET_SD(jobdescr,
+		      (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+		      cdb->sh_hdr.hi.field.idlen);
+
+	/* output */
+	SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
+			   0, ses->digest_length);
+
+	/* input */
+	if (is_decode(ses)) {
+		sg = &ctx->sg[0];
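+		/* Verification path: the IN pointer covers the data plus
+		 * the expected ICV through the two-entry SG table built
+		 * below.
+		 */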
+		SEC_JD_SET_IN_PTR(jobdescr,
+			(uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
+			(sym->auth.data.length + ses->digest_length));
+		/* enabling sg list */
+		(jobdescr)->seq_in.command.word |= 0x01000000;
+
+		/* hash result or digest, save digest first */
+		rte_memcpy(ctx->digest, sym->auth.digest.data,
+			   ses->digest_length);
+		sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
+		sg->len = cpu_to_caam32(sym->auth.data.length);
+
+#ifdef RTE_LIBRTE_PMD_CAAM_JR_DEBUG
+		rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
+#endif
+		/* let the hardware check the digest */
+		sg++;
+		sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
+		sg->len = cpu_to_caam32(ses->digest_length);
+		/* last element */
+		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+	} else {
+		SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
+				  sym->auth.data.offset,
+				  sym->auth.data.length);
+	}
+	return ctx;
+}
 
 static inline struct caam_jr_op_ctx *
 build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
@@ -602,6 +942,269 @@ build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
 	return ctx;
 }
 
+/* For decapsulation:
+ * Input:
+ * +----+----------------+--------------------------------+-----+
+ * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
+ * +----+----------------+--------------------------------+-----+
+ * Output:
+ * +--------------------------------+
+ * | Decrypted & authenticated data |
+ * +--------------------------------+
+ */
+static inline struct caam_jr_op_ctx *
+build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct caam_jr_op_ctx *ctx;
+	struct sec4_sg_entry *sg, *out_sg, *in_sg;
+	struct rte_mbuf *mbuf;
+	uint32_t length = 0;
+	struct sec_cdb *cdb;
+	uint64_t sdesc_offset;
+	uint8_t req_segs;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			ses->iv.offset);
+	struct sec_job_descriptor_t *jobdescr;
+	uint32_t auth_only_len;
+
+	auth_only_len = op->sym->auth.data.length -
+				op->sym->cipher.data.length;
+
+	if (sym->m_dst) {
+		mbuf = sym->m_dst;
+		req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+	} else {
+		mbuf = sym->m_src;
+		req_segs = mbuf->nb_segs * 2 + 3;
+	}
+
+	if (req_segs > MAX_SG_ENTRIES) {
+		CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+			       MAX_SG_ENTRIES);
+		return NULL;
+	}
+
+	ctx = caam_jr_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	ctx->op = op;
+	cdb = ses->cdb;
+	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+	SEC_JD_INIT(jobdescr);
+	SEC_JD_SET_SD(jobdescr,
+		      (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+		      cdb->sh_hdr.hi.field.idlen);
+
+	/* output */
+	if (sym->m_dst)
+		mbuf = sym->m_dst;
+	else
+		mbuf = sym->m_src;
+
+	out_sg = &ctx->sg[0];
+	if (is_encode(ses))
+		length = sym->auth.data.length + ses->digest_length;
+	else
+		length = sym->auth.data.length;
+
+	sg = &ctx->sg[0];
+
+	/* 1st seg */
+	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) +
+				sym->auth.data.offset);
+	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		sg++;
+		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+		sg->len = cpu_to_caam32(mbuf->data_len);
+		mbuf = mbuf->next;
+	}
+
+	if (is_encode(ses)) {
+		/* set auth output */
+		sg++;
+		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
+		sg->len = cpu_to_caam32(ses->digest_length);
+	}
+	/* last element */
+	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+	SEC_JD_SET_OUT_PTR(jobdescr,
+			   (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
+	/* set sg bit */
+	(jobdescr)->seq_out.command.word |= 0x01000000;
+
+	/* input */
+	sg++;
+	mbuf = sym->m_src;
+	in_sg = sg;
+	if (is_encode(ses))
+		length = ses->iv.length + sym->auth.data.length;
+	else
+		length = ses->iv.length + sym->auth.data.length +
+			 ses->digest_length;
+
+	sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+	sg->len = cpu_to_caam32(ses->iv.length);
+
+	sg++;
+	/* 1st seg */
+	sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) +
+				sym->auth.data.offset);
+	sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
+
+	/* Successive segs */
+	mbuf = mbuf->next;
+	while (mbuf) {
+		sg++;
+		sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+		sg->len = cpu_to_caam32(mbuf->data_len);
+		mbuf = mbuf->next;
+	}
+
+	if (is_decode(ses)) {
+		sg++;
+		rte_memcpy(ctx->digest, sym->auth.digest.data,
+			   ses->digest_length);
+		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
+		sg->len = cpu_to_caam32(ses->digest_length);
+	}
+	/* last element */
+	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
+			  length);
+	/* set sg bit */
+	(jobdescr)->seq_in.command.word |= 0x01000000;
+	/* Auth_only_len is set as 0 in the descriptor and is
+	 * overwritten here in the job descriptor, which updates
+	 * the DPOVRD register.
+	 */
+	if (auth_only_len)
+		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;
+
+	return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+	struct rte_crypto_sym_op *sym = op->sym;
+	struct caam_jr_op_ctx *ctx;
+	struct sec4_sg_entry *sg;
+	rte_iova_t src_start_addr, dst_start_addr;
+	uint32_t length = 0;
+	struct sec_cdb *cdb;
+	uint64_t sdesc_offset;
+	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+			ses->iv.offset);
+	struct sec_job_descriptor_t *jobdescr;
+	uint32_t auth_only_len;
+
+	auth_only_len = op->sym->auth.data.length -
+				op->sym->cipher.data.length;
+
+	src_start_addr = rte_pktmbuf_iova(sym->m_src);
+	if (sym->m_dst)
+		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+	else
+		dst_start_addr = src_start_addr;
+
+	ctx = caam_jr_alloc_ctx(ses);
+	if (!ctx)
+		return NULL;
+
+	ctx->op = op;
+	cdb = ses->cdb;
+	sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+	jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+	SEC_JD_INIT(jobdescr);
+	SEC_JD_SET_SD(jobdescr,
+		      (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+		      cdb->sh_hdr.hi.field.idlen);
+
+	/* input */
+	sg = &ctx->sg[0];
+	if (is_encode(ses)) {
+		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+		sg->len = cpu_to_caam32(ses->iv.length);
+		length += ses->iv.length;
+
+		sg++;
+		sg->ptr = cpu_to_caam64(src_start_addr +
+					sym->auth.data.offset);
+		sg->len = cpu_to_caam32(sym->auth.data.length);
+		length += sym->auth.data.length;
+		/* last element */
+		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+	} else {
+		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+		sg->len = cpu_to_caam32(ses->iv.length);
+		length += ses->iv.length;
+
+		sg++;
+		sg->ptr = cpu_to_caam64(src_start_addr +
+					sym->auth.data.offset);
+		sg->len = cpu_to_caam32(sym->auth.data.length);
+		length += sym->auth.data.length;
+
+		rte_memcpy(ctx->digest, sym->auth.digest.data,
+			   ses->digest_length);
+		sg++;
+		sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
+		sg->len = cpu_to_caam32(ses->digest_length);
+		length += ses->digest_length;
+		/* last element */
+		sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+	}
+
+	SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]),
+			  0, length);
+	/* set sg bit */
+	(jobdescr)->seq_in.command.word |= 0x01000000;
+
+	/* output */
+	sg = &ctx->sg[6];
+
+	sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
+	sg->len = cpu_to_caam32(sym->cipher.data.length);
+	length = sym->cipher.data.length;
+
+	if (is_encode(ses)) {
+		/* set auth output */
+		sg++;
+		sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
+		sg->len = cpu_to_caam32(ses->digest_length);
+		length += ses->digest_length;
+	}
+	/* last element */
+	sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+	SEC_JD_SET_OUT_PTR(jobdescr,
+			   (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
+	/* set sg bit */
+	(jobdescr)->seq_out.command.word |= 0x01000000;
+
+	/* Auth_only_len is set as 0 in the descriptor and is
+	 * overwritten here in the job descriptor, which updates
+	 * the DPOVRD register.
+	 */
+	if (auth_only_len)
+		(jobdescr)->dpovrd = 0x80000000 | auth_only_len;
+
+	return ctx;
+}
 
 static int
 caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
 {
@@ -629,12 +1232,25 @@ caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
 	}
 
 	if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
-		if (is_cipher_only(ses))
+		if (is_auth_cipher(ses))
+			ctx = build_cipher_auth(op, ses);
+		else if (is_aead(ses))
+			goto err1;
+		else if (is_auth_only(ses))
+			ctx = build_auth_only(op, ses);
+		else if (is_cipher_only(ses))
 			ctx = build_cipher_only(op, ses);
 	} else {
-		if (is_cipher_only(ses))
+		if (is_auth_cipher(ses))
+			ctx = build_cipher_auth_sg(op, ses);
+		else if (is_aead(ses))
+			goto err1;
+		else if (is_auth_only(ses))
+			ctx = build_auth_only_sg(op, ses);
+		else if (is_cipher_only(ses))
 			ctx = build_cipher_only_sg(op, ses);
 	}
+err1:
 	if (unlikely(!ctx)) {
 		qp->tx_errs++;
 		CAAM_JR_ERR("not supported sec op");
@@ -817,6 +1433,54 @@ caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
 	return 0;
 }
 
+static int
+caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
+		  struct rte_crypto_sym_xform *xform,
+		  struct caam_jr_session *session)
+{
+	session->auth_alg = xform->auth.algo;
+	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+					     RTE_CACHE_LINE_SIZE);
+	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
+		CAAM_JR_ERR("No Memory for auth key\n");
+		return -ENOMEM;
+	}
+	session->auth_key.length = xform->auth.key.length;
+	session->digest_length = xform->auth.digest_length;
+
+	memcpy(session->auth_key.data, xform->auth.key.data,
+	       xform->auth.key.length);
+	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+			DIR_ENC : DIR_DEC;
+
+	return 0;
+}
+
+static int
+caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
+		  struct rte_crypto_sym_xform *xform,
+		  struct caam_jr_session *session)
+{
+	session->aead_alg = xform->aead.algo;
+	session->iv.length = xform->aead.iv.length;
+	session->iv.offset = xform->aead.iv.offset;
+	session->auth_only_len = xform->aead.aad_length;
+	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
+					     RTE_CACHE_LINE_SIZE);
+	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
+		CAAM_JR_ERR("No Memory for aead key\n");
+		return -ENOMEM;
+	}
+	session->aead_key.length = xform->aead.key.length;
+	session->digest_length = xform->aead.digest_length;
+
+	memcpy(session->aead_key.data, xform->aead.key.data,
+	       xform->aead.key.length);
+	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+			DIR_ENC : DIR_DEC;
+
+	return 0;
+}
 
 static int
 caam_jr_set_session_parameters(struct rte_cryptodev *dev,
@@ -840,6 +1504,39 @@ caam_jr_set_session_parameters(struct rte_cryptodev *dev,
 		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
 		caam_jr_cipher_init(dev, xform, session);
 
+	/* Authentication Only */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		   xform->next == NULL) {
+		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+		caam_jr_auth_init(dev, xform, session);
+
+	/* Cipher then Authenticate */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+			caam_jr_cipher_init(dev, xform, session);
+			caam_jr_auth_init(dev, xform->next, session);
+		} else {
+			CAAM_JR_ERR("Not supported: Cipher (decrypt) then Auth");
+			goto err1;
+		}
+
+	/* Authenticate then Cipher */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+			caam_jr_auth_init(dev, xform, session);
+			caam_jr_cipher_init(dev, xform->next, session);
+		} else {
+			CAAM_JR_ERR("Not supported: Auth then Cipher (encrypt)");
+			goto err1;
+		}
+
+	/* AEAD operation for AES-GCM type algorithms */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+		   xform->next == NULL) {
+		caam_jr_aead_init(dev, xform, session);
+
 	} else {
 		CAAM_JR_ERR("Invalid crypto type");
 		return -EINVAL;
@@ -847,6 +1544,13 @@ caam_jr_set_session_parameters(struct rte_cryptodev *dev,
 	session->ctx_pool = internals->ctx_pool;
 
 	return 0;
+
+err1:
+	rte_free(session->cipher_key.data);
+	rte_free(session->auth_key.data);
+	memset(session, 0, sizeof(struct caam_jr_session));
+
+	return -EINVAL;
 }
 
 static int
-- 
2.17.1
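For reference, the digest placement assumed by the new build_auth_only()
path can be set up as in the sketch below (illustrative only, not part of
the patch; "op_pool" and "mbuf" are placeholders, the 20-byte digest
matches SHA1-HMAC, and error checks are omitted):

	#include <rte_cryptodev.h>
	#include <rte_mbuf.h>

	/* Auth-only generate: hash the whole packet and let the engine
	 * write the digest into the mbuf tailroom.
	 */
	uint16_t plen = rte_pktmbuf_pkt_len(mbuf);
	struct rte_crypto_op *op =
		rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);

	op->sym->m_src = mbuf;
	op->sym->auth.data.offset = 0;
	op->sym->auth.data.length = plen;
	op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(mbuf, 20);
	op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf, plen);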