This patch updates the ipsec library to handle the newly introduced
RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO action type. Inbound and outbound
process functions are added for both tunnel and transport mode: they
gather each packet's segments into iovec arrays and hand the whole
burst to rte_security_process_cpu_crypto_bulk() for synchronous
processing, so no separate crypto enqueue/dequeue stage is needed.

Signed-off-by: Fan Zhang <roy.fan.zh...@intel.com>
---
 lib/librte_ipsec/crypto.h   |  24 +++
 lib/librte_ipsec/esp_inb.c  | 200 ++++++++++++++++++++++--
 lib/librte_ipsec/esp_outb.c | 369 +++++++++++++++++++++++++++++++++++++++++---
 lib/librte_ipsec/sa.c       |  53 ++++++-
 lib/librte_ipsec/sa.h       |  29 ++++
 lib/librte_ipsec/ses.c      |   4 +-
 6 files changed, 643 insertions(+), 36 deletions(-)
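
Usage note (not part of the commit itself): the CPU crypto path is
selected purely by the session type, and no prepare stage is installed
for it (cpu_crypto_pkt_func_select() only sets pf->process). A minimal
sketch of the intended application flow, where sa, sec_ctx and sec_ses
stand for an already configured rte_ipsec_sa and a CPU_CRYPTO security
context/session (hypothetical names, not from this patch):

	struct rte_ipsec_session ss = {
		.sa = sa,
		.type = RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO,
		.security = { .ses = sec_ses, .ctx = sec_ctx },
	};

	/* binds the cpu_crypto process function selected in sa.c */
	if (rte_ipsec_session_prepare(&ss) != 0)
		rte_exit(EXIT_FAILURE, "session prepare failed\n");

	/* packets are processed synchronously, in place */
	uint16_t n = rte_ipsec_pkt_process(&ss, mb, num);
	/* mb[0..n-1] succeeded; failed packets were moved to the tail
	 * and rte_errno holds the cause */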

diff --git a/lib/librte_ipsec/crypto.h b/lib/librte_ipsec/crypto.h
index f8fbf8d4f..901c8c7de 100644
--- a/lib/librte_ipsec/crypto.h
+++ b/lib/librte_ipsec/crypto.h
@@ -179,4 +179,28 @@ lksd_none_cop_prepare(struct rte_crypto_op *cop,
        __rte_crypto_sym_op_attach_sym_session(sop, cs);
 }
 
+typedef void *(*_set_icv_f)(void *val, struct rte_mbuf *ml, uint32_t icv_off);
+
+static inline void *
+set_icv_va_pa(void *val, struct rte_mbuf *ml, uint32_t icv_off)
+{
+       union sym_op_data *icv = val;
+
+       icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_off);
+       icv->pa = rte_pktmbuf_iova_offset(ml, icv_off);
+
+       return icv->va;
+}
+
+static inline void *
+set_icv_va(void *val, struct rte_mbuf *ml, uint32_t icv_off)
+{
+       void **icv_va = val;
+
+       *icv_va = rte_pktmbuf_mtod_offset(ml, void *, icv_off);
+
+       return *icv_va;
+}
+
 #endif /* _CRYPTO_H_ */
diff --git a/lib/librte_ipsec/esp_inb.c b/lib/librte_ipsec/esp_inb.c
index 8e3ecbc64..c4476e819 100644
--- a/lib/librte_ipsec/esp_inb.c
+++ b/lib/librte_ipsec/esp_inb.c
@@ -105,6 +105,78 @@ inb_cop_prepare(struct rte_crypto_op *cop,
        }
 }
 
+static inline int
+inb_cpu_crypto_proc_prepare(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb,
+       uint32_t pofs, uint32_t plen,
+       struct rte_security_vec *buf, struct iovec *cur_vec,
+       void *iv)
+{
+       struct rte_mbuf *ms;
+       struct iovec *vec = cur_vec;
+       struct aead_gcm_iv *gcm;
+       struct aesctr_cnt_blk *ctr;
+       uint64_t *ivp;
+       uint32_t algo;
+       uint32_t left;
+       uint32_t off = 0, n_seg = 0;
+
+       ivp = rte_pktmbuf_mtod_offset(mb, uint64_t *,
+               pofs + sizeof(struct rte_esp_hdr));
+       algo = sa->algo_type;
+
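+       /* fill the IV and derive the data offset/length for this algo */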
+       switch (algo) {
+       case ALGO_TYPE_AES_GCM:
+               gcm = (struct aead_gcm_iv *)iv;
+               aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+               off = sa->ctp.cipher.offset + pofs;
+               left = plen - sa->ctp.cipher.length;
+               break;
+       case ALGO_TYPE_AES_CBC:
+       case ALGO_TYPE_3DES_CBC:
+               copy_iv(iv, ivp, sa->iv_len);
+               off = sa->ctp.auth.offset + pofs;
+               left = plen - sa->ctp.auth.length;
+               break;
+       case ALGO_TYPE_AES_CTR:
+               copy_iv(iv, ivp, sa->iv_len);
+               off = sa->ctp.auth.offset + pofs;
+               left = plen - sa->ctp.auth.length;
+               ctr = (struct aesctr_cnt_blk *)iv;
+               aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+               break;
+       case ALGO_TYPE_NULL:
+               left = plen - sa->ctp.cipher.length;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ms = mbuf_get_seg_ofs(mb, &off);
+       if (!ms)
+               return -1;
+
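+       /* gather the data to process into iovec entries, one per segment */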
+       while (n_seg < RTE_LIBRTE_IP_FRAG_MAX_FRAG && left && ms) {
+               uint32_t len = RTE_MIN(left, ms->data_len - off);
+
+               vec->iov_base = rte_pktmbuf_mtod_offset(ms, void *, off);
+               vec->iov_len = len;
+
+               left -= len;
+               vec++;
+               n_seg++;
+               ms = ms->next;
+               off = 0;
+       }
+
+       if (left)
+               return -1;
+
+       buf->vec = cur_vec;
+       buf->num = n_seg;
+
+       return n_seg;
+}
+
 /*
  * Helper function for prepare() to deal with situation when
  * ICV is spread by two segments. Tries to move ICV completely into the
@@ -139,20 +211,21 @@ move_icv(struct rte_mbuf *ml, uint32_t ofs)
  */
 static inline void
 inb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
-       const union sym_op_data *icv)
+       uint8_t *icv_va, void *aad_buf, uint32_t aad_off)
 {
        struct aead_gcm_aad *aad;
 
        /* insert SQN.hi between ESP trailer and ICV */
        if (sa->sqh_len != 0)
-               insert_sqh(sqn_hi32(sqc), icv->va, sa->icv_len);
+               insert_sqh(sqn_hi32(sqc), icv_va, sa->icv_len);
 
        /*
         * fill AAD fields, if any (aad fields are placed after icv),
         * right now we support only one AEAD algorithm: AES-GCM.
         */
        if (sa->aad_len != 0) {
-               aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+               aad = aad_buf ? aad_buf :
+                               (struct aead_gcm_aad *)(icv_va + aad_off);
                aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
        }
 }
@@ -162,13 +235,15 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
  */
 static inline int32_t
 inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
-       struct rte_mbuf *mb, uint32_t hlen, union sym_op_data *icv)
+       struct rte_mbuf *mb, uint32_t hlen, _set_icv_f set_icv, void *icv_val,
+       void *aad_buf)
 {
        int32_t rc;
        uint64_t sqn;
        uint32_t clen, icv_len, icv_ofs, plen;
        struct rte_mbuf *ml;
        struct rte_esp_hdr *esph;
+       void *icv_va;
 
        esph = rte_pktmbuf_mtod_offset(mb, struct rte_esp_hdr *, hlen);
 
@@ -226,8 +301,8 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
        if (sa->aad_len + sa->sqh_len > rte_pktmbuf_tailroom(ml))
                return -ENOSPC;
 
-       icv->va = rte_pktmbuf_mtod_offset(ml, void *, icv_ofs);
-       icv->pa = rte_pktmbuf_iova_offset(ml, icv_ofs);
+       icv_va = set_icv(icv_val, ml, icv_ofs);
+       inb_pkt_xprepare(sa, sqn, icv_va, aad_buf, sa->icv_len);
 
        /*
         * if esn is used then high-order 32 bits are also used in ICV
@@ -238,7 +313,6 @@ inb_pkt_prepare(const struct rte_ipsec_sa *sa, const struct replay_sqn *rsn,
        mb->pkt_len += sa->sqh_len;
        ml->data_len += sa->sqh_len;
 
-       inb_pkt_xprepare(sa, sqn, icv);
        return plen;
 }
 
@@ -265,7 +339,8 @@ esp_inb_pkt_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
        for (i = 0; i != num; i++) {
 
                hl = mb[i]->l2_len + mb[i]->l3_len;
-               rc = inb_pkt_prepare(sa, rsn, mb[i], hl, &icv);
+               rc = inb_pkt_prepare(sa, rsn, mb[i], hl, set_icv_va_pa,
+                               (void *)&icv, NULL);
                if (rc >= 0) {
                        lksd_none_cop_prepare(cop[k], cs, mb[i]);
                        inb_cop_prepare(cop[k], sa, mb[i], &icv, hl, rc);
@@ -512,7 +587,6 @@ tun_process(const struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
        return k;
 }
 
-
 /*
  * *process* function for tunnel packets
  */
@@ -625,6 +699,114 @@ esp_inb_pkt_process(struct rte_ipsec_sa *sa, struct rte_mbuf *mb[],
        return n;
 }
 
+/*
+ * process packets using sync crypto engine
+ */
+static uint16_t
+esp_inb_cpu_crypto_pkt_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num,
+               esp_inb_process_t process)
+{
+       int32_t rc;
+       uint32_t i, hl, n, p;
+       struct rte_ipsec_sa *sa;
+       struct replay_sqn *rsn;
+       void *icv_va;
+       uint32_t sqn[num];
+       uint32_t dr[num];
+       uint8_t sqh_len;
+
+       /* cpu crypto specific variables */
+       struct rte_security_vec buf[num];
+       struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+       uint32_t vec_idx = 0;
+       uint64_t iv_buf[num][IPSEC_MAX_IV_QWORD];
+       void *iv[num];
+       int status[num];
+       uint8_t aad_buf[num][sizeof(struct aead_gcm_aad)];
+       void *aad[num];
+       void *digest[num];
+       uint32_t k;
+
+       sa = ss->sa;
+       rsn = rsn_acquire(sa);
+       sqh_len = sa->sqh_len;
+
+       k = 0;
+       for (i = 0; i != num; i++) {
+               hl = mb[i]->l2_len + mb[i]->l3_len;
+               rc = inb_pkt_prepare(sa, rsn, mb[i], hl, set_icv_va,
+                               (void *)&icv_va, (void *)aad_buf[k]);
+               if (rc >= 0) {
+                       iv[k] = (void *)iv_buf[k];
+                       aad[k] = (void *)aad_buf[k];
+                       digest[k] = (void *)icv_va;
+
+                       rc = inb_cpu_crypto_proc_prepare(sa, mb[i], hl,
+                                       rc, &buf[k], &vec[vec_idx], iv[k]);
+                       if (rc < 0) {
+                               dr[i - k] = i;
+                               continue;
+                       }
+
+                       vec_idx += rc;
+                       k++;
+               } else
+                       dr[i - k] = i;
+       }
+
+       rsn_release(sa, rsn);
+
+       /* copy not prepared mbufs beyond good ones */
+       if (k != num) {
+               rte_errno = EBADMSG;
+
+               if (unlikely(k == 0))
+                       return 0;
+
+               move_bad_mbufs(mb, dr, num, num - k);
+       }
+
+       /* process the packets */
+       n = 0;
+       rc = rte_security_process_cpu_crypto_bulk(ss->security.ctx,
+                       ss->security.ses, buf, iv, aad, digest, status, k);
+       /* move failed process packets to dr */
+       for (i = 0; i < k; i++) {
+               if (status[i]) {
+                       dr[n++] = i;
+                       rte_errno = EBADMSG;
+               }
+       }
+
+       /* move bad packets to the back */
+       if (n)
+               move_bad_mbufs(mb, dr, k, n);
+
+       /* process packets */
+       p = process(sa, mb, sqn, dr, k - n, sqh_len);
+
+       if (p != k - n && p != 0)
+               move_bad_mbufs(mb, dr, k - n, k - n - p);
+
+       if (p != num)
+               rte_errno = EBADMSG;
+
+       return p;
+}
+
+uint16_t
+esp_inb_tun_cpu_crypto_pkt_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num)
+{
+       return esp_inb_cpu_crypto_pkt_process(ss, mb, num, tun_process);
+}
+
+uint16_t
+esp_inb_trs_cpu_crypto_pkt_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num)
+{
+       return esp_inb_cpu_crypto_pkt_process(ss, mb, num, trs_process);
+}
+
 /*
  * process group of ESP inbound tunnel packets.
  */
diff --git a/lib/librte_ipsec/esp_outb.c b/lib/librte_ipsec/esp_outb.c
index 55799a867..ecfc4cd3f 100644
--- a/lib/librte_ipsec/esp_outb.c
+++ b/lib/librte_ipsec/esp_outb.c
@@ -104,7 +104,7 @@ outb_cop_prepare(struct rte_crypto_op *cop,
 static inline int32_t
 outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
        const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
-       union sym_op_data *icv, uint8_t sqh_len)
+       _set_icv_f set_icv, void *icv_val, uint8_t sqh_len)
 {
        uint32_t clen, hlen, l2len, pdlen, pdofs, plen, tlen;
        struct rte_mbuf *ml;
@@ -177,8 +177,8 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
        espt->pad_len = pdlen;
        espt->next_proto = sa->proto;
 
-       icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
-       icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
+       /* set icv va/pa value(s) */
+       set_icv(icv_val, ml, pdofs);
 
        return clen;
 }
@@ -189,14 +189,14 @@ outb_tun_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
  */
 static inline void
 outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
-       const union sym_op_data *icv)
+       uint8_t *icv_va, void *aad_buf)
 {
        uint32_t *psqh;
        struct aead_gcm_aad *aad;
 
        /* insert SQN.hi between ESP trailer and ICV */
        if (sa->sqh_len != 0) {
-               psqh = (uint32_t *)(icv->va - sa->sqh_len);
+               psqh = (uint32_t *)(icv_va - sa->sqh_len);
                psqh[0] = sqn_hi32(sqc);
        }
 
@@ -205,7 +205,7 @@ outb_pkt_xprepare(const struct rte_ipsec_sa *sa, rte_be64_t sqc,
         * right now we support only one AEAD algorithm: AES-GCM .
         */
        if (sa->aad_len != 0) {
-               aad = (struct aead_gcm_aad *)(icv->va + sa->icv_len);
+               aad = aad_buf;
                aead_gcm_aad_fill(aad, sa->spi, sqc, IS_ESN(sa));
        }
 }
@@ -242,11 +242,12 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
                gen_iv(iv, sqc);
 
                /* try to update the packet itself */
-               rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv,
-                                         sa->sqh_len);
+               rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], set_icv_va_pa,
+                               (void *)&icv, sa->sqh_len);
                /* success, setup crypto op */
                if (rc >= 0) {
-                       outb_pkt_xprepare(sa, sqc, &icv);
+                       outb_pkt_xprepare(sa, sqc, icv.va,
+                                       (void *)(icv.va + sa->icv_len));
                        lksd_none_cop_prepare(cop[k], cs, mb[i]);
                        outb_cop_prepare(cop[k], sa, iv, &icv, 0, rc);
                        k++;
@@ -270,7 +271,7 @@ esp_outb_tun_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
 static inline int32_t
 outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
        const uint64_t ivp[IPSEC_MAX_IV_QWORD], struct rte_mbuf *mb,
-       uint32_t l2len, uint32_t l3len, union sym_op_data *icv,
+       uint32_t l2len, uint32_t l3len, _set_icv_f set_icv, void *icv_val,
        uint8_t sqh_len)
 {
        uint8_t np;
@@ -340,8 +341,7 @@ outb_trs_pkt_prepare(struct rte_ipsec_sa *sa, rte_be64_t sqc,
        espt->pad_len = pdlen;
        espt->next_proto = np;
 
-       icv->va = rte_pktmbuf_mtod_offset(ml, void *, pdofs);
-       icv->pa = rte_pktmbuf_iova_offset(ml, pdofs);
+       set_icv(icv_val, ml, pdofs);
 
        return clen;
 }
@@ -381,11 +381,12 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
                gen_iv(iv, sqc);
 
                /* try to update the packet itself */
-               rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3, &icv,
-                                         sa->sqh_len);
+               rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i], l2, l3,
+                               set_icv_va_pa, (void *)&icv, sa->sqh_len);
                /* success, setup crypto op */
                if (rc >= 0) {
-                       outb_pkt_xprepare(sa, sqc, &icv);
+                       outb_pkt_xprepare(sa, sqc, icv.va,
+                                       (void *)(icv.va + sa->icv_len));
                        lksd_none_cop_prepare(cop[k], cs, mb[i]);
                        outb_cop_prepare(cop[k], sa, iv, &icv, l2 + l3, rc);
                        k++;
@@ -403,6 +404,335 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
        return k;
 }
 
+
+static inline int
+outb_cpu_crypto_proc_prepare(struct rte_mbuf *m, const struct rte_ipsec_sa *sa,
+               uint32_t hlen, uint32_t plen,
+               struct rte_security_vec *buf, struct iovec *cur_vec, void *iv)
+{
+       struct rte_mbuf *ms;
+       uint64_t *ivp = iv;
+       struct aead_gcm_iv *gcm;
+       struct aesctr_cnt_blk *ctr;
+       struct iovec *vec = cur_vec;
+       uint32_t left;
+       uint32_t off = 0;
+       uint32_t n_seg = 0;
+       uint32_t algo;
+
+       algo = sa->algo_type;
+
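+       /* fill the IV and derive the data offset/length for this algo */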
+       switch (algo) {
+       case ALGO_TYPE_AES_GCM:
+               gcm = iv;
+               aead_gcm_iv_fill(gcm, ivp[0], sa->salt);
+               off = sa->ctp.cipher.offset + hlen;
+               left = sa->ctp.cipher.length + plen;
+               break;
+       case ALGO_TYPE_AES_CBC:
+       case ALGO_TYPE_3DES_CBC:
+               off = sa->ctp.auth.offset + hlen;
+               left = sa->ctp.auth.length + plen;
+               break;
+       case ALGO_TYPE_AES_CTR:
+               off = sa->ctp.auth.offset + hlen;
+               left = sa->ctp.auth.length + plen;
+               ctr = iv;
+               aes_ctr_cnt_blk_fill(ctr, ivp[0], sa->salt);
+               break;
+       case ALGO_TYPE_NULL:
+               left = sa->ctp.cipher.length + plen;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       ms = mbuf_get_seg_ofs(m, &off);
+       if (!ms)
+               return -1;
+
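+       /* gather the data to process into iovec entries, one per segment */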
+       while (n_seg < RTE_LIBRTE_IP_FRAG_MAX_FRAG && left && ms) {
+               uint32_t len = RTE_MIN(left, ms->data_len - off);
+
+               vec->iov_base = rte_pktmbuf_mtod_offset(ms, void *, off);
+               vec->iov_len = len;
+
+               left -= len;
+               vec++;
+               n_seg++;
+               ms = ms->next;
+               off = 0;
+       }
+
+       if (left)
+               return -1;
+
+       buf->vec = cur_vec;
+       buf->num = n_seg;
+
+       return n_seg;
+}
+
+static uint16_t
+esp_outb_tun_cpu_crypto_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num)
+{
+       uint64_t sqn;
+       rte_be64_t sqc;
+       struct rte_ipsec_sa *sa;
+       struct rte_security_ctx *ctx;
+       struct rte_security_session *rss;
+       void *icv_va;
+       uint32_t dr[num];
+       uint32_t i, n;
+       int32_t rc;
+
+       /* cpu crypto specific variables */
+       struct rte_security_vec buf[num];
+       struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+       uint32_t vec_idx = 0;
+       uint64_t iv_buf[num][IPSEC_MAX_IV_QWORD];
+       void *iv[num];
+       int status[num];
+       uint8_t aad_buf[num][sizeof(struct aead_gcm_aad)];
+       void *aad[num];
+       void *digest[num];
+       uint32_t k;
+
+       sa = ss->sa;
+       ctx = ss->security.ctx;
+       rss = ss->security.ses;
+
+       k = 0;
+       n = num;
+       sqn = esn_outb_update_sqn(sa, &n);
+       if (n != num)
+               rte_errno = EOVERFLOW;
+
+       for (i = 0; i != n; i++) {
+               sqc = rte_cpu_to_be_64(sqn + i);
+               gen_iv(iv_buf[k], sqc);
+
+               /* try to update the packet itself */
+               rc = outb_tun_pkt_prepare(sa, sqc, iv_buf[k], mb[i], set_icv_va,
+                               (void *)&icv_va, sa->sqh_len);
+
+               /* success, setup crypto op */
+               if (rc >= 0) {
+                       iv[k] = (void *)iv_buf[k];
+                       aad[k] = (void *)aad_buf[k];
+                       digest[k] = (void *)icv_va;
+
+                       outb_pkt_xprepare(sa, sqc, icv_va, aad[k]);
+
+                       rc = outb_cpu_crypto_proc_prepare(mb[i], sa,
+                                       0, rc, &buf[k], &vec[vec_idx], iv[k]);
+                       if (rc < 0) {
+                               dr[i - k] = i;
+                               rte_errno = -rc;
+                               continue;
+                       }
+
+                       vec_idx += rc;
+                       k++;
+               /* failure, put packet into the death-row */
+               } else {
+                       dr[i - k] = i;
+                       rte_errno = -rc;
+               }
+       }
+
+       /* copy not prepared mbufs beyond good ones */
+       if (k != n && k != 0)
+               move_bad_mbufs(mb, dr, n, n - k);
+
+       if (unlikely(k == 0)) {
+               rte_errno = EBADMSG;
+               return 0;
+       }
+
+       /* process the packets */
+       n = 0;
+       rc = rte_security_process_cpu_crypto_bulk(ctx, rss, buf, iv, aad,
+                       digest, status, k);
+       /* move failed process packets to dr */
+       if (rc < 0)
+               for (i = 0; i < k; i++) {
+                       if (status[i])
+                               dr[n++] = i;
+               }
+
+       if (n)
+               move_bad_mbufs(mb, dr, k, n);
+
+       return k - n;
+}
+
+static uint16_t
+esp_outb_trs_cpu_crypto_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num)
+{
+       uint64_t sqn;
+       rte_be64_t sqc;
+       struct rte_ipsec_sa *sa;
+       struct rte_security_ctx *ctx;
+       struct rte_security_session *rss;
+       void *icv_va;
+       uint32_t dr[num];
+       uint32_t i, n;
+       uint32_t l2, l3;
+       int32_t rc;
+
+       /* cpu crypto specific variables */
+       struct rte_security_vec buf[num];
+       struct iovec vec[RTE_LIBRTE_IP_FRAG_MAX_FRAG * num];
+       uint32_t vec_idx = 0;
+       uint64_t iv_buf[num][IPSEC_MAX_IV_QWORD];
+       void *iv[num];
+       int status[num];
+       uint8_t aad_buf[num][sizeof(struct aead_gcm_aad)];
+       void *aad[num];
+       void *digest[num];
+       uint32_t k;
+
+       sa = ss->sa;
+       ctx = ss->security.ctx;
+       rss = ss->security.ses;
+
+       k = 0;
+       n = num;
+       sqn = esn_outb_update_sqn(sa, &n);
+       if (n != num)
+               rte_errno = EOVERFLOW;
+
+       for (i = 0; i != n; i++) {
+               l2 = mb[i]->l2_len;
+               l3 = mb[i]->l3_len;
+
+               sqc = rte_cpu_to_be_64(sqn + i);
+               gen_iv(iv_buf[k], sqc);
+
+               /* try to update the packet itself */
+               rc = outb_trs_pkt_prepare(sa, sqc, iv_buf[k], mb[i], l2, l3,
+                               set_icv_va, (void *)&icv_va, sa->sqh_len);
+
+               /* success, setup crypto op */
+               if (rc >= 0) {
+                       iv[k] = (void *)iv_buf[k];
+                       aad[k] = (void *)aad_buf[k];
+                       digest[k] = (void *)icv_va;
+
+                       outb_pkt_xprepare(sa, sqc, icv_va, aad[k]);
+
+                       rc = outb_cpu_crypto_proc_prepare(mb[i], sa,
+                                       l2 + l3, rc, &buf[k], &vec[vec_idx],
+                                       iv[k]);
+                       if (rc < 0) {
+                               dr[i - k] = i;
+                               rte_errno = -rc;
+                               continue;
+                       }
+
+                       vec_idx += rc;
+                       k++;
+               /* failure, put packet into the death-row */
+               } else {
+                       dr[i - k] = i;
+                       rte_errno = -rc;
+               }
+       }
+
+       /* copy not prepared mbufs beyond good ones */
+       if (k != n && k != 0)
+               move_bad_mbufs(mb, dr, n, n - k);
+
+       /* process the packets */
+       n = 0;
+       rc = rte_security_process_cpu_crypto_bulk(ctx, rss, buf, iv, aad,
+                       digest, status, k);
+       /* move failed process packets to dr */
+       if (rc < 0)
+               for (i = 0; i < k; i++) {
+                       if (status[i])
+                               dr[n++] = i;
+               }
+
+       if (n)
+               move_bad_mbufs(mb, dr, k, n);
+
+       return k - n;
+}
+
+uint16_t
+esp_outb_tun_cpu_crypto_sqh_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num)
+{
+       struct rte_ipsec_sa *sa = ss->sa;
+       uint32_t icv_len;
+       void *icv;
+       uint16_t n;
+       uint16_t i;
+
+       n = esp_outb_tun_cpu_crypto_process(ss, mb, num);
+
+       icv_len = sa->icv_len;
+
+       for (i = 0; i < n; i++) {
+               struct rte_mbuf *ml = rte_pktmbuf_lastseg(mb[i]);
+
+               mb[i]->pkt_len -= sa->sqh_len;
+               ml->data_len -= sa->sqh_len;
+
+               icv = rte_pktmbuf_mtod_offset(ml, void *,
+                               ml->data_len - icv_len);
+               remove_sqh(icv, icv_len);
+       }
+
+       return n;
+}
+
+uint16_t
+esp_outb_tun_cpu_crypto_flag_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num)
+{
+       return esp_outb_tun_cpu_crypto_process(ss, mb, num);
+}
+
+uint16_t
+esp_outb_trs_cpu_crypto_sqh_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num)
+{
+       struct rte_ipsec_sa *sa = ss->sa;
+       uint32_t icv_len;
+       void *icv;
+       uint16_t n;
+       uint16_t i;
+
+       n = esp_outb_trs_cpu_crypto_process(ss, mb, num);
+       icv_len = sa->icv_len;
+
+       for (i = 0; i < n; i++) {
+               struct rte_mbuf *ml = rte_pktmbuf_lastseg(mb[i]);
+
+               mb[i]->pkt_len -= sa->sqh_len;
+               ml->data_len -= sa->sqh_len;
+
+               icv = rte_pktmbuf_mtod_offset(ml, void *,
+                               ml->data_len - icv_len);
+               remove_sqh(icv, icv_len);
+       }
+
+       return n;
+}
+
+uint16_t
+esp_outb_trs_cpu_crypto_flag_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num)
+{
+       return esp_outb_trs_cpu_crypto_process(ss, mb, num);
+}
+
 /*
  * process outbound packets for SA with ESN support,
  * for algorithms that require SQN.hibits to be implictly included
@@ -410,8 +740,8 @@ esp_outb_trs_prepare(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
  * In that case we have to move ICV bytes back to their proper place.
  */
 uint16_t
-esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
-       uint16_t num)
+esp_outb_sqh_process(const struct rte_ipsec_session *ss,
+       struct rte_mbuf *mb[], uint16_t num)
 {
        uint32_t i, k, icv_len, *icv;
        struct rte_mbuf *ml;
@@ -498,7 +828,8 @@ inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
                gen_iv(iv, sqc);
 
                /* try to update the packet itself */
-               rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], &icv, 0);
+               rc = outb_tun_pkt_prepare(sa, sqc, iv, mb[i], set_icv_va_pa,
+                               (void *)&icv, 0);
 
                k += (rc >= 0);
 
@@ -552,7 +883,7 @@ inline_outb_trs_pkt_process(const struct rte_ipsec_session *ss,
 
                /* try to update the packet itself */
                rc = outb_trs_pkt_prepare(sa, sqc, iv, mb[i],
-                               l2, l3, &icv, 0);
+                               l2, l3, set_icv_va_pa, (void *)&icv, 0);
 
                k += (rc >= 0);
 
diff --git a/lib/librte_ipsec/sa.c b/lib/librte_ipsec/sa.c
index 23d394b46..b8d55a1c7 100644
--- a/lib/librte_ipsec/sa.c
+++ b/lib/librte_ipsec/sa.c
@@ -544,9 +544,9 @@ lksd_proto_prepare(const struct rte_ipsec_session *ss,
  * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
  * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
  */
-static uint16_t
-pkt_flag_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
-       uint16_t num)
+uint16_t
+esp_outb_pkt_flag_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num)
 {
        uint32_t i, k;
        uint32_t dr[num];
@@ -599,12 +599,48 @@ lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
        case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
                pf->prepare = esp_outb_tun_prepare;
                pf->process = (sa->sqh_len != 0) ?
-                       esp_outb_sqh_process : pkt_flag_process;
+                       esp_outb_sqh_process : esp_outb_pkt_flag_process;
                break;
        case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
                pf->prepare = esp_outb_trs_prepare;
                pf->process = (sa->sqh_len != 0) ?
-                       esp_outb_sqh_process : pkt_flag_process;
+                       esp_outb_sqh_process : esp_outb_pkt_flag_process;
+               break;
+       default:
+               rc = -ENOTSUP;
+       }
+
+       return rc;
+}
+
+static int
+cpu_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
+               struct rte_ipsec_sa_pkt_func *pf)
+{
+       int32_t rc;
+
+       static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
+                       RTE_IPSEC_SATP_MODE_MASK;
+
+       rc = 0;
+       switch (sa->type & msk) {
+       case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
+       case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
+               pf->process = esp_inb_tun_cpu_crypto_pkt_process;
+               break;
+       case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
+               pf->process = esp_inb_trs_cpu_crypto_pkt_process;
+               break;
+       case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
+       case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
+               pf->process = (sa->sqh_len != 0) ?
+                       esp_outb_tun_cpu_crypto_sqh_process :
+                       esp_outb_tun_cpu_crypto_flag_process;
+               break;
+       case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
+               pf->process = (sa->sqh_len != 0) ?
+                       esp_outb_trs_cpu_crypto_sqh_process :
+                       esp_outb_trs_cpu_crypto_flag_process;
                break;
        default:
                rc = -ENOTSUP;
@@ -672,13 +708,16 @@ ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
        case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
                if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
                                RTE_IPSEC_SATP_DIR_IB)
-                       pf->process = pkt_flag_process;
+                       pf->process = esp_outb_pkt_flag_process;
                else
                        pf->process = inline_proto_outb_pkt_process;
                break;
        case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
                pf->prepare = lksd_proto_prepare;
-               pf->process = pkt_flag_process;
+               pf->process = esp_outb_pkt_flag_process;
+               break;
+       case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
+               rc = cpu_crypto_pkt_func_select(sa, pf);
                break;
        default:
                rc = -ENOTSUP;
diff --git a/lib/librte_ipsec/sa.h b/lib/librte_ipsec/sa.h
index 51e69ad05..770d36b8b 100644
--- a/lib/librte_ipsec/sa.h
+++ b/lib/librte_ipsec/sa.h
@@ -156,6 +156,14 @@ uint16_t
 inline_inb_trs_pkt_process(const struct rte_ipsec_session *ss,
        struct rte_mbuf *mb[], uint16_t num);
 
+uint16_t
+esp_inb_tun_cpu_crypto_pkt_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_inb_trs_cpu_crypto_pkt_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num);
+
 /* outbound processing */
 
 uint16_t
@@ -170,6 +178,10 @@ uint16_t
 esp_outb_sqh_process(const struct rte_ipsec_session *ss, struct rte_mbuf *mb[],
        uint16_t num);
 
+uint16_t
+esp_outb_pkt_flag_process(const struct rte_ipsec_session *ss,
+       struct rte_mbuf *mb[], uint16_t num);
+
 uint16_t
 inline_outb_tun_pkt_process(const struct rte_ipsec_session *ss,
        struct rte_mbuf *mb[], uint16_t num);
@@ -182,4 +194,21 @@ uint16_t
 inline_proto_outb_pkt_process(const struct rte_ipsec_session *ss,
        struct rte_mbuf *mb[], uint16_t num);
 
+uint16_t
+esp_outb_tun_cpu_crypto_sqh_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_tun_cpu_crypto_flag_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_trs_cpu_crypto_sqh_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num);
+
+uint16_t
+esp_outb_trs_cpu_crypto_flag_process(const struct rte_ipsec_session *ss,
+               struct rte_mbuf *mb[], uint16_t num);
+
 #endif /* _SA_H_ */
diff --git a/lib/librte_ipsec/ses.c b/lib/librte_ipsec/ses.c
index 82c765a33..eaa8c17b7 100644
--- a/lib/librte_ipsec/ses.c
+++ b/lib/librte_ipsec/ses.c
@@ -19,7 +19,9 @@ session_check(struct rte_ipsec_session *ss)
                        return -EINVAL;
                if ((ss->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
                                ss->type ==
-                               RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) &&
+                               RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+                               ss->type ==
+                               RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) &&
                                ss->security.ctx == NULL)
                        return -EINVAL;
        }
-- 
2.14.5
