From: Aviad Yehezkel <avia...@mellanox.com>

data-path:
1. esp_outbound builds the IPsec headers, fills the esp_next_proto
   metadata and marks the packet for offload (see the sketch after
   this list). Note that the trailer is added by hardware.
2. esp_outbound_post does nothing for offloaded packets.
3. esp_inbound does nothing for offloaded packets.
4. esp_inbound_post removes the IPsec headers; if decryption
   failed, an error is printed.
5. ipsec_enqueue skips the cryptodev for offloaded packets.
6. ipsec_dequeue builds the packet array depending on whether each
   packet was offloaded or not.
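
For offloaded SAs the outbound fast path reduces to roughly the
following (a minimal sketch based on the esp_outbound hunk below;
inner_esp_next_proto and PKT_TX_IPSEC_CRYPTO_HW_TRAILER are the mbuf
extensions this series assumes):

	if (OFFLOADED_SA(sa)) {
		/* Hardware appends the padding, pad length, next
		 * header and ICV, so software only builds the outer
		 * IP header, the ESP header and the IV.
		 */
		m->inner_esp_next_proto = nlp;
		m->ol_flags |= PKT_TX_IPSEC_CRYPTO_HW_TRAILER;
		return 0;
	}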

control-path:
1. parse_sa marks the SA as using inline crypto when the
   "inline_port" option is present (see the example rule after
   this list).
2. sa_add_rules constructs the rte_flow_item specification and the
   crypto rte_flow_action.
3. create_session calls rte_flow_create with the SA parameters from
   step 2.
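
For example, an outbound SA is bound to port 1 by appending the new
token to its rule in the SA configuration file (a hypothetical rule;
the surrounding tokens follow the existing ep0/ep1 grammar and the
key bytes are placeholders):

	sa out 1005 cipher_algo aes-128-gcm \
	cipher_key de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef:de:ad:be:ef \
	auth_algo aes-128-gcm mode ipv4-tunnel \
	src 172.16.1.5 dst 172.16.2.5 inline_port 1

create_session then dispatches on the parsed port: SAs with a valid
inline_port are installed via rte_flow_create, while all others keep
the existing cryptodev session path.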

Signed-off-by: Aviad Yehezkel <avia...@mellanox.com>
Signed-off-by: Boris Pismenny <bor...@mellanox.com>
---
 examples/ipsec-secgw/esp.c   |  68 ++++++++++++++++-----
 examples/ipsec-secgw/esp.h   |  13 +---
 examples/ipsec-secgw/ipsec.c | 142 +++++++++++++++++++++++++++++++++++++------
 examples/ipsec-secgw/ipsec.h |  30 +++++++++
 examples/ipsec-secgw/sa.c    | 120 ++++++++++++++++++++++++++++++++----
 5 files changed, 317 insertions(+), 56 deletions(-)

diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index e77afa0..c6b48d4 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -60,8 +60,11 @@
 
        RTE_ASSERT(m != NULL);
        RTE_ASSERT(sa != NULL);
-       RTE_ASSERT(cop != NULL);
 
+       if (OFFLOADED_SA(sa))
+               return 0;
+
+       RTE_ASSERT(cop != NULL);
        ip4 = rte_pktmbuf_mtod(m, struct ip *);
        if (likely(ip4->ip_v == IPVERSION))
                ip_hdr_len = ip4->ip_hl * 4;
@@ -159,12 +162,22 @@
        uint8_t *nexthdr, *pad_len;
        uint8_t *padding;
        uint16_t i;
+       uint8_t decrypt_fail;
 
        RTE_ASSERT(m != NULL);
        RTE_ASSERT(sa != NULL);
-       RTE_ASSERT(cop != NULL);
 
-       if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+       if (OFFLOADED_SA(sa)) {
+               if (m->ol_flags & PKT_RX_IPSEC_CRYPTO)
+                       decrypt_fail = !!(m->ol_flags & PKT_RX_IPSEC_CRYPTO_FAILED);
+               else
+                       decrypt_fail = 1;
+       } else {
+               RTE_ASSERT(cop != NULL);
+               decrypt_fail = (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS);
+       }
+
+       if (decrypt_fail) {
                RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
                return -1;
        }
@@ -222,7 +235,6 @@
 
        RTE_ASSERT(m != NULL);
        RTE_ASSERT(sa != NULL);
-       RTE_ASSERT(cop != NULL);
 
        ip_hdr_len = 0;
 
@@ -250,7 +262,6 @@
        /* Padded payload length */
        pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
                        ip_hdr_len + 2, sa->block_size);
-       pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
 
        RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
                        sa->flags == TRANSPORT);
@@ -272,12 +283,18 @@
                return -EINVAL;
        }
 
-       padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
-       if (unlikely(padding == NULL)) {
-               RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
-               return -ENOSPC;
+       if (!OFFLOADED_SA(sa)) {
+               pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);
+
+               padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
+                                                       sa->digest_len);
+               if (unlikely(padding == NULL)) {
+                       RTE_LOG(ERR, IPSEC_ESP,
+                                       "not enough mbuf trailing space\n");
+                       return -ENOSPC;
+               }
+               rte_prefetch0(padding);
        }
-       rte_prefetch0(padding);
 
        switch (sa->flags) {
        case IP4_TUNNEL:
@@ -311,20 +328,39 @@
        esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
 
        uint64_t *iv = (uint64_t *)(esp + 1);
+       switch (sa->cipher_algo) {
+       case RTE_CRYPTO_CIPHER_NULL:
+       case RTE_CRYPTO_CIPHER_AES_CBC:
+               memset(iv, 0, sa->iv_len);
+               break;
+       case RTE_CRYPTO_CIPHER_AES_CTR:
+       case RTE_CRYPTO_CIPHER_AES_GCM:
+               *iv = sa->seq;
+               break;
+       default:
+               RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
+                               sa->cipher_algo);
+               return -EINVAL;
+       }
 
+       if (OFFLOADED_SA(sa)) {
+               m->inner_esp_next_proto = nlp;
+               m->ol_flags |= PKT_TX_IPSEC_CRYPTO_HW_TRAILER;
+               goto done;
+       }
+
+       RTE_ASSERT(cop != NULL);
        sym_cop = get_sym_cop(cop);
        sym_cop->m_src = m;
        switch (sa->cipher_algo) {
        case RTE_CRYPTO_CIPHER_NULL:
        case RTE_CRYPTO_CIPHER_AES_CBC:
-               memset(iv, 0, sa->iv_len);
                sym_cop->cipher.data.offset = ip_hdr_len +
                        sizeof(struct esp_hdr);
                sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
        case RTE_CRYPTO_CIPHER_AES_GCM:
-               *iv = sa->seq;
                sym_cop->cipher.data.offset = ip_hdr_len +
                        sizeof(struct esp_hdr) + sa->iv_len;
                sym_cop->cipher.data.length = pad_payload_len;
@@ -380,18 +416,22 @@
                        rte_pktmbuf_pkt_len(m) - sa->digest_len);
        sym_cop->auth.digest.length = sa->digest_len;
 
+done:
        return 0;
 }
 
 int
 esp_outbound_post(struct rte_mbuf *m __rte_unused,
-               struct ipsec_sa *sa __rte_unused,
+               struct ipsec_sa *sa,
                struct rte_crypto_op *cop)
 {
        RTE_ASSERT(m != NULL);
        RTE_ASSERT(sa != NULL);
-       RTE_ASSERT(cop != NULL);
 
+       if (OFFLOADED_SA(sa))
+               return 0;
+
+       RTE_ASSERT(cop != NULL);
        if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
                RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
                return -1;
diff --git a/examples/ipsec-secgw/esp.h b/examples/ipsec-secgw/esp.h
index fa5cc8a..e93b2aa 100644
--- a/examples/ipsec-secgw/esp.h
+++ b/examples/ipsec-secgw/esp.h
@@ -33,18 +33,9 @@
 #ifndef __RTE_IPSEC_XFORM_ESP_H__
 #define __RTE_IPSEC_XFORM_ESP_H__
 
-struct mbuf;
+#include <rte_esp.h>
 
-/* RFC4303 */
-struct esp_hdr {
-       uint32_t spi;
-       uint32_t seq;
-       /* Payload */
-       /* Padding */
-       /* Pad Length */
-       /* Next Header */
-       /* Integrity Check Value - ICV */
-};
+struct mbuf;
 
 int
 esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index edca5f0..8728f16 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -40,12 +40,32 @@
 #include <rte_cryptodev.h>
 #include <rte_mbuf.h>
 #include <rte_hash.h>
+#include <rte_flow.h>
 
 #include "ipsec.h"
 #include "esp.h"
 
+static inline int
+create_session_inline(struct ipsec_ctx *ipsec_ctx __rte_unused,
+                     struct ipsec_sa *sa)
+{
+       struct rte_flow_error err;
+
+       sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
+                       sa->action, &err);
+       if (sa->flow == NULL) {
+               RTE_LOG(ERR, IPSEC, "Failed to create ipsec flow message: %s\n",
+                               err.message);
+               return -1;
+       }
+
+       return 0;
+}
+
 static inline int
-create_session(struct ipsec_ctx *ipsec_ctx __rte_unused, struct ipsec_sa *sa)
+create_session_cryptodev(struct ipsec_ctx *ipsec_ctx __rte_unused,
+                        struct ipsec_sa *sa)
 {
        struct rte_cryptodev_info cdev_info;
        unsigned long cdev_id_qp = 0;
@@ -91,6 +111,15 @@
        return 0;
 }
 
+int
+create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
+{
+       if (sa->portid != -1)
+               return create_session_inline(ipsec_ctx, sa);
+       else
+               return create_session_cryptodev(ipsec_ctx, sa);
+}
+
 static inline void
 enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
 {
@@ -117,7 +146,8 @@
 static inline void
 ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
                struct rte_mbuf *pkts[], struct ipsec_sa *sas[],
-               uint16_t nb_pkts)
+               uint16_t nb_pkts,
+               uint8_t inflight_pkt_types[], uint16_t *nb_offloaded)
 {
        int32_t ret = 0, i;
        struct ipsec_mbuf_metadata *priv;
@@ -142,37 +172,89 @@
                rte_prefetch0(&priv->sym_cop);
                priv->cop.sym = &priv->sym_cop;
 
-               if ((unlikely(sa->crypto_session == NULL)) &&
+               if ((unlikely(sa->crypto_session == NULL && sa->flow == NULL)) &&
                                create_session(ipsec_ctx, sa)) {
                        rte_pktmbuf_free(pkts[i]);
                        continue;
                }
 
-               rte_crypto_op_attach_sym_session(&priv->cop,
-                               sa->crypto_session);
-
                ret = xform_func(pkts[i], sa, &priv->cop);
                if (unlikely(ret)) {
                        rte_pktmbuf_free(pkts[i]);
                        continue;
                }
 
-               RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
-               enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
+               if (!OFFLOADED_SA(sa)) {
+                       inflight_pkt_types[i] = IPSEC_INFLIGHT_PKT_CRYPTODEV;
+                       rte_crypto_op_attach_sym_session(&priv->cop,
+                                                        sa->crypto_session);
+
+                       RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
+                       enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp],
+                                   &priv->cop);
+               } else {
+                       inflight_pkt_types[i] = IPSEC_INFLIGHT_PKT_OFFLOADED;
+                       (*nb_offloaded)++;
+               }
+       }
+}
+
+static int32_t next_offloaded_pkt_idx(uint8_t inflight_pkt_types[],
+               int32_t curr_idx, uint16_t max_pkts)
+{
+       int32_t i = 0;
+
+       for (i = curr_idx; i < max_pkts; ++i) {
+               if (inflight_pkt_types[i] == IPSEC_INFLIGHT_PKT_OFFLOADED)
+                       break;
        }
+       return i;
+}
+
+static int32_t next_cryptodev_pkt_idx(uint8_t inflight_pkt_types[],
+               int32_t curr_idx, uint16_t max_pkts)
+{
+       int32_t i = 0;
+
+       for (i = curr_idx; i < max_pkts; ++i) {
+               if (inflight_pkt_types[i] == IPSEC_INFLIGHT_PKT_CRYPTODEV)
+                       break;
+       }
+       return i;
 }
 
 static inline int
 ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
-               struct rte_mbuf *pkts[], uint16_t max_pkts)
+               struct rte_mbuf *pkts[], uint16_t max_pkts,
+               uint8_t inflight_pkt_types[], uint16_t nb_offloaded)
 {
-       int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
+       int32_t nb_pkts = 0, ret = 0, i, j, idx, nb_cops;
        struct ipsec_mbuf_metadata *priv;
        struct rte_crypto_op *cops[max_pkts];
        struct ipsec_sa *sa;
        struct rte_mbuf *pkt;
 
-       for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
+       /* all offloaded pkts are in place already */
+       for (i = 0, idx = 0; i < nb_offloaded; ++i) {
+               idx = next_offloaded_pkt_idx(inflight_pkt_types, idx, max_pkts);
+
+               pkt = pkts[idx];
+               rte_prefetch0(pkt);
+               priv = get_priv(pkt);
+               sa = priv->sa;
+
+               RTE_ASSERT(sa != NULL);
+
+               ret = xform_func(pkt, sa, NULL);
+               if (unlikely(ret)) {
+                       rte_pktmbuf_free(pkt);
+                       pkt = NULL;
+               }
+               pkts[idx++] = pkt;
+       }
+       nb_pkts += nb_offloaded;
+
+       for (i = 0, idx = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
                struct cdev_qp *cqp;
 
                cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
@@ -197,13 +279,29 @@
                        RTE_ASSERT(sa != NULL);
 
                        ret = xform_func(pkt, sa, cops[j]);
-                       if (unlikely(ret))
+                       if (unlikely(ret)) {
                                rte_pktmbuf_free(pkt);
-                       else
-                               pkts[nb_pkts++] = pkt;
+                               pkt = NULL;
+                       }
+
+                       idx = next_cryptodev_pkt_idx(inflight_pkt_types, idx,
+                                                    max_pkts);
+                       pkts[idx++] = pkt;
+                       nb_pkts++;
                }
        }
 
+       for (i = 0; i < max_pkts; ++i)
+               if (!pkts[i])
+                       goto err;
+
+       goto done;
+err:
+       for (; i < max_pkts; ++i) {
+               rte_pktmbuf_free(pkts[i]);
+               --nb_pkts;
+       }
+done:
        /* return packets */
        return nb_pkts;
 }
@@ -213,12 +311,16 @@
                uint16_t nb_pkts, uint16_t len)
 {
        struct ipsec_sa *sas[nb_pkts];
+       uint8_t inflight_pkt_types[nb_pkts];
+       uint16_t nb_offloaded;
 
        inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);
 
-       ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);
+       ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts,
+                       inflight_pkt_types, &nb_offloaded);
 
-       return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
+       return ipsec_dequeue(esp_inbound_post, ctx, pkts, len,
+                       inflight_pkt_types, nb_offloaded);
 }
 
 uint16_t
@@ -226,10 +328,14 @@
                uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
 {
        struct ipsec_sa *sas[nb_pkts];
+       uint8_t inflight_pkt_types[nb_pkts];
+       uint16_t nb_offloaded;
 
        outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);
 
-       ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);
+       ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts,
+                       inflight_pkt_types, &nb_offloaded);
 
-       return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
+       return ipsec_dequeue(esp_outbound_post, ctx, pkts, len,
+                       inflight_pkt_types, nb_offloaded);
 }
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index fe42661..fb2413a 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -38,6 +38,7 @@
 
 #include <rte_byteorder.h>
 #include <rte_crypto.h>
+#include <rte_flow.h>
 
 #define RTE_LOGTYPE_IPSEC       RTE_LOGTYPE_USER1
 #define RTE_LOGTYPE_IPSEC_ESP   RTE_LOGTYPE_USER2
@@ -97,6 +98,22 @@ struct ipsec_sa {
        uint32_t cdev_id_qp;
        uint64_t seq;
        uint32_t salt;
+       int32_t portid;
+       struct rte_flow_attr attr;
+#define MAX_RTE_FLOW_PATTERN (4)
+       /* ETH + IP + ESP + END */
+       union {
+               struct rte_flow_item_ipv4 ipv4;
+               struct rte_flow_item_ipv6 ipv6;
+       } ip_spec;
+       struct rte_flow_item_esp esp_spec;
+       struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN];
+#define MAX_RTE_FLOW_ACTIONS (2)
+       /* IPsec + END */
+       struct rte_flow_action_crypto crypto_action;
+       struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS];
+       struct rte_flow *flow;
+#define OFFLOADED_SA(sa)     ((sa)->flow)
        struct rte_cryptodev_sym_session *crypto_session;
        enum rte_crypto_cipher_algorithm cipher_algo;
        enum rte_crypto_auth_algorithm auth_algo;
@@ -117,6 +134,9 @@ struct ipsec_sa {
        struct rte_crypto_sym_xform *xforms;
 } __rte_cache_aligned;
 
+#define IPSEC_INFLIGHT_PKT_OFFLOADED (1 << 0)
+#define IPSEC_INFLIGHT_PKT_CRYPTODEV (1 << 1)
+
 struct ipsec_mbuf_metadata {
        uint8_t buf[32];
        struct ipsec_sa *sa;
@@ -132,6 +152,14 @@ struct cdev_qp {
        struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
 };
 
+struct sa_ctx {
+       struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
+       struct {
+               struct rte_crypto_sym_xform a;
+               struct rte_crypto_sym_xform b;
+       } xf[IPSEC_SA_MAX_ENTRIES];
+};
+
 struct ipsec_ctx {
        struct rte_hash *cdev_map;
        struct sp_ctx *sp4_ctx;
@@ -231,4 +259,6 @@ struct cnt_blk {
 void
 rt_init(struct socket_ctx *ctx, int32_t socket_id);
 
+int
+create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa);
 #endif /* __IPSEC_H__ */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 39624c4..bb17cd5 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -46,6 +46,8 @@
 #include <rte_errno.h>
 #include <rte_ip.h>
 #include <rte_random.h>
+#include <rte_flow.h>
+#include <rte_ethdev.h>
 
 #include "ipsec.h"
 #include "esp.h"
@@ -213,6 +215,7 @@ struct supported_auth_algo {
        uint32_t src_p = 0;
        uint32_t dst_p = 0;
        uint32_t mode_p = 0;
+       uint32_t portid_p = 0;
 
        if (strcmp(tokens[0], "in") == 0) {
                ri = &nb_sa_in;
@@ -407,6 +410,8 @@ struct supported_auth_algo {
                                        return;
                                rule->src.ip.ip4 = rte_bswap32(
                                        (uint32_t)ip.s_addr);
+                               rule->src.ip.ip4 = rte_cpu_to_be_32(
+                                               rule->src.ip.ip4);
                        } else if (rule->flags == IP6_TUNNEL) {
                                struct in6_addr ip;
 
@@ -450,6 +455,8 @@ struct supported_auth_algo {
                                        return;
                                rule->dst.ip.ip4 = rte_bswap32(
                                        (uint32_t)ip.s_addr);
+                               rule->dst.ip.ip4 = rte_cpu_to_be_32(
+                                               rule->dst.ip.ip4);
                        } else if (rule->flags == IP6_TUNNEL) {
                                struct in6_addr ip;
 
@@ -471,6 +478,23 @@ struct supported_auth_algo {
                        continue;
                }
 
+               if (strcmp(tokens[ti], "inline_port") == 0) {
+                       APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
+                       if (status->status < 0)
+                               return;
+
+                       INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+                       if (status->status < 0)
+                               return;
+
+                       rule->portid = atoi(tokens[ti]);
+                       APP_CHECK(rule->portid >= 0, status,
+                               "invalid port id \"%s\"", tokens[ti]);
+                       if (status->status < 0)
+                               return;
+                       portid_p = 1;
+                       continue;
+               }
+
                /* unrecognizeable input */
                APP_CHECK(0, status, "unrecognized input \"%s\"",
                        tokens[ti]);
@@ -489,6 +513,10 @@ struct supported_auth_algo {
        if (status->status < 0)
                return;
 
+       /* This SA is not offloaded */
+       if (!portid_p)
+               rule->portid = -1;
+
        *ri = *ri + 1;
 }
 
@@ -547,14 +575,6 @@ struct supported_auth_algo {
        printf("\n");
 }
 
-struct sa_ctx {
-       struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
-       struct {
-               struct rte_crypto_sym_xform a;
-               struct rte_crypto_sym_xform b;
-       } xf[IPSEC_SA_MAX_ENTRIES];
-};
-
 static struct sa_ctx *
 sa_create(const char *name, int32_t socket_id)
 {
@@ -588,11 +608,13 @@ struct sa_ctx {
                uint32_t nb_entries, uint32_t inbound)
 {
        struct ipsec_sa *sa;
-       uint32_t i, idx;
+       uint32_t i, idx, j;
+       struct rte_eth_dev_info dev_info;
 
        for (i = 0; i < nb_entries; i++) {
                idx = SPI2IDX(entries[i].spi);
                sa = &sa_ctx->sa[idx];
+               j = 0;
                if (sa->spi != 0) {
                        printf("Index %u already in use by SPI %u\n",
                                        idx, sa->spi);
@@ -601,12 +623,75 @@ struct sa_ctx {
                *sa = entries[i];
                sa->seq = 0;
 
+               if (sa->portid == -1)
+                       goto not_offloaded;
+
+               rte_eth_dev_info_get(sa->portid, &dev_info);
+
+               if (inbound) {
+                       if ((dev_info.rx_offload_capa &
+                            DEV_RX_OFFLOAD_IPSEC_CRYPTO) == 0) {
+                               RTE_LOG(WARNING, PORT,
+                                       "hardware RX IPSec offload is not 
supported\n");
+                               return -EINVAL;
+                       }
+               } else { /* outbound */
+                       if ((dev_info.tx_offload_capa &
+                            DEV_TX_OFFLOAD_IPSEC_CRYPTO_NEED_METADATA) == 0)
+                               goto inline_with_metadata;
+                       if ((dev_info.tx_offload_capa &
+                            DEV_TX_OFFLOAD_IPSEC_CRYPTO_HW_TRAILER) == 0) {
+                               RTE_LOG(WARNING, PORT,
+                                       "hardware TX IPSec offload is not 
supported\n");
+                               return -EINVAL;
+                       }
+               }
+
+               sa->pattern[j++].type = RTE_FLOW_ITEM_TYPE_ETH;
                switch (sa->flags) {
                case IP4_TUNNEL:
-                       sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
-                       sa->dst.ip.ip4 = rte_cpu_to_be_32(sa->dst.ip.ip4);
+                       sa->pattern[j].type = RTE_FLOW_ITEM_TYPE_IPV4;
+                       sa->pattern[j].spec = &sa->ip_spec.ipv4;
+                       sa->pattern[j++].mask = &rte_flow_item_ipv4_mask;
+                       sa->ip_spec.ipv4.hdr.src_addr = sa->src.ip.ip4 =
+                               rte_cpu_to_be_32(sa->src.ip.ip4);
+                       sa->ip_spec.ipv4.hdr.dst_addr = sa->dst.ip.ip4 =
+                               rte_cpu_to_be_32(sa->dst.ip.ip4);
+                       break;
+               case IP6_TUNNEL:
+                       sa->pattern[j].type = RTE_FLOW_ITEM_TYPE_IPV6;
+                       sa->pattern[j].spec = &sa->ip_spec.ipv6;
+                       sa->pattern[j++].mask = &rte_flow_item_ipv6_mask;
+                       memcpy(sa->ip_spec.ipv6.hdr.src_addr,
+                                       sa->src.ip.ip6.ip6_b, 16);
+                       memcpy(sa->ip_spec.ipv6.hdr.dst_addr,
+                                       sa->dst.ip.ip6.ip6_b, 16);
+                       break;
+               case TRANSPORT:
+                       rte_exit(EXIT_FAILURE,
+                                "Error creating offload SA with TRANSPORT, 
currently not supported\n");
                }
-
+               sa->pattern[j].type = RTE_FLOW_ITEM_TYPE_ESP;
+               sa->pattern[j].spec = &sa->esp_spec;
+               sa->pattern[j++].mask = &rte_flow_item_esp_mask;
+               sa->esp_spec.hdr.spi = entries[i].spi;
+
+               sa->pattern[j++].type = RTE_FLOW_ITEM_TYPE_END;
+
+               memset(&sa->attr, 0, sizeof(struct rte_flow_attr));
+               j = 0;
+               sa->action[j].type = RTE_FLOW_ACTION_TYPE_CRYPTO;
+               sa->action[j++].conf = &sa->crypto_action;
+               sa->crypto_action.xform.type = RTE_CRYPTO_SYM_XFORM_IPSEC;
+               sa->crypto_action.xform.ipsec.algo = RTE_CRYPTO_CIPHER_AES_GCM;
+               sa->crypto_action.xform.ipsec.key.data = sa->cipher_key;
+               sa->crypto_action.xform.ipsec.key.length = sa->cipher_key_len;
+               sa->crypto_action.xform.ipsec.salt = sa->salt;
+
+               sa->action[j].type = RTE_FLOW_ACTION_TYPE_END;
+inline_with_metadata:
+               /* Implement TX ipsec inline crypto offload with metadata here! */
+not_offloaded:
                if (inbound) {
                        sa_ctx->xf[idx].b.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                        sa_ctx->xf[idx].b.cipher.algo = sa->cipher_algo;
@@ -628,7 +713,11 @@ struct sa_ctx {
                                sa->digest_len;
                        sa_ctx->xf[idx].a.auth.op =
                                RTE_CRYPTO_AUTH_OP_VERIFY;
-
+                       if (sa->portid != -1) {
+                               sa->attr.ingress = 1;
+                               sa->crypto_action.xform.ipsec.op =
+                                               RTE_CRYPTO_CIPHER_OP_DECRYPT;
+                       }
                } else { /* outbound */
                        sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                        sa_ctx->xf[idx].a.cipher.algo = sa->cipher_algo;
@@ -650,6 +739,11 @@ struct sa_ctx {
                                sa->digest_len;
                        sa_ctx->xf[idx].b.auth.op =
                                RTE_CRYPTO_AUTH_OP_GENERATE;
+                       if (sa->portid != -1) {
+                               sa->attr.egress = 1;
+                               sa->crypto_action.xform.ipsec.op =
+                                               RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+                       }
                }
 
                sa_ctx->xf[idx].a.next = &sa_ctx->xf[idx].b;
-- 
1.8.3.1
