> -----Original Message-----
> From: Nicolau, Radu <radu.nico...@intel.com>
> Sent: Tuesday, October 26, 2021 9:57 PM
> To: Wu, Jingjing <jingjing...@intel.com>; Xing, Beilei 
> <beilei.x...@intel.com>;
> Ray Kinsella <m...@ashroe.eu>
> Cc: dev@dpdk.org; Doherty, Declan <declan.dohe...@intel.com>; Sinha,
> Abhijit <abhijit.si...@intel.com>; Zhang, Qi Z <qi.z.zh...@intel.com>;
> Richardson, Bruce <bruce.richard...@intel.com>; Ananyev, Konstantin
> <konstantin.anan...@intel.com>; Nicolau, Radu <radu.nico...@intel.com>
> Subject: [PATCH v12 4/7] net/iavf: add iAVF IPsec inline crypto support
> 
> Add support for inline crypto for IPsec, for ESP transport and
> tunnel over IPv4 and IPv6, as well as supporting the offload for
> ESP over UDP, and in conjunction with TSO for UDP and TCP flows.
> Implement support for rte_security packet metadata.
> 
> Add definitions for the IPsec descriptors, and extend the data and
> context descriptors to support the offload.
> 
> Add support to the virtual channel mailbox for IPsec Crypto request
> operations. IPsec Crypto requests receive an initial acknowledgment
> from the physical function driver that the request was received, and
> then an asynchronous response with the success/failure of the request,
> including any response data.
> 
> Add enhanced descriptor debugging
> 
> Refactor the scalar Tx burst function to support integration of the offload.
> 
> Signed-off-by: Declan Doherty <declan.dohe...@intel.com>
> Signed-off-by: Abhijit Sinha <abhijit.si...@intel.com>
> Signed-off-by: Radu Nicolau <radu.nico...@intel.com>
> Reviewed-by: Jingjing Wu <jingjing...@intel.com>
> ---
>  drivers/net/iavf/iavf.h                       |   10 +
>  drivers/net/iavf/iavf_ethdev.c                |   41 +-
>  drivers/net/iavf/iavf_generic_flow.c          |   15 +
>  drivers/net/iavf/iavf_generic_flow.h          |    2 +
>  drivers/net/iavf/iavf_ipsec_crypto.c          | 1894 +++++++++++++++++
>  drivers/net/iavf/iavf_ipsec_crypto.h          |  160 ++
>  .../net/iavf/iavf_ipsec_crypto_capabilities.h |  383 ++++
>  drivers/net/iavf/iavf_rxtx.c                  |  202 +-
>  drivers/net/iavf/iavf_rxtx.h                  |  107 +-
>  drivers/net/iavf/iavf_vchnl.c                 |   29 +
>  drivers/net/iavf/meson.build                  |    3 +-
>  drivers/net/iavf/rte_pmd_iavf.h               |    1 +
>  drivers/net/iavf/version.map                  |    3 +
>  13 files changed, 2823 insertions(+), 27 deletions(-)
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.c
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto.h
>  create mode 100644 drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
> 
> diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
> index efc90f9072..6df31a649e 100644
> --- a/drivers/net/iavf/iavf.h
> +++ b/drivers/net/iavf/iavf.h
> @@ -221,6 +221,7 @@ struct iavf_info {
>       rte_spinlock_t flow_ops_lock;
>       struct iavf_parser_list rss_parser_list;
>       struct iavf_parser_list dist_parser_list;
> +     struct iavf_parser_list ipsec_crypto_parser_list;
> 
>       struct iavf_fdir_info fdir; /* flow director info */
>       /* indicate large VF support enabled or not */
> @@ -245,6 +246,7 @@ enum iavf_proto_xtr_type {
>       IAVF_PROTO_XTR_IPV6_FLOW,
>       IAVF_PROTO_XTR_TCP,
>       IAVF_PROTO_XTR_IP_OFFSET,
> +     IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID,
>       IAVF_PROTO_XTR_MAX,
>  };
> 
> @@ -256,11 +258,14 @@ struct iavf_devargs {
>       uint8_t proto_xtr[IAVF_MAX_QUEUE_NUM];
>  };
> 
> +struct iavf_security_ctx;
> +
>  /* Structure to store private data for each VF instance. */
>  struct iavf_adapter {
>       struct iavf_hw hw;
>       struct rte_eth_dev_data *dev_data;
>       struct iavf_info vf;
> +     struct iavf_security_ctx *security_ctx;
> 
>       bool rx_bulk_alloc_allowed;
>       /* For vector PMD */
> @@ -279,6 +284,8 @@ struct iavf_adapter {
>       (&((struct iavf_adapter *)adapter)->vf)
>  #define IAVF_DEV_PRIVATE_TO_HW(adapter) \
>       (&((struct iavf_adapter *)adapter)->hw)
> +#define IAVF_DEV_PRIVATE_TO_IAVF_SECURITY_CTX(adapter) \
> +     (((struct iavf_adapter *)adapter)->security_ctx)
> 
>  /* IAVF_VSI_TO */
>  #define IAVF_VSI_TO_HW(vsi) \
> @@ -421,5 +428,8 @@ int iavf_set_q_tc_map(struct rte_eth_dev *dev,
>                       uint16_t size);
>  void iavf_tm_conf_init(struct rte_eth_dev *dev);
>  void iavf_tm_conf_uninit(struct rte_eth_dev *dev);
> +int iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
> +             uint8_t *msg, size_t msg_len,
> +             uint8_t *resp_msg, size_t resp_msg_len);
>  extern const struct rte_tm_ops iavf_tm_ops;
>  #endif /* _IAVF_ETHDEV_H_ */
> diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
> index f892306f18..dba505494f 100644
> --- a/drivers/net/iavf/iavf_ethdev.c
> +++ b/drivers/net/iavf/iavf_ethdev.c
> @@ -30,6 +30,7 @@
>  #include "iavf_rxtx.h"
>  #include "iavf_generic_flow.h"
>  #include "rte_pmd_iavf.h"
> +#include "iavf_ipsec_crypto.h"
> 
>  /* devargs */
>  #define IAVF_PROTO_XTR_ARG         "proto_xtr"
> @@ -71,6 +72,11 @@ static struct iavf_proto_xtr_ol iavf_proto_xtr_params[] = {
>       [IAVF_PROTO_XTR_IP_OFFSET] = {
>               .param = { .name = "intel_pmd_dynflag_proto_xtr_ip_offset" },
>               .ol_flag = &rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask },
> +     [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] = {
> +             .param = {
> +             .name = "intel_pmd_dynflag_proto_xtr_ipsec_crypto_said" },
> +             .ol_flag =
> +                     &rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask },
>  };
> 
>  static int iavf_dev_configure(struct rte_eth_dev *dev);
> @@ -922,6 +928,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
>       iavf_add_del_mc_addr_list(adapter, vf->mc_addrs, vf->mc_addrs_num,
>                                 false);
> 
> +     /* free iAVF security device context and all related resources */
> +     iavf_security_ctx_destroy(adapter);
> +
>       adapter->stopped = 1;
>       dev->data->dev_started = 0;
> 
> @@ -931,7 +940,9 @@ iavf_dev_stop(struct rte_eth_dev *dev)
>  static int
>  iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>  {
> -     struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
> +     struct iavf_adapter *adapter =
> +             IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> +     struct iavf_info *vf = &adapter->vf;
> 
>       dev_info->max_rx_queues = IAVF_MAX_NUM_QUEUES_LV;
>       dev_info->max_tx_queues = IAVF_MAX_NUM_QUEUES_LV;
> @@ -973,6 +984,11 @@ iavf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
>       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_CRC)
>               dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
> 
> +     if (iavf_ipsec_crypto_supported(adapter)) {
> +             dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
> +             dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
> +     }
> +
>       dev_info->default_rxconf = (struct rte_eth_rxconf) {
>               .rx_free_thresh = IAVF_DEFAULT_RX_FREE_THRESH,
>               .rx_drop_en = 0,
> @@ -1718,6 +1734,7 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
>               { "ipv6_flow", IAVF_PROTO_XTR_IPV6_FLOW },
>               { "tcp",       IAVF_PROTO_XTR_TCP       },
>               { "ip_offset", IAVF_PROTO_XTR_IP_OFFSET },
> +             { "ipsec_crypto_said", IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID },
>       };
>       uint32_t i;
> 
> @@ -1726,8 +1743,8 @@ iavf_lookup_proto_xtr_type(const char *flex_name)
>                       return xtr_type_map[i].type;
>       }
> 
> -     PMD_DRV_LOG(ERR, "wrong proto_xtr type, "
> -                 "it should be: vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset");
> +     PMD_DRV_LOG(ERR, "wrong proto_xtr type, it should be: "
> +                     
> "vlan|ipv4|ipv6|ipv6_flow|tcp|ip_offset|ipsec_crypto_said");
> 
>       return -1;
>  }
> @@ -2375,6 +2392,24 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
>               goto flow_init_err;
>       }
> 
> +     /** Check if the IPsec Crypto offload is supported and create
> +      *  security_ctx if it is.
> +      */
> +     if (iavf_ipsec_crypto_supported(adapter)) {
> +             /* Initialize security_ctx only for the primary process */
> +             ret = iavf_security_ctx_create(adapter);
> +             if (ret) {
> +                     PMD_INIT_LOG(ERR, "failed to create ipsec crypto 
> security
> instance");
> +                     return ret;
> +             }
> +
> +             ret = iavf_security_init(adapter);
> +             if (ret) {
> +                     PMD_INIT_LOG(ERR, "failed to initialized ipsec crypto
> resources");
> +                     return ret;
> +             }
> +     }
> +
>       iavf_default_rss_disable(adapter);
> 
>       return 0;
> diff --git a/drivers/net/iavf/iavf_generic_flow.c b/drivers/net/iavf/iavf_generic_flow.c
> index 364904fa02..2befa125ac 100644
> --- a/drivers/net/iavf/iavf_generic_flow.c
> +++ b/drivers/net/iavf/iavf_generic_flow.c
> @@ -1766,6 +1766,7 @@ iavf_flow_init(struct iavf_adapter *ad)
>       TAILQ_INIT(&vf->flow_list);
>       TAILQ_INIT(&vf->rss_parser_list);
>       TAILQ_INIT(&vf->dist_parser_list);
> +     TAILQ_INIT(&vf->ipsec_crypto_parser_list);
>       rte_spinlock_init(&vf->flow_ops_lock);
> 
>       RTE_TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
> @@ -1840,6 +1841,9 @@ iavf_register_parser(struct iavf_flow_parser *parser,
>       } else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
>               list = &vf->dist_parser_list;
>               TAILQ_INSERT_HEAD(list, parser_node, node);
> +     } else if (parser->engine->type == IAVF_FLOW_ENGINE_IPSEC_CRYPTO) {
> +             list = &vf->ipsec_crypto_parser_list;
> +             TAILQ_INSERT_HEAD(list, parser_node, node);
>       } else {
>               return -EINVAL;
>       }
> @@ -2149,6 +2153,13 @@ iavf_flow_process_filter(struct rte_eth_dev *dev,
> 
>       *engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
>                                   actions, error);
> +     if (*engine)
> +             return 0;
> +
> +     *engine = iavf_parse_engine(ad, flow, &vf->ipsec_crypto_parser_list,
> +                     pattern, actions, error);
> +     if (*engine)
> +             return 0;
> 
>       if (!*engine) {
>               rte_flow_error_set(error, EINVAL,
> @@ -2195,6 +2206,10 @@ iavf_flow_create(struct rte_eth_dev *dev,
>               return flow;
>       }
> 
> +     /* Special case for inline crypto egress flows */
> +     if (attr->egress && actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY)
> +             goto free_flow;
> +
>       ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
>                       &engine, iavf_parse_engine_create, error);
>       if (ret < 0) {
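
A note for readers of this hunk: inbound SAs are matched to traffic through
regular rte_flow rules with a SECURITY action, which the new
IAVF_FLOW_ENGINE_IPSEC_CRYPTO parser handles, while egress flows whose first
action is SECURITY are special-cased above and accepted without a parser
engine. A minimal sketch of an application-side inbound rule, assuming
port_id, the item specs/masks and a session created via rte_security are
already set up:

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP,
		  .spec = &esp_spec, .mask = &esp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = session },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;
	struct rte_flow *f =
		rte_flow_create(port_id, &attr, pattern, actions, &flow_err);
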
> diff --git a/drivers/net/iavf/iavf_generic_flow.h b/drivers/net/iavf/iavf_generic_flow.h
> index f2b54e1944..3681a96b31 100644
> --- a/drivers/net/iavf/iavf_generic_flow.h
> +++ b/drivers/net/iavf/iavf_generic_flow.h
> @@ -464,6 +464,7 @@ typedef int (*parse_pattern_action_t)(struct iavf_adapter *ad,
>  /* engine types. */
>  enum iavf_flow_engine_type {
>       IAVF_FLOW_ENGINE_NONE = 0,
> +     IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
>       IAVF_FLOW_ENGINE_FDIR,
>       IAVF_FLOW_ENGINE_HASH,
>       IAVF_FLOW_ENGINE_MAX,
> @@ -477,6 +478,7 @@ enum iavf_flow_engine_type {
>   */
>  enum iavf_flow_classification_stage {
>       IAVF_FLOW_STAGE_NONE = 0,
> +     IAVF_FLOW_STAGE_IPSEC_CRYPTO,
>       IAVF_FLOW_STAGE_RSS,
>       IAVF_FLOW_STAGE_DISTRIBUTOR,
>       IAVF_FLOW_STAGE_MAX,
> diff --git a/drivers/net/iavf/iavf_ipsec_crypto.c b/drivers/net/iavf/iavf_ipsec_crypto.c
> new file mode 100644
> index 0000000000..633fedf860
> --- /dev/null
> +++ b/drivers/net/iavf/iavf_ipsec_crypto.c
> @@ -0,0 +1,1894 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +#include <rte_cryptodev.h>
> +#include <rte_ethdev.h>
> +#include <rte_security_driver.h>
> +#include <rte_security.h>
> +
> +#include "iavf.h"
> +#include "iavf_rxtx.h"
> +#include "iavf_log.h"
> +#include "iavf_generic_flow.h"
> +
> +#include "iavf_ipsec_crypto.h"
> +#include "iavf_ipsec_crypto_capabilities.h"
> +
> +/**
> + * iAVF IPsec Crypto Security Context
> + */
> +struct iavf_security_ctx {
> +     struct iavf_adapter *adapter;
> +     int pkt_md_offset;
> +     struct rte_cryptodev_capabilities *crypto_capabilities;
> +};
> +
> +/**
> + * iAVF IPsec Crypto Security Session Parameters
> + */
> +struct iavf_security_session {
> +     struct iavf_adapter *adapter;
> +
> +     enum rte_security_ipsec_sa_mode mode;
> +     enum rte_security_ipsec_tunnel_type type;
> +     enum rte_security_ipsec_sa_direction direction;
> +
> +     struct {
> +             uint32_t spi; /* Security Parameter Index */
> +             uint32_t hw_idx; /* SA Index in hardware table */
> +     } sa;
> +
> +     struct {
> +             uint8_t enabled :1;
> +             union {
> +                     uint64_t value;
> +                     struct {
> +                             uint32_t hi;
> +                             uint32_t low;
> +                     };
> +             };
> +     } esn;
> +
> +     struct {
> +             uint8_t enabled :1;
> +     } udp_encap;
> +
> +     size_t iv_sz;
> +     size_t icv_sz;
> +     size_t block_sz;
> +
> +     struct iavf_ipsec_crypto_pkt_metadata pkt_metadata_template;
> +};
> +
> +/**
> + *  IV Length field in IPsec Tx Desc uses the following encoding:
> + *
> + *  0B - 0
> + *  4B - 1
> + *  8B - 2
> + *  16B - 3
> + *
> + * but we also need the IV Length for TSO to correctly calculate the total
> + * header length, so it is placed in the upper 6 bits here for easier retrieval.
> + */
> +static inline uint8_t
> +calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
> +{
> +     uint8_t iv_length = IAVF_IPSEC_IV_LEN_NONE;
> +
> +     switch (iv_sz) {
> +     case 4:
> +             iv_length = IAVF_IPSEC_IV_LEN_DW;
> +             break;
> +     case 8:
> +             iv_length = IAVF_IPSEC_IV_LEN_DDW;
> +             break;
> +     case 16:
> +             iv_length = IAVF_IPSEC_IV_LEN_QDW;
> +             break;
> +     }
> +
> +     return (iv_sz << 2) | iv_length;
> +}
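
For reference, the encodings produced by the function above are:

	iv_sz = 4:  (4 << 2)  | IAVF_IPSEC_IV_LEN_DW  (1) = 0x11
	iv_sz = 8:  (8 << 2)  | IAVF_IPSEC_IV_LEN_DDW (2) = 0x22
	iv_sz = 16: (16 << 2) | IAVF_IPSEC_IV_LEN_QDW (3) = 0x43

i.e. bits [1:0] carry the descriptor encoding and bits [7:2] carry the raw
IV size in bytes for the TSO header length calculation.
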
> +
> +static unsigned int
> +iavf_ipsec_crypto_session_size_get(void *device __rte_unused)
> +{
> +     return sizeof(struct iavf_security_session);
> +}
> +
> +static const struct rte_cryptodev_symmetric_capability *
> +get_capability(struct iavf_security_ctx *iavf_sctx,
> +     uint32_t algo, uint32_t type)
> +{
> +     const struct rte_cryptodev_capabilities *capability;
> +     int i = 0;
> +
> +     capability = &iavf_sctx->crypto_capabilities[i];
> +
> +     while (capability->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
> +             if (capability->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
> +                     capability->sym.xform_type == type &&
> +                     capability->sym.cipher.algo == algo)
> +                     return &capability->sym;
> +             /** try next capability */
> +             capability = &iavf_sctx->crypto_capabilities[++i];
> +     }
> +
> +     return NULL;
> +}
> +
> +static const struct rte_cryptodev_symmetric_capability *
> +get_auth_capability(struct iavf_security_ctx *iavf_sctx,
> +     enum rte_crypto_auth_algorithm algo)
> +{
> +     return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AUTH);
> +}
> +
> +static const struct rte_cryptodev_symmetric_capability *
> +get_cipher_capability(struct iavf_security_ctx *iavf_sctx,
> +     enum rte_crypto_cipher_algorithm algo)
> +{
> +     return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_CIPHER);
> +}
> +
> +static const struct rte_cryptodev_symmetric_capability *
> +get_aead_capability(struct iavf_security_ctx *iavf_sctx,
> +     enum rte_crypto_aead_algorithm algo)
> +{
> +     return get_capability(iavf_sctx, algo, RTE_CRYPTO_SYM_XFORM_AEAD);
> +}
> +
> +static uint16_t
> +get_cipher_blocksize(struct iavf_security_ctx *iavf_sctx,
> +     enum rte_crypto_cipher_algorithm algo)
> +{
> +     const struct rte_cryptodev_symmetric_capability *capability;
> +
> +     capability = get_cipher_capability(iavf_sctx, algo);
> +     if (capability == NULL)
> +             return 0;
> +
> +     return capability->cipher.block_size;
> +}
> +
> +static uint16_t
> +get_aead_blocksize(struct iavf_security_ctx *iavf_sctx,
> +     enum rte_crypto_aead_algorithm algo)
> +{
> +     const struct rte_cryptodev_symmetric_capability *capability;
> +
> +     capability = get_aead_capability(iavf_sctx, algo);
> +     if (capability == NULL)
> +             return 0;
> +
> +     return capability->aead.block_size;
> +}
> +
> +static uint16_t
> +get_auth_blocksize(struct iavf_security_ctx *iavf_sctx,
> +     enum rte_crypto_auth_algorithm algo)
> +{
> +     const struct rte_cryptodev_symmetric_capability *capability;
> +
> +     capability = get_auth_capability(iavf_sctx, algo);
> +     if (capability == NULL)
> +             return 0;
> +
> +     return capability->auth.block_size;
> +}
> +
> +static uint8_t
> +calc_context_desc_cipherblock_sz(size_t len)
> +{
> +     switch (len) {
> +     case 8:
> +             return 0x2;
> +     case 16:
> +             return 0x3;
> +     default:
> +             return 0x0;
> +     }
> +}
> +
> +static int
> +valid_length(uint32_t len, uint32_t min, uint32_t max, uint32_t increment)
> +{
> +     if (len < min || len > max)
> +             return false;
> +
> +     if (increment == 0)
> +             return true;
> +
> +     if ((len - min) % increment)
> +             return false;
> +
> +     /* make sure it fits in the key array */
> +     if (len > VIRTCHNL_IPSEC_MAX_KEY_LEN)
> +             return false;
> +
> +     return true;
> +}
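
To illustrate the helper above with a capability range of min = 16,
max = 32, increment = 8:

	valid_length(24, 16, 32, 8) -> true   ((24 - 16) % 8 == 0)
	valid_length(20, 16, 32, 8) -> false  ((20 - 16) % 8 != 0)
	valid_length(40, 16, 32, 8) -> false  (above max)
	valid_length(24, 16, 32, 0) -> true   (any length in range accepted)
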
> +
> +static int
> +valid_auth_xform(struct iavf_security_ctx *iavf_sctx,
> +     struct rte_crypto_auth_xform *auth)
> +{
> +     const struct rte_cryptodev_symmetric_capability *capability;
> +
> +     capability = get_auth_capability(iavf_sctx, auth->algo);
> +     if (capability == NULL)
> +             return false;
> +
> +     /* verify key size */
> +     if (!valid_length(auth->key.length,
> +             capability->auth.key_size.min,
> +             capability->auth.key_size.max,
> +             capability->auth.key_size.increment))
> +             return false;
> +
> +     return true;
> +}
> +
> +static int
> +valid_cipher_xform(struct iavf_security_ctx *iavf_sctx,
> +     struct rte_crypto_cipher_xform *cipher)
> +{
> +     const struct rte_cryptodev_symmetric_capability *capability;
> +
> +     capability = get_cipher_capability(iavf_sctx, cipher->algo);
> +     if (capability == NULL)
> +             return false;
> +
> +     /* verify key size */
> +     if (!valid_length(cipher->key.length,
> +             capability->cipher.key_size.min,
> +             capability->cipher.key_size.max,
> +             capability->cipher.key_size.increment))
> +             return false;
> +
> +     return true;
> +}
> +
> +static int
> +valid_aead_xform(struct iavf_security_ctx *iavf_sctx,
> +     struct rte_crypto_aead_xform *aead)
> +{
> +     const struct rte_cryptodev_symmetric_capability *capability;
> +
> +     capability = get_aead_capability(iavf_sctx, aead->algo);
> +     if (capability == NULL)
> +             return false;
> +
> +     /* verify key size */
> +     if (!valid_length(aead->key.length,
> +             capability->aead.key_size.min,
> +             capability->aead.key_size.max,
> +             capability->aead.key_size.increment))
> +             return false;
> +
> +     return true;
> +}
> +
> +static int
> +iavf_ipsec_crypto_session_validate_conf(struct iavf_security_ctx *iavf_sctx,
> +     struct rte_security_session_conf *conf)
> +{
> +     /** validate security action/protocol selection */
> +     if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
> +             conf->protocol != RTE_SECURITY_PROTOCOL_IPSEC) {
> +             PMD_DRV_LOG(ERR, "Invalid action / protocol specified");
> +             return -EINVAL;
> +     }
> +
> +     /** validate IPsec protocol selection */
> +     if (conf->ipsec.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
> +             PMD_DRV_LOG(ERR, "Invalid IPsec protocol specified");
> +             return -EINVAL;
> +     }
> +
> +     /** validate selected options */
> +     if (conf->ipsec.options.copy_dscp ||
> +             conf->ipsec.options.copy_flabel ||
> +             conf->ipsec.options.copy_df ||
> +             conf->ipsec.options.dec_ttl ||
> +             conf->ipsec.options.ecn ||
> +             conf->ipsec.options.stats) {
> +             PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +             return -EINVAL;
> +     }
> +
> +     /**
> +      * Validate crypto xforms parameters.
> +      *
> +      * AEAD transforms can be used for either inbound/outbound IPsec SAs,
> +      * for non-AEAD crypto transforms we explicitly only support CIPHER/AUTH
> +      * for outbound and AUTH/CIPHER chained transforms for inbound IPsec.
> +      */
> +     if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> +             if (!valid_aead_xform(iavf_sctx, &conf->crypto_xform->aead)) {
> +                     PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +                     return -EINVAL;
> +             }
> +     } else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
> +             conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
> +             conf->crypto_xform->next &&
> +             conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> +             if (!valid_cipher_xform(iavf_sctx,
> +                             &conf->crypto_xform->cipher)) {
> +                     PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +                     return -EINVAL;
> +             }
> +
> +             if (!valid_auth_xform(iavf_sctx,
> +                             &conf->crypto_xform->next->auth)) {
> +                     PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +                     return -EINVAL;
> +             }
> +     } else if (conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
> +             conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
> +             conf->crypto_xform->next &&
> +             conf->crypto_xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> +             if (!valid_auth_xform(iavf_sctx, &conf->crypto_xform->auth)) {
> +                     PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +                     return -EINVAL;
> +             }
> +
> +             if (!valid_cipher_xform(iavf_sctx,
> +                             &conf->crypto_xform->next->cipher)) {
> +                     PMD_DRV_LOG(ERR, "Invalid IPsec option specified");
> +                     return -EINVAL;
> +             }
> +     }
> +
> +     return 0;
> +}
> +
> +static void
> +sa_add_set_aead_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
> +     struct rte_crypto_aead_xform *aead, uint32_t salt)
> +{
> +     cfg->crypto_type = VIRTCHNL_AEAD;
> +
> +     switch (aead->algo) {
> +     case RTE_CRYPTO_AEAD_AES_CCM:
> +             cfg->algo_type = VIRTCHNL_AES_CCM; break;
> +     case RTE_CRYPTO_AEAD_AES_GCM:
> +             cfg->algo_type = VIRTCHNL_AES_GCM; break;
> +     case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
> +             cfg->algo_type = VIRTCHNL_CHACHA20_POLY1305; break;
> +     default:
> +             PMD_DRV_LOG(ERR, "Invalid AEAD parameters");
> +             break;
> +     }
> +
> +     cfg->key_len = aead->key.length;
> +     cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt len */
> +     cfg->digest_len = aead->digest_length;
> +     cfg->salt = salt;
> +
> +     memcpy(cfg->key_data, aead->key.data, cfg->key_len);
> +}
> +
> +static void
> +sa_add_set_cipher_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
> +     struct rte_crypto_cipher_xform *cipher, uint32_t salt)
> +{
> +     cfg->crypto_type = VIRTCHNL_CIPHER;
> +
> +     switch (cipher->algo) {
> +     case RTE_CRYPTO_CIPHER_AES_CBC:
> +             cfg->algo_type = VIRTCHNL_AES_CBC; break;
> +     case RTE_CRYPTO_CIPHER_3DES_CBC:
> +             cfg->algo_type = VIRTCHNL_3DES_CBC; break;
> +     case RTE_CRYPTO_CIPHER_NULL:
> +             cfg->algo_type = VIRTCHNL_CIPHER_NO_ALG; break;
> +     case RTE_CRYPTO_CIPHER_AES_CTR:
> +             cfg->algo_type = VIRTCHNL_AES_CTR;
> +             cfg->salt = salt;
> +             break;
> +     default:
> +             PMD_DRV_LOG(ERR, "Invalid cipher parameters");
> +             break;
> +     }
> +
> +     cfg->key_len = cipher->key.length;
> +     cfg->iv_len = cipher->iv.length;
> +     cfg->salt = salt;
> +
> +     memcpy(cfg->key_data, cipher->key.data, cfg->key_len);
> +}
> +
> +static void
> +sa_add_set_auth_params(struct virtchnl_ipsec_crypto_cfg_item *cfg,
> +     struct rte_crypto_auth_xform *auth, uint32_t salt)
> +{
> +     cfg->crypto_type = VIRTCHNL_AUTH;
> +
> +     switch (auth->algo) {
> +     case RTE_CRYPTO_AUTH_NULL:
> +             cfg->algo_type = VIRTCHNL_HASH_NO_ALG; break;
> +     case RTE_CRYPTO_AUTH_AES_CBC_MAC:
> +             cfg->algo_type = VIRTCHNL_AES_CBC_MAC; break;
> +     case RTE_CRYPTO_AUTH_AES_CMAC:
> +             cfg->algo_type = VIRTCHNL_AES_CMAC; break;
> +     case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
> +             cfg->algo_type = VIRTCHNL_AES_XCBC_MAC; break;
> +     case RTE_CRYPTO_AUTH_MD5_HMAC:
> +             cfg->algo_type = VIRTCHNL_MD5_HMAC; break;
> +     case RTE_CRYPTO_AUTH_SHA1_HMAC:
> +             cfg->algo_type = VIRTCHNL_SHA1_HMAC; break;
> +     case RTE_CRYPTO_AUTH_SHA224_HMAC:
> +             cfg->algo_type = VIRTCHNL_SHA224_HMAC; break;
> +     case RTE_CRYPTO_AUTH_SHA256_HMAC:
> +             cfg->algo_type = VIRTCHNL_SHA256_HMAC; break;
> +     case RTE_CRYPTO_AUTH_SHA384_HMAC:
> +             cfg->algo_type = VIRTCHNL_SHA384_HMAC; break;
> +     case RTE_CRYPTO_AUTH_SHA512_HMAC:
> +             cfg->algo_type = VIRTCHNL_SHA512_HMAC; break;
> +     case RTE_CRYPTO_AUTH_AES_GMAC:
> +             cfg->algo_type = VIRTCHNL_AES_GMAC;
> +             cfg->salt = salt;
> +             break;
> +     default:
> +             PMD_DRV_LOG(ERR, "Invalid auth parameters");
> +             break;
> +     }
> +
> +     cfg->key_len = auth->key.length;
> +     /* special case for RTE_CRYPTO_AUTH_AES_GMAC */
> +     if (auth->algo == RTE_CRYPTO_AUTH_AES_GMAC)
> +             cfg->iv_len = sizeof(uint64_t); /* iv.length includes salt */
> +     else
> +             cfg->iv_len = auth->iv.length;
> +     cfg->digest_len = auth->digest_length;
> +
> +     memcpy(cfg->key_data, auth->key.data, cfg->key_len);
> +}
> +
> +/**
> + * Send SA add virtual channel request to Inline IPsec driver.
> + *
> + * Inline IPsec driver expects SPI and destination IP address to be in host
> + * order, but the DPDK APIs use network order, therefore we need to do an
> + * htonl conversion of these parameters.
> + */
> +static uint32_t
> +iavf_ipsec_crypto_security_association_add(struct iavf_adapter *adapter,
> +     struct rte_security_session_conf *conf)
> +{
> +     struct inline_ipsec_msg *request = NULL, *response = NULL;
> +     struct virtchnl_ipsec_sa_cfg *sa_cfg;
> +     size_t request_len, response_len;
> +
> +     int rc;
> +
> +     request_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_sa_cfg);
> +
> +     request = rte_malloc("iavf-sad-add-request", request_len, 0);
> +     if (request == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     response_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_sa_cfg_resp);
> +     response = rte_malloc("iavf-sad-add-response", response_len, 0);
> +     if (response == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     /* set msg header params */
> +     request->ipsec_opcode = INLINE_IPSEC_OP_SA_CREATE;
> +     request->req_id = (uint16_t)0xDEADBEEF;
> +
> +     /* set SA configuration params */
> +     sa_cfg = (struct virtchnl_ipsec_sa_cfg *)(request + 1);
> +
> +     sa_cfg->spi = conf->ipsec.spi;
> +     sa_cfg->virtchnl_protocol_type = VIRTCHNL_PROTO_ESP;
> +     sa_cfg->virtchnl_direction =
> +             conf->ipsec.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
> +                     VIRTCHNL_DIR_INGRESS : VIRTCHNL_DIR_EGRESS;
> +
> +     if (conf->ipsec.options.esn) {
> +             sa_cfg->esn_enabled = 1;
> +             sa_cfg->esn_hi = conf->ipsec.esn.hi;
> +             sa_cfg->esn_low = conf->ipsec.esn.low;
> +     }
> +
> +     if (conf->ipsec.options.udp_encap)
> +             sa_cfg->udp_encap_enabled = 1;
> +
> +     /* Set outer IP params */
> +     if (conf->ipsec.tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
> +             sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV4;
> +
> +             *((uint32_t *)sa_cfg->dst_addr) =
> +                     htonl(conf->ipsec.tunnel.ipv4.dst_ip.s_addr);
> +     } else {
> +             uint32_t *v6_dst_addr =
> +                     conf->ipsec.tunnel.ipv6.dst_addr.s6_addr32;
> +
> +             sa_cfg->virtchnl_ip_type = VIRTCHNL_IPV6;
> +
> +             ((uint32_t *)sa_cfg->dst_addr)[0] = htonl(v6_dst_addr[0]);
> +             ((uint32_t *)sa_cfg->dst_addr)[1] = htonl(v6_dst_addr[1]);
> +             ((uint32_t *)sa_cfg->dst_addr)[2] = htonl(v6_dst_addr[2]);
> +             ((uint32_t *)sa_cfg->dst_addr)[3] = htonl(v6_dst_addr[3]);
> +     }
> +
> +     /* set crypto params */
> +     if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> +             sa_add_set_aead_params(&sa_cfg->crypto_cfg.items[0],
> +                     &conf->crypto_xform->aead, conf->ipsec.salt);
> +
> +     } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> +             sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[0],
> +                     &conf->crypto_xform->cipher, conf->ipsec.salt);
> +             sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[1],
> +                     &conf->crypto_xform->next->auth, conf->ipsec.salt);
> +
> +     } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> +             sa_add_set_auth_params(&sa_cfg->crypto_cfg.items[0],
> +                     &conf->crypto_xform->auth, conf->ipsec.salt);
> +             if (conf->crypto_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC)
> +                     sa_add_set_cipher_params(&sa_cfg->crypto_cfg.items[1],
> +                     &conf->crypto_xform->next->cipher, conf->ipsec.salt);
> +     }
> +
> +     /* send virtual channel request to add SA to hardware database */
> +     rc = iavf_ipsec_crypto_request(adapter,
> +                     (uint8_t *)request, request_len,
> +                     (uint8_t *)response, response_len);
> +     if (rc)
> +             goto update_cleanup;
> +
> +     /* verify response id */
> +     if (response->ipsec_opcode != request->ipsec_opcode ||
> +             response->req_id != request->req_id)
> +             rc = -EFAULT;
> +     else
> +             rc = response->ipsec_data.sa_cfg_resp->sa_handle;
> +update_cleanup:
> +     rte_free(response);
> +     rte_free(request);
> +
> +     return rc;
> +}
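
A note on the virtchnl framing used here and in the other request functions
below: every message is a fixed inline_ipsec_msg header followed immediately
by the op-specific payload, which is why the payload pointer is obtained
with "(struct virtchnl_ipsec_sa_cfg *)(request + 1)" above:

	+--------------------------+------------------------------------+
	| struct inline_ipsec_msg  | op-specific payload                |
	| (ipsec_opcode, req_id)   | e.g. struct virtchnl_ipsec_sa_cfg  |
	+--------------------------+------------------------------------+

As described in the commit message, the PF acknowledges receipt first and
delivers the result asynchronously, so a separate response buffer is
allocated and matched against the request on (ipsec_opcode, req_id).
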
> +
> +static void
> +set_pkt_metadata_template(struct iavf_ipsec_crypto_pkt_metadata *template,
> +     struct iavf_security_session *sess)
> +{
> +     template->sa_idx = sess->sa.hw_idx;
> +
> +     if (sess->udp_encap.enabled)
> +             template->ol_flags = IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT;
> +
> +     if (sess->esn.enabled)
> +             template->ol_flags |= IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN;
> +
> +     template->len_iv = calc_ipsec_desc_iv_len_field(sess->iv_sz);
> +     template->ctx_desc_ipsec_params =
> +                     calc_context_desc_cipherblock_sz(sess->block_sz) |
> +                     ((uint8_t)(sess->icv_sz >> 2) << 3);
> +}
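
As a worked example of the packing above, for an AES-GCM session with a
16B cipher block and a 16B ICV:

	calc_context_desc_cipherblock_sz(16) = 0x3
	((uint8_t)(16 >> 2) << 3)            = 0x20
	ctx_desc_ipsec_params                = 0x23

i.e. bits [2:0] carry the cipher block size encoding and bits [7:3] the
ICV size in 4-byte words.
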
> +
> +static void
> +set_session_parameter(struct iavf_security_ctx *iavf_sctx,
> +     struct iavf_security_session *sess,
> +     struct rte_security_session_conf *conf, uint32_t sa_idx)
> +{
> +     sess->adapter = iavf_sctx->adapter;
> +
> +     sess->mode = conf->ipsec.mode;
> +     sess->direction = conf->ipsec.direction;
> +
> +     if (sess->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
> +             sess->type = conf->ipsec.tunnel.type;
> +
> +     sess->sa.spi = conf->ipsec.spi;
> +     sess->sa.hw_idx = sa_idx;
> +
> +     if (conf->ipsec.options.esn) {
> +             sess->esn.enabled = 1;
> +             sess->esn.value = conf->ipsec.esn.value;
> +     }
> +
> +     if (conf->ipsec.options.udp_encap)
> +             sess->udp_encap.enabled = 1;
> +
> +     if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
> +             sess->block_sz = get_aead_blocksize(iavf_sctx,
> +                     conf->crypto_xform->aead.algo);
> +             sess->iv_sz = sizeof(uint64_t); /* iv.length includes salt */
> +             sess->icv_sz = conf->crypto_xform->aead.digest_length;
> +     } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
> +             sess->block_sz = get_cipher_blocksize(iavf_sctx,
> +                     conf->crypto_xform->cipher.algo);
> +             sess->iv_sz = conf->crypto_xform->cipher.iv.length;
> +             sess->icv_sz = conf->crypto_xform->next->auth.digest_length;
> +     } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
> +             if (conf->crypto_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
> +                     sess->block_sz = get_auth_blocksize(iavf_sctx,
> +                             RTE_CRYPTO_SYM_XFORM_AUTH);

There is a warning due to an implicit conversion from 'enum
rte_crypto_sym_xform_type' to 'enum rte_crypto_auth_algorithm'.
Please replace the above line with
(enum rte_crypto_auth_algorithm)RTE_CRYPTO_SYM_XFORM_AUTH); during merge.


> +                     sess->iv_sz = conf->crypto_xform->auth.iv.length;
> +                     sess->icv_sz = conf->crypto_xform->auth.digest_length;
> +             } else {
> +                     sess->block_sz = get_cipher_blocksize(iavf_sctx,
> +                             conf->crypto_xform->next->cipher.algo);
> +                     sess->iv_sz =
> +                             conf->crypto_xform->next->cipher.iv.length;
> +                     sess->icv_sz = conf->crypto_xform->auth.digest_length;
> +             }
> +     }
> +
> +     set_pkt_metadata_template(&sess->pkt_metadata_template, sess);
> +}
> +
> +/**
> + * Create IPsec Security Association for inline IPsec Crypto offload.
> + *
> + * 1. validate session configuration parameters
> + * 2. allocate session memory from mempool
> + * 3. add SA to hardware database
> + * 4. set session parameters
> + * 5. create packet metadata template for datapath
> + */
> +static int
> +iavf_ipsec_crypto_session_create(void *device,
> +                              struct rte_security_session_conf *conf,
> +                              struct rte_security_session *session,
> +                              struct rte_mempool *mempool)
> +{
> +     struct rte_eth_dev *ethdev = device;
> +     struct iavf_adapter *adapter =
> +             IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
> +     struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +     struct iavf_security_session *iavf_session = NULL;
> +     int sa_idx;
> +     int ret = 0;
> +
> +     /* validate that all SA parameters are valid for device */
> +     ret = iavf_ipsec_crypto_session_validate_conf(iavf_sctx, conf);
> +     if (ret)
> +             return ret;
> +
> +     /* allocate session context */
> +     if (rte_mempool_get(mempool, (void **)&iavf_session)) {
> +             PMD_DRV_LOG(ERR, "Cannot get object from sess mempool");
> +             return -ENOMEM;
> +     }
> +
> +     /* add SA to hardware database */
> +     sa_idx = iavf_ipsec_crypto_security_association_add(adapter, conf);
> +     if (sa_idx < 0) {
> +             PMD_DRV_LOG(ERR,
> +                     "Failed to add SA (spi: %d, mode: %s, direction: %s)",
> +                     conf->ipsec.spi,
> +                     conf->ipsec.mode ==
> +                             RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT ?
> +                             "transport" : "tunnel",
> +                     conf->ipsec.direction ==
> +                             RTE_SECURITY_IPSEC_SA_DIR_INGRESS ?
> +                             "inbound" : "outbound");
> +
> +             rte_mempool_put(mempool, iavf_session);
> +             return -EFAULT;
> +     }
> +
> +     /* save data plane required session parameters */
> +     set_session_parameter(iavf_sctx, iavf_session, conf, sa_idx);
> +
> +     /* save to security session private data */
> +     set_sec_session_private_data(session, iavf_session);
> +
> +     return 0;
> +}
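
For context, an application reaches this callback through the generic
rte_security API. A minimal sketch, assuming port_id, the session mempools
and an AEAD xform are already set up (the values shown are placeholders):

	struct rte_security_ctx *sctx = rte_eth_dev_get_sec_ctx(port_id);

	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.spi = 0x100,
			/* .tunnel, .salt, .options as required */
		},
		.crypto_xform = &aead_xform,
	};

	struct rte_security_session *sess =
		rte_security_session_create(sctx, &conf, sess_mp, priv_mp);
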
> +
> +/**
> + * Check if valid ipsec crypto action.
> + * SPI must be non-zero and SPI in session must match SPI value
> + * passed into function.
> + *
> + * returns: 0 if invalid session or the SPI value equals zero
> + * returns: 1 if valid
> + */
> +uint32_t
> +iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
> +     const struct rte_security_session *session, uint32_t spi)
> +{
> +     struct iavf_adapter *adapter =
> +             IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
> +     struct iavf_security_session *sess = session->sess_private_data;
> +
> +     /* verify we have a valid session and that it belongs to this adapter */
> +     if (unlikely(sess == NULL || sess->adapter != adapter))
> +             return false;
> +
> +     /* SPI value must be non-zero */
> +     if (spi == 0)
> +             return false;
> +     /* Session SPI must match flow SPI */
> +     else if (sess->sa.spi == spi) {
> +             return true;
> +             /**
> +              * TODO: We should add a way of tracking valid hw SA indices to
> +              * make validation less brittle
> +              */
> +     }
> +
> +     return true;
> +}
> +
> +/**
> + * Send virtual channel security policy add request to IES driver.
> + *
> + * IES driver expects SPI and destination IP address to be in host
> + * order, but the DPDK APIs use network order, therefore we need to do
> + * an htonl conversion of these parameters.
> + */
> +int
> +iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
> +     uint32_t esp_spi,
> +     uint8_t is_v4,
> +     rte_be32_t v4_dst_addr,
> +     uint8_t *v6_dst_addr,
> +     uint8_t drop)
> +{
> +     struct inline_ipsec_msg *request = NULL, *response = NULL;
> +     size_t request_len, response_len;
> +     int rc = 0;
> +
> +     request_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_sp_cfg);
> +     request = rte_malloc("iavf-inbound-security-policy-add-request",
> +                             request_len, 0);
> +     if (request == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     /* set msg header params */
> +     request->ipsec_opcode = INLINE_IPSEC_OP_SP_CREATE;
> +     request->req_id = (uint16_t)0xDEADBEEF;
> +
> +     /* ESP SPI */
> +     request->ipsec_data.sp_cfg->spi = htonl(esp_spi);
> +
> +     /* Destination IP  */
> +     if (is_v4) {
> +             request->ipsec_data.sp_cfg->table_id =
> +                             VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4;
> +             request->ipsec_data.sp_cfg->dip[0] = htonl(v4_dst_addr);
> +     } else {
> +             request->ipsec_data.sp_cfg->table_id =
> +                             VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
> +             request->ipsec_data.sp_cfg->dip[0] =
> +                             htonl(((uint32_t *)v6_dst_addr)[0]);
> +             request->ipsec_data.sp_cfg->dip[1] =
> +                             htonl(((uint32_t *)v6_dst_addr)[1]);
> +             request->ipsec_data.sp_cfg->dip[2] =
> +                             htonl(((uint32_t *)v6_dst_addr)[2]);
> +             request->ipsec_data.sp_cfg->dip[3] =
> +                             htonl(((uint32_t *)v6_dst_addr)[3]);
> +     }
> +
> +     request->ipsec_data.sp_cfg->drop = drop;
> +
> +     /** Traffic Class/Congestion Domain currently not supported */
> +     request->ipsec_data.sp_cfg->set_tc = 0;
> +     request->ipsec_data.sp_cfg->cgd = 0;
> +
> +     response_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_sp_cfg_resp);
> +     response = rte_malloc("iavf-inbound-security-policy-add-response",
> +                             response_len, 0);
> +     if (response == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     /* send virtual channel request to add security policy to hardware database */
> +     rc = iavf_ipsec_crypto_request(adapter,
> +                     (uint8_t *)request, request_len,
> +                     (uint8_t *)response, response_len);
> +     if (rc)
> +             goto update_cleanup;
> +
> +     /* verify response */
> +     if (response->ipsec_opcode != request->ipsec_opcode ||
> +             response->req_id != request->req_id)
> +             rc = -EFAULT;
> +     else
> +             rc = response->ipsec_data.sp_cfg_resp->rule_id;
> +
> +update_cleanup:
> +     rte_free(request);
> +     rte_free(response);
> +
> +     return rc;
> +}
> +
> +static uint32_t
> +iavf_ipsec_crypto_sa_update_esn(struct iavf_adapter *adapter,
> +     struct iavf_security_session *sess)
> +{
> +     struct inline_ipsec_msg *request = NULL, *response = NULL;
> +     size_t request_len, response_len;
> +     int rc = 0;
> +
> +     request_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_sa_update);
> +     request = rte_malloc("iavf-sa-update-request", request_len, 0);
> +     if (request == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     response_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_resp);
> +     response = rte_malloc("iavf-sa-update-response", response_len, 0);
> +     if (response == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     /* set msg header params */
> +     request->ipsec_opcode = INLINE_IPSEC_OP_SA_UPDATE;
> +     request->req_id = (uint16_t)0xDEADBEEF;
> +
> +     /* set request params */
> +     request->ipsec_data.sa_update->sa_index = sess->sa.hw_idx;
> +     request->ipsec_data.sa_update->esn_hi = sess->esn.hi;
> +
> +     /* send virtual channel request to update SA in hardware database */
> +     rc = iavf_ipsec_crypto_request(adapter,
> +                     (uint8_t *)request, request_len,
> +                     (uint8_t *)response, response_len);
> +     if (rc)
> +             goto update_cleanup;
> +
> +     /* verify response */
> +     if (response->ipsec_opcode != request->ipsec_opcode ||
> +             response->req_id != request->req_id)
> +             rc = -EFAULT;
> +     else
> +             rc = response->ipsec_data.ipsec_resp->resp;
> +
> +update_cleanup:
> +     rte_free(request);
> +     rte_free(response);
> +
> +     return rc;
> +}
> +
> +static int
> +iavf_ipsec_crypto_session_update(void *device,
> +             struct rte_security_session *session,
> +             struct rte_security_session_conf *conf)
> +{
> +     struct iavf_adapter *adapter = NULL;
> +     struct iavf_security_session *iavf_sess = NULL;
> +     struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
> +     int rc = 0;
> +
> +     adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
> +     iavf_sess = (struct iavf_security_session *)session->sess_private_data;
> +
> +     /* verify we have a valid session and that it belongs to this adapter */
> +     if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
> +             return -EINVAL;
> +
> +     /* update esn hi 32-bits */
> +     if (iavf_sess->esn.enabled && conf->ipsec.options.esn) {
> +             /**
> +              * Update ESN in hardware for inbound SA. Store in
> +              * iavf_security_session for outbound SA for use
> +              * in *iavf_ipsec_crypto_pkt_metadata_set* function.
> +              */
> +             if (iavf_sess->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
> +                     rc = iavf_ipsec_crypto_sa_update_esn(adapter,
> +                                     iavf_sess);
> +             else
> +                     iavf_sess->esn.hi = conf->ipsec.esn.hi;
> +     }
> +
> +     return rc;
> +}
> +
> +static int
> +iavf_ipsec_crypto_session_stats_get(void *device __rte_unused,
> +             struct rte_security_session *session __rte_unused,
> +             struct rte_security_stats *stats __rte_unused)
> +{
> +     return -EOPNOTSUPP;
> +}
> +
> +int
> +iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
> +     uint8_t is_v4, uint32_t flow_id)
> +{
> +     struct inline_ipsec_msg *request = NULL, *response = NULL;
> +     size_t request_len, response_len;
> +     int rc = 0;
> +
> +     request_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_sp_destroy);
> +     request = rte_malloc("iavf-sp-del-request", request_len, 0);
> +     if (request == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     response_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_resp);
> +     response = rte_malloc("iavf-sp-del-response", response_len, 0);
> +     if (response == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     /* set msg header params */
> +     request->ipsec_opcode = INLINE_IPSEC_OP_SP_DESTROY;
> +     request->req_id = (uint16_t)0xDEADBEEF;
> +
> +     /* set security policy params */
> +     request->ipsec_data.sp_destroy->table_id = is_v4 ?
> +                     VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV4 :
> +                     VIRTCHNL_IPSEC_INBOUND_SPD_TBL_IPV6;
> +     request->ipsec_data.sp_destroy->rule_id = flow_id;
> +
> +     /* send virtual channel request to delete security policy from hardware database */
> +     rc = iavf_ipsec_crypto_request(adapter,
> +                     (uint8_t *)request, request_len,
> +                     (uint8_t *)response, response_len);
> +     if (rc)
> +             goto update_cleanup;
> +
> +     /* verify response */
> +     if (response->ipsec_opcode != request->ipsec_opcode ||
> +             response->req_id != request->req_id)
> +             rc = -EFAULT;
> +     else
> +             return response->ipsec_data.ipsec_status->status;
> +
> +update_cleanup:
> +     rte_free(request);
> +     rte_free(response);
> +
> +     return rc;
> +}
> +
> +static uint32_t
> +iavf_ipsec_crypto_sa_del(struct iavf_adapter *adapter,
> +     struct iavf_security_session *sess)
> +{
> +     struct inline_ipsec_msg *request = NULL, *response = NULL;
> +     size_t request_len, response_len;
> +
> +     int rc = 0;
> +
> +     request_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_sa_destroy);
> +
> +     request = rte_malloc("iavf-sa-del-request", request_len, 0);
> +     if (request == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     response_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_resp);
> +
> +     response = rte_malloc("iavf-sa-del-response", response_len, 0);
> +     if (response == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     /* set msg header params */
> +     request->ipsec_opcode = INLINE_IPSEC_OP_SA_DESTROY;
> +     request->req_id = (uint16_t)0xDEADBEEF;
> +
> +     /**
> +      * SA delete supports deletion of 1-8 specified SAs, or if the flag
> +      * field is zero, all SAs associated with the VF will be deleted.
> +      */
> +     if (sess) {
> +             request->ipsec_data.sa_destroy->flag = 0x1;
> +             request->ipsec_data.sa_destroy->sa_index[0] = sess->sa.hw_idx;
> +     } else {
> +             request->ipsec_data.sa_destroy->flag = 0x0;
> +     }
> +
> +     /* send virtual channel request to delete SA from hardware database */
> +     rc = iavf_ipsec_crypto_request(adapter,
> +                     (uint8_t *)request, request_len,
> +                     (uint8_t *)response, response_len);
> +     if (rc)
> +             goto update_cleanup;
> +
> +     /* verify response */
> +     if (response->ipsec_opcode != request->ipsec_opcode ||
> +             response->req_id != request->req_id)
> +             rc = -EFAULT;
> +
> +     /**
> +      * Delete status will be the same bitmask as the sa_destroy request
> +      * flag if the deletes were successful
> +      */
> +     if (request->ipsec_data.sa_destroy->flag !=
> +                     response->ipsec_data.ipsec_status->status)
> +             rc = -EFAULT;
> +
> +update_cleanup:
> +     rte_free(response);
> +     rte_free(request);
> +
> +     return rc;
> +}
> +
> +static int
> +iavf_ipsec_crypto_session_destroy(void *device,
> +             struct rte_security_session *session)
> +{
> +     struct iavf_adapter *adapter = NULL;
> +     struct iavf_security_session *iavf_sess = NULL;
> +     struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
> +     int ret;
> +
> +     adapter = IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
> +     iavf_sess = (struct iavf_security_session *)session->sess_private_data;
> +
> +     /* verify we have a valid session and that it belongs to this adapter */
> +     if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
> +             return -EINVAL;
> +
> +     ret = iavf_ipsec_crypto_sa_del(adapter, iavf_sess);
> +     rte_mempool_put(rte_mempool_from_obj(iavf_sess), (void *)iavf_sess);
> +     return ret;
> +}
> +
> +/**
> + * Get the ESP trailer from the packet as well as calculate the total ESP
> + * trailer length, which includes the padding, the ESP trailer footer and the ICV
> + */
> +static inline struct rte_esp_tail *
> +iavf_ipsec_crypto_get_esp_trailer(struct rte_mbuf *m,
> +     struct iavf_security_session *s, uint16_t *esp_trailer_length)
> +{
> +     struct rte_esp_tail *esp_trailer;
> +
> +     uint16_t length = sizeof(struct rte_esp_tail) + s->icv_sz;
> +     uint16_t offset = 0;
> +
> +     /**
> +      * The ICV will not be present in TSO packets as this is appended by
> +      * hardware during segment generation
> +      */
> +     if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG))
> +             length -= s->icv_sz;
> +
> +     *esp_trailer_length = length;
> +
> +     /**
> +      * Calculate offset in packet to ESP trailer header, this should be
> +      * total packet length less the size of the ESP trailer plus the ICV
> +      * length if it is present
> +      */
> +     offset = rte_pktmbuf_pkt_len(m) - length;
> +
> +     if (m->nb_segs > 1) {
> +             /* find the segment in which the esp trailer is located */
> +             while (m->data_len < offset) {
> +                     offset -= m->data_len;
> +                     m = m->next;
> +             }
> +     }
> +
> +     esp_trailer = rte_pktmbuf_mtod_offset(m, struct rte_esp_tail *, offset);
> +
> +     *esp_trailer_length += esp_trailer->pad_len;
> +
> +     return esp_trailer;
> +}
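
To make the trailer arithmetic above concrete: for a non-TSO packet on an
SA with a 16B ICV and 2 bytes of ESP padding, *esp_trailer_length is first
set to sizeof(struct rte_esp_tail) (2) + 16 = 18, then pad_len is added for
a total of 20 bytes.
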
> +
> +static inline uint16_t
> +iavf_ipsec_crypto_compute_l4_payload_length(struct rte_mbuf *m,
> +     struct iavf_security_session *s, uint16_t esp_tlen)
> +{
> +     uint16_t ol2_len = m->l2_len;   /* MAC + VLAN */
> +     uint16_t ol3_len = 0;           /* ipv4/6 + ext hdrs */
> +     uint16_t ol4_len = 0;           /* UDP NATT */
> +     uint16_t l3_len = 0;            /* IPv4/6 + ext hdrs */
> +     uint16_t l4_len = 0;            /* TCP/UDP/SCTP hdrs */
> +     uint16_t esp_hlen = sizeof(struct rte_esp_hdr) + s->iv_sz;
> +
> +     if (s->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
> +             ol3_len = m->outer_l3_len;
> +             /**<
> +              * application provided l3len assumed to include length of
> +              * ipv4/6 hdr + ext hdrs
> +              */
> +
> +     if (s->udp_encap.enabled)
> +             ol4_len = sizeof(struct rte_udp_hdr);
> +
> +     l3_len = m->l3_len;
> +     l4_len = m->l4_len;
> +
> +     return rte_pktmbuf_pkt_len(m) - (ol2_len + ol3_len + ol4_len +
> +                     esp_hlen + l3_len + l4_len + esp_tlen);
> +}
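
A worked example of the calculation above for tunnel mode with UDP NATT,
all lengths in bytes: pkt_len = 1514, l2 = 14, outer l3 = 20, NATT UDP = 8,
ESP header + IV = 8 + 8 = 16, inner l3 = 20, l4 (TCP) = 20 and an ESP
trailer of 18 gives 1514 - (14 + 20 + 8 + 16 + 20 + 20 + 18) = 1398 bytes
of L4 payload.
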
> +
> +static int
> +iavf_ipsec_crypto_pkt_metadata_set(void *device,
> +                      struct rte_security_session *session,
> +                      struct rte_mbuf *m, void *params)
> +{
> +     struct rte_eth_dev *ethdev = device;
> +     struct iavf_adapter *adapter =
> +                     IAVF_DEV_PRIVATE_TO_ADAPTER(ethdev->data->dev_private);
> +     struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +     struct iavf_security_session *iavf_sess = session->sess_private_data;
> +     struct iavf_ipsec_crypto_pkt_metadata *md;
> +     struct rte_esp_tail *esp_tail;
> +     uint64_t *sqn = params;
> +     uint16_t esp_trailer_length;
> +
> +     /* Check that we have a valid session and that it is associated with this device */
> +     if (unlikely(iavf_sess == NULL || iavf_sess->adapter != adapter))
> +             return -EINVAL;
> +
> +     /* Get dynamic metadata location from mbuf */
> +     md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
> +             struct iavf_ipsec_crypto_pkt_metadata *);
> +
> +     /* Set immutable metadata values from session template */
> +     memcpy(md, &iavf_sess->pkt_metadata_template,
> +             sizeof(struct iavf_ipsec_crypto_pkt_metadata));
> +
> +     esp_tail = iavf_ipsec_crypto_get_esp_trailer(m, iavf_sess,
> +                     &esp_trailer_length);
> +
> +     /* Set per packet mutable metadata values */
> +     md->esp_trailer_len = esp_trailer_length;
> +     md->l4_payload_len = iavf_ipsec_crypto_compute_l4_payload_length(m,
> +                             iavf_sess, esp_trailer_length);
> +     md->next_proto = esp_tail->next_proto;
> +
> +     /* If Extended SN in use set the upper 32-bits in metadata */
> +     if (iavf_sess->esn.enabled && sqn != NULL)
> +             md->esn = (uint32_t)(*sqn >> 32);
> +
> +     return 0;
> +}
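
This callback is reached through rte_security_set_pkt_metadata(). A minimal
sketch of the per-packet Tx path in an application, assuming sctx, sess,
port_id and queue_id from earlier and an optional 64-bit sequence number
for ESN:

	uint64_t sqn = ...; /* full 64-bit sequence number, if ESN is used */

	m->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
	/* populates the iavf dynfield before handing the packet to Tx */
	if (rte_security_set_pkt_metadata(sctx, sess, m, &sqn) != 0)
		rte_pktmbuf_free(m);
	else
		rte_eth_tx_burst(port_id, queue_id, &m, 1);
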
> +
> +static int
> +iavf_ipsec_crypto_device_capabilities_get(struct iavf_adapter *adapter,
> +             struct virtchnl_ipsec_cap *capability)
> +{
> +     /* Perform pf-vf comms */
> +     struct inline_ipsec_msg *request = NULL, *response = NULL;
> +     size_t request_len, response_len;
> +     int rc;
> +
> +     request_len = sizeof(struct inline_ipsec_msg);
> +
> +     request = rte_malloc("iavf-device-capability-request", request_len, 0);
> +     if (request == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     response_len = sizeof(struct inline_ipsec_msg) +
> +                     sizeof(struct virtchnl_ipsec_cap);
> +     response = rte_malloc("iavf-device-capability-response",
> +                     response_len, 0);
> +     if (response == NULL) {
> +             rc = -ENOMEM;
> +             goto update_cleanup;
> +     }
> +
> +     /* set msg header params */
> +     request->ipsec_opcode = INLINE_IPSEC_OP_GET_CAP;
> +     request->req_id = (uint16_t)0xDEADBEEF;
> +
> +     /* send virtual channel request to get device capabilities */
> +     rc = iavf_ipsec_crypto_request(adapter,
> +                     (uint8_t *)request, request_len,
> +                     (uint8_t *)response, response_len);
> +     if (rc)
> +             goto update_cleanup;
> +
> +     /* verify response id */
> +     if (response->ipsec_opcode != request->ipsec_opcode ||
> +             response->req_id != request->req_id) {
> +             rc = -EFAULT;
> +             goto update_cleanup;
> +     }
> +     memcpy(capability, response->ipsec_data.ipsec_cap, sizeof(*capability));
> +
> +update_cleanup:
> +     rte_free(response);
> +     rte_free(request);
> +
> +     return rc;
> +}
> +
> +enum rte_crypto_auth_algorithm auth_maptbl[] = {
> +     /* Hash Algorithm */
> +     [VIRTCHNL_HASH_NO_ALG] = RTE_CRYPTO_AUTH_NULL,
> +     [VIRTCHNL_AES_CBC_MAC] = RTE_CRYPTO_AUTH_AES_CBC_MAC,
> +     [VIRTCHNL_AES_CMAC] = RTE_CRYPTO_AUTH_AES_CMAC,
> +     [VIRTCHNL_AES_GMAC] = RTE_CRYPTO_AUTH_AES_GMAC,
> +     [VIRTCHNL_AES_XCBC_MAC] = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
> +     [VIRTCHNL_MD5_HMAC] = RTE_CRYPTO_AUTH_MD5_HMAC,
> +     [VIRTCHNL_SHA1_HMAC] = RTE_CRYPTO_AUTH_SHA1_HMAC,
> +     [VIRTCHNL_SHA224_HMAC] = RTE_CRYPTO_AUTH_SHA224_HMAC,
> +     [VIRTCHNL_SHA256_HMAC] = RTE_CRYPTO_AUTH_SHA256_HMAC,
> +     [VIRTCHNL_SHA384_HMAC] = RTE_CRYPTO_AUTH_SHA384_HMAC,
> +     [VIRTCHNL_SHA512_HMAC] = RTE_CRYPTO_AUTH_SHA512_HMAC,
> +     [VIRTCHNL_SHA3_224_HMAC] = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
> +     [VIRTCHNL_SHA3_256_HMAC] = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
> +     [VIRTCHNL_SHA3_384_HMAC] = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
> +     [VIRTCHNL_SHA3_512_HMAC] = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
> +};
> +
> +static void
> +update_auth_capabilities(struct rte_cryptodev_capabilities *scap,
> +             struct virtchnl_algo_cap *acap)
> +{
> +     struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
> +
> +     scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
> +
> +     capability->xform_type = RTE_CRYPTO_SYM_XFORM_AUTH;
> +
> +     capability->auth.algo = auth_maptbl[acap->algo_type];
> +     capability->auth.block_size = acap->block_size;
> +
> +     capability->auth.key_size.min = acap->min_key_size;
> +     capability->auth.key_size.max = acap->max_key_size;
> +     capability->auth.key_size.increment = acap->inc_key_size;
> +
> +     capability->auth.digest_size.min = acap->min_digest_size;
> +     capability->auth.digest_size.max = acap->max_digest_size;
> +     capability->auth.digest_size.increment = acap->inc_digest_size;
> +}
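These map tables are indexed directly by the virtchnl algorithm identifier, so an out-of-range value reported by the PF would read past the end of the array. A bounds-checked lookup is a one-liner; a sketch (iavf_auth_algo_xlate is a hypothetical helper, not part of this patch):

    static enum rte_crypto_auth_algorithm
    iavf_auth_algo_xlate(uint32_t virtchnl_algo)
    {
            /* fall back to NULL auth for identifiers outside the table */
            if (virtchnl_algo >= RTE_DIM(auth_maptbl))
                    return RTE_CRYPTO_AUTH_NULL;
            return auth_maptbl[virtchnl_algo];
    }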
> +
> +static const enum rte_crypto_cipher_algorithm cipher_maptbl[] = {
> +     /* Cipher Algorithm */
> +     [VIRTCHNL_CIPHER_NO_ALG] = RTE_CRYPTO_CIPHER_NULL,
> +     [VIRTCHNL_3DES_CBC] = RTE_CRYPTO_CIPHER_3DES_CBC,
> +     [VIRTCHNL_AES_CBC] = RTE_CRYPTO_CIPHER_AES_CBC,
> +     [VIRTCHNL_AES_CTR] = RTE_CRYPTO_CIPHER_AES_CTR,
> +};
> +
> +static void
> +update_cipher_capabilities(struct rte_cryptodev_capabilities *scap,
> +     struct virtchnl_algo_cap *acap)
> +{
> +     struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
> +
> +     scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
> +
> +     capability->xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER;
> +
> +     capability->cipher.algo = cipher_maptbl[acap->algo_type];
> +
> +     capability->cipher.block_size = acap->block_size;
> +
> +     capability->cipher.key_size.min = acap->min_key_size;
> +     capability->cipher.key_size.max = acap->max_key_size;
> +     capability->cipher.key_size.increment = acap->inc_key_size;
> +
> +     capability->cipher.iv_size.min = acap->min_iv_size;
> +     capability->cipher.iv_size.max = acap->max_iv_size;
> +     capability->cipher.iv_size.increment = acap->inc_iv_size;
> +}
> +
> +static const enum rte_crypto_aead_algorithm aead_maptbl[] = {
> +     /* AEAD Algorithm */
> +     [VIRTCHNL_AES_CCM] = RTE_CRYPTO_AEAD_AES_CCM,
> +     [VIRTCHNL_AES_GCM] = RTE_CRYPTO_AEAD_AES_GCM,
> +     [VIRTCHNL_CHACHA20_POLY1305] = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> +};
> +
> +static void
> +update_aead_capabilities(struct rte_cryptodev_capabilities *scap,
> +     struct virtchnl_algo_cap *acap)
> +{
> +     struct rte_cryptodev_symmetric_capability *capability = &scap->sym;
> +
> +     scap->op = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
> +
> +     capability->xform_type = RTE_CRYPTO_SYM_XFORM_AEAD;
> +
> +     capability->aead.algo = aead_maptbl[acap->algo_type];
> +
> +     capability->aead.block_size = acap->block_size;
> +
> +     capability->aead.key_size.min = acap->min_key_size;
> +     capability->aead.key_size.max = acap->max_key_size;
> +     capability->aead.key_size.increment = acap->inc_key_size;
> +
> +     capability->aead.aad_size.min = acap->min_aad_size;
> +     capability->aead.aad_size.max = acap->max_aad_size;
> +     capability->aead.aad_size.increment = acap->inc_aad_size;
> +
> +     capability->aead.iv_size.min = acap->min_iv_size;
> +     capability->aead.iv_size.max = acap->max_iv_size;
> +     capability->aead.iv_size.increment = acap->inc_iv_size;
> +
> +     capability->aead.digest_size.min = acap->min_digest_size;
> +     capability->aead.digest_size.max = acap->max_digest_size;
> +     capability->aead.digest_size.increment = acap->inc_digest_size;
> +}
> +
> +/**
> + * Dynamically set crypto capabilities based on virtchnl IPsec
> + * capabilities structure.
> + */
> +int
> +iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
> +             *iavf_sctx, struct virtchnl_ipsec_cap *vch_cap)
> +{
> +     struct rte_cryptodev_capabilities *capabilities;
> +     int i, j, number_of_capabilities = 0, ci = 0;
> +
> +     /* Count the total number of crypto algorithms supported */
> +     for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++)
> +             number_of_capabilities += vch_cap->cap[i].algo_cap_num;
> +
> +     /**
> +      * Allocate cryptodev capabilities structure for
> +      * *number_of_capabilities* items plus one item to null terminate the
> +      * array
> +      */
> +     capabilities = rte_zmalloc("crypto_cap",
> +             sizeof(struct rte_cryptodev_capabilities) *
> +             (number_of_capabilities + 1), 0);
> +     if (capabilities == NULL)
> +             return -ENOMEM;
> +     capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
> +
> +     /**
> +      * Iterate over each virtchnl crypto capability by crypto type and
> +      * algorithm.
> +      */
> +     for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
> +             for (j = 0; j < vch_cap->cap[i].algo_cap_num; j++, ci++) {
> +                     switch (vch_cap->cap[i].crypto_type) {
> +                     case VIRTCHNL_AUTH:
> +                             update_auth_capabilities(&capabilities[ci],
> +                                     &vch_cap->cap[i].algo_cap_list[j]);
> +                             break;
> +                     case VIRTCHNL_CIPHER:
> +                             update_cipher_capabilities(&capabilities[ci],
> +                                     &vch_cap->cap[i].algo_cap_list[j]);
> +                             break;
> +                     case VIRTCHNL_AEAD:
> +                             update_aead_capabilities(&capabilities[ci],
> +                                     &vch_cap->cap[i].algo_cap_list[j]);
> +                             break;
> +                     default:
> +                             capabilities[ci].op =
> +                                             RTE_CRYPTO_OP_TYPE_UNDEFINED;
> +                             break;
> +                     }
> +             }
> +     }
> +
> +     iavf_sctx->crypto_capabilities = capabilities;
> +     return 0;
> +}
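Because the array built here carries its own RTE_CRYPTO_OP_TYPE_UNDEFINED terminator, consumers never need a separate element count; a sketch of a walker under that convention (iavf_count_crypto_caps is hypothetical):

    static unsigned int
    iavf_count_crypto_caps(const struct rte_cryptodev_capabilities *caps)
    {
            unsigned int n = 0;

            /* the array is terminated by an UNDEFINED op entry */
            while (caps[n].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
                    n++;
            return n;
    }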
> +
> +/**
> + * Get security capabilities for device
> + */
> +static const struct rte_security_capability *
> +iavf_ipsec_crypto_capabilities_get(void *device)
> +{
> +     struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)device;
> +     struct iavf_adapter *adapter =
> +             IAVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
> +     struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +     unsigned int i;
> +
> +     static struct rte_security_capability iavf_security_capabilities[] = {
> +             { /* IPsec Inline Crypto ESP Tunnel Egress */
> +                     .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +                     .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +                     .ipsec = {
> +                             .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +                             .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
> +                             .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
> +                             .options = { .udp_encap = 1,
> +                                             .stats = 1, .esn = 1 },
> +                     },
> +                     .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
> +             },
> +             { /* IPsec Inline Crypto ESP Tunnel Ingress */
> +                     .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +                     .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +                     .ipsec = {
> +                             .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +                             .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
> +                             .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
> +                             .options = { .udp_encap = 1,
> +                                             .stats = 1, .esn = 1 },
> +                     },
> +                     .ol_flags = 0
> +             },
> +             { /* IPsec Inline Crypto ESP Transport Egress */
> +                     .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +                     .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +                     .ipsec = {
> +                             .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +                             .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
> +                             .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
> +                             .options = { .udp_encap = 1, .stats = 1,
> +                                             .esn = 1 },
> +                     },
> +                     .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
> +             },
> +             { /* IPsec Inline Crypto ESP Transport Ingress */
> +                     .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
> +                     .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
> +                     .ipsec = {
> +                             .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
> +                             .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
> +                             .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
> +                             .options = { .udp_encap = 1, .stats = 1,
> +                                             .esn = 1 }
> +                     },
> +                     .ol_flags = 0
> +             },
> +             {
> +                     .action = RTE_SECURITY_ACTION_TYPE_NONE
> +             }
> +     };
> +
> +     /**
> +      * Update the security capabilities struct with the runtime discovered
> +      * crypto capabilities, except for the last element of the array
> +      * which is the NULL termination
> +      */
> +     for (i = 0; i < ((sizeof(iavf_security_capabilities) /
> +                     sizeof(iavf_security_capabilities[0])) - 1); i++) {
> +             iavf_security_capabilities[i].crypto_capabilities =
> +                     iavf_sctx->crypto_capabilities;
> +     }
> +
> +     return iavf_security_capabilities;
> +}
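From an application's point of view these capabilities surface through the generic rte_security API rather than any iavf-specific call; a minimal usage sketch, assuming a valid port_id:

    struct rte_security_ctx *sctx = rte_eth_dev_get_sec_ctx(port_id);
    const struct rte_security_capability *caps;

    if (sctx != NULL) {
            caps = rte_security_capabilities_get(sctx);
            /* iterate until caps[i].action == RTE_SECURITY_ACTION_TYPE_NONE */
    }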
> +
> +static struct rte_security_ops iavf_ipsec_crypto_ops = {
> +     .session_get_size               = iavf_ipsec_crypto_session_size_get,
> +     .session_create                 = iavf_ipsec_crypto_session_create,
> +     .session_update                 = iavf_ipsec_crypto_session_update,
> +     .session_stats_get              = iavf_ipsec_crypto_session_stats_get,
> +     .session_destroy                = iavf_ipsec_crypto_session_destroy,
> +     .set_pkt_metadata               = iavf_ipsec_crypto_pkt_metadata_set,
> +     .get_userdata                   = NULL,
> +     .capabilities_get               = iavf_ipsec_crypto_capabilities_get,
> +};
> +
> +int
> +iavf_security_ctx_create(struct iavf_adapter *adapter)
> +{
> +     struct rte_security_ctx *sctx;
> +
> +     sctx = rte_malloc("security_ctx", sizeof(struct rte_security_ctx), 0);
> +     if (sctx == NULL)
> +             return -ENOMEM;
> +
> +     sctx->device = adapter->vf.eth_dev;
> +     sctx->ops = &iavf_ipsec_crypto_ops;
> +     sctx->sess_cnt = 0;
> +
> +     adapter->vf.eth_dev->security_ctx = sctx;
> +
> +     if (adapter->security_ctx == NULL) {
> +             adapter->security_ctx = rte_malloc("iavf_security_ctx",
> +                             sizeof(struct iavf_security_ctx), 0);
> +             if (adapter->security_ctx == NULL)
> +                     return -ENOMEM;
> +     }
> +
> +     return 0;
> +}
> +
> +int
> +iavf_security_init(struct iavf_adapter *adapter)
> +{
> +     struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +     struct rte_mbuf_dynfield pkt_md_dynfield = {
> +             .name = "iavf_ipsec_crypto_pkt_metadata",
> +             .size = sizeof(struct iavf_ipsec_crypto_pkt_metadata),
> +             .align = __alignof__(struct iavf_ipsec_crypto_pkt_metadata)
> +     };
> +     struct virtchnl_ipsec_cap capabilities;
> +     int rc;
> +
> +     iavf_sctx->adapter = adapter;
> +
> +     iavf_sctx->pkt_md_offset = rte_mbuf_dynfield_register(&pkt_md_dynfield);
> +     if (iavf_sctx->pkt_md_offset < 0)
> +             return iavf_sctx->pkt_md_offset;
> +
> +     /* Get device capabilities from Inline IPsec driver over PF-VF comms */
> +     rc = iavf_ipsec_crypto_device_capabilities_get(adapter, &capabilities);
> +     if (rc)
> +             return rc;
> +
> +     return iavf_ipsec_crypto_set_security_capabililites(iavf_sctx,
> +                     &capabilities);
> +}
> +
> +int
> +iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter)
> +{
> +     struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +
> +     return iavf_sctx->pkt_md_offset;
> +}
> +
> +int
> +iavf_security_ctx_destroy(struct iavf_adapter *adapter)
> +{
> +     struct rte_security_ctx *sctx  = adapter->vf.eth_dev->security_ctx;
> +     struct iavf_security_ctx *iavf_sctx = adapter->security_ctx;
> +
> +     if (iavf_sctx == NULL)
> +             return -ENODEV;
> +
> +     /* TODO: Add resources cleanup */
> +
> +     /* free and reset security data structures */
> +     rte_free(iavf_sctx);
> +     rte_free(sctx);
> +
> +     adapter->security_ctx = NULL;
> +     adapter->vf.eth_dev->security_ctx = NULL;
> +
> +     return 0;
> +}
> +
> +int
> +iavf_ipsec_crypto_supported(struct iavf_adapter *adapter)
> +{
> +     struct virtchnl_vf_resource *resources = adapter->vf.vf_res;
> +
> +     /* Capability check for IPsec Crypto */
> +     if (resources && (resources->vf_cap_flags &
> +                     VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO))
> +             return true;
> +
> +     return false;
> +}
> +
> +#define IAVF_IPSEC_INSET_ESP (\
> +     IAVF_INSET_ESP_SPI)
> +
> +#define IAVF_IPSEC_INSET_AH (\
> +     IAVF_INSET_AH_SPI)
> +
> +#define IAVF_IPSEC_INSET_IPV4_NATT_ESP (\
> +     IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
> +     IAVF_INSET_ESP_SPI)
> +
> +#define IAVF_IPSEC_INSET_IPV6_NATT_ESP (\
> +     IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
> +     IAVF_INSET_ESP_SPI)
> +
> +enum iavf_ipsec_flow_pt_type {
> +     IAVF_PATTERN_ESP = 1,
> +     IAVF_PATTERN_AH,
> +     IAVF_PATTERN_UDP_ESP,
> +};
> +
> +enum iavf_ipsec_flow_pt_ip_ver {
> +     IAVF_PATTERN_IPV4 = 1,
> +     IAVF_PATTERN_IPV6,
> +};
> +
> +#define IAVF_PATTERN(t, ipt) ((void *)((t) | ((ipt) << 4)))
> +#define IAVF_PATTERN_TYPE(pt) ((pt) & 0x0F)
> +#define IAVF_PATTERN_IP_V(pt) ((pt) >> 4)
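A worked example of this meta encoding: both fields are four bits wide, the pattern type in the low nibble and the IP version in the high nibble.

    /* IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)
     * = 3 | (2 << 4) = 0x23 */
    uint32_t pt = (uint32_t)(uintptr_t)IAVF_PATTERN(IAVF_PATTERN_UDP_ESP,
                    IAVF_PATTERN_IPV6);

    RTE_ASSERT(IAVF_PATTERN_TYPE(pt) == IAVF_PATTERN_UDP_ESP); /* 0x3 */
    RTE_ASSERT(IAVF_PATTERN_IP_V(pt) == IAVF_PATTERN_IPV6);    /* 0x2 */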
> +
> +static struct iavf_pattern_match_item iavf_ipsec_flow_pattern[] = {
> +     {iavf_pattern_eth_ipv4_esp,     IAVF_IPSEC_INSET_ESP,
> +                     IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV4)},
> +     {iavf_pattern_eth_ipv6_esp,     IAVF_IPSEC_INSET_ESP,
> +                     IAVF_PATTERN(IAVF_PATTERN_ESP, IAVF_PATTERN_IPV6)},
> +     {iavf_pattern_eth_ipv4_ah,      IAVF_IPSEC_INSET_AH,
> +                     IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV4)},
> +     {iavf_pattern_eth_ipv6_ah,      IAVF_IPSEC_INSET_AH,
> +                     IAVF_PATTERN(IAVF_PATTERN_AH, IAVF_PATTERN_IPV6)},
> +     {iavf_pattern_eth_ipv4_udp_esp, IAVF_IPSEC_INSET_IPV4_NATT_ESP,
> +                     IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV4)},
> +     {iavf_pattern_eth_ipv6_udp_esp, IAVF_IPSEC_INSET_IPV6_NATT_ESP,
> +                     IAVF_PATTERN(IAVF_PATTERN_UDP_ESP, IAVF_PATTERN_IPV6)},
> +};
> +
> +struct iavf_ipsec_flow_item {
> +     uint64_t id;
> +     uint8_t is_ipv4;
> +     uint32_t spi;
> +     struct rte_ether_hdr eth_hdr;
> +     union {
> +             struct rte_ipv4_hdr ipv4_hdr;
> +             struct rte_ipv6_hdr ipv6_hdr;
> +     };
> +     struct rte_udp_hdr udp_hdr;
> +};
> +
> +static void
> +parse_eth_item(const struct rte_flow_item_eth *item,
> +             struct rte_ether_hdr *eth)
> +{
> +     memcpy(eth->src_addr.addr_bytes,
> +                     item->src.addr_bytes, sizeof(eth->src_addr));
> +     memcpy(eth->dst_addr.addr_bytes,
> +                     item->dst.addr_bytes, sizeof(eth->dst_addr));
> +}
> +
> +static void
> +parse_ipv4_item(const struct rte_flow_item_ipv4 *item,
> +             struct rte_ipv4_hdr *ipv4)
> +{
> +     ipv4->src_addr = item->hdr.src_addr;
> +     ipv4->dst_addr = item->hdr.dst_addr;
> +}
> +
> +static void
> +parse_ipv6_item(const struct rte_flow_item_ipv6 *item,
> +             struct rte_ipv6_hdr *ipv6)
> +{
> +     memcpy(ipv6->src_addr, item->hdr.src_addr, 16);
> +     memcpy(ipv6->dst_addr, item->hdr.dst_addr, 16);
> +}
> +
> +static void
> +parse_udp_item(const struct rte_flow_item_udp *item, struct rte_udp_hdr *udp)
> +{
> +     udp->dst_port = item->hdr.dst_port;
> +     udp->src_port = item->hdr.src_port;
> +}
> +
> +static int
> +has_security_action(const struct rte_flow_action actions[],
> +     const void **session)
> +{
> +     /* only {SECURITY; END} supported */
> +     if (actions[0].type == RTE_FLOW_ACTION_TYPE_SECURITY &&
> +             actions[1].type == RTE_FLOW_ACTION_TYPE_END) {
> +             *session = actions[0].conf;
> +             return true;
> +     }
> +     return false;
> +}
> +
> +static struct iavf_ipsec_flow_item *
> +iavf_ipsec_flow_item_parse(struct rte_eth_dev *ethdev,
> +             const struct rte_flow_item pattern[],
> +             const struct rte_flow_action actions[],
> +             uint32_t type)
> +{
> +     const void *session;
> +     struct iavf_ipsec_flow_item *ipsec_flow =
> +             rte_malloc("security-flow-rule",
> +                     sizeof(struct iavf_ipsec_flow_item), 0);
> +     enum iavf_ipsec_flow_pt_type p_type = IAVF_PATTERN_TYPE(type);
> +     enum iavf_ipsec_flow_pt_ip_ver p_ip_type = IAVF_PATTERN_IP_V(type);
> +
> +     if (ipsec_flow == NULL)
> +             return NULL;
> +
> +     ipsec_flow->is_ipv4 = (p_ip_type == IAVF_PATTERN_IPV4);
> +
> +     if (pattern[0].spec)
> +             parse_eth_item((const struct rte_flow_item_eth *)
> +                             pattern[0].spec, &ipsec_flow->eth_hdr);
> +
> +     switch (p_type) {
> +     case IAVF_PATTERN_ESP:
> +             if (ipsec_flow->is_ipv4) {
> +                     parse_ipv4_item((const struct rte_flow_item_ipv4 *)
> +                                     pattern[1].spec,
> +                                     &ipsec_flow->ipv4_hdr);
> +             } else {
> +                     parse_ipv6_item((const struct rte_flow_item_ipv6 *)
> +                                     pattern[1].spec,
> +                                     &ipsec_flow->ipv6_hdr);
> +             }
> +             ipsec_flow->spi =
> +                     ((const struct rte_flow_item_esp *)
> +                                     pattern[2].spec)->hdr.spi;
> +             break;
> +     case IAVF_PATTERN_AH:
> +             if (ipsec_flow->is_ipv4) {
> +                     parse_ipv4_item((const struct rte_flow_item_ipv4 *)
> +                                     pattern[1].spec,
> +                                     &ipsec_flow->ipv4_hdr);
> +             } else {
> +                     parse_ipv6_item((const struct rte_flow_item_ipv6 *)
> +                                     pattern[1].spec,
> +                                     &ipsec_flow->ipv6_hdr);
> +             }
> +             ipsec_flow->spi =
> +                     ((const struct rte_flow_item_ah *)
> +                                     pattern[2].spec)->spi;
> +             break;
> +     case IAVF_PATTERN_UDP_ESP:
> +             if (ipsec_flow->is_ipv4) {
> +                     parse_ipv4_item((const struct rte_flow_item_ipv4 *)
> +                                     pattern[1].spec,
> +                                     &ipsec_flow->ipv4_hdr);
> +             } else {
> +                     parse_ipv6_item((const struct rte_flow_item_ipv6 *)
> +                                     pattern[1].spec,
> +                                     &ipsec_flow->ipv6_hdr);
> +             }
> +             parse_udp_item((const struct rte_flow_item_udp *)
> +                             pattern[2].spec,
> +                     &ipsec_flow->udp_hdr);
> +             ipsec_flow->spi =
> +                     ((const struct rte_flow_item_esp *)
> +                                     pattern[3].spec)->hdr.spi;
> +             break;
> +     default:
> +             goto flow_cleanup;
> +     }
> +
> +     if (!has_security_action(actions, &session))
> +             goto flow_cleanup;
> +
> +     if (!iavf_ipsec_crypto_action_valid(ethdev, session,
> +                     ipsec_flow->spi))
> +             goto flow_cleanup;
> +
> +     return ipsec_flow;
> +
> +flow_cleanup:
> +     rte_free(ipsec_flow);
> +     return NULL;
> +}
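For reference, a flow rule shaped the way this parser expects, i.e. an {eth / ipv4 / esp} pattern with a {security, end} action list; sess, spi, ipv4_spec, attr and port_id are assumed to be set up by the application beforehand:

    struct rte_flow_item_esp esp_spec = {
            .hdr.spi = rte_cpu_to_be_32(spi),
    };
    struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
            { .type = RTE_FLOW_ITEM_TYPE_ESP,  .spec = &esp_spec },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sess },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error err;
    struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
                    actions, &err);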
> +
> +static struct iavf_flow_parser iavf_ipsec_flow_parser;
> +
> +static int
> +iavf_ipsec_flow_init(struct iavf_adapter *ad)
> +{
> +     struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
> +     struct iavf_flow_parser *parser;
> +
> +     if (!vf->vf_res)
> +             return -EINVAL;
> +
> +     if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_INLINE_IPSEC_CRYPTO)
> +             parser = &iavf_ipsec_flow_parser;
> +     else
> +             return -ENOTSUP;
> +
> +     return iavf_register_parser(parser, ad);
> +}
> +
> +static void
> +iavf_ipsec_flow_uninit(struct iavf_adapter *ad)
> +{
> +     iavf_unregister_parser(&iavf_ipsec_flow_parser, ad);
> +}
> +
> +static int
> +iavf_ipsec_flow_create(struct iavf_adapter *ad,
> +             struct rte_flow *flow,
> +             void *meta,
> +             struct rte_flow_error *error)
> +{
> +     struct iavf_ipsec_flow_item *ipsec_flow = meta;
> +
> +     if (!ipsec_flow) {
> +             rte_flow_error_set(error, EINVAL,
> +                             RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +                             "NULL rule.");
> +             return -rte_errno;
> +     }
> +
> +     if (ipsec_flow->is_ipv4) {
> +             ipsec_flow->id =
> +                     iavf_ipsec_crypto_inbound_security_policy_add(ad,
> +                     ipsec_flow->spi,
> +                     1,
> +                     ipsec_flow->ipv4_hdr.dst_addr,
> +                     NULL,
> +                     0);
> +     } else {
> +             ipsec_flow->id =
> +                     iavf_ipsec_crypto_inbound_security_policy_add(ad,
> +                     ipsec_flow->spi,
> +                     0,
> +                     0,
> +                     ipsec_flow->ipv6_hdr.dst_addr,
> +                     0);
> +     }
> +
> +     if (ipsec_flow->id < 1) {
> +             rte_flow_error_set(error, EINVAL,
> +                             RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
> +                             "Failed to add SA.");
> +             return -rte_errno;
> +     }
> +
> +     flow->rule = ipsec_flow;
> +
> +     return 0;
> +}
> +
> +static int
> +iavf_ipsec_flow_destroy(struct iavf_adapter *ad,
> +             struct rte_flow *flow,
> +             struct rte_flow_error *error)
> +{
> +     struct iavf_ipsec_flow_item *ipsec_flow = flow->rule;
> +
> +     if (!ipsec_flow) {
> +             rte_flow_error_set(error, EINVAL,
> +                             RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
> +                             "NULL rule.");
> +             return -rte_errno;
> +     }
> +
> +     iavf_ipsec_crypto_security_policy_delete(ad,
> +                     ipsec_flow->is_ipv4, ipsec_flow->id);
> +     rte_free(ipsec_flow);
> +     return 0;
> +}
> +
> +static struct iavf_flow_engine iavf_ipsec_flow_engine = {
> +     .init = iavf_ipsec_flow_init,
> +     .uninit = iavf_ipsec_flow_uninit,
> +     .create = iavf_ipsec_flow_create,
> +     .destroy = iavf_ipsec_flow_destroy,
> +     .type = IAVF_FLOW_ENGINE_IPSEC_CRYPTO,
> +};
> +
> +static int
> +iavf_ipsec_flow_parse(struct iavf_adapter *ad,
> +                    struct iavf_pattern_match_item *array,
> +                    uint32_t array_len,
> +                    const struct rte_flow_item pattern[],
> +                    const struct rte_flow_action actions[],
> +                    void **meta,
> +                    struct rte_flow_error *error)
> +{
> +     struct iavf_pattern_match_item *item = NULL;
> +     int ret = -1;
> +
> +     item = iavf_search_pattern_match_item(pattern, array, array_len, error);
> +     if (item && item->meta) {
> +             uint32_t type = (uint32_t)(uintptr_t)item->meta;
> +             struct iavf_ipsec_flow_item *fi =
> +                             iavf_ipsec_flow_item_parse(ad->vf.eth_dev,
> +                                             pattern, actions, type);
> +             if (fi && meta) {
> +                     *meta = fi;
> +                     ret = 0;
> +             }
> +     }
> +     return ret;
> +}
> +
> +static struct iavf_flow_parser iavf_ipsec_flow_parser = {
> +     .engine = &iavf_ipsec_flow_engine,
> +     .array = iavf_ipsec_flow_pattern,
> +     .array_len = RTE_DIM(iavf_ipsec_flow_pattern),
> +     .parse_pattern_action = iavf_ipsec_flow_parse,
> +     .stage = IAVF_FLOW_STAGE_IPSEC_CRYPTO,
> +};
> +
> +RTE_INIT(iavf_ipsec_flow_engine_register)
> +{
> +     iavf_register_flow_engine(&iavf_ipsec_flow_engine);
> +}
> diff --git a/drivers/net/iavf/iavf_ipsec_crypto.h b/drivers/net/iavf/iavf_ipsec_crypto.h
> new file mode 100644
> index 0000000000..4e4c8798ec
> --- /dev/null
> +++ b/drivers/net/iavf/iavf_ipsec_crypto.h
> @@ -0,0 +1,160 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +#ifndef _IAVF_IPSEC_CRYPTO_H_
> +#define _IAVF_IPSEC_CRYPTO_H_
> +
> +#include <rte_security.h>
> +
> +#include "iavf.h"
> +
> +struct iavf_tx_ipsec_desc {
> +     union {
> +             struct {
> +                     __le64 qw0;
> +                     __le64 qw1;
> +             };
> +             struct {
> +                     __le16 l4payload_length;
> +                     __le32 esn;
> +                     __le16 trailer_length;
> +                     u8 type:4;
> +                     u8 rsv:1;
> +                     u8 udp:1;
> +                     u8 ivlen:2;
> +                     u8 next_header;
> +                     __le16 ipv6_ext_hdr_length;
> +                     __le32 said;
> +             } __rte_packed;
> +     };
> +} __rte_packed;
> +
> +#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT    0
> +#define IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_MASK     (0x3FFFULL << \
> +                     IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT    16
> +#define IAVF_IPSEC_TX_DESC_QW0_IPSECESN_MASK     (0xFFFFFFFFULL << \
> +                     IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT  48
> +#define IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_MASK   (0x3FULL << \
> +                     IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT         5
> +#define IAVF_IPSEC_TX_DESC_QW1_UDP_MASK          (0x1ULL << \
> +                     IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT       6
> +#define IAVF_IPSEC_TX_DESC_QW1_IVLEN_MASK        (0x3ULL << \
> +                     IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT     8
> +#define IAVF_IPSEC_TX_DESC_QW1_IPSECNH_MASK      (0xFFULL << \
> +                     IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT      16
> +#define IAVF_IPSEC_TX_DESC_QW1_EXTLEN_MASK       (0xFFULL << \
> +                     IAVF_IPSEC_TX_DESC_QW1_EXTLEN_SHIFT)
> +
> +#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT     32
> +#define IAVF_IPSEC_TX_DESC_QW1_IPSECSA_MASK      (0xFFFFFULL << \
> +                     IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT)
> +
> +/* Initialization Vector Length type */
> +enum iavf_ipsec_iv_len {
> +     IAVF_IPSEC_IV_LEN_NONE,         /* No IV */
> +     IAVF_IPSEC_IV_LEN_DW,           /* 4B IV */
> +     IAVF_IPSEC_IV_LEN_DDW,          /* 8B IV */
> +     IAVF_IPSEC_IV_LEN_QDW,          /* 16B IV */
> +};
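The transmit path (iavf_fill_ipsec_desc() in iavf_rxtx.c) reads the metadata's len_iv both as a 2-bit descriptor code (len_iv & 0x3) and as a byte count (len_iv >> 2), which implies the session template packs the field as (bytes << 2) | code. A worked example under that assumption:

    /* assumed packing: IV byte length in the upper bits, the
     * iavf_ipsec_iv_len code in the low two bits; 16-byte IV case */
    uint8_t len_iv = (16 << 2) | IAVF_IPSEC_IV_LEN_QDW;     /* 0x43 */

    uint8_t code  = len_iv & 0x3;   /* IAVF_IPSEC_IV_LEN_QDW (3) */
    uint8_t bytes = len_iv >> 2;    /* 16 */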
> +
> +/* IPsec Crypto Packet Metadata offload flags */
> +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN            (0x1 << 0)
> +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN                       (0x1 << 1)
> +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS     (0x1 << 2)
> +#define IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT                      (0x1 << 3)
> +
> +/**
> + * Packet metadata structure used to hold the parameters required by
> + * the iAVF transmit data path. The parameters are set per session by
> + * calling the rte_security_set_pkt_metadata() API.
> + */
> +struct iavf_ipsec_crypto_pkt_metadata {
> +     uint32_t sa_idx;                /* SA hardware index (20b/4B) */
> +
> +     uint8_t ol_flags;               /* flags (1B) */
> +     uint8_t len_iv;                 /* IV length (2b/1B) */
> +     uint8_t ctx_desc_ipsec_params;  /* IPsec params for ctx desc (7b/1B) */
> +     uint8_t esp_trailer_len;        /* ESP trailer length (6b/1B) */
> +
> +     uint16_t l4_payload_len;        /* L4 payload length */
> +     uint8_t ipv6_ext_hdrs_len;      /* IPv6 extension headers len (5b/1B) */
> +     uint8_t next_proto;             /* Next Protocol (8b/1B) */
> +
> +     uint32_t esn;                   /* Extended Sequence Number (32b/4B) */
> +} __rte_packed;
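On the application side, attaching this metadata is done per packet through the generic API before transmission; a hedged sketch, where m is the mbuf to send, sess is the device's rte_security session and sqn an optional 64-bit extended sequence number:

    struct rte_security_ctx *sctx = rte_eth_dev_get_sec_ctx(port_id);
    uint64_t sqn = next_esn(sa);    /* hypothetical application helper */

    /* copies the session's template into the mbuf dynfield and fills
     * the per-packet fields (payload/trailer lengths, ESN high word) */
    if (rte_security_set_pkt_metadata(sctx, sess, m, &sqn) != 0)
            return -1;
    m->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;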
> +
> +/**
> + * Check whether inline IPsec Crypto offload is supported
> + */
> +int
> +iavf_ipsec_crypto_supported(struct iavf_adapter *adapter);
> +
> +/**
> + * Create security context
> + */
> +int iavf_security_ctx_create(struct iavf_adapter *adapter);
> +
> +/**
> + * Initialize the security context
> + */
> +int iavf_security_init(struct iavf_adapter *adapter);
> +
> +/**
> + * Set security capabilities
> + */
> +int iavf_ipsec_crypto_set_security_capabililites(struct iavf_security_ctx
> +             *iavf_sctx, struct virtchnl_ipsec_cap *virtchnl_capabilities);
> +
> +int iavf_security_get_pkt_md_offset(struct iavf_adapter *adapter);
> +
> +/**
> + * Destroy security context
> + */
> +int iavf_security_ctx_destroy(struct iavf_adapter *adapter);
> +
> +/**
> + * Verify that the inline IPsec Crypto action is valid for this device
> + */
> +uint32_t
> +iavf_ipsec_crypto_action_valid(struct rte_eth_dev *ethdev,
> +     const struct rte_security_session *session, uint32_t spi);
> +
> +/**
> + * Add inbound security policy rule to hardware
> + */
> +int
> +iavf_ipsec_crypto_inbound_security_policy_add(struct iavf_adapter *adapter,
> +     uint32_t esp_spi,
> +     uint8_t is_v4,
> +     rte_be32_t v4_dst_addr,
> +     uint8_t *v6_dst_addr,
> +     uint8_t drop);
> +
> +/**
> + * Delete inbound security policy rule from hardware
> + */
> +int
> +iavf_ipsec_crypto_security_policy_delete(struct iavf_adapter *adapter,
> +     uint8_t is_v4, uint32_t flow_id);
> +
> +#endif /* _IAVF_IPSEC_CRYPTO_H_ */
> diff --git a/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
> new file mode 100644
> index 0000000000..70ce8dd638
> --- /dev/null
> +++ b/drivers/net/iavf/iavf_ipsec_crypto_capabilities.h
> @@ -0,0 +1,383 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2020 Intel Corporation
> + */
> +
> +#ifndef _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
> +#define _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_
> +
> +static const struct rte_cryptodev_capabilities iavf_crypto_capabilities[] = {
> +     {       /* SHA1 HMAC */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +                     {.auth = {
> +                             .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
> +                             .block_size = 64,
> +                             .key_size = {
> +                                     .min = 1,
> +                                     .max = 64,
> +                                     .increment = 1
> +                             },
> +                             .digest_size = {
> +                                     .min = 20,
> +                                     .max = 20,
> +                                     .increment = 0
> +                             },
> +                             .iv_size = { 0 }
> +                     }, }
> +             }, }
> +     },
> +     {       /* SHA256 HMAC */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +                     {.auth = {
> +                             .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
> +                             .block_size = 64,
> +                             .key_size = {
> +                                     .min = 1,
> +                                     .max = 64,
> +                                     .increment = 1
> +                             },
> +                             .digest_size = {
> +                                     .min = 32,
> +                                     .max = 32,
> +                                     .increment = 0
> +                             },
> +                             .iv_size = { 0 }
> +                     }, }
> +             }, }
> +     },
> +     {       /* SHA384 HMAC */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +                     {.auth = {
> +                             .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
> +                             .block_size = 128,
> +                             .key_size = {
> +                                     .min = 1,
> +                                     .max = 128,
> +                                     .increment = 1
> +                             },
> +                             .digest_size = {
> +                                     .min = 48,
> +                                     .max = 48,
> +                                     .increment = 0
> +                             },
> +                             .iv_size = { 0 }
> +                     }, }
> +             }, }
> +     },
> +     {       /* SHA512 HMAC */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +                     {.auth = {
> +                             .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
> +                             .block_size = 128,
> +                             .key_size = {
> +                                     .min = 1,
> +                                     .max = 128,
> +                                     .increment = 1
> +                             },
> +                             .digest_size = {
> +                                     .min = 64,
> +                                     .max = 64,
> +                                     .increment = 0
> +                             },
> +                             .iv_size = { 0 }
> +                     }, }
> +             }, }
> +     },
> +     {       /* MD5 HMAC */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +                     {.auth = {
> +                             .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
> +                             .block_size = 64,
> +                             .key_size = {
> +                                     .min = 1,
> +                                     .max = 64,
> +                                     .increment = 1
> +                             },
> +                             .digest_size = {
> +                                     .min = 16,
> +                                     .max = 16,
> +                                     .increment = 0
> +                             },
> +                             .iv_size = { 0 }
> +                     }, }
> +             }, }
> +     },
> +     {       /* AES XCBC MAC */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +                     {.auth = {
> +                             .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
> +                             .block_size = 16,
> +                             .key_size = {
> +                                     .min = 16,
> +                                     .max = 16,
> +                                     .increment = 0
> +                             },
> +                             .digest_size = {
> +                                     .min = 16,
> +                                     .max = 16,
> +                                     .increment = 0
> +                             },
> +                             .aad_size = { 0 },
> +                             .iv_size = { 0 }
> +                     }, }
> +             }, }
> +     },
> +     {       /* AES GCM */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +                     {.aead = {
> +                             .algo = RTE_CRYPTO_AEAD_AES_GCM,
> +                             .block_size = 16,
> +                             .key_size = {
> +                                     .min = 16,
> +                                     .max = 32,
> +                                     .increment = 8
> +                             },
> +                             .digest_size = {
> +                                     .min = 8,
> +                                     .max = 16,
> +                                     .increment = 4
> +                             },
> +                             .aad_size = {
> +                                     .min = 0,
> +                                     .max = 240,
> +                                     .increment = 1
> +                             },
> +                             .iv_size = {
> +                                     .min = 8,
> +                                     .max = 8,
> +                                     .increment = 0
> +                             },
> +                     }, }
> +             }, }
> +     },
> +     {       /* ChaCha20-Poly1305 */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +                     {.aead = {
> +                             .algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305,
> +                             .block_size = 16,
> +                             .key_size = {
> +                                     .min = 32,
> +                                     .max = 32,
> +                                     .increment = 0
> +                             },
> +                             .digest_size = {
> +                                     .min = 8,
> +                                     .max = 16,
> +                                     .increment = 4
> +                             },
> +                             .aad_size = {
> +                                     .min = 0,
> +                                     .max = 240,
> +                                     .increment = 1
> +                             },
> +                             .iv_size = {
> +                                     .min = 12,
> +                                     .max = 12,
> +                                     .increment = 0
> +                             },
> +                     }, }
> +             }, }
> +     },
> +     {       /* AES CCM */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
> +                     {.aead = {
> +                             .algo = RTE_CRYPTO_AEAD_AES_CCM,
> +                             .block_size = 16,
> +                             .key_size = {
> +                                     .min = 16,
> +                                     .max = 32,
> +                                     .increment = 8
> +                             },
> +                             .digest_size = {
> +                                     .min = 8,
> +                                     .max = 16,
> +                                     .increment = 4
> +                             },
> +                             .aad_size = {
> +                                     .min = 0,
> +                                     .max = 240,
> +                                     .increment = 1
> +                             },
> +                             .iv_size = {
> +                                     .min = 12,
> +                                     .max = 12,
> +                                     .increment = 0
> +                             },
> +                     }, }
> +             }, }
> +     },
> +     {       /* AES GMAC (AUTH) */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +                     {.auth = {
> +                             .algo = RTE_CRYPTO_AUTH_AES_GMAC,
> +                             .block_size = 16,
> +                             .key_size = {
> +                                     .min = 16,
> +                                     .max = 32,
> +                                     .increment = 8
> +                             },
> +                             .digest_size = {
> +                                     .min = 8,
> +                                     .max = 16,
> +                                     .increment = 4
> +                             },
> +                             .iv_size = {
> +                                     .min = 12,
> +                                     .max = 12,
> +                                     .increment = 0
> +                             }
> +                     }, }
> +             }, }
> +     },
> +     {       /* AES CMAC (AUTH) */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +                     {.auth = {
> +                             .algo = RTE_CRYPTO_AUTH_AES_CMAC,
> +                             .block_size = 16,
> +                             .key_size = {
> +                                     .min = 16,
> +                                     .max = 32,
> +                                     .increment = 8
> +                             },
> +                             .digest_size = {
> +                                     .min = 8,
> +                                     .max = 16,
> +                                     .increment = 4
> +                             },
> +                             .iv_size = {
> +                                     .min = 12,
> +                                     .max = 12,
> +                                     .increment = 0
> +                             }
> +                     }, }
> +             }, }
> +     },
> +     {       /* AES CBC */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
> +                     {.cipher = {
> +                             .algo = RTE_CRYPTO_CIPHER_AES_CBC,
> +                             .block_size = 16,
> +                             .key_size = {
> +                                     .min = 16,
> +                                     .max = 32,
> +                                     .increment = 8
> +                             },
> +                             .iv_size = {
> +                                     .min = 16,
> +                                     .max = 16,
> +                                     .increment = 0
> +                             }
> +                     }, }
> +             }, }
> +     },
> +     {       /* AES CTR */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
> +                     {.cipher = {
> +                             .algo = RTE_CRYPTO_CIPHER_AES_CTR,
> +                             .block_size = 16,
> +                             .key_size = {
> +                                     .min = 16,
> +                                     .max = 32,
> +                                     .increment = 8
> +                             },
> +                             .iv_size = {
> +                                     .min = 8,
> +                                     .max = 8,
> +                                     .increment = 0
> +                             }
> +                     }, }
> +             }, }
> +     },
> +     {       /* NULL (AUTH) */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
> +                     {.auth = {
> +                             .algo = RTE_CRYPTO_AUTH_NULL,
> +                             .block_size = 1,
> +                             .key_size = {
> +                                     .min = 0,
> +                                     .max = 0,
> +                                     .increment = 0
> +                             },
> +                             .digest_size = {
> +                                     .min = 0,
> +                                     .max = 0,
> +                                     .increment = 0
> +                             },
> +                             .iv_size = { 0 }
> +                     }, },
> +             }, },
> +     },
> +     {       /* NULL (CIPHER) */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
> +                     {.cipher = {
> +                             .algo = RTE_CRYPTO_CIPHER_NULL,
> +                             .block_size = 1,
> +                             .key_size = {
> +                                     .min = 0,
> +                                     .max = 0,
> +                                     .increment = 0
> +                             },
> +                             .iv_size = {
> +                                     .min = 0,
> +                                     .max = 0,
> +                                     .increment = 0
> +                             }
> +                     }, },
> +             }, }
> +     },
> +     {       /* 3DES CBC */
> +             .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
> +             {.sym = {
> +                     .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
> +                     {.cipher = {
> +                             .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
> +                             .block_size = 8,
> +                             .key_size = {
> +                                     .min = 24,
> +                                     .max = 24,
> +                                     .increment = 0
> +                             },
> +                             .iv_size = {
> +                                     .min = 8,
> +                                     .max = 8,
> +                                     .increment = 0
> +                             }
> +                     }, }
> +             }, }
> +     },
> +     {
> +             .op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
> +     }
> +};
> +
> +#endif /* _IAVF_IPSEC_CRYPTO_CAPABILITIES_H_ */
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
> index 128691aaf1..80438f9f8a 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -27,6 +27,7 @@
> 
>  #include "iavf.h"
>  #include "iavf_rxtx.h"
> +#include "iavf_ipsec_crypto.h"
>  #include "rte_pmd_iavf.h"
> 
>  /* Offset of mbuf dynamic field for protocol extraction's metadata */
> @@ -39,6 +40,7 @@ uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
>  uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
>  uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
>  uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
> +uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
> 
>  uint8_t
>  iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
> @@ -51,6 +53,8 @@ iavf_proto_xtr_type_to_rxdid(uint8_t flex_type)
>               [IAVF_PROTO_XTR_IPV6_FLOW] = IAVF_RXDID_COMMS_AUX_IPV6_FLOW,
>               [IAVF_PROTO_XTR_TCP]       = IAVF_RXDID_COMMS_AUX_TCP,
>               [IAVF_PROTO_XTR_IP_OFFSET] = IAVF_RXDID_COMMS_AUX_IP_OFFSET,
> +             [IAVF_PROTO_XTR_IPSEC_CRYPTO_SAID] =
> +                             IAVF_RXDID_COMMS_IPSEC_CRYPTO,
>       };
> 
>       return flex_type < RTE_DIM(rxdid_map) ?
> @@ -508,6 +512,12 @@ iavf_select_rxd_to_pkt_fields_handler(struct iavf_rx_queue *rxq, uint32_t rxdid)
>               rxq->rxd_to_pkt_fields =
>                       iavf_rxd_to_pkt_fields_by_comms_aux_v2;
>               break;
> +     case IAVF_RXDID_COMMS_IPSEC_CRYPTO:
> +             rxq->xtr_ol_flag =
> +                     rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
> +             rxq->rxd_to_pkt_fields =
> +                     iavf_rxd_to_pkt_fields_by_comms_aux_v2;
> +             break;
>       case IAVF_RXDID_COMMS_OVS_1:
>               rxq->rxd_to_pkt_fields = iavf_rxd_to_pkt_fields_by_comms_ovs;
>               break;
> @@ -692,6 +702,8 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
>                      const struct rte_eth_txconf *tx_conf)
>  {
>       struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> +     struct iavf_adapter *adapter =
> +             IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>       struct iavf_info *vf =
>               IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
>       struct iavf_tx_queue *txq;
> @@ -736,9 +748,9 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
>               return -ENOMEM;
>       }
> 
> -     if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
> +     if (adapter->vf.vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
>               struct virtchnl_vlan_supported_caps *insertion_support =
> -                     &vf->vlan_v2_caps.offloads.insertion_support;
> +                     &adapter->vf.vlan_v2_caps.offloads.insertion_support;
>               uint32_t insertion_cap;
> 
>               if (insertion_support->outer)
> @@ -762,6 +774,10 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
>       txq->offloads = offloads;
>       txq->tx_deferred_start = tx_conf->tx_deferred_start;
> 
> +     if (iavf_ipsec_crypto_supported(adapter))
> +             txq->ipsec_crypto_pkt_md_offset =
> +                     iavf_security_get_pkt_md_offset(adapter);
> +
>       /* Allocate software ring */
>       txq->sw_ring =
>               rte_zmalloc_socket("iavf tx sw ring",
> @@ -1081,6 +1097,70 @@ iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
>  #endif
>  }
> 
> +static inline void
> +iavf_flex_rxd_to_ipsec_crypto_said_get(struct rte_mbuf *mb,
> +                       volatile union iavf_rx_flex_desc *rxdp)
> +{
> +     volatile struct iavf_32b_rx_flex_desc_comms_ipsec *desc =
> +             (volatile struct iavf_32b_rx_flex_desc_comms_ipsec *)rxdp;
> +
> +     mb->dynfield1[0] = desc->ipsec_said &
> +                      IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_SAID_MASK;
> +}
> +
> +static inline void
> +iavf_flex_rxd_to_ipsec_crypto_status(struct rte_mbuf *mb,
> +                       volatile union iavf_rx_flex_desc *rxdp,
> +                       struct iavf_ipsec_crypto_stats *stats)
> +{
> +     uint16_t status1 = rte_le_to_cpu_16(rxdp->wb.status_error1);
> +
> +     if (status1 & BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED)) {
> +             uint16_t ipsec_status;
> +
> +             mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
> +
> +             ipsec_status = status1 &
> +                     IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK;
> +
> +             if (unlikely(ipsec_status !=
> +                     IAVF_IPSEC_CRYPTO_STATUS_SUCCESS)) {
> +                     mb->ol_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
> +
> +                     switch (ipsec_status) {
> +                     case IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS:
> +                             stats->ierrors.sad_miss++;
> +                             break;
> +                     case IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED:
> +                             stats->ierrors.not_processed++;
> +                             break;
> +                     case IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL:
> +                             stats->ierrors.icv_check++;
> +                             break;
> +                     case IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR:
> +                             stats->ierrors.ipsec_length++;
> +                             break;
> +                     case IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR:
> +                             stats->ierrors.misc++;
> +                             break;
> +                     }
> +
> +                     stats->ierrors.count++;
> +                     return;
> +             }
> +
> +             stats->icount++;
> +             stats->ibytes += rxdp->wb.pkt_len & 0x3FFF;
> +
> +             if (rxdp->wb.rxdid == IAVF_RXDID_COMMS_IPSEC_CRYPTO &&
> +                 ipsec_status != IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS)
> +                     iavf_flex_rxd_to_ipsec_crypto_said_get(mb, rxdp);
> +     }
> +}
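On receive, the translation above reaches applications as standard mbuf flags; a sketch of the consuming side:

    /* after rte_eth_rx_burst(), for each received mbuf m */
    if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
            if (m->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) {
                    /* inline processing failed: SAD miss, ICV check, ... */
                    rte_pktmbuf_free(m);
            } else {
                    /* payload already decrypted and authenticated by HW */
                    process_inbound(m);     /* hypothetical handler */
            }
    }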
> +
>  /* Translate the rx descriptor status and error fields to pkt flags */
>  static inline uint64_t
>  iavf_rxd_to_pkt_flags(uint64_t qword)
> @@ -1399,6 +1479,8 @@ iavf_recv_pkts_flex_rxd(void *rx_queue,
>               rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
>                       rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
>               iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
> +             iavf_flex_rxd_to_ipsec_crypto_status(rxm, &rxd,
> +                             &rxq->stats.ipsec_crypto);
>               rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
>               pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
>               rxm->ol_flags |= pkt_flags;
> @@ -1541,6 +1623,8 @@ iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
>               first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
>                       rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
>               iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
> +             iavf_flex_rxd_to_ipsec_crypto_status(first_seg, &rxd,
> +                             &rxq->stats.ipsec_crypto);
>               rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
>               pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
> 
> @@ -1779,6 +1863,8 @@ iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
>                       mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
>                               rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
>                       iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
> +                     iavf_flex_rxd_to_ipsec_crypto_status(mb, &rxdp[j],
> +                             &rxq->stats.ipsec_crypto);
>                       rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
>                       stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
>                       pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
> @@ -2091,6 +2177,18 @@ iavf_fill_ctx_desc_cmd_field(volatile uint64_t *field, struct rte_mbuf *m)
>       *field |= cmd;
>  }
> 
> +static inline void
> +iavf_fill_ctx_desc_ipsec_field(volatile uint64_t *field,
> +     struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
> +{
> +     uint64_t ipsec_field =
> +             (uint64_t)ipsec_md->ctx_desc_ipsec_params <<
> +                     IAVF_TXD_CTX_QW1_IPSEC_PARAMS_CIPHERBLK_SHIFT;
> +
> +     *field |= ipsec_field;
> +}
> +
>  static inline void
>  iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
>               const struct rte_mbuf *m)
> @@ -2123,15 +2221,19 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t *qw0,
> 
>  static inline uint16_t
>  iavf_fill_ctx_desc_segmentation_field(volatile uint64_t *field,
> -     struct rte_mbuf *m)
> +     struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md)
>  {
>       uint64_t segmentation_field = 0;
>       uint64_t total_length = 0;
> 
> -     total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
> +     if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
> +             total_length = ipsec_md->l4_payload_len;
> +     } else {
> +             total_length = m->pkt_len - (m->l2_len + m->l3_len + m->l4_len);
> 
> -     if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> -             total_length -= m->outer_l3_len;
> +             if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
> +                     total_length -= m->outer_l3_len;
> +     }
> 
>  #ifdef RTE_LIBRTE_IAVF_DEBUG_TX
>       if (!m->l4_len || !m->tso_segsz)
> @@ -2160,7 +2262,8 @@ struct iavf_tx_context_desc_qws {
> 
>  static inline void
>  iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
> -     struct rte_mbuf *m, uint16_t *tlen)
> +     struct rte_mbuf *m, struct iavf_ipsec_crypto_pkt_metadata *ipsec_md,
> +     uint16_t *tlen)
>  {
>       volatile struct iavf_tx_context_desc_qws *desc_qws =
>                       (volatile struct iavf_tx_context_desc_qws *)desc;
> @@ -2172,8 +2275,13 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
> 
>       /* fill segmentation field */
>       if (m->ol_flags & (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG)) {
> +             /* fill IPsec field */
> +             if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
> +                     iavf_fill_ctx_desc_ipsec_field(&desc_qws->qw1,
> +                             ipsec_md);
> +
>               *tlen = iavf_fill_ctx_desc_segmentation_field(&desc_qws->qw1,
> -                             m);
> +                             m, ipsec_md);
>       }
> 
>       /* fill tunnelling field */
> @@ -2187,6 +2295,38 @@ iavf_fill_context_desc(volatile struct iavf_tx_context_desc *desc,
>  }
> 
> 
> +static inline void
> +iavf_fill_ipsec_desc(volatile struct iavf_tx_ipsec_desc *desc,
> +     const struct iavf_ipsec_crypto_pkt_metadata *md, uint16_t *ipsec_len)
> +{
> +     desc->qw0 = rte_cpu_to_le_64(((uint64_t)md->l4_payload_len <<
> +             IAVF_IPSEC_TX_DESC_QW0_L4PAYLEN_SHIFT) |
> +             ((uint64_t)md->esn << IAVF_IPSEC_TX_DESC_QW0_IPSECESN_SHIFT) |
> +             ((uint64_t)md->esp_trailer_len <<
> +                             IAVF_IPSEC_TX_DESC_QW0_TRAILERLEN_SHIFT));
> +
> +     desc->qw1 = rte_cpu_to_le_64(((uint64_t)md->sa_idx <<
> +             IAVF_IPSEC_TX_DESC_QW1_IPSECSA_SHIFT) |
> +             ((uint64_t)md->next_proto <<
> +                             IAVF_IPSEC_TX_DESC_QW1_IPSECNH_SHIFT) |
> +             ((uint64_t)(md->len_iv & 0x3) <<
> +                             IAVF_IPSEC_TX_DESC_QW1_IVLEN_SHIFT) |
> +             ((uint64_t)(md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
> +                             1ULL : 0ULL) <<
> +                             IAVF_IPSEC_TX_DESC_QW1_UDP_SHIFT) |
> +             (uint64_t)IAVF_TX_DESC_DTYPE_IPSEC);
> +
> +     /**
> +      * TODO: pre-calculate this during session initialization.
> +      *
> +      * Calculates the IPsec overhead that the data descriptor path
> +      * must account for when TSO offload is enabled.
> +      */
> +     *ipsec_len = sizeof(struct rte_esp_hdr) + (md->len_iv >> 2) +
> +                     (md->ol_flags & IAVF_IPSEC_CRYPTO_OL_FLAGS_NATT ?
> +                     sizeof(struct rte_udp_hdr) : 0);
> +}
> +
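To make the TODO concrete, a worked example of the overhead computed above, assuming the encoding the code already implies (len_iv >> 2 yields the IV size in bytes) and an 8-byte IV with NAT-T UDP encapsulation:

    uint16_t ipsec_len = sizeof(struct rte_esp_hdr)   /* 8 bytes        */
                    + (32 >> 2)                       /* 8-byte IV      */
                    + sizeof(struct rte_udp_hdr);     /* 8 bytes, NAT-T */
    /* ipsec_len == 24: per-segment ESP overhead when TSO splits payload */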
>  static inline void
>  iavf_build_data_desc_cmd_offset_fields(volatile uint64_t *qw1,
>               struct rte_mbuf *m)
> @@ -2298,6 +2438,17 @@ iavf_fill_data_desc(volatile struct iavf_tx_desc *desc,
>  }
> 
> 
> +static struct iavf_ipsec_crypto_pkt_metadata *
> +iavf_ipsec_crypto_get_pkt_metadata(const struct iavf_tx_queue *txq,
> +             struct rte_mbuf *m)
> +{
> +     if (m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)
> +             return RTE_MBUF_DYNFIELD(m, txq->ipsec_crypto_pkt_md_offset,
> +                             struct iavf_ipsec_crypto_pkt_metadata *);
> +
> +     return NULL;
> +}
> +
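This dynamic-field read is the driver side of the rte_security metadata contract: the application must populate the field per packet before transmit. An app-side sketch, with sec_ctx/sec_sess coming from the usual rte_security session setup (not shown):

    if (rte_security_set_pkt_metadata(sec_ctx, sec_sess, m, NULL) != 0)
            return -1; /* could not attach IPsec Tx metadata */
    m->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;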
>  /* TX function */
>  uint16_t
>  iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> @@ -2326,7 +2477,9 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> 
>       for (idx = 0; idx < nb_pkts; idx++) {
>               volatile struct iavf_tx_desc *ddesc;
> -             uint16_t nb_desc_ctx;
> +             struct iavf_ipsec_crypto_pkt_metadata *ipsec_md;
> +
> +             uint16_t nb_desc_ctx, nb_desc_ipsec;
>               uint16_t nb_desc_data, nb_desc_required;
>               uint16_t tlen = 0, ipseclen = 0;
>               uint64_t ddesc_template = 0;
> @@ -2336,16 +2489,23 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> 
>               RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
> 
> +             /**
> +              * Get the IPsec crypto metadata from the mbuf dynamic
> +              * field if the security offload is requested.
> +              */
> +             ipsec_md = iavf_ipsec_crypto_get_pkt_metadata(txq, mb);
> +
>               nb_desc_data = mb->nb_segs;
>               nb_desc_ctx = !!(mb->ol_flags &
>                       (RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG |
>                        RTE_MBUF_F_TX_TUNNEL_MASK));
> +             nb_desc_ipsec = !!(mb->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
> 
>               /**
>                * The number of descriptors that must be allocated for
>                * a packet equals the number of segments of that packet
>                * plus the context and IPsec descriptors, if needed.
>                */
> -             nb_desc_required = nb_desc_data + nb_desc_ctx;
> +             nb_desc_required = nb_desc_data + nb_desc_ctx + nb_desc_ipsec;
> 
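For example, a two-segment TSO packet with inline IPsec costs four descriptors at this point:

    /* nb_desc_data = 2 (nb_segs), nb_desc_ctx = 1 (TSO),
     * nb_desc_ipsec = 1 (SEC_OFFLOAD) -> nb_desc_required = 4
     */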
>               desc_idx_last = (uint16_t)(desc_idx + nb_desc_required - 1);
> 
> @@ -2396,7 +2556,7 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>                               txe->mbuf = NULL;
>                       }
> 
> -                     iavf_fill_context_desc(ctx_desc, mb, &tlen);
> +                     iavf_fill_context_desc(ctx_desc, mb, ipsec_md, &tlen);
>                       IAVF_DUMP_TX_DESC(txq, ctx_desc, desc_idx);
> 
>                       txe->last_id = desc_idx_last;
> @@ -2404,7 +2564,27 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>                       txe = txn;
>                       }
> 
> +             if (nb_desc_ipsec) {
> +                     volatile struct iavf_tx_ipsec_desc *ipsec_desc =
> +                             (volatile struct iavf_tx_ipsec_desc *)
> +                                     &txr[desc_idx];
> +
> +                     txn = &txe_ring[txe->next_id];
> +                     RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
> +
> +                     if (txe->mbuf) {
> +                             rte_pktmbuf_free_seg(txe->mbuf);
> +                             txe->mbuf = NULL;
> +                     }
> +
> +                     iavf_fill_ipsec_desc(ipsec_desc, ipsec_md, &ipseclen);
> +
> +                     IAVF_DUMP_TX_DESC(txq, ipsec_desc, desc_idx);
> +
> +                     txe->last_id = desc_idx_last;
> +                     desc_idx = txe->next_id;
> +                     txe = txn;
> +             }
> 
>               mb_seg = mb;
> 
> diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
> index 1da1278452..b88c81f8f6 100644
> --- a/drivers/net/iavf/iavf_rxtx.h
> +++ b/drivers/net/iavf/iavf_rxtx.h
> @@ -25,7 +25,8 @@
> 
>  #define IAVF_TX_NO_VECTOR_FLAGS (                             \
>               RTE_ETH_TX_OFFLOAD_MULTI_SEGS |          \
> -             RTE_ETH_TX_OFFLOAD_TCP_TSO)
> +             RTE_ETH_TX_OFFLOAD_TCP_TSO |             \
> +             RTE_ETH_TX_OFFLOAD_SECURITY)
> 
>  #define IAVF_TX_VECTOR_OFFLOAD (                              \
>               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |                 \
> @@ -36,10 +37,10 @@
>               RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
> 
>  #define IAVF_RX_VECTOR_OFFLOAD (                              \
> -             RTE_ETH_RX_OFFLOAD_CHECKSUM |            \
> -             RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |          \
> -             RTE_ETH_RX_OFFLOAD_VLAN |                \
> -             RTE_ETH_RX_OFFLOAD_RSS_HASH)
> +             RTE_ETH_RX_OFFLOAD_CHECKSUM |            \
> +             RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |          \
> +             RTE_ETH_RX_OFFLOAD_VLAN |                \
> +             RTE_ETH_RX_OFFLOAD_RSS_HASH)
> 
>  #define IAVF_VECTOR_PATH 0
>  #define IAVF_VECTOR_OFFLOAD_PATH 1
> @@ -47,23 +48,26 @@
>  #define DEFAULT_TX_RS_THRESH     32
>  #define DEFAULT_TX_FREE_THRESH   32
> 
> -#define IAVF_MIN_TSO_MSS          88
> +#define IAVF_MIN_TSO_MSS          256
>  #define IAVF_MAX_TSO_MSS          9668
>  #define IAVF_TSO_MAX_SEG          UINT8_MAX
>  #define IAVF_TX_MAX_MTU_SEG       8
> 
> -#define IAVF_TX_CKSUM_OFFLOAD_MASK (RTE_MBUF_F_TX_IP_CKSUM |          \
> +#define IAVF_TX_CKSUM_OFFLOAD_MASK (          \
> +             RTE_MBUF_F_TX_IP_CKSUM |                 \
>               RTE_MBUF_F_TX_L4_MASK |          \
>               RTE_MBUF_F_TX_TCP_SEG)
> 
> -#define IAVF_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 |              \
> +#define IAVF_TX_OFFLOAD_MASK (  \
> +             RTE_MBUF_F_TX_OUTER_IPV6 |               \
>               RTE_MBUF_F_TX_OUTER_IPV4 |               \
>               RTE_MBUF_F_TX_IPV6 |                     \
>               RTE_MBUF_F_TX_IPV4 |                     \
>               RTE_MBUF_F_TX_VLAN |             \
>               RTE_MBUF_F_TX_IP_CKSUM |                 \
>               RTE_MBUF_F_TX_L4_MASK |          \
> -             RTE_MBUF_F_TX_TCP_SEG)
> +             RTE_MBUF_F_TX_TCP_SEG |          \
> +             RTE_MBUF_F_TX_SEC_OFFLOAD)
> 
>  #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
>               (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
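As a reminder of how the XOR above is consumed: any mbuf flag outside IAVF_TX_OFFLOAD_MASK must be rejected in the prepare stage. Roughly, inside the per-packet loop of an iavf_prep_pkts()-style callback (m and i are the loop's mbuf and index):

    if (m->ol_flags & IAVF_TX_OFFLOAD_NOTSUP_MASK) {
            rte_errno = ENOTSUP;
            return i; /* packets accepted so far */
    }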
> @@ -161,6 +165,24 @@ struct iavf_txq_ops {
>       void (*release_mbufs)(struct iavf_tx_queue *txq);
>  };
> 
> +struct iavf_ipsec_crypto_stats {
> +     uint64_t icount;
> +     uint64_t ibytes;
> +     struct {
> +             uint64_t count;
> +             uint64_t sad_miss;
> +             uint64_t not_processed;
> +             uint64_t icv_check;
> +             uint64_t ipsec_length;
> +             uint64_t misc;
> +     } ierrors;
> +};
> +
> +struct iavf_rx_queue_stats {
> +     uint64_t reserved;
> +     struct iavf_ipsec_crypto_stats ipsec_crypto;
> +};
> +
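These per-queue counters presumably get folded into device-level stats elsewhere in the series; a hypothetical aggregation helper, for illustration only:

    static void
    iavf_ipsec_crypto_stats_add(struct iavf_ipsec_crypto_stats *tot,
            const struct iavf_ipsec_crypto_stats *q)
    {
            tot->icount += q->icount;
            tot->ibytes += q->ibytes;
            tot->ierrors.count += q->ierrors.count;
            tot->ierrors.sad_miss += q->ierrors.sad_miss;
            tot->ierrors.not_processed += q->ierrors.not_processed;
            tot->ierrors.icv_check += q->ierrors.icv_check;
            tot->ierrors.ipsec_length += q->ierrors.ipsec_length;
            tot->ierrors.misc += q->ierrors.misc;
    }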
>  /* Structure associated with each Rx queue. */
>  struct iavf_rx_queue {
>       struct rte_mempool *mp;       /* mbuf pool to populate Rx ring */
> @@ -209,6 +231,7 @@ struct iavf_rx_queue {
>               /* flexible descriptor metadata extraction offload flag */
>       iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
>                               /* handle flexible descriptor by RXDID */
> +     struct iavf_rx_queue_stats stats;
>       uint64_t offloads;
>  };
> 
> @@ -243,6 +266,7 @@ struct iavf_tx_queue {
>       uint64_t offloads;
>       uint16_t next_dd;              /* next to set RS, for VPMD */
>       uint16_t next_rs;              /* next to check DD,  for VPMD */
> +     uint16_t ipsec_crypto_pkt_md_offset;
> 
>       bool q_set;                    /* if rx queue has been configured */
>       bool tx_deferred_start;        /* don't start this queue in dev start */
> @@ -345,6 +369,40 @@ struct iavf_32b_rx_flex_desc_comms_ovs {
>       } flex_ts;
>  };
> 
> +/* Rx Flex Descriptor
> + * RxDID Profile ID 24 Inline IPsec
> + * Flex-field 0: RSS hash lower 16-bits
> + * Flex-field 1: RSS hash upper 16-bits
> + * Flex-field 2: Flow ID lower 16-bits
> + * Flex-field 3: Flow ID upper 16-bits
> + * Flex-field 4: Inline IPsec SAID lower 16-bits
> + * Flex-field 5: Inline IPsec SAID upper 16-bits
> + */
> +struct iavf_32b_rx_flex_desc_comms_ipsec {
> +     /* Qword 0 */
> +     u8 rxdid;
> +     u8 mir_id_umb_cast;
> +     __le16 ptype_flexi_flags0;
> +     __le16 pkt_len;
> +     __le16 hdr_len_sph_flex_flags1;
> +
> +     /* Qword 1 */
> +     __le16 status_error0;
> +     __le16 l2tag1;
> +     __le32 rss_hash;
> +
> +     /* Qword 2 */
> +     __le16 status_error1;
> +     u8 flexi_flags2;
> +     u8 ts_low;
> +     __le16 l2tag2_1st;
> +     __le16 l2tag2_2nd;
> +
> +     /* Qword 3 */
> +     __le32 flow_id;
> +     __le32 ipsec_said;
> +};
> +
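Given this layout, extracting the SAID is a straight read of qword 3; a sketch of what iavf_flex_rxd_to_ipsec_crypto_said_get() presumably consumes (rxdp being the flex descriptor pointer used on the Rx path):

    const volatile struct iavf_32b_rx_flex_desc_comms_ipsec *d =
            (const volatile void *)rxdp;
    uint32_t said = rte_le_to_cpu_32(d->ipsec_said);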
>  /* Receive Flex Descriptor profile IDs: There are a total
>   * of 64 profiles where profile IDs 0/1 are for legacy; and
>   * profiles 2-63 are flex profiles that can be programmed
> @@ -364,6 +422,7 @@ enum iavf_rxdid {
>       IAVF_RXDID_COMMS_AUX_TCP        = 21,
>       IAVF_RXDID_COMMS_OVS_1          = 22,
>       IAVF_RXDID_COMMS_OVS_2          = 23,
> +     IAVF_RXDID_COMMS_IPSEC_CRYPTO   = 24,
>       IAVF_RXDID_COMMS_AUX_IP_OFFSET  = 25,
>       IAVF_RXDID_LAST                 = 63,
>  };
> @@ -391,9 +450,13 @@ enum iavf_rx_flex_desc_status_error_0_bits {
> 
>  enum iavf_rx_flex_desc_status_error_1_bits {
>       /* Note: These are predefined bit offsets */
> -     IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
> -     IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
> -     IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
> +     /* Bits 3:0 are reserved for inline ipsec status */
> +     IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0 = 0,
> +     IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1,
> +     IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2,
> +     IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3,
> +     IAVF_RX_FLEX_DESC_STATUS1_NAT_S,
> +     IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_PROCESSED,
>       /* [10:6] reserved */
>       IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
>       IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
> @@ -403,6 +466,23 @@ enum iavf_rx_flex_desc_status_error_1_bits {
>       IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
>  };
> 
> +#define IAVF_RX_FLEX_DESC_IPSEC_CRYPTO_STATUS_MASK  (                \
> +     BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_0) |  \
> +     BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_1) |  \
> +     BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_2) |  \
> +     BIT(IAVF_RX_FLEX_DESC_STATUS1_IPSEC_CRYPTO_STATUS_3))
> +
> +enum iavf_rx_flex_desc_ipsec_crypto_status {
> +     IAVF_IPSEC_CRYPTO_STATUS_SUCCESS = 0,
> +     IAVF_IPSEC_CRYPTO_STATUS_SAD_MISS,
> +     IAVF_IPSEC_CRYPTO_STATUS_NOT_PROCESSED,
> +     IAVF_IPSEC_CRYPTO_STATUS_ICV_CHECK_FAIL,
> +     IAVF_IPSEC_CRYPTO_STATUS_LENGTH_ERR,
> +     /* Reserved */
> +     IAVF_IPSEC_CRYPTO_STATUS_MISC_ERR = 0xF
> +};
> 
>  #define IAVF_TXD_DATA_QW1_DTYPE_SHIFT        (0)
>  #define IAVF_TXD_DATA_QW1_DTYPE_MASK (0xFUL << IAVF_TXD_QW1_DTYPE_SHIFT)
> @@ -670,6 +750,9 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
>       case IAVF_TX_DESC_DTYPE_CONTEXT:
>               name = "Tx_context_desc";
>               break;
> +     case IAVF_TX_DESC_DTYPE_IPSEC:
> +             name = "Tx_IPsec_desc";
> +             break;
>       default:
>               name = "unknown_desc";
>               break;
> diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
> index 53d1506677..353521d726 100644
> --- a/drivers/net/iavf/iavf_vchnl.c
> +++ b/drivers/net/iavf/iavf_vchnl.c
> @@ -1774,3 +1774,32 @@ iavf_get_max_rss_queue_region(struct iavf_adapter *adapter)
> 
>       return 0;
>  }
> +
> +int
> +iavf_ipsec_crypto_request(struct iavf_adapter *adapter,
> +             uint8_t *msg, size_t msg_len,
> +             uint8_t *resp_msg, size_t resp_msg_len)
> +{
> +     struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
> +     struct iavf_cmd_info args;
> +     int err;
> +
> +     args.ops = VIRTCHNL_OP_INLINE_IPSEC_CRYPTO;
> +     args.in_args = msg;
> +     args.in_args_size = msg_len;
> +     args.out_buffer = vf->aq_resp;
> +     args.out_size = IAVF_AQ_BUF_SZ;
> +
> +     err = iavf_execute_vf_cmd(adapter, &args, 1);
> +     if (err) {
> +             PMD_DRV_LOG(ERR, "fail to execute command %s",
> +                             "OP_INLINE_IPSEC_CRYPTO");
> +             return err;
> +     }
> +
> +     memcpy(resp_msg, args.out_buffer, resp_msg_len);
> +
> +     return 0;
> +}
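A caller's-eye sketch of this round trip; the message builder is hypothetical and the concrete layout lives in the virtchnl inline-IPsec definitions elsewhere in the series:

    uint8_t req[IAVF_AQ_BUF_SZ], resp[IAVF_AQ_BUF_SZ];
    /* build_inline_ipsec_msg() is hypothetical, for illustration */
    size_t req_len = build_inline_ipsec_msg(req, sizeof(req));

    if (iavf_ipsec_crypto_request(adapter, req, req_len,
                    resp, sizeof(resp)) == 0) {
            /* resp now holds the PF's response payload */
    }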
> diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
> index 36a82e3faa..5eb230f687 100644
> --- a/drivers/net/iavf/meson.build
> +++ b/drivers/net/iavf/meson.build
> @@ -5,7 +5,7 @@
>  cflags += ['-Wno-strict-aliasing']
> 
>  includes += include_directories('../../common/iavf')
> -deps += ['common_iavf']
> +deps += ['common_iavf', 'security', 'cryptodev']
> 
>  sources = files(
>          'iavf_ethdev.c',
> @@ -15,6 +15,7 @@ sources = files(
>          'iavf_fdir.c',
>          'iavf_hash.c',
>          'iavf_tm.c',
> +        'iavf_ipsec_crypto.c',
>  )
> 
>  if arch_subdir == 'x86'
> diff --git a/drivers/net/iavf/rte_pmd_iavf.h b/drivers/net/iavf/rte_pmd_iavf.h
> index 3a045040f1..7426eb9be3 100644
> --- a/drivers/net/iavf/rte_pmd_iavf.h
> +++ b/drivers/net/iavf/rte_pmd_iavf.h
> @@ -92,6 +92,7 @@ extern uint64_t
> rte_pmd_ifd_dynflag_proto_xtr_ipv6_mask;
>  extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
>  extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
>  extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
> +extern uint64_t rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
> 
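Applications consume this flag together with the extraction-metadata dynfield already exported by this header; an app-side sketch, assuming the RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA() accessor:

    if (m->ol_flags & rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask) {
            uint32_t said = *RTE_PMD_IFD_DYNF_PROTO_XTR_METADATA(m);
            /* ... map said back to the security association ... */
    }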
>  /**
>   * The mbuf dynamic field pointer for flexible descriptor's extraction
> metadata.
> diff --git a/drivers/net/iavf/version.map b/drivers/net/iavf/version.map
> index f3efe756cf..97f0f87311 100644
> --- a/drivers/net/iavf/version.map
> +++ b/drivers/net/iavf/version.map
> @@ -13,4 +13,7 @@ EXPERIMENTAL {
>       rte_pmd_ifd_dynflag_proto_xtr_ipv6_flow_mask;
>       rte_pmd_ifd_dynflag_proto_xtr_tcp_mask;
>       rte_pmd_ifd_dynflag_proto_xtr_ip_offset_mask;
> +
> +     # added in 21.11
> +     rte_pmd_ifd_dynflag_proto_xtr_ipsec_crypto_said_mask;
>  };
> --
> 2.25.1
