Hi,

Comments inline


On 10/18/2017 10:29 PM, Ananyev, Konstantin wrote:
Hi Radu,
A few comments from me below.
Konstantin

<snip>

+#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms)    \
+{                                                                      \
+       uint32_t cnt = poll_ms;                                         \
+       IXGBE_WRITE_REG(hw, (reg), (val));                              \
+       while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--))   \
+               rte_delay_ms(1);                                        \
+}
+
As you have a macro that consists of multiple statements, you'll need a do { ... } while (0) wrapper around it.
Though I still suggest making it an inline function - that would be much better.
I will add the do-while wrapper, but making it an inline function there brings in a circular dependency.
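For reference, a minimal sketch of the same macro with the do { } while (0) wrapper applied (body kept as posted):

#define IXGBE_WRITE_REG_THEN_POLL_MASK(hw, reg, val, mask, poll_ms)    \
do {                                                                   \
	uint32_t cnt = poll_ms;                                        \
	IXGBE_WRITE_REG(hw, (reg), (val));                             \
	while (((IXGBE_READ_REG(hw, (reg))) & (mask)) && (cnt--))      \
		rte_delay_ms(1);                                       \
} while (0)

The wrapper makes the macro expand to a single statement, so it stays safe after an if/else without braces.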

<snip>
+
+static int
+ixgbe_crypto_update_mb(void *device __rte_unused,
+               struct rte_security_session *session,
+                      struct rte_mbuf *m, void *params __rte_unused)
+{
+       struct ixgbe_crypto_session *ic_session =
+                       get_sec_session_private_data(session);
+       if (ic_session->op == IXGBE_OP_AUTHENTICATED_ENCRYPTION) {
+               struct ixgbe_crypto_tx_desc_md *mdata =
+                       (struct ixgbe_crypto_tx_desc_md *)&m->udata64;
+               mdata->enc = 1;
+               mdata->sa_idx = ic_session->sa_index;
+               mdata->pad_len = *rte_pktmbuf_mtod_offset(m,
+                       uint8_t *, rte_pktmbuf_pkt_len(m) - 18) + 18;
Could you explain what pad_len is supposed to contain?
Also, what is the magic constant '18'?
Could you create a macro for it if needed?
I added an explanation in the code: we read the payload padding size that is stored at offset len - 18 and add 18 bytes (2 for the ESP trailer and 16 for the ICV).
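For illustration, a sketch of how the constant could be named instead of hard-coded (ESP_TRAILER_LEN / ESP_ICV_LEN / ESP_OVERHEAD are hypothetical names, not taken from the patch):

/* ESP trailer: 1 byte pad length + 1 byte next header, followed by a
 * 16-byte ICV, so the pad-length byte sits 18 bytes from the packet end. */
#define ESP_TRAILER_LEN	2
#define ESP_ICV_LEN	16
#define ESP_OVERHEAD	(ESP_TRAILER_LEN + ESP_ICV_LEN)

mdata->pad_len = *rte_pktmbuf_mtod_offset(m, uint8_t *,
		rte_pktmbuf_pkt_len(m) - ESP_OVERHEAD) + ESP_OVERHEAD;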
+       }
+       return 0;
+}
+
+struct rte_cryptodev_capabilities aes_gmac_crypto_capabilities[] = {
+       {       /* AES GMAC (128-bit) */
+               .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+               {.sym = {
+                       .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+                       {.auth = {
+                               .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+                               .block_size = 16,
+                               .key_size = {
+                                       .min = 16,
+                                       .max = 16,
+                                       .increment = 0
+                               },
+                               .digest_size = {
+                                       .min = 12,
+                                       .max = 12,
+                                       .increment = 0
+                               },
+                               .iv_size = {
+                                       .min = 12,
+                                       .max = 12,
+                                       .increment = 0
+                               }
+                       }, }
+               }, }
+       },
+       {
+               .op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+               {.sym = {
+                       .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
+               }, }
+       },
+};
+
+struct rte_cryptodev_capabilities aes_gcm_gmac_crypto_capabilities[] = {
+       {       /* AES GMAC (128-bit) */
+               .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+               {.sym = {
+                       .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+                       {.auth = {
+                               .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+                               .block_size = 16,
+                               .key_size = {
+                                       .min = 16,
+                                       .max = 16,
+                                       .increment = 0
+                               },
+                               .digest_size = {
+                                       .min = 12,
+                                       .max = 12,
+                                       .increment = 0
+                               },
+                               .iv_size = {
+                                       .min = 12,
+                                       .max = 12,
+                                       .increment = 0
+                               }
+                       }, }
+               }, }
+       },
+       {       /* AES GCM (128-bit) */
+               .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+               {.sym = {
+                       .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+                       {.aead = {
+                               .algo = RTE_CRYPTO_AEAD_AES_GCM,
+                               .block_size = 16,
+                               .key_size = {
+                                       .min = 16,
+                                       .max = 16,
+                                       .increment = 0
+                               },
+                               .digest_size = {
+                                       .min = 8,
+                                       .max = 16,
+                                       .increment = 4
+                               },
+                               .aad_size = {
+                                       .min = 0,
+                                       .max = 65535,
+                                       .increment = 1
+                               },
+                               .iv_size = {
+                                       .min = 12,
+                                       .max = 12,
+                                       .increment = 0
+                               }
+                       }, }
+               }, }
+       },
+       {
+               .op = RTE_CRYPTO_OP_TYPE_UNDEFINED,
+               {.sym = {
+                       .xform_type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED
+               }, }
+       },
+};
+
+static const struct rte_security_capability ixgbe_security_capabilities[] = {
+       { /* IPsec Inline Crypto ESP Transport Egress */
+               .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+               .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+               .ipsec = {
+                       .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+                       .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+                       .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+                       .options = { 0 }
+               },
+               .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+               .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+       },
+       { /* IPsec Inline Crypto ESP Transport Ingress */
+               .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+               .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+               .ipsec = {
+                       .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+                       .mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+                       .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+                       .options = { 0 }
+               },
+               .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+               .ol_flags = 0
+       },
+       { /* IPsec Inline Crypto ESP Tunnel Egress */
+               .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+               .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+               .ipsec = {
+                       .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+                       .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+                       .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+                       .options = { 0 }
+               },
+               .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+               .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+       },
+       { /* IPsec Inline Crypto ESP Tunnel Ingress */
+               .action = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
+               .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+               .ipsec = {
+                       .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+                       .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+                       .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+                       .options = { 0 }
+               },
+               .crypto_capabilities = aes_gcm_gmac_crypto_capabilities,
+               .ol_flags = 0
+       },
+       {
+               .action = RTE_SECURITY_ACTION_TYPE_NONE
+       }
+};
+
+static const struct rte_security_capability *
+ixgbe_crypto_capabilities_get(void *device __rte_unused)
+{
As a nit: if ixgbe_security_capabilities is not used in any other place, you can move its definition inside that function.
Done.
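For clarity, a sketch of what moving the table inside the getter could look like (assuming the entries stay exactly as listed above):

static const struct rte_security_capability *
ixgbe_crypto_capabilities_get(void *device __rte_unused)
{
	static const struct rte_security_capability
			ixgbe_security_capabilities[] = {
		/* ... same entries as listed above ... */
	};

	return ixgbe_security_capabilities;
}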

+};
+
+struct ixgbe_crypto_tx_desc_md {
+       union {
+               uint64_t data;
+               struct {
+                         uint32_t sa_idx;
+                         uint8_t pad_len;
+                         uint8_t enc;
+               };
+       };
+};

Why not just:
union ixgbe_crypto_tx_desc_md {
      uint64_t data;
      struct {...};
};
?
Done.
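The reworked type then becomes a plain union; a minimal sketch:

union ixgbe_crypto_tx_desc_md {
	uint64_t data;
	struct {
		uint32_t sa_idx;	/* SA table index */
		uint8_t pad_len;	/* ESP padding size + 18 (trailer + ICV) */
		uint8_t enc;		/* authenticated encryption flag */
	};
};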

+
+struct ixgbe_ipsec {
+       struct ixgbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
+       struct ixgbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
+       struct ixgbe_crypto_tx_sa_table tx_sa_tbl[IPSEC_MAX_SA_COUNT];
+};
+
+extern struct rte_security_ops ixgbe_security_ops;
+
+
+int ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev);
+int ixgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+                                         const void *ip_spec,
+                                         uint8_t is_ipv6);
+
+
+
+#endif /*IXGBE_IPSEC_H_*/
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 0038dfb..279e3fa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -93,6 +93,7 @@
                PKT_TX_TCP_SEG |                 \
                PKT_TX_MACSEC |                  \
                PKT_TX_OUTER_IP_CKSUM |          \
+               PKT_TX_SEC_OFFLOAD |     \
                IXGBE_TX_IEEE1588_TMST)

  #define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
@@ -395,7 +396,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
  static inline void
  ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
                volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
-               uint64_t ol_flags, union ixgbe_tx_offload tx_offload)
+               uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
+               struct rte_mbuf *mb)
I don't think you need to pass mb as a parameter to that function:
you already have ol_flags as a parameter, and all you need here is just a struct ixgbe_crypto_tx_desc_md md as an extra parameter.
Done.
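A sketch of the reworked prototype, passing only the crypto metadata by value (the exact parameter type and name are my assumption):

static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
		uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
		union ixgbe_crypto_tx_desc_md md);

The caller can extract the metadata once, e.g. md = *(union ixgbe_crypto_tx_desc_md *)&mb->udata64, and ixgbe_set_xmit_ctx() no longer needs the mbuf at all.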

  {
        uint32_t type_tucmd_mlhl;
        uint32_t mss_l4len_idx = 0;
@@ -479,6 +481,18 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
                seqnum_seed |= tx_offload.l2_len
                               << IXGBE_ADVTXD_TUNNEL_LEN;
        }
+       if (mb->ol_flags & PKT_TX_SEC_OFFLOAD) {
+               struct ixgbe_crypto_tx_desc_md *mdata =
+                               (struct ixgbe_crypto_tx_desc_md *)
+                               &mb->udata64;
+               seqnum_seed |=
+                       (IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK & mdata->sa_idx);
+               type_tucmd_mlhl |= mdata->enc ?
+                               (IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
+                               IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN) : 0;
+               type_tucmd_mlhl |=
+                       (mdata->pad_len & IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK);
Shouldn't we also update tx_offload_mask here?
We do - updated.
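A sketch of the idea, with the caveat that the security-related field names in union ixgbe_tx_offload are my assumption about how the metadata was folded in:

/* Any security metadata that goes into the context descriptor must also be
 * reflected in tx_offload_mask, otherwise ixgbe_xmit_pkts() could reuse a
 * cached context for a packet that belongs to a different SA. */
if (ol_flags & PKT_TX_SEC_OFFLOAD) {
	tx_offload_mask.sa_idx |= ~0;
	tx_offload_mask.sec_pad_len |= ~0;
}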

