Outbound ESP sequence number should be incremented atomically and referenced indirectly. Otherwise, concurrent access by multiple threads will break coherency.
Fixes: d299106e8e31 ("examples/ipsec-secgw: add IPsec sample application") Fixes: 906257e965b7 ("examples/ipsec-secgw: support IPv6") Fixes: cef50fc6f1e2 ("examples/ipsec-secgw: change CBC IV generation") Fixes: 2a41fb7c6525 ("examples/ipsec-secgw: convert IV to big endian") Fixes: ec17993a145a ("examples/ipsec-secgw: support security offload") Cc: sergio.gonzalez.mon...@intel.com Cc: avia...@mellanox.com Cc: akhil.go...@nxp.com Cc: sta...@dpdk.org Signed-off-by: Yongseok Koh <ys...@mellanox.com> --- examples/ipsec-secgw/esp.c | 13 +++++++------ examples/ipsec-secgw/ipsec.h | 2 +- examples/ipsec-secgw/sa.c | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c index faa84ddd13..6f616f3f69 100644 --- a/examples/ipsec-secgw/esp.c +++ b/examples/ipsec-secgw/esp.c @@ -222,6 +222,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, struct rte_crypto_sym_op *sym_cop; int32_t i; uint16_t pad_payload_len, pad_len, ip_hdr_len; + uint64_t sa_seq; RTE_ASSERT(m != NULL); RTE_ASSERT(sa != NULL); @@ -316,14 +317,14 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, } } - sa->seq++; esp->spi = rte_cpu_to_be_32(sa->spi); - esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq); + sa_seq = rte_atomic64_add_return(&sa->seq, 1); + esp->seq = rte_cpu_to_be_32((uint32_t)sa_seq); /* set iv */ uint64_t *iv = (uint64_t *)(esp + 1); if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) { - *iv = rte_cpu_to_be_64(sa->seq); + *iv = rte_cpu_to_be_64(sa_seq); } else { switch (sa->cipher_algo) { case RTE_CRYPTO_CIPHER_NULL: @@ -332,7 +333,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, memset(iv, 0, sa->iv_len); break; case RTE_CRYPTO_CIPHER_AES_CTR: - *iv = rte_cpu_to_be_64(sa->seq); + *iv = rte_cpu_to_be_64(sa_seq); break; default: RTE_LOG(ERR, IPSEC_ESP, @@ -373,7 +374,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, struct cnt_blk *icb = get_cnt_blk(m); icb->salt = sa->salt; - icb->iv = 
rte_cpu_to_be_64(sa->seq); + icb->iv = rte_cpu_to_be_64(sa_seq); icb->cnt = rte_cpu_to_be_32(1); aad = get_aad(m); @@ -414,7 +415,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa, struct cnt_blk *icb = get_cnt_blk(m); icb->salt = sa->salt; - icb->iv = rte_cpu_to_be_64(sa->seq); + icb->iv = rte_cpu_to_be_64(sa_seq); icb->cnt = rte_cpu_to_be_32(1); switch (sa->auth_algo) { diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h index 99f49d65f8..742d09aa36 100644 --- a/examples/ipsec-secgw/ipsec.h +++ b/examples/ipsec-secgw/ipsec.h @@ -87,7 +87,7 @@ struct ipsec_sa { struct rte_ipsec_session ips; /* one session per sa for now */ uint32_t spi; uint32_t cdev_id_qp; - uint64_t seq; + rte_atomic64_t seq; uint32_t salt; union { struct rte_cryptodev_sym_session *crypto_session; diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c index a7298a30c2..adf6ac3f9a 100644 --- a/examples/ipsec-secgw/sa.c +++ b/examples/ipsec-secgw/sa.c @@ -795,7 +795,7 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[], return -EINVAL; } *sa = entries[i]; - sa->seq = 0; + rte_atomic64_set(&sa->seq, -1); if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL || sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { -- 2.11.0