Add support for inline ingress IPsec sessions with out-of-place (OOP)
processing. When OOP is enabled on an inline inbound SA, the decrypted
packet is delivered to the application while the original encrypted
packet is preserved and made available via the rte_security OOP
dynfield.

Signed-off-by: Nithin Dabilpuram <ndabilpu...@marvell.com>
---

Depends-on: series-27660 ("common/cnxk: allocate dynamic BPIDs")
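
Not part of the patch: below is a minimal usage sketch of the new
capability, assuming the rte_security ingress OOP API this series builds
on (the ingress_oop session option, rte_security_oop_dynfield_register()
and rte_security_oop_dynfield()). The function names, SPI and tunnel
fields are illustrative placeholders, not driver APIs.

#include <rte_crypto_sym.h>
#include <rte_mbuf.h>
#include <rte_security.h>

/* Create an inline inbound ESP SA with out-of-place processing enabled.
 * 'sec_ctx', 'xform' and 'sess_mp' come from the application.
 */
static struct rte_security_session *
create_inb_oop_session(void *sec_ctx, struct rte_crypto_sym_xform *xform,
                       struct rte_mempool *sess_mp)
{
        struct rte_security_session_conf conf = {
                .action_type = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
                .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                .ipsec = {
                        .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
                        .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                        .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                        .spi = 0x100,
                        /* Keep the original encrypted packet and expose it
                         * through the security OOP dynfield.
                         */
                        .options.ingress_oop = 1,
                },
                .crypto_xform = xform,
        };

        /* The OOP dynfield must be registered before OOP traffic arrives */
        if (rte_security_oop_dynfield_offset < 0 &&
            rte_security_oop_dynfield_register() < 0)
                return NULL;

        return rte_security_session_create(sec_ctx, &conf, sess_mp);
}

/* Rx path for a packet matched on the OOP SA: 'pkt' carries the decrypted
 * data and the dynfield points to the original encrypted mbuf. In this
 * sketch the application owns and frees both mbufs.
 */
static void
handle_oop_pkt(struct rte_mbuf *pkt)
{
        struct rte_mbuf *orig = *rte_security_oop_dynfield(pkt);

        /* ... inspect orig (ciphertext) and pkt (plaintext) ... */
        rte_pktmbuf_free(orig);
        rte_pktmbuf_free(pkt);
}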

 drivers/event/cnxk/cn10k_worker.h   |  28 ++++-
 drivers/net/cnxk/cn10k_ethdev.c     |  13 +-
 drivers/net/cnxk/cn10k_ethdev_sec.c |  43 +++++++
 drivers/net/cnxk/cn10k_rx.h         | 185 ++++++++++++++++++++++------
 drivers/net/cnxk/cn10k_rxtx.h       |   1 +
 drivers/net/cnxk/cnxk_ethdev.h      |   9 ++
 6 files changed, 233 insertions(+), 46 deletions(-)

diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 07f0dad97d..75244638d2 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -16,7 +16,7 @@
 static __rte_always_inline void
 cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id,
                  const uint32_t tag, const uint32_t flags,
-                 const void *const lookup_mem)
+                 const void *const lookup_mem, uintptr_t cpth)
 {
        const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
                                   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
@@ -27,7 +27,7 @@ cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t __mbuf, uint8_t port_id,
 
        cn10k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
                              (struct rte_mbuf *)mbuf, lookup_mem,
-                             mbuf_init | ((uint64_t)port_id) << 48, flags);
+                             mbuf_init | ((uint64_t)port_id) << 48, cpth, flags);
 }
 
 static void
@@ -62,6 +62,7 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
        uint16_t lmt_id, d_off;
        struct rte_mbuf **wqe;
        struct rte_mbuf *mbuf;
+       uintptr_t cpth = 0;
        uint8_t loff = 0;
        uint64_t sa_base;
        int i;
@@ -125,13 +126,20 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
                        const uint64_t cq_w1 = *((const uint64_t *)cqe + 1);
                        const uint64_t cq_w5 = *((const uint64_t *)cqe + 5);
 
+                       cpth = ((uintptr_t)mbuf + (uint16_t)d_off);
+
+                       /* Update mempool pointer for full mode pkt */
+                       if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) &&
+                           !((*(uint64_t *)cpth) & BIT(15)))
+                               mbuf->pool = mp;
+
                        mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, cq_w5, sa_base, laddr,
                                                       &loff, mbuf, d_off,
                                                       flags, mbuf_init);
                }
 
                cn10k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem,
-                                     mbuf_init, flags);
+                                     mbuf_init, cpth, flags);
 
                if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
                        cn10k_sso_process_tstamp((uint64_t)wqe[0],
@@ -162,6 +170,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
                u64[1] = cn10k_cpt_crypto_adapter_vector_dequeue(u64[1]);
        } else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_ETHDEV) {
                uint8_t port = CNXK_SUB_EVENT_FROM_TAG(u64[0]);
+               uintptr_t cpth = 0;
                uint64_t mbuf;
 
                mbuf = u64[1] - sizeof(struct rte_mbuf);
@@ -191,12 +200,19 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
                        sa_base = cnxk_nix_sa_base_get(port, ws->lookup_mem);
                        sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
 
+                       cpth = ((uintptr_t)mbuf + (uint16_t)d_off);
+                       mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port, lookup_mem);
+                       meta_aura = mp ? mp->pool_id : m->pool->pool_id;
+
+                       /* Update mempool pointer for full mode pkt */
+                       if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) &&
+                           !((*(uint64_t *)cpth) & BIT(15)))
+                               ((struct rte_mbuf *)mbuf)->pool = mp;
+
                        mbuf = (uint64_t)nix_sec_meta_to_mbuf_sc(
                                cq_w1, cq_w5, sa_base, (uintptr_t)&iova, &loff,
                                (struct rte_mbuf *)mbuf, d_off, flags,
                                mbuf_init | ((uint64_t)port) << 48);
-                       mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port, lookup_mem);
-                       meta_aura = mp ? mp->pool_id : m->pool->pool_id;
 
                        if (loff)
                                roc_npa_aura_op_free(meta_aura, 0, iova);
@@ -204,7 +220,7 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 
                u64[0] = CNXK_CLR_SUB_EVENT(u64[0]);
                cn10k_wqe_to_mbuf(u64[1], mbuf, port, u64[0] & 0xFFFFF, flags,
-                                 ws->lookup_mem);
+                                 ws->lookup_mem, cpth);
                if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
                        cn10k_sso_process_tstamp(u64[1], mbuf,
                                                 ws->tstamp[port]);
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 2b4ab8b772..c33646846e 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -352,11 +352,13 @@ cn10k_nix_rx_queue_meta_aura_update(struct rte_eth_dev *eth_dev)
                rq = &dev->rqs[i];
                rxq = eth_dev->data->rx_queues[i];
                rxq->meta_aura = rq->meta_aura_handle;
+               rxq->meta_pool = dev->nix.meta_mempool;
                /* Assume meta packet from normal aura if meta aura is not setup
                 */
                if (!rxq->meta_aura) {
                        rxq_sp = cnxk_eth_rxq_to_sp(rxq);
                        rxq->meta_aura = rxq_sp->qconf.mp->pool_id;
+                       rxq->meta_pool = (uintptr_t)rxq_sp->qconf.mp;
                }
        }
        /* Store mempool in lookup mem */
@@ -623,14 +625,17 @@ cn10k_nix_reassembly_conf_set(struct rte_eth_dev *eth_dev,
 
        if (!conf->flags) {
                /* Clear offload flags on disable */
-               dev->rx_offload_flags &= ~NIX_RX_REAS_F;
+               if (!dev->inb.nb_oop)
+                       dev->rx_offload_flags &= ~NIX_RX_REAS_F;
+               dev->inb.reass_en = false;
                return 0;
        }
 
-       rc = roc_nix_reassembly_configure(conf->timeout_ms,
-                               conf->max_frags);
-       if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+       rc = roc_nix_reassembly_configure(conf->timeout_ms, conf->max_frags);
+       if (!rc && dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
                dev->rx_offload_flags |= NIX_RX_REAS_F;
+               dev->inb.reass_en = true;
+       }
 
        return rc;
 }
diff --git a/drivers/net/cnxk/cn10k_ethdev_sec.c b/drivers/net/cnxk/cn10k_ethdev_sec.c
index 9625704ec1..f6992c8c8f 100644
--- a/drivers/net/cnxk/cn10k_ethdev_sec.c
+++ b/drivers/net/cnxk/cn10k_ethdev_sec.c
@@ -9,6 +9,7 @@
 #include <rte_pmd_cnxk.h>
 
 #include <cn10k_ethdev.h>
+#include <cn10k_rx.h>
 #include <cnxk_security.h>
 #include <roc_priv.h>
 
@@ -293,6 +294,7 @@ static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
                                .l4_csum_enable = 1,
                                .stats = 1,
                                .esn = 1,
+                               .ingress_oop = 1,
                        },
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
@@ -342,6 +344,7 @@ static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
                                .l4_csum_enable = 1,
                                .stats = 1,
                                .esn = 1,
+                               .ingress_oop = 1,
                        },
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
@@ -365,6 +368,7 @@ static const struct rte_security_capability cn10k_eth_sec_capabilities[] = {
                                .l4_csum_enable = 1,
                                .stats = 1,
                                .esn = 1,
+                               .ingress_oop = 1,
                        },
                },
                .crypto_capabilities = cn10k_eth_sec_crypto_caps,
@@ -624,6 +628,20 @@ cn10k_eth_sec_session_create(void *device,
                        return -rte_errno;
        }
 
+       if (conf->ipsec.options.ingress_oop &&
+           rte_security_oop_dynfield_offset < 0) {
+               /* Register for security OOP dynfield if required */
+               if (rte_security_oop_dynfield_register() < 0)
+                       return -rte_errno;
+       }
+
+       /* We cannot support inbound reassembly and OOP together */
+       if (conf->ipsec.options.ip_reassembly_en &&
+           conf->ipsec.options.ingress_oop) {
+               plt_err("Cannot support Inbound reassembly and OOP together");
+               return -ENOTSUP;
+       }
+
        ipsec = &conf->ipsec;
        crypto = conf->crypto_xform;
        inbound = !!(ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
@@ -710,6 +728,12 @@ cn10k_eth_sec_session_create(void *device,
                        inb_sa_dptr->w0.s.count_mib_bytes = 1;
                        inb_sa_dptr->w0.s.count_mib_pkts = 1;
                }
+
+               /* Enable out-of-place processing */
+               if (ipsec->options.ingress_oop)
+                       inb_sa_dptr->w0.s.pkt_format =
+                               ROC_IE_OT_SA_PKT_FMT_FULL;
+
                /* Prepare session priv */
                sess_priv.inb_sa = 1;
                sess_priv.sa_idx = ipsec->spi & spi_mask;
@@ -721,6 +745,7 @@ cn10k_eth_sec_session_create(void *device,
                eth_sec->spi = ipsec->spi;
                eth_sec->inl_dev = !!dev->inb.inl_dev;
                eth_sec->inb = true;
+               eth_sec->inb_oop = !!ipsec->options.ingress_oop;
 
                TAILQ_INSERT_TAIL(&dev->inb.list, eth_sec, entry);
                dev->inb.nb_sess++;
@@ -736,6 +761,15 @@ cn10k_eth_sec_session_create(void *device,
                        inb_priv->reass_dynflag_bit = dev->reass_dynflag_bit;
                }
 
+               if (ipsec->options.ingress_oop)
+                       dev->inb.nb_oop++;
+
+               /* Update function pointer to handle OOP sessions */
+               if (dev->inb.nb_oop &&
+                   !(dev->rx_offload_flags & NIX_RX_REAS_F)) {
+                       dev->rx_offload_flags |= NIX_RX_REAS_F;
+                       cn10k_eth_set_rx_function(eth_dev);
+               }
        } else {
                struct roc_ot_ipsec_outb_sa *outb_sa, *outb_sa_dptr;
                struct cn10k_outb_priv_data *outb_priv;
@@ -880,6 +914,15 @@ cn10k_eth_sec_session_destroy(void *device, struct rte_security_session *sess)
                                      sizeof(struct roc_ot_ipsec_inb_sa));
                TAILQ_REMOVE(&dev->inb.list, eth_sec, entry);
                dev->inb.nb_sess--;
+               if (eth_sec->inb_oop)
+                       dev->inb.nb_oop--;
+
+               /* Clear offload flag if it was set only for OOP */
+               if (!dev->inb.nb_oop && !dev->inb.reass_en &&
+                   dev->rx_offload_flags & NIX_RX_REAS_F) {
+                       dev->rx_offload_flags &= ~NIX_RX_REAS_F;
+                       cn10k_eth_set_rx_function(eth_dev);
+               }
        } else {
                /* Disable SA */
                sa_dptr = dev->outb.sa_dptr;
diff --git a/drivers/net/cnxk/cn10k_rx.h b/drivers/net/cnxk/cn10k_rx.h
index 9fdb5565e9..b80e7388bd 100644
--- a/drivers/net/cnxk/cn10k_rx.h
+++ b/drivers/net/cnxk/cn10k_rx.h
@@ -420,11 +420,46 @@ nix_sec_reassemble_frags(const struct cpt_parse_hdr_s *hdr, uint64_t cq_w1,
        return head;
 }
 
+static inline struct rte_mbuf *
+nix_sec_oop_process(const struct cpt_parse_hdr_s *hdr, struct rte_mbuf *mbuf, uint64_t *mbuf_init)
+{
+       uintptr_t wqe = rte_be_to_cpu_64(hdr->wqe_ptr);
+       union nix_rx_parse_u *inner_rx;
+       struct rte_mbuf *inner;
+       uint16_t data_off;
+
+       inner = ((struct rte_mbuf *)wqe) - 1;
+
+       inner_rx = (union nix_rx_parse_u *)(wqe + 8);
+       inner->pkt_len = inner_rx->pkt_lenm1 + 1;
+       inner->data_len = inner_rx->pkt_lenm1 + 1;
+
+       /* Mark inner mbuf as get */
+       RTE_MEMPOOL_CHECK_COOKIES(inner->pool,
+                                 (void **)&inner, 1, 1);
+       /* Update rearm data for the full mbuf as it has a CPT parse
+        * header that needs to be skipped.
+        *
+        * Since the meta pool has no private area while the ethdev RQ's
+        * first skip is computed assuming a private area, calculate the
+        * actual data offset and update it in the meta mbuf.
+        */
+       data_off = (uintptr_t)hdr - (uintptr_t)mbuf->buf_addr;
+       data_off += sizeof(struct cpt_parse_hdr_s);
+       data_off += hdr->w0.pad_len;
+       *mbuf_init &= ~0xFFFFUL;
+       *mbuf_init |= data_off;
+
+       *rte_security_oop_dynfield(mbuf) = inner;
+       /* Return outer instead of inner mbuf as inner mbuf would have original encrypted packet */
+       return mbuf;
+}
+
 static __rte_always_inline struct rte_mbuf *
 nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
                        uintptr_t laddr, uint8_t *loff, struct rte_mbuf *mbuf,
                        uint16_t data_off, const uint16_t flags,
-                       const uint64_t mbuf_init)
+                       uint64_t mbuf_init)
 {
        const void *__p = (void *)((uintptr_t)mbuf + (uint16_t)data_off);
        const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)__p;
@@ -447,9 +482,13 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
 
                if (!hdr->w0.num_frags) {
                        /* No Reassembly or inbound error */
-                       inner = (struct rte_mbuf *)
-                               (rte_be_to_cpu_64(hdr->wqe_ptr) -
-                                sizeof(struct rte_mbuf));
+                       if (hdr->w0.pkt_fmt == ROC_IE_OT_SA_PKT_FMT_FULL) {
+                               inner = nix_sec_oop_process(hdr, mbuf, &mbuf_init);
+                       } else {
+                               inner = (struct rte_mbuf *)
+                                       (rte_be_to_cpu_64(hdr->wqe_ptr) -
+                                        sizeof(struct rte_mbuf));
+                       }
 
                        /* Update dynamic field with userdata */
                        *rte_security_dynfield(inner) =
@@ -506,14 +545,18 @@ nix_sec_meta_to_mbuf_sc(uint64_t cq_w1, uint64_t cq_w5, const uint64_t sa_base,
                /* Store meta in lmtline to free
                 * Assume all meta's from same aura.
                 */
-               *(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
-               *loff = *loff + 1;
+               if (hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL) {
+                       *(uint64_t *)(laddr + (*loff << 3)) = (uint64_t)mbuf;
+                       *loff = *loff + 1;
 
-               /* Mark meta mbuf as put */
-               RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 0);
+                       /* Mark meta mbuf as put */
+                       RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf,
+                                                 1, 0);
 
-               /* Mark inner mbuf as get */
-               RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);
+                       /* Mark inner mbuf as get */
+                       RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner,
+                                                 1, 1);
+               }
 
                return inner;
        } else if (cq_w1 & BIT(11)) {
@@ -602,7 +645,9 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
        *rte_security_dynfield(inner) = (uint64_t)inb_priv->userdata;
 
        /* Mark inner mbuf as get */
-       RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);
+       if (!(flags & NIX_RX_REAS_F) ||
+           hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
+               RTE_MEMPOOL_CHECK_COOKIES(inner->pool, (void **)&inner, 1, 1);
 
        if (flags & NIX_RX_REAS_F && hdr->w0.num_frags) {
                if ((!(hdr->w0.err_sum) || roc_ie_ot_ucc_is_success(hdr->w3.uc_ccode)) &&
@@ -633,6 +678,19 @@ nix_sec_meta_to_mbuf(uint64_t cq_w1, uint64_t cq_w5, uintptr_t inb_sa,
                        *rx_desc_field1 = vsetq_lane_u16(inner->data_len,
                                                         *rx_desc_field1, 4);
                }
+       } else if (flags & NIX_RX_REAS_F) {
+               /* Without fragmentation but may have to handle OOP session */
+               if (hdr->w0.pkt_fmt == ROC_IE_OT_SA_PKT_FMT_FULL) {
+                       uint64_t mbuf_init = 0;
+
+                       /* Caller has already prepared to return second pass
+                        * mbuf and inner mbuf is actually outer.
+                        * Store original buffer pointer in dynfield.
+                        */
+                       nix_sec_oop_process(hdr, inner, &mbuf_init);
+                       /* Clear and update lower 16 bit of data offset */
+                       *rearm = (*rearm & ~(BIT_ULL(16) - 1)) | mbuf_init;
+               }
        }
 }
 #endif
@@ -689,7 +747,7 @@ nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
 
 static __rte_always_inline void
 nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
-                   uint64_t rearm, const uint16_t flags)
+                   uint64_t rearm, uintptr_t cpth, const uint16_t flags)
 {
        const rte_iova_t *iova_list;
        uint16_t later_skip = 0;
@@ -703,8 +761,11 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
        cq_w1 = *(const uint64_t *)rx;
        /* Use inner rx parse for meta pkts sg list */
        if (cq_w1 & BIT(11) && flags & NIX_RX_OFFLOAD_SECURITY_F) {
+               const struct cpt_parse_hdr_s *hdr = (const struct cpt_parse_hdr_s *)cpth;
                const uint64_t *wqe = (const uint64_t *)(mbuf + 1);
-               rx = (const union nix_rx_parse_u *)(wqe + 1);
+
+               if (!(flags & NIX_RX_REAS_F) || hdr->w0.pkt_fmt != ROC_IE_OT_SA_PKT_FMT_FULL)
+                       rx = (const union nix_rx_parse_u *)(wqe + 1);
        }
 
        sg = *(const uint64_t *)(rx + 1);
@@ -763,7 +824,7 @@ nix_cqe_xtract_mseg(const union nix_rx_parse_u *rx, struct rte_mbuf *mbuf,
 static __rte_always_inline void
 cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
                      struct rte_mbuf *mbuf, const void *lookup_mem,
-                     const uint64_t val, const uint16_t flag)
+                     const uint64_t val, const uintptr_t cpth, const uint16_t flag)
 {
        const union nix_rx_parse_u *rx =
                (const union nix_rx_parse_u *)((const uint64_t *)cq + 1);
@@ -817,7 +878,7 @@ cn10k_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
                 * timestamp data process.
                 * Hence, timestamp flag argument is not required.
                 */
-               nix_cqe_xtract_mseg(rx, mbuf, val, flag & ~NIX_RX_OFFLOAD_TSTAMP_F);
+               nix_cqe_xtract_mseg(rx, mbuf, val, cpth, flag & ~NIX_RX_OFFLOAD_TSTAMP_F);
 }
 
 static inline uint16_t
@@ -888,6 +949,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
        const uint64_t mbuf_init = rxq->mbuf_initializer;
        const void *lookup_mem = rxq->lookup_mem;
        const uint64_t data_off = rxq->data_off;
+       struct rte_mempool *meta_pool = NULL;
        const uintptr_t desc = rxq->desc;
        const uint64_t wdata = rxq->wdata;
        const uint32_t qmask = rxq->qmask;
@@ -898,6 +960,7 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
        struct nix_cqe_hdr_s *cq;
        struct rte_mbuf *mbuf;
        uint64_t aura_handle;
+       uintptr_t cpth = 0;
        uint64_t sa_base;
        uint16_t lmt_id;
        uint64_t laddr;
@@ -911,6 +974,8 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
                ROC_LMT_BASE_ID_GET(lbase, lmt_id);
                laddr = lbase;
                laddr += 8;
+               if (flags & NIX_RX_REAS_F)
+                       meta_pool = (struct rte_mempool *)rxq->meta_pool;
        }
 
        while (packets < nb_pkts) {
@@ -929,13 +994,20 @@ cn10k_nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t pkts,
                        const uint64_t cq_w1 = *((const uint64_t *)cq + 1);
                        const uint64_t cq_w5 = *((const uint64_t *)cq + 5);
 
+                       cpth = ((uintptr_t)mbuf + (uint16_t)data_off);
+
+                       /* Update mempool pointer for full mode pkt */
+                       if ((flags & NIX_RX_REAS_F) && (cq_w1 & BIT(11)) &&
+                           !((*(uint64_t *)cpth) & BIT(15)))
+                               mbuf->pool = meta_pool;
+
                        mbuf = nix_sec_meta_to_mbuf_sc(cq_w1, cq_w5, sa_base, laddr,
                                                       &loff, mbuf, data_off,
                                                       flags, mbuf_init);
                }
 
                cn10k_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
-                                     flags);
+                                     cpth, flags);
                cn10k_nix_mbuf_to_tstamp(mbuf, rxq->tstamp,
                                        (flags & NIX_RX_OFFLOAD_TSTAMP_F),
                                        (uint64_t *)((uint8_t *)mbuf
@@ -1025,6 +1097,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
        const uint64_t wdata = flags & NIX_RX_VWQE_F ? 0 : rxq->wdata;
        const uintptr_t desc = flags & NIX_RX_VWQE_F ? 0 : rxq->desc;
        uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
+       uintptr_t cpth0 = 0, cpth1 = 0, cpth2 = 0, cpth3 = 0;
        uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
        uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
        uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
@@ -1032,6 +1105,7 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
        uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
        struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
        uint8_t loff = 0, lnum = 0, shft = 0;
+       struct rte_mempool *meta_pool = NULL;
        uint8x16_t f0, f1, f2, f3;
        uint16_t lmt_id, d_off;
        uint64_t lbase, laddr;
@@ -1084,6 +1158,9 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                        /* Get SA Base from lookup tbl using port_id */
                        port = mbuf_initializer >> 48;
                        sa_base = cnxk_nix_sa_base_get(port, lookup_mem);
+                       if (flags & NIX_RX_REAS_F)
+                               meta_pool = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port,
+                                                                                       lookup_mem);
 
                        lbase = lmt_base;
                } else {
@@ -1091,6 +1168,8 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                        d_off = rxq->data_off;
                        sa_base = rxq->sa_base;
                        lbase = rxq->lmt_base;
+                       if (flags & NIX_RX_REAS_F)
+                               meta_pool = (struct rte_mempool *)rxq->meta_pool;
                }
                sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);
                ROC_LMT_BASE_ID_GET(lbase, lmt_id);
@@ -1325,10 +1404,6 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                        uint64_t cq1_w5 = *CQE_PTR_OFF(cq0, 1, 40, flags);
                        uint64_t cq2_w5 = *CQE_PTR_OFF(cq0, 2, 40, flags);
                        uint64_t cq3_w5 = *CQE_PTR_OFF(cq0, 3, 40, flags);
-                       uintptr_t cpth0 = (uintptr_t)mbuf0 + d_off;
-                       uintptr_t cpth1 = (uintptr_t)mbuf1 + d_off;
-                       uintptr_t cpth2 = (uintptr_t)mbuf2 + d_off;
-                       uintptr_t cpth3 = (uintptr_t)mbuf3 + d_off;
                        uint8_t code;
 
                        uint64x2_t inner0, inner1, inner2, inner3;
@@ -1336,6 +1411,11 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                        uint16x4_t lens, l2lens, ltypes;
                        uint8x8_t ucc;
 
+                       cpth0 = (uintptr_t)mbuf0 + d_off;
+                       cpth1 = (uintptr_t)mbuf1 + d_off;
+                       cpth2 = (uintptr_t)mbuf2 + d_off;
+                       cpth3 = (uintptr_t)mbuf3 + d_off;
+
                        inner0 = vld1q_u64((const uint64_t *)cpth0);
                        inner1 = vld1q_u64((const uint64_t *)cpth1);
                        inner2 = vld1q_u64((const uint64_t *)cpth2);
@@ -1488,10 +1568,19 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                                uint16_t len = vget_lane_u16(lens, 0);
 
                                cpth0 = (uintptr_t)mbuf0 + d_off;
+
                                /* Free meta to aura */
-                               NIX_PUSH_META_TO_FREE(mbuf0, laddr, &loff);
-                               mbuf01 = vsetq_lane_u64(wqe, mbuf01, 0);
-                               mbuf0 = (struct rte_mbuf *)wqe;
+                               if (!(flags & NIX_RX_REAS_F) ||
+                                   *(uint64_t *)cpth0 & BIT_ULL(15)) {
+                                       /* Free meta to aura */
+                                       NIX_PUSH_META_TO_FREE(mbuf0, laddr,
+                                                             &loff);
+                                       mbuf01 = vsetq_lane_u64(wqe, mbuf01, 0);
+                                       mbuf0 = (struct rte_mbuf *)wqe;
+                               } else if (flags & NIX_RX_REAS_F) {
+                                       /* Update meta pool for full mode pkts */
+                                       mbuf0->pool = meta_pool;
+                               }
 
                                /* Update pkt_len and data_len */
                                f0 = vsetq_lane_u16(len, f0, 2);
@@ -1513,10 +1602,18 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                                uint16_t len = vget_lane_u16(lens, 1);
 
                                cpth1 = (uintptr_t)mbuf1 + d_off;
+
                                /* Free meta to aura */
-                               NIX_PUSH_META_TO_FREE(mbuf1, laddr, &loff);
-                               mbuf01 = vsetq_lane_u64(wqe, mbuf01, 1);
-                               mbuf1 = (struct rte_mbuf *)wqe;
+                               if (!(flags & NIX_RX_REAS_F) ||
+                                   *(uint64_t *)cpth1 & BIT_ULL(15)) {
+                                       NIX_PUSH_META_TO_FREE(mbuf1, laddr,
+                                                             &loff);
+                                       mbuf01 = vsetq_lane_u64(wqe, mbuf01, 1);
+                                       mbuf1 = (struct rte_mbuf *)wqe;
+                               } else if (flags & NIX_RX_REAS_F) {
+                                       /* Update meta pool for full mode pkts */
+                                       mbuf1->pool = meta_pool;
+                               }
 
                                /* Update pkt_len and data_len */
                                f1 = vsetq_lane_u16(len, f1, 2);
@@ -1537,10 +1634,18 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                                uint16_t len = vget_lane_u16(lens, 2);
 
                                cpth2 = (uintptr_t)mbuf2 + d_off;
+
                                /* Free meta to aura */
-                               NIX_PUSH_META_TO_FREE(mbuf2, laddr, &loff);
-                               mbuf23 = vsetq_lane_u64(wqe, mbuf23, 0);
-                               mbuf2 = (struct rte_mbuf *)wqe;
+                               if (!(flags & NIX_RX_REAS_F) ||
+                                   *(uint64_t *)cpth2 & BIT_ULL(15)) {
+                                       NIX_PUSH_META_TO_FREE(mbuf2, laddr,
+                                                             &loff);
+                                       mbuf23 = vsetq_lane_u64(wqe, mbuf23, 0);
+                                       mbuf2 = (struct rte_mbuf *)wqe;
+                               } else if (flags & NIX_RX_REAS_F) {
+                                       /* Update meta pool for full mode pkts */
+                                       mbuf2->pool = meta_pool;
+                               }
 
                                /* Update pkt_len and data_len */
                                f2 = vsetq_lane_u16(len, f2, 2);
@@ -1561,10 +1666,18 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                                uint16_t len = vget_lane_u16(lens, 3);
 
                                cpth3 = (uintptr_t)mbuf3 + d_off;
+
                                /* Free meta to aura */
-                               NIX_PUSH_META_TO_FREE(mbuf3, laddr, &loff);
-                               mbuf23 = vsetq_lane_u64(wqe, mbuf23, 1);
-                               mbuf3 = (struct rte_mbuf *)wqe;
+                               if (!(flags & NIX_RX_REAS_F) ||
+                                   *(uint64_t *)cpth3 & BIT_ULL(15)) {
+                                       NIX_PUSH_META_TO_FREE(mbuf3, laddr,
+                                                             &loff);
+                                       mbuf23 = vsetq_lane_u64(wqe, mbuf23, 1);
+                                       mbuf3 = (struct rte_mbuf *)wqe;
+                               } else if (flags & NIX_RX_REAS_F) {
+                                       /* Update meta pool for full mode pkts */
+                                       mbuf3->pool = meta_pool;
+                               }
 
                                /* Update pkt_len and data_len */
                                f3 = vsetq_lane_u16(len, f3, 2);
@@ -1721,16 +1834,16 @@ cn10k_nix_recv_pkts_vector(void *args, struct rte_mbuf **mbufs, uint16_t pkts,
                         */
                        nix_cqe_xtract_mseg((union nix_rx_parse_u *)
                                            (CQE_PTR_OFF(cq0, 0, 8, flags)),
-                                           mbuf0, mbuf_initializer, flags);
+                                           mbuf0, mbuf_initializer, cpth0, flags);
                        nix_cqe_xtract_mseg((union nix_rx_parse_u *)
                                            (CQE_PTR_OFF(cq0, 1, 8, flags)),
-                                           mbuf1, mbuf_initializer, flags);
+                                           mbuf1, mbuf_initializer, cpth1, flags);
                        nix_cqe_xtract_mseg((union nix_rx_parse_u *)
                                            (CQE_PTR_OFF(cq0, 2, 8, flags)),
-                                           mbuf2, mbuf_initializer, flags);
+                                           mbuf2, mbuf_initializer, cpth2, flags);
                        nix_cqe_xtract_mseg((union nix_rx_parse_u *)
                                            (CQE_PTR_OFF(cq0, 3, 8, flags)),
-                                           mbuf3, mbuf_initializer, flags);
+                                           mbuf3, mbuf_initializer, cpth3, flags);
                }
 
                /* Store the mbufs to rx_pkts */
diff --git a/drivers/net/cnxk/cn10k_rxtx.h b/drivers/net/cnxk/cn10k_rxtx.h
index c256d54307..b5d8345270 100644
--- a/drivers/net/cnxk/cn10k_rxtx.h
+++ b/drivers/net/cnxk/cn10k_rxtx.h
@@ -77,6 +77,7 @@ struct cn10k_eth_rxq {
        uint64_t sa_base;
        uint64_t lmt_base;
        uint64_t meta_aura;
+       uintptr_t meta_pool;
        uint16_t rq;
        struct cnxk_timesync_info *tstamp;
 } __plt_cache_aligned;
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index 85287dd66c..2b89ebb9bc 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -217,6 +217,9 @@ struct cnxk_eth_sec_sess {
 
        /* Inbound session on inl dev */
        bool inl_dev;
+
+       /* Out-Of-Place processing */
+       bool inb_oop;
 };
 
 TAILQ_HEAD(cnxk_eth_sec_sess_list, cnxk_eth_sec_sess);
@@ -244,6 +247,12 @@ struct cnxk_eth_dev_sec_inb {
        /* DPTR for WRITE_SA microcode op */
        void *sa_dptr;
 
+       /* Number of oop sessions */
+       uint16_t nb_oop;
+
+       /* Reassembly enabled */
+       bool reass_en;
+
        /* Lock to synchronize sa setup/release */
        rte_spinlock_t lock;
 };
-- 
2.25.1
