From: Rahul Bhansali <rbhans...@marvell.com>

Add support for creating an individual meta aura per NIX port on
CN106-B0/CN103xx SoCs.

The buffer size of the individual pool can be passed per NIX using the
meta_buf_sz devargs parameter for local meta aura creation.
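
For example, a 512B meta buffer per port can be requested with the below
illustrative testpmd command line (the PCI address is a placeholder):

    dpdk-testpmd -a 0002:02:00.0,meta_buf_sz=512 -- -i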

Signed-off-by: Rahul Bhansali <rbhans...@marvell.com>
---
 doc/guides/nics/cnxk.rst               |  14 ++
 drivers/common/cnxk/roc_features.h     |   6 +
 drivers/common/cnxk/roc_nix.h          |   5 +
 drivers/common/cnxk/roc_nix_fc.c       |   7 +-
 drivers/common/cnxk/roc_nix_inl.c      | 232 +++++++++++++++++++------
 drivers/common/cnxk/roc_nix_inl.h      |   7 +-
 drivers/common/cnxk/roc_nix_queue.c    |   6 +-
 drivers/event/cnxk/cn10k_eventdev.c    |  10 +-
 drivers/event/cnxk/cn10k_worker.h      |  11 +-
 drivers/event/cnxk/cn9k_eventdev.c     |   7 +-
 drivers/event/cnxk/cnxk_tim_evdev.c    |   2 +-
 drivers/event/cnxk/cnxk_tim_evdev.h    |   2 +-
 drivers/net/cnxk/cn10k_ethdev.c        |   2 +
 drivers/net/cnxk/cnxk_ethdev.c         |   4 +
 drivers/net/cnxk/cnxk_ethdev.h         |   6 +-
 drivers/net/cnxk/cnxk_ethdev_devargs.c |  23 +++
 drivers/net/cnxk/cnxk_ethdev_dp.h      |  13 ++
 drivers/net/cnxk/cnxk_ethdev_sec.c     |  21 ++-
 drivers/net/cnxk/cnxk_lookup.c         |  37 +++-
 19 files changed, 330 insertions(+), 85 deletions(-)

diff --git a/doc/guides/nics/cnxk.rst b/doc/guides/nics/cnxk.rst
index 267010e760..9229056f6f 100644
--- a/doc/guides/nics/cnxk.rst
+++ b/doc/guides/nics/cnxk.rst
@@ -402,6 +402,20 @@ Runtime Config Options
 
       -a 0002:01:00.1,tx_compl_ena=1
 
+- ``Meta buffer size per ethdev port for inline inbound IPsec second pass``
+
+   The size of the meta buffer allocated for the inline inbound IPsec second pass
+   can be specified per ethdev port using the ``meta_buf_sz`` ``devargs`` parameter.
+   The default value is computed at runtime from the pkt mbuf pools created and in use.
+   This option is supported on the OCTEON CN106-B0/CN103XX SoC family.
+
+   For example::
+
+      -a 0002:02:00.0,meta_buf_sz=512
+
+   With the above configuration, the PMD allocates meta buffers of size 512 for
+   the inline inbound IPsec second pass.
+
 .. note::
 
    Above devarg parameters are configurable per device, user needs to pass the
diff --git a/drivers/common/cnxk/roc_features.h b/drivers/common/cnxk/roc_features.h
index 27bccd6b9c..7796fef91b 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -16,6 +16,12 @@ roc_feature_nix_has_inl_rq_mask(void)
        return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
 }
 
+static inline bool
+roc_feature_nix_has_own_meta_aura(void)
+{
+       return (roc_model_is_cn10kb() || roc_model_is_cn10ka_b0());
+}
+
 static inline bool
 roc_feature_nix_has_late_bp(void)
 {
diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index f04dd63e27..0ec98ad630 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -434,12 +434,17 @@ struct roc_nix {
        uint32_t dwrr_mtu;
        bool ipsec_out_sso_pffunc;
        bool custom_sa_action;
+       bool local_meta_aura_ena;
+       uint32_t meta_buf_sz;
        /* End of input parameters */
        /* LMT line base for "Per Core Tx LMT line" mode*/
        uintptr_t lmt_base;
        bool io_enabled;
        bool rx_ptp_ena;
        uint16_t cints;
+       uint32_t buf_sz;
+       uint64_t meta_aura_handle;
+       uintptr_t meta_mempool;
 
 #define ROC_NIX_MEM_SZ (6 * 1056)
        uint8_t reserved[ROC_NIX_MEM_SZ] __plt_cache_aligned;
diff --git a/drivers/common/cnxk/roc_nix_fc.c b/drivers/common/cnxk/roc_nix_fc.c
index 7574a88bf6..cec83b31f3 100644
--- a/drivers/common/cnxk/roc_nix_fc.c
+++ b/drivers/common/cnxk/roc_nix_fc.c
@@ -295,11 +295,16 @@ nix_fc_rq_config_set(struct roc_nix *roc_nix, struct roc_nix_fc_cfg *fc_cfg)
        if (sso_ena < 0)
                return -EINVAL;
 
-       if (sso_ena)
+       if (sso_ena) {
                roc_nix_fc_npa_bp_cfg(roc_nix, fc_cfg->rq_cfg.pool,
                                      fc_cfg->rq_cfg.enable, true,
                                      fc_cfg->rq_cfg.tc);
 
+               if (roc_nix->local_meta_aura_ena)
+                       roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
+                                             fc_cfg->rq_cfg.enable, true, fc_cfg->rq_cfg.tc);
+       }
+
        /* Copy RQ config to CQ config as they are occupying same area */
        memset(&tmp, 0, sizeof(tmp));
        tmp.type = ROC_NIX_FC_CQ_CFG;
diff --git a/drivers/common/cnxk/roc_nix_inl.c b/drivers/common/cnxk/roc_nix_inl.c
index 19f500ee54..076d83e8d5 100644
--- a/drivers/common/cnxk/roc_nix_inl.c
+++ b/drivers/common/cnxk/roc_nix_inl.c
@@ -20,97 +20,134 @@ PLT_STATIC_ASSERT(ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ ==
                  1UL << ROC_NIX_INL_OT_IPSEC_OUTB_SA_SZ_LOG2);
 
 static int
-nix_inl_meta_aura_destroy(void)
+nix_inl_meta_aura_destroy(struct roc_nix *roc_nix)
 {
        struct idev_cfg *idev = idev_get_cfg();
        struct idev_nix_inl_cfg *inl_cfg;
+       char mempool_name[24] = {'\0'};
+       char *mp_name = NULL;
+       uint64_t *meta_aura;
        int rc;
 
        if (!idev)
                return -EINVAL;
 
        inl_cfg = &idev->inl_cfg;
+       if (roc_nix->local_meta_aura_ena) {
+               meta_aura = &roc_nix->meta_aura_handle;
+               snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
+                        roc_nix->port_id + 1);
+               mp_name = mempool_name;
+       } else {
+               meta_aura = &inl_cfg->meta_aura;
+       }
+
        /* Destroy existing Meta aura */
-       if (inl_cfg->meta_aura) {
+       if (*meta_aura) {
                uint64_t avail, limit;
 
                /* Check if all buffers are back to pool */
-               avail = roc_npa_aura_op_available(inl_cfg->meta_aura);
-               limit = roc_npa_aura_op_limit_get(inl_cfg->meta_aura);
+               avail = roc_npa_aura_op_available(*meta_aura);
+               limit = roc_npa_aura_op_limit_get(*meta_aura);
                if (avail != limit)
                        plt_warn("Not all buffers are back to meta pool,"
                                 " %" PRIu64 " != %" PRIu64, avail, limit);
 
-               rc = meta_pool_cb(&inl_cfg->meta_aura, 0, 0, true);
+               rc = meta_pool_cb(meta_aura, &roc_nix->meta_mempool, 0, 0, true, mp_name);
                if (rc) {
                        plt_err("Failed to destroy meta aura, rc=%d", rc);
                        return rc;
                }
-               inl_cfg->meta_aura = 0;
-               inl_cfg->buf_sz = 0;
-               inl_cfg->nb_bufs = 0;
-               inl_cfg->refs = 0;
+
+               if (!roc_nix->local_meta_aura_ena) {
+                       inl_cfg->meta_aura = 0;
+                       inl_cfg->buf_sz = 0;
+                       inl_cfg->nb_bufs = 0;
+               } else
+                       roc_nix->buf_sz = 0;
        }
 
        return 0;
 }
 
 static int
-nix_inl_meta_aura_create(struct idev_cfg *idev, uint16_t first_skip)
+nix_inl_meta_aura_create(struct idev_cfg *idev, struct roc_nix *roc_nix, uint16_t first_skip,
+                        uint64_t *meta_aura)
 {
        uint64_t mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
        struct idev_nix_inl_cfg *inl_cfg;
        struct nix_inl_dev *nix_inl_dev;
+       int port_id = roc_nix->port_id;
+       char mempool_name[24] = {'\0'};
+       struct roc_nix_rq *inl_rq;
        uint32_t nb_bufs, buf_sz;
+       char *mp_name = NULL;
+       uint16_t inl_rq_id;
+       uintptr_t mp;
        int rc;
 
        inl_cfg = &idev->inl_cfg;
        nix_inl_dev = idev->nix_inl_dev;
 
-       /* Override meta buf count from devargs if present */
-       if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
-               nb_bufs = nix_inl_dev->nb_meta_bufs;
-       else
-               nb_bufs = roc_npa_buf_type_limit_get(mask);
-
-       /* Override meta buf size from devargs if present */
-       if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
-               buf_sz = nix_inl_dev->meta_buf_sz;
-       else
-               buf_sz = first_skip + NIX_INL_META_SIZE;
+       if (roc_nix->local_meta_aura_ena) {
+               /* Per LF Meta Aura */
+               inl_rq_id = nix_inl_dev->nb_rqs > 1 ? port_id : 0;
+               inl_rq = &nix_inl_dev->rqs[inl_rq_id];
+
+               nb_bufs = roc_npa_aura_op_limit_get(inl_rq->aura_handle);
+               if (inl_rq->spb_ena)
+                       nb_bufs += roc_npa_aura_op_limit_get(inl_rq->spb_aura_handle);
+
+               /* Override meta buf size from NIX devargs if present */
+               if (roc_nix->meta_buf_sz)
+                       buf_sz = roc_nix->meta_buf_sz;
+               else
+                       buf_sz = first_skip + NIX_INL_META_SIZE;
+
+               /* Create Metapool name */
+               snprintf(mempool_name, sizeof(mempool_name), "NIX_INL_META_POOL_%d",
+                        roc_nix->port_id + 1);
+               mp_name = mempool_name;
+       } else {
+               /* Global Meta Aura (Aura 0) */
+               /* Override meta buf count from devargs if present */
+               if (nix_inl_dev && nix_inl_dev->nb_meta_bufs)
+                       nb_bufs = nix_inl_dev->nb_meta_bufs;
+               else
+                       nb_bufs = roc_npa_buf_type_limit_get(mask);
+
+               /* Override meta buf size from devargs if present */
+               if (nix_inl_dev && nix_inl_dev->meta_buf_sz)
+                       buf_sz = nix_inl_dev->meta_buf_sz;
+               else
+                       buf_sz = first_skip + NIX_INL_META_SIZE;
+       }
 
        /* Allocate meta aura */
-       rc = meta_pool_cb(&inl_cfg->meta_aura, buf_sz, nb_bufs, false);
+       rc = meta_pool_cb(meta_aura, &mp, buf_sz, nb_bufs, false, mp_name);
        if (rc) {
                plt_err("Failed to allocate meta aura, rc=%d", rc);
                return rc;
        }
+       roc_nix->meta_mempool = mp;
+
+       if (!roc_nix->local_meta_aura_ena) {
+               inl_cfg->buf_sz = buf_sz;
+               inl_cfg->nb_bufs = nb_bufs;
+       } else
+               roc_nix->buf_sz = buf_sz;
 
-       inl_cfg->buf_sz = buf_sz;
-       inl_cfg->nb_bufs = nb_bufs;
        return 0;
 }
 
-int
-roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)
+static int
+nix_inl_global_meta_buffer_validate(struct idev_cfg *idev, struct roc_nix_rq *rq)
 {
-       struct idev_cfg *idev = idev_get_cfg();
        struct idev_nix_inl_cfg *inl_cfg;
        uint32_t actual, expected;
        uint64_t mask, type_mask;
-       int rc;
 
-       if (!idev || !meta_pool_cb)
-               return -EFAULT;
        inl_cfg = &idev->inl_cfg;
-
-       /* Create meta aura if not present */
-       if (!inl_cfg->meta_aura) {
-               rc = nix_inl_meta_aura_create(idev, rq->first_skip);
-               if (rc)
-                       return rc;
-       }
-
        /* Validate if we have enough meta buffers */
        mask = BIT_ULL(ROC_NPA_BUF_TYPE_PACKET_IPSEC);
        expected = roc_npa_buf_type_limit_get(mask);
@@ -145,7 +182,7 @@ roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)
                        expected = roc_npa_buf_type_limit_get(mask);
 
                        if (actual < expected) {
-                               plt_err("VWQE aura shared b/w Inline inbound and non-Inline inbound "
+                               plt_err("VWQE aura shared b/w Inline inbound and non-Inline "
                                        "ports needs vwqe bufs(%u) minimum of all pkt bufs (%u)",
                                        actual, expected);
                                return -EIO;
@@ -164,6 +201,71 @@ roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq)
                        }
                }
        }
+       return 0;
+}
+
+static int
+nix_inl_local_meta_buffer_validate(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
+{
+       /* Validate if we have enough space for meta buffer */
+       if (roc_nix->buf_sz && (rq->first_skip + NIX_INL_META_SIZE > 
roc_nix->buf_sz)) {
+               plt_err("Meta buffer size %u not sufficient to meet RQ first 
skip %u",
+                       roc_nix->buf_sz, rq->first_skip);
+               return -EIO;
+       }
+
+       /* TODO: Validate VWQE buffers */
+
+       return 0;
+}
+
+int
+roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
+{
+       struct nix *nix = roc_nix_to_nix_priv(roc_nix);
+       struct idev_cfg *idev = idev_get_cfg();
+       struct idev_nix_inl_cfg *inl_cfg;
+       bool aura_setup = false;
+       uint64_t *meta_aura;
+       int rc;
+
+       if (!idev || !meta_pool_cb)
+               return -EFAULT;
+
+       inl_cfg = &idev->inl_cfg;
+
+       /* Create meta aura if not present */
+       if (roc_nix->local_meta_aura_ena)
+               meta_aura = &roc_nix->meta_aura_handle;
+       else
+               meta_aura = &inl_cfg->meta_aura;
+
+       if (!(*meta_aura)) {
+               rc = nix_inl_meta_aura_create(idev, roc_nix, rq->first_skip, meta_aura);
+               if (rc)
+                       return rc;
+
+               aura_setup = true;
+       }
+       /* Update rq meta aura handle */
+       rq->meta_aura_handle = *meta_aura;
+
+       if (roc_nix->local_meta_aura_ena) {
+               rc = nix_inl_local_meta_buffer_validate(roc_nix, rq);
+               if (rc)
+                       return rc;
+
+               /* Check for TC config on RQ 0 when local meta aura is used as
+                * inline meta aura creation is delayed.
+                */
+               if (aura_setup && nix->rqs[0] && nix->rqs[0]->tc != ROC_NIX_PFC_CLASS_INVALID)
+                       roc_nix_fc_npa_bp_cfg(roc_nix, roc_nix->meta_aura_handle,
+                                             true, true, nix->rqs[0]->tc);
+       } else {
+               rc = nix_inl_global_meta_buffer_validate(idev, rq);
+               if (rc)
+                       return rc;
+       }
 
        return 0;
 }
@@ -426,6 +528,7 @@ nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
        struct idev_nix_inl_cfg *inl_cfg;
        uint64_t aura_handle;
        int rc = -ENOSPC;
+       uint32_t buf_sz;
        int i;
 
        if (!idev)
@@ -473,10 +576,21 @@ nix_inl_rq_mask_cfg(struct roc_nix *roc_nix, bool enable)
        msk_req->rq_mask.xqe_drop_ena = 0;
        msk_req->rq_mask.spb_ena = 0;
 
-       aura_handle = roc_npa_zero_aura_handle();
+       if (roc_nix->local_meta_aura_ena) {
+               aura_handle = roc_nix->meta_aura_handle;
+               buf_sz = roc_nix->buf_sz;
+               if (!aura_handle && enable) {
+                       plt_err("NULL meta aura handle");
+                       goto exit;
+               }
+       } else {
+               aura_handle = roc_npa_zero_aura_handle();
+               buf_sz = inl_cfg->buf_sz;
+       }
+
        msk_req->ipsec_cfg1.spb_cpt_aura = roc_npa_aura_handle_to_aura(aura_handle);
        msk_req->ipsec_cfg1.rq_mask_enable = enable;
-       msk_req->ipsec_cfg1.spb_cpt_sizem1 = (inl_cfg->buf_sz >> 7) - 1;
+       msk_req->ipsec_cfg1.spb_cpt_sizem1 = (buf_sz >> 7) - 1;
        msk_req->ipsec_cfg1.spb_cpt_enable = enable;
 
        rc = mbox_process(mbox);
@@ -539,7 +653,8 @@ roc_nix_inl_inb_init(struct roc_nix *roc_nix)
 
        if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
                nix->need_meta_aura = true;
-               idev->inl_cfg.refs++;
+               if (!roc_nix->local_meta_aura_ena)
+                       idev->inl_cfg.refs++;
        }
 
        nix->inl_inb_ena = true;
@@ -562,9 +677,13 @@ roc_nix_inl_inb_fini(struct roc_nix *roc_nix)
        nix->inl_inb_ena = false;
        if (nix->need_meta_aura) {
                nix->need_meta_aura = false;
-               idev->inl_cfg.refs--;
-               if (!idev->inl_cfg.refs)
-                       nix_inl_meta_aura_destroy();
+               if (roc_nix->local_meta_aura_ena) {
+                       nix_inl_meta_aura_destroy(roc_nix);
+               } else {
+                       idev->inl_cfg.refs--;
+                       if (!idev->inl_cfg.refs)
+                               nix_inl_meta_aura_destroy(roc_nix);
+               }
        }
 
        if (roc_feature_nix_has_inl_rq_mask()) {
@@ -968,7 +1087,7 @@ roc_nix_inl_dev_rq_get(struct roc_nix_rq *rq, bool enable)
 
        /* Check meta aura */
        if (enable && nix->need_meta_aura) {
-               rc = roc_nix_inl_meta_aura_check(rq);
+               rc = roc_nix_inl_meta_aura_check(rq->roc_nix, rq);
                if (rc)
                        return rc;
        }
@@ -1058,7 +1177,7 @@ roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool enable)
                        return rc;
 
                if (enable && nix->need_meta_aura)
-                       return roc_nix_inl_meta_aura_check(inl_rq);
+                       return roc_nix_inl_meta_aura_check(roc_nix, inl_rq);
        }
        return 0;
 }
@@ -1084,15 +1203,22 @@ roc_nix_inl_inb_set(struct roc_nix *roc_nix, bool ena)
         * managed outside RoC.
         */
        nix->inl_inb_ena = ena;
-       if (!roc_model_is_cn9k() && !roc_errata_nix_no_meta_aura()) {
-               if (ena) {
-                       nix->need_meta_aura = true;
+
+       if (roc_model_is_cn9k() || roc_errata_nix_no_meta_aura())
+               return;
+
+       if (ena) {
+               nix->need_meta_aura = true;
+               if (!roc_nix->local_meta_aura_ena)
                        idev->inl_cfg.refs++;
-               } else if (nix->need_meta_aura) {
-                       nix->need_meta_aura = false;
+       } else if (nix->need_meta_aura) {
+               nix->need_meta_aura = false;
+               if (roc_nix->local_meta_aura_ena) {
+                       nix_inl_meta_aura_destroy(roc_nix);
+               } else {
                        idev->inl_cfg.refs--;
                        if (!idev->inl_cfg.refs)
-                               nix_inl_meta_aura_destroy();
+                               nix_inl_meta_aura_destroy(roc_nix);
                }
        }
 }
diff --git a/drivers/common/cnxk/roc_nix_inl.h b/drivers/common/cnxk/roc_nix_inl.h
index 105a9e4ec4..6220ba6773 100644
--- a/drivers/common/cnxk/roc_nix_inl.h
+++ b/drivers/common/cnxk/roc_nix_inl.h
@@ -118,8 +118,9 @@ roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd(void *sa)
 typedef void (*roc_nix_inl_sso_work_cb_t)(uint64_t *gw, void *args,
                                          uint32_t soft_exp_event);
 
-typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle, uint32_t blk_sz, uint32_t nb_bufs,
-                                         bool destroy);
+typedef int (*roc_nix_inl_meta_pool_cb_t)(uint64_t *aura_handle,  uintptr_t *mpool,
+                                         uint32_t blk_sz, uint32_t nb_bufs, bool destroy,
+                                         const char *mempool_name);
 
 struct roc_nix_inl_dev {
        /* Input parameters */
@@ -181,7 +182,7 @@ int __roc_api roc_nix_reassembly_configure(uint32_t max_wait_time,
 int __roc_api roc_nix_inl_ts_pkind_set(struct roc_nix *roc_nix, bool ts_ena,
                                       bool inb_inl_dev);
 int __roc_api roc_nix_inl_rq_ena_dis(struct roc_nix *roc_nix, bool ena);
-int __roc_api roc_nix_inl_meta_aura_check(struct roc_nix_rq *rq);
+int __roc_api roc_nix_inl_meta_aura_check(struct roc_nix *roc_nix, struct roc_nix_rq *rq);
 
 /* NIX Inline Outbound API */
 int __roc_api roc_nix_inl_outb_init(struct roc_nix *roc_nix);
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 33b2cdf90f..464ee0b984 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -102,7 +102,7 @@ roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable)
 
        /* Check for meta aura if RQ is enabled */
        if (enable && nix->need_meta_aura)
-               rc = roc_nix_inl_meta_aura_check(rq);
+               rc = roc_nix_inl_meta_aura_check(rq->roc_nix, rq);
        return rc;
 }
 
@@ -691,7 +691,7 @@ roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 
        /* Check for meta aura if RQ is enabled */
        if (ena && nix->need_meta_aura) {
-               rc = roc_nix_inl_meta_aura_check(rq);
+               rc = roc_nix_inl_meta_aura_check(roc_nix, rq);
                if (rc)
                        return rc;
        }
@@ -745,7 +745,7 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 
        /* Check for meta aura if RQ is enabled */
        if (ena && nix->need_meta_aura) {
-               rc = roc_nix_inl_meta_aura_check(rq);
+               rc = roc_nix_inl_meta_aura_check(roc_nix, rq);
                if (rc)
                        return rc;
        }
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 8e74edff55..b1cf43ee57 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -843,7 +843,7 @@ cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 }
 
 static void
-cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, uint64_t meta_aura)
+cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 {
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;
@@ -855,8 +855,6 @@ cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem, u
                ws->tstamp = dev->tstamp;
                if (lookup_mem)
                        ws->lookup_mem = lookup_mem;
-               if (meta_aura)
-                       ws->meta_aura = meta_aura;
        }
 }
 
@@ -867,7 +865,6 @@ cn10k_sso_rx_adapter_queue_add(
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
 {
        struct cn10k_eth_rxq *rxq;
-       uint64_t meta_aura;
        void *lookup_mem;
        int rc;
 
@@ -881,8 +878,7 @@ cn10k_sso_rx_adapter_queue_add(
                return -EINVAL;
        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
-       meta_aura = rxq->meta_aura;
-       cn10k_sso_set_priv_mem(event_dev, lookup_mem, meta_aura);
+       cn10k_sso_set_priv_mem(event_dev, lookup_mem);
        cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
        return 0;
@@ -1056,7 +1052,7 @@ cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
        cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
        ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);
-       cn10k_sso_set_priv_mem(event_dev, NULL, 0);
+       cn10k_sso_set_priv_mem(event_dev, NULL);
 
        return ret;
 }
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index 2bea1f6ca6..06c71c6092 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -55,9 +55,10 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
        struct cnxk_timesync_info *tstamp = ws->tstamp[port_id];
        void *lookup_mem = ws->lookup_mem;
        uintptr_t lbase = ws->lmt_base;
+       uint64_t meta_aura = 0, laddr;
        struct rte_event_vector *vec;
-       uint64_t meta_aura, laddr;
        uint16_t nb_mbufs, non_vec;
+       struct rte_mempool *mp;
        uint16_t lmt_id, d_off;
        struct rte_mbuf **wqe;
        struct rte_mbuf *mbuf;
@@ -77,7 +78,12 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
        if (flags & NIX_RX_OFFLOAD_TSTAMP_F && tstamp)
                mbuf_init |= 8;
 
-       meta_aura = ws->meta_aura;
+       if (flags & NIX_RX_OFFLOAD_SECURITY_F) {
+               mp = (struct rte_mempool *)cnxk_nix_inl_metapool_get(port_id, lookup_mem);
+               if (mp)
+                       meta_aura = mp->pool_id;
+       }
+
        nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
        nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, wqe, nb_mbufs,
                                              flags | NIX_RX_VWQE_F,
@@ -94,7 +100,6 @@ cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags, struc
                /* Pick first mbuf's aura handle assuming all
                 * mbufs are from a vec and are from same RQ.
                 */
-               meta_aura = ws->meta_aura;
                if (!meta_aura)
                        meta_aura = mbuf->pool->pool_id;
                ROC_LMT_BASE_ID_GET(lbase, lmt_id);
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 131d42a95b..7e8339bd3a 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -945,8 +945,7 @@ cn9k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
 }
 
 static void
-cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
-                     uint64_t aura __rte_unused)
+cn9k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
 {
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;
@@ -992,7 +991,7 @@ cn9k_sso_rx_adapter_queue_add(
 
        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
-       cn9k_sso_set_priv_mem(event_dev, lookup_mem, 0);
+       cn9k_sso_set_priv_mem(event_dev, lookup_mem);
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
        return 0;
@@ -1141,7 +1140,7 @@ cn9k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
        cn9k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);
 
        ret = cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id, conf);
-       cn9k_sso_set_priv_mem(event_dev, NULL, 0);
+       cn9k_sso_set_priv_mem(event_dev, NULL);
 
        return ret;
 }
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.c b/drivers/event/cnxk/cnxk_tim_evdev.c
index fac3806e14..121480df15 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.c
+++ b/drivers/event/cnxk/cnxk_tim_evdev.c
@@ -265,7 +265,7 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
        cnxk_sso_updt_xae_cnt(cnxk_sso_pmd_priv(dev->event_dev), tim_ring,
                              RTE_EVENT_TYPE_TIMER);
        cnxk_sso_xae_reconfigure(dev->event_dev);
-       sso_set_priv_mem_fn(dev->event_dev, NULL, 0);
+       sso_set_priv_mem_fn(dev->event_dev, NULL);
 
        plt_tim_dbg(
                "Total memory used %" PRIu64 "MB\n",
diff --git a/drivers/event/cnxk/cnxk_tim_evdev.h b/drivers/event/cnxk/cnxk_tim_evdev.h
index 7253a37d3d..3a0b036cb4 100644
--- a/drivers/event/cnxk/cnxk_tim_evdev.h
+++ b/drivers/event/cnxk/cnxk_tim_evdev.h
@@ -81,7 +81,7 @@
        (TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))
 
 typedef void (*cnxk_sso_set_priv_mem_t)(const struct rte_eventdev *event_dev,
-                                       void *lookup_mem, uint64_t aura);
+                                       void *lookup_mem);
 
 struct cnxk_tim_ctl {
        uint16_t ring;
diff --git a/drivers/net/cnxk/cn10k_ethdev.c b/drivers/net/cnxk/cn10k_ethdev.c
index 2dbca698af..019c8299ce 100644
--- a/drivers/net/cnxk/cn10k_ethdev.c
+++ b/drivers/net/cnxk/cn10k_ethdev.c
@@ -362,6 +362,8 @@ cn10k_nix_rx_queue_meta_aura_update(struct rte_eth_dev *eth_dev)
                        rxq->meta_aura = rxq_sp->qconf.mp->pool_id;
                }
        }
+       /* Store mempool in lookup mem */
+       cnxk_nix_lookup_mem_metapool_set(dev);
 }
 
 static int
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index d8ccd307a8..1cae3084e1 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -275,6 +275,8 @@ nix_security_release(struct cnxk_eth_dev *dev)
                        plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
                ret |= rc;
 
+               cnxk_nix_lookup_mem_metapool_clear(dev);
+
                if (dev->inb.sa_dptr) {
                        plt_free(dev->inb.sa_dptr);
                        dev->inb.sa_dptr = NULL;
@@ -1852,6 +1854,8 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
        nix->pci_dev = pci_dev;
        nix->hw_vlan_ins = true;
        nix->port_id = eth_dev->data->port_id;
+       if (roc_feature_nix_has_own_meta_aura())
+               nix->local_meta_aura_ena = true;
        rc = roc_nix_dev_init(nix);
        if (rc) {
                plt_err("Failed to initialize roc nix rc=%d", rc);
diff --git a/drivers/net/cnxk/cnxk_ethdev.h b/drivers/net/cnxk/cnxk_ethdev.h
index f0eab4244c..12c56ccd55 100644
--- a/drivers/net/cnxk/cnxk_ethdev.h
+++ b/drivers/net/cnxk/cnxk_ethdev.h
@@ -594,6 +594,8 @@ int cnxk_eth_outb_sa_idx_get(struct cnxk_eth_dev *dev, uint32_t *idx_p,
 int cnxk_eth_outb_sa_idx_put(struct cnxk_eth_dev *dev, uint32_t idx);
 int cnxk_nix_lookup_mem_sa_base_set(struct cnxk_eth_dev *dev);
 int cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev);
+int cnxk_nix_lookup_mem_metapool_set(struct cnxk_eth_dev *dev);
+int cnxk_nix_lookup_mem_metapool_clear(struct cnxk_eth_dev *dev);
 __rte_internal
 int cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev);
 struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
@@ -601,8 +603,8 @@ struct cnxk_eth_sec_sess *cnxk_eth_sec_sess_get_by_spi(struct cnxk_eth_dev *dev,
 struct cnxk_eth_sec_sess *
 cnxk_eth_sec_sess_get_by_sess(struct cnxk_eth_dev *dev,
                              struct rte_security_session *sess);
-int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs,
-                             bool destroy);
+int cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,
+                             uint32_t nb_bufs, bool destroy, const char *mempool_name);
 
 /* Congestion Management */
 int cnxk_nix_cman_info_get(struct rte_eth_dev *dev, struct rte_eth_cman_info *info);
diff --git a/drivers/net/cnxk/cnxk_ethdev_devargs.c b/drivers/net/cnxk/cnxk_ethdev_devargs.c
index dbf5bd847d..e1a0845ece 100644
--- a/drivers/net/cnxk/cnxk_ethdev_devargs.c
+++ b/drivers/net/cnxk/cnxk_ethdev_devargs.c
@@ -182,6 +182,22 @@ parse_sqb_count(const char *key, const char *value, void *extra_args)
        return 0;
 }
 
+static int
+parse_meta_bufsize(const char *key, const char *value, void *extra_args)
+{
+       RTE_SET_USED(key);
+       uint32_t val;
+
+       errno = 0;
+       val = strtoul(value, NULL, 0);
+       if (errno)
+               val = 0;
+
+       *(uint32_t *)extra_args = val;
+
+       return 0;
+}
+
 static int
 parse_switch_header_type(const char *key, const char *value, void *extra_args)
 {
@@ -248,6 +264,7 @@ parse_sdp_channel_mask(const char *key, const char *value, void *extra_args)
 #define CNXK_FLOW_PRE_L2_INFO  "flow_pre_l2_info"
 #define CNXK_CUSTOM_SA_ACT     "custom_sa_act"
 #define CNXK_SQB_SLACK         "sqb_slack"
+#define CNXK_NIX_META_BUF_SZ   "meta_buf_sz"
 
 int
 cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
@@ -270,6 +287,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
        uint16_t tx_compl_ena = 0;
        uint16_t custom_sa_act = 0;
        struct rte_kvargs *kvlist;
+       uint32_t meta_buf_sz = 0;
        uint16_t no_inl_dev = 0;
        uint8_t lock_rx_ctx = 0;
 
@@ -319,6 +337,7 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
                           &custom_sa_act);
        rte_kvargs_process(kvlist, CNXK_SQB_SLACK, &parse_sqb_count,
                           &sqb_slack);
+       rte_kvargs_process(kvlist, CNXK_NIX_META_BUF_SZ, &parse_meta_bufsize, &meta_buf_sz);
        rte_kvargs_free(kvlist);
 
 null_devargs:
@@ -337,6 +356,10 @@ cnxk_ethdev_parse_devargs(struct rte_devargs *devargs, struct cnxk_eth_dev *dev)
        dev->nix.lock_rx_ctx = lock_rx_ctx;
        dev->nix.custom_sa_action = custom_sa_act;
        dev->nix.sqb_slack = sqb_slack;
+
+       if (roc_feature_nix_has_own_meta_aura())
+               dev->nix.meta_buf_sz = meta_buf_sz;
+
        dev->npc.flow_prealloc_size = flow_prealloc_size;
        dev->npc.flow_max_priority = flow_max_priority;
        dev->npc.switch_header_type = switch_header_type;
diff --git a/drivers/net/cnxk/cnxk_ethdev_dp.h b/drivers/net/cnxk/cnxk_ethdev_dp.h
index a812c78eda..c1f99a2616 100644
--- a/drivers/net/cnxk/cnxk_ethdev_dp.h
+++ b/drivers/net/cnxk/cnxk_ethdev_dp.h
@@ -34,6 +34,9 @@
 #define ERRCODE_ERRLEN_WIDTH 12
 #define ERR_ARRAY_SZ        ((BIT(ERRCODE_ERRLEN_WIDTH)) * sizeof(uint32_t))
 
+#define SA_BASE_TBL_SZ (RTE_MAX_ETHPORTS * sizeof(uintptr_t))
+#define MEMPOOL_TBL_SZ (RTE_MAX_ETHPORTS * sizeof(uintptr_t))
+
 #define CNXK_NIX_UDP_TUN_BITMASK                                                     \
        ((1ull << (RTE_MBUF_F_TX_TUNNEL_VXLAN >> 45)) |                                \
         (1ull << (RTE_MBUF_F_TX_TUNNEL_GENEVE >> 45)))
@@ -164,4 +167,14 @@ cnxk_nix_sa_base_get(uint16_t port, const void *lookup_mem)
        return *((const uintptr_t *)sa_base_tbl + port);
 }
 
+static __rte_always_inline uintptr_t
+cnxk_nix_inl_metapool_get(uint16_t port, const void *lookup_mem)
+{
+       uintptr_t metapool_tbl;
+
+       metapool_tbl = (uintptr_t)lookup_mem;
+       metapool_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ;
+       return *((const uintptr_t *)metapool_tbl + port);
+}
+
 #endif /* __CNXK_ETHDEV_DP_H__ */
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c b/drivers/net/cnxk/cnxk_ethdev_sec.c
index 6c71f9554b..aa8a378a00 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -38,15 +38,22 @@ bitmap_ctzll(uint64_t slab)
 }
 
 int
-cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bufs, bool destroy)
+cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t *mpool, uint32_t buf_sz,
+                         uint32_t nb_bufs, bool destroy, const char *mempool_name)
 {
-       const char *mp_name = CNXK_NIX_INL_META_POOL_NAME;
+       const char *mp_name = NULL;
        struct rte_pktmbuf_pool_private mbp_priv;
        struct npa_aura_s *aura;
        struct rte_mempool *mp;
        uint16_t first_skip;
        int rc;
 
+       /* A NULL mempool name indicates the global meta pool (zero aura). */
+       if (!mempool_name)
+               mp_name = CNXK_NIX_INL_META_POOL_NAME;
+       else
+               mp_name = mempool_name;
+
        /* Destroy the mempool if requested */
        if (destroy) {
                mp = rte_mempool_lookup(mp_name);
@@ -62,6 +69,7 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bu
                rte_mempool_free(mp);
 
                *aura_handle = 0;
+               *mpool = 0;
                return 0;
        }
 
@@ -83,10 +91,12 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bu
                goto free_mp;
        }
        aura->ena = 1;
-       aura->pool_addr = 0x0;
+       if (!mempool_name)
+               aura->pool_addr = 0;
+       else
+               aura->pool_addr = 1; /* Any non zero value, so that alloc from next free Index */
 
-       rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
-                                       aura);
-       rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
-                                       aura);
+       rc = rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(), aura);
        if (rc) {
                plt_err("Failed to setup mempool ops for meta, rc=%d", rc);
                goto free_aura;
@@ -108,6 +118,7 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uint32_t buf_sz, uint32_t nb_bu
 
        rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
        *aura_handle = mp->pool_id;
+       *mpool = (uintptr_t)mp;
        return 0;
 free_aura:
        plt_free(aura);
diff --git a/drivers/net/cnxk/cnxk_lookup.c b/drivers/net/cnxk/cnxk_lookup.c
index 6d561f194f..c0a7129a9c 100644
--- a/drivers/net/cnxk/cnxk_lookup.c
+++ b/drivers/net/cnxk/cnxk_lookup.c
@@ -7,8 +7,7 @@
 
 #include "cnxk_ethdev.h"
 
-#define SA_BASE_TBL_SZ (RTE_MAX_ETHPORTS * sizeof(uintptr_t))
-#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ)
+#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ + MEMPOOL_TBL_SZ)
 const uint32_t *
 cnxk_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 {
@@ -371,3 +370,37 @@ cnxk_nix_lookup_mem_sa_base_clear(struct cnxk_eth_dev *dev)
        *((uintptr_t *)sa_base_tbl + port) = 0;
        return 0;
 }
+
+int
+cnxk_nix_lookup_mem_metapool_set(struct cnxk_eth_dev *dev)
+{
+       void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
+       uint16_t port = dev->eth_dev->data->port_id;
+       uintptr_t mp_tbl;
+
+       if (!lookup_mem)
+               return -EIO;
+
+       /* Set Mempool in lookup mem */
+       mp_tbl = (uintptr_t)lookup_mem;
+       mp_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ;
+       *((uintptr_t *)mp_tbl + port) = dev->nix.meta_mempool;
+       return 0;
+}
+
+int
+cnxk_nix_lookup_mem_metapool_clear(struct cnxk_eth_dev *dev)
+{
+       void *lookup_mem = cnxk_nix_fastpath_lookup_mem_get();
+       uint16_t port = dev->eth_dev->data->port_id;
+       uintptr_t mp_tbl;
+
+       if (!lookup_mem)
+               return -EIO;
+
+       /* Clear Mempool in lookup mem */
+       mp_tbl = (uintptr_t)lookup_mem;
+       mp_tbl += PTYPE_ARRAY_SZ + ERR_ARRAY_SZ + SA_BASE_TBL_SZ;
+       *((uintptr_t *)mp_tbl + port) = 0;
+       return 0;
+}
-- 
2.25.1
