From: Kalesh AP <kalesh-anakkur.pura...@broadcom.com>

Newer devices like SR2 may have on-chip backing store and do not
require host-backed memory allocation.

In these cases, HWRM_FUNC_BACKING_STORE_QCAPS will return a zero entry
size to indicate contexts for which the host should not allocate backing
store.

Selectively allocate context memory based on device capabilities and
only enable backing store for the appropriate contexts.
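
For illustration, the per-context skip logic reduces to the pattern
below (a minimal sketch; the alloc_ctx_if_needed() helper is
hypothetical and not part of this patch, while bnxt_alloc_ctx_mem_blk()
and its parameters follow the calls visible in the diff):

    /* Allocate host backing store for one context type. Skip the
     * allocation when firmware reports a zero entry size, meaning
     * the device backs this context on-chip.
     */
    static int alloc_ctx_if_needed(struct bnxt *bp,
                                   struct bnxt_ctx_pg_info *ctx_pg,
                                   uint32_t entry_size, uint32_t entries,
                                   const char *name)
    {
            uint32_t mem_size;

            if (entry_size == 0)
                    return 0;   /* chip-backed: no host memory needed */

            mem_size = entry_size * entries;
            return bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, name, 0);
    }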

Signed-off-by: Kalesh AP <kalesh-anakkur.pura...@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khapa...@broadcom.com>
---
 drivers/net/bnxt/bnxt_ethdev.c | 60 ++++++++++++++++++++--------------
 drivers/net/bnxt/bnxt_hwrm.c   |  3 ++
 2 files changed, 39 insertions(+), 24 deletions(-)

diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 8ca4fb151..e11751cc1 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -4212,39 +4212,49 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
 
        ctx_pg = &ctx->qp_mem;
        ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
-       mem_size = ctx->qp_entry_size * ctx_pg->entries;
-       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
-       if (rc)
-               return rc;
+       if (ctx->qp_entry_size) {
+               mem_size = ctx->qp_entry_size * ctx_pg->entries;
+               rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "qp_mem", 0);
+               if (rc)
+                       return rc;
+       }
 
        ctx_pg = &ctx->srq_mem;
        ctx_pg->entries = ctx->srq_max_l2_entries;
-       mem_size = ctx->srq_entry_size * ctx_pg->entries;
-       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
-       if (rc)
-               return rc;
+       if (ctx->srq_entry_size) {
+               mem_size = ctx->srq_entry_size * ctx_pg->entries;
+               rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "srq_mem", 0);
+               if (rc)
+                       return rc;
+       }
 
        ctx_pg = &ctx->cq_mem;
        ctx_pg->entries = ctx->cq_max_l2_entries;
-       mem_size = ctx->cq_entry_size * ctx_pg->entries;
-       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
-       if (rc)
-               return rc;
+       if (ctx->cq_entry_size) {
+               mem_size = ctx->cq_entry_size * ctx_pg->entries;
+               rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "cq_mem", 0);
+               if (rc)
+                       return rc;
+       }
 
        ctx_pg = &ctx->vnic_mem;
        ctx_pg->entries = ctx->vnic_max_vnic_entries +
                ctx->vnic_max_ring_table_entries;
-       mem_size = ctx->vnic_entry_size * ctx_pg->entries;
-       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 0);
-       if (rc)
-               return rc;
+       if (ctx->vnic_entry_size) {
+               mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+               rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "vnic_mem", 
0);
+               if (rc)
+                       return rc;
+       }
 
        ctx_pg = &ctx->stat_mem;
        ctx_pg->entries = ctx->stat_max_entries;
-       mem_size = ctx->stat_entry_size * ctx_pg->entries;
-       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 0);
-       if (rc)
-               return rc;
+       if (ctx->stat_entry_size) {
+               mem_size = ctx->stat_entry_size * ctx_pg->entries;
+               rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "stat_mem", 
0);
+               if (rc)
+                       return rc;
+       }
 
        min = ctx->tqm_min_entries_per_ring;
 
@@ -4260,10 +4270,12 @@ int bnxt_alloc_ctx_mem(struct bnxt *bp)
        for (i = 0, ena = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
                ctx_pg = ctx->tqm_mem[i];
                ctx_pg->entries = i ? entries : entries_sp;
-               mem_size = ctx->tqm_entry_size * ctx_pg->entries;
-               rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
-               if (rc)
-                       return rc;
+               if (ctx->tqm_entry_size) {
+                       mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+                       rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size, "tqm_mem", i);
+                       if (rc)
+                               return rc;
+               }
                ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
        }
 
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 56e2e33a9..6d54b1656 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -64,6 +64,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
                                  uint8_t *pg_attr,
                                  uint64_t *pg_dir)
 {
+       if (rmem->nr_pages == 0)
+               return;
+
        if (rmem->nr_pages > 1) {
                *pg_attr = 1;
                *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
-- 
2.21.1 (Apple Git-122.3)