This patch reworks the DMA page chain allocation and free mechanism
used for fast path resources. It introduces the addr_shadow structure,
which keeps each chain page's virtual address together with its DMA
mapping, and the ecore_chain_params structure, which carries all the
DMA page chain parameters for ecore_chain_alloc().
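
With this change, chain allocation becomes a two-step flow: callers
fill an ecore_chain_params via ecore_chain_params_init() and pass it
to ecore_chain_alloc(). A minimal sketch of the new flow, mirroring
the ecore_spq.c hunk below (error handling elided):

    struct ecore_chain_params chain_params;

    /* Fills the common defaults: page_size = ECORE_CHAIN_PAGE_SIZE and
     * ext_pbl = OSAL_NULL; callers may override these before the alloc.
     */
    ecore_chain_params_init(&chain_params,
                            ECORE_CHAIN_USE_TO_PRODUCE,
                            ECORE_CHAIN_MODE_PBL,
                            ECORE_CHAIN_CNT_TYPE_U16,
                            num_elem,
                            sizeof(union event_ring_element));

    if (ecore_chain_alloc(p_hwfn->p_dev, &p_eq->chain, &chain_params))
            /* chain memory was not allocated */
            goto eq_allocate_fail;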

Signed-off-by: Rasesh Mody <rm...@marvell.com>
Signed-off-by: Igor Russkikh <irussk...@marvell.com>
---
 drivers/net/qede/base/ecore_chain.h   | 241 +++++++++++++++-----------
 drivers/net/qede/base/ecore_dev.c     | 119 ++++++++-----
 drivers/net/qede/base/ecore_dev_api.h |  13 +-
 drivers/net/qede/base/ecore_spq.c     |  55 +++---
 drivers/net/qede/qede_if.h            |  20 +--
 drivers/net/qede/qede_main.c          |   1 +
 drivers/net/qede/qede_rxtx.c          |  67 +++----
 7 files changed, 301 insertions(+), 215 deletions(-)
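
Note on the free path: each addr_shadow entry pairs a page's virtual
address with its DMA mapping, so ecore_chain_free_pbl() now reads the
mapping straight from the shadow table instead of walking the PBL. A
condensed sketch of the resulting loop (taken from the ecore_dev.c
hunk below):

    struct addr_shadow *shadow = p_chain->pbl.shadow;
    u32 i;

    for (i = 0; i < p_chain->page_cnt; i++) {
            /* entries are filled in order; a hole marks the end */
            if (!shadow[i].virt_addr || !shadow[i].dma_map)
                    break;
            OSAL_DMA_FREE_COHERENT(p_dev, shadow[i].virt_addr,
                                   shadow[i].dma_map, p_chain->page_size);
    }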

diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index c69920be5..8c7971081 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -1,9 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright (c) 2016 - 2018 Cavium Inc.
+ * Copyright (c) 2018 - 2020 Marvell Semiconductor Inc.
  * All rights reserved.
- * www.cavium.com
+ * www.marvell.com
  */
-
 #ifndef __ECORE_CHAIN_H__
 #define __ECORE_CHAIN_H__
 
@@ -24,8 +24,8 @@ enum ecore_chain_mode {
 };
 
 enum ecore_chain_use_mode {
-       ECORE_CHAIN_USE_TO_PRODUCE,     /* Chain starts empty */
-       ECORE_CHAIN_USE_TO_CONSUME,     /* Chain starts full */
+       ECORE_CHAIN_USE_TO_PRODUCE,             /* Chain starts empty */
+       ECORE_CHAIN_USE_TO_CONSUME,             /* Chain starts full */
        ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,     /* Chain starts empty */
 };
 
@@ -38,35 +38,40 @@ enum ecore_chain_cnt_type {
 };
 
 struct ecore_chain_next {
-       struct regpair next_phys;
-       void *next_virt;
+       struct regpair  next_phys;
+       void            *next_virt;
 };
 
 struct ecore_chain_pbl_u16 {
-       u16 prod_page_idx;
-       u16 cons_page_idx;
+       u16     prod_page_idx;
+       u16     cons_page_idx;
 };
 
 struct ecore_chain_pbl_u32 {
-       u32 prod_page_idx;
-       u32 cons_page_idx;
+       u32     prod_page_idx;
+       u32     cons_page_idx;
 };
 
 struct ecore_chain_ext_pbl {
-       dma_addr_t p_pbl_phys;
-       void *p_pbl_virt;
+       dma_addr_t      p_pbl_phys;
+       void            *p_pbl_virt;
 };
 
 struct ecore_chain_u16 {
        /* Cyclic index of next element to produce/consme */
-       u16 prod_idx;
-       u16 cons_idx;
+       u16     prod_idx;
+       u16     cons_idx;
 };
 
 struct ecore_chain_u32 {
        /* Cyclic index of next element to produce/consme */
-       u32 prod_idx;
-       u32 cons_idx;
+       u32     prod_idx;
+       u32     cons_idx;
+};
+
+struct addr_shadow {
+       void *virt_addr;
+       dma_addr_t dma_map;
 };
 
 struct ecore_chain {
@@ -74,16 +79,17 @@ struct ecore_chain {
         * as produce / consume.
         */
        /* Point to next element to produce/consume */
-       void *p_prod_elem;
-       void *p_cons_elem;
+       void                            *p_prod_elem;
+       void                            *p_cons_elem;
 
        /* Fastpath portions of the PBL [if exists] */
 
        struct {
-               /* Table for keeping the virtual addresses of the chain pages,
-                * respectively to the physical addresses in the pbl table.
+               /* Table for keeping the virtual and physical addresses of the
+                * chain pages, respectively to the physical addresses
+                * in the pbl table.
                 */
-               void            **pp_virt_addr_tbl;
+               struct addr_shadow *shadow;
 
                union {
                        struct ecore_chain_pbl_u16      pbl_u16;
@@ -92,8 +98,8 @@ struct ecore_chain {
        } pbl;
 
        union {
-               struct ecore_chain_u16 chain16;
-               struct ecore_chain_u32 chain32;
+               struct ecore_chain_u16  chain16;
+               struct ecore_chain_u32  chain32;
        } u;
 
        /* Capacity counts only usable elements */
@@ -106,12 +112,12 @@ struct ecore_chain {
        enum ecore_chain_mode           mode;
 
        /* Elements information for fast calculations */
-       u16 elem_per_page;
-       u16 elem_per_page_mask;
-       u16 elem_size;
-       u16 next_page_mask;
-       u16 usable_per_page;
-       u8 elem_unusable;
+       u16                             elem_per_page;
+       u16                             elem_per_page_mask;
+       u16                             elem_size;
+       u16                             next_page_mask;
+       u16                             usable_per_page;
+       u8                              elem_unusable;
 
        u8                              cnt_type;
 
@@ -119,6 +125,8 @@ struct ecore_chain {
         * but isn't involved in regular functionality.
         */
 
+       u32                             page_size;
+
        /* Base address of a pre-allocated buffer for pbl */
        struct {
                dma_addr_t              p_phys_table;
@@ -140,24 +148,35 @@ struct ecore_chain {
        /* TBD - do we really need this? Couldn't find usage for it */
        bool                            b_external_pbl;
 
-       void *dp_ctx;
+       void                            *dp_ctx;
+};
+
+struct ecore_chain_params {
+       enum ecore_chain_use_mode       intended_use;
+       enum ecore_chain_mode           mode;
+       enum ecore_chain_cnt_type       cnt_type;
+       u32                             num_elems;
+       osal_size_t                     elem_size;
+       u32                             page_size;
+       struct ecore_chain_ext_pbl      *ext_pbl;
 };
 
-#define ECORE_CHAIN_PBL_ENTRY_SIZE     (8)
-#define ECORE_CHAIN_PAGE_SIZE          (0x1000)
-#define ELEMS_PER_PAGE(elem_size)      (ECORE_CHAIN_PAGE_SIZE / (elem_size))
+#define ECORE_CHAIN_PBL_ENTRY_SIZE             8
+#define ECORE_CHAIN_PAGE_SIZE                  0x1000
+#define ELEMS_PER_PAGE(page_size, elem_size)   ((page_size) / (elem_size))
 
-#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)               \
-         ((mode == ECORE_CHAIN_MODE_NEXT_PTR) ?                \
-          (u8)(1 + ((sizeof(struct ecore_chain_next) - 1) /    \
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)                       \
+         (((mode) == ECORE_CHAIN_MODE_NEXT_PTR) ?                      \
+          (u8)(1 + ((sizeof(struct ecore_chain_next) - 1) /            \
                     (elem_size))) : 0)
 
-#define USABLE_ELEMS_PER_PAGE(elem_size, mode)         \
-       ((u32)(ELEMS_PER_PAGE(elem_size) -                      \
-       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode)              \
+         ((u32)(ELEMS_PER_PAGE(page_size, elem_size) -                 \
+          UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
 
-#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode)                \
-       DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode)     \
+       DIV_ROUND_UP(elem_cnt,                                          \
+                    USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode))
 
 #define is_chain_u16(p)        ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16)
 #define is_chain_u32(p)        ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32)
@@ -195,18 +214,41 @@ static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
 #define ECORE_U16_MAX  ((u16)~0U)
 #define ECORE_U32_MAX  ((u32)~0U)
 
+static OSAL_INLINE u16
+ecore_chain_get_elem_used(struct ecore_chain *p_chain)
+{
+       u16 elem_per_page = p_chain->elem_per_page;
+       u32 prod = p_chain->u.chain16.prod_idx;
+       u32 cons = p_chain->u.chain16.cons_idx;
+       u16 used;
+
+       OSAL_ASSERT(is_chain_u16(p_chain));
+
+       if (prod < cons)
+               prod += (u32)ECORE_U16_MAX + 1;
+
+       used = (u16)(prod - cons);
+       if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+               used -= prod / elem_per_page - cons / elem_per_page;
+
+       return (u16)(used);
+}
+
 static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
 {
+       u16 elem_per_page = p_chain->elem_per_page;
+       u32 prod = p_chain->u.chain16.prod_idx;
+       u32 cons = p_chain->u.chain16.cons_idx;
        u16 used;
 
        OSAL_ASSERT(is_chain_u16(p_chain));
 
-       used = (u16)(((u32)ECORE_U16_MAX + 1 +
-                     (u32)(p_chain->u.chain16.prod_idx)) -
-                    (u32)p_chain->u.chain16.cons_idx);
+       if (prod < cons)
+               prod += (u32)ECORE_U16_MAX + 1;
+
+       used = (u16)(prod - cons);
        if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
-               used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
-                       p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+               used -= prod / elem_per_page - cons / elem_per_page;
 
        return (u16)(p_chain->capacity - used);
 }
@@ -214,16 +256,19 @@ static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
 static OSAL_INLINE u32
 ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
 {
+       u16 elem_per_page = p_chain->elem_per_page;
+       u64 prod = p_chain->u.chain32.prod_idx;
+       u64 cons = p_chain->u.chain32.cons_idx;
        u32 used;
 
        OSAL_ASSERT(is_chain_u32(p_chain));
 
-       used = (u32)(((u64)ECORE_U32_MAX + 1 +
-                     (u64)(p_chain->u.chain32.prod_idx)) -
-                    (u64)p_chain->u.chain32.cons_idx);
+       if (prod < cons)
+               prod += (u64)ECORE_U32_MAX + 1;
+
+       used = (u32)(prod - cons);
        if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
-               used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
-                       p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+               used -= (u32)(prod / elem_per_page - cons / elem_per_page);
 
        return p_chain->capacity - used;
 }
@@ -319,7 +364,7 @@ ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
                                *(u32 *)page_to_inc = 0;
                        page_index = *(u32 *)page_to_inc;
                }
-               *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+               *p_next_elem = p_chain->pbl.shadow[page_index].virt_addr;
        }
 }
 
@@ -330,24 +375,20 @@ ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
        (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
 
 #define is_unusable_next_idx(p, idx)           \
-       ((((p)->u.chain16.idx + 1) &            \
-       (p)->elem_per_page_mask) == (p)->usable_per_page)
+       ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
 
 #define is_unusable_next_idx_u32(p, idx)       \
-       ((((p)->u.chain32.idx + 1) &            \
-       (p)->elem_per_page_mask) == (p)->usable_per_page)
-
-#define test_and_skip(p, idx)                                          \
-       do {                                                            \
-               if (is_chain_u16(p)) {                                  \
-                       if (is_unusable_idx(p, idx))                    \
-                               (p)->u.chain16.idx +=                   \
-                                       (p)->elem_unusable;             \
-               } else {                                                \
-                       if (is_unusable_idx_u32(p, idx))                \
-                               (p)->u.chain32.idx +=                   \
-                                       (p)->elem_unusable;             \
-               }                                                       \
+       ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define test_and_skip(p, idx)                                          \
+       do {                                                            \
+               if (is_chain_u16(p)) {                                  \
+                       if (is_unusable_idx(p, idx))                    \
+                               (p)->u.chain16.idx += (p)->elem_unusable; \
+               } else {                                                \
+                       if (is_unusable_idx_u32(p, idx))                \
+                               (p)->u.chain32.idx += (p)->elem_unusable; \
+               }                                                       \
        } while (0)
 
 /**
@@ -395,7 +436,7 @@ static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain)
  *
  * @param p_chain
  *
- * @return void*, a pointer to next element
+ * @return void *, a pointer to next element
  */
 static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
 {
@@ -403,7 +444,8 @@ static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
 
        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.prod_idx &
-                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                    p_chain->elem_per_page_mask) ==
+                   p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain16.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.pbl_u16.prod_page_idx;
                        ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
@@ -412,7 +454,8 @@ static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
                p_chain->u.chain16.prod_idx++;
        } else {
                if ((p_chain->u.chain32.prod_idx &
-                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                    p_chain->elem_per_page_mask) ==
+                   p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain32.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.pbl_u32.prod_page_idx;
                        ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
@@ -423,7 +466,7 @@ static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
 
        p_ret = p_chain->p_prod_elem;
        p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
-                                       p_chain->elem_size);
+                                      p_chain->elem_size);
 
        return p_ret;
 }
@@ -469,7 +512,7 @@ void ecore_chain_recycle_consumed(struct ecore_chain *p_chain)
  *
  * @param p_chain
  *
- * @return void*, a pointer to the next buffer written
+ * @return void *, a pointer to the next buffer written
  */
 static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
 {
@@ -477,7 +520,8 @@ static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
 
        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.cons_idx &
-                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                    p_chain->elem_per_page_mask) ==
+                   p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain16.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.pbl_u16.cons_page_idx;
                        ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
@@ -486,7 +530,8 @@ static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
                p_chain->u.chain16.cons_idx++;
        } else {
                if ((p_chain->u.chain32.cons_idx &
-                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                    p_chain->elem_per_page_mask) ==
+                   p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain32.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.pbl_u32.cons_page_idx;
                        ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
@@ -497,7 +542,7 @@ static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
 
        p_ret = p_chain->p_cons_elem;
        p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
-                                       p_chain->elem_size);
+                                      p_chain->elem_size);
 
        return p_ret;
 }
@@ -524,7 +569,7 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
        p_chain->p_prod_elem = p_chain->p_virt_addr;
 
        if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
-               /* Use "page_cnt-1" as a reset value for the prod/cons page's
+               /* Use "page_cnt - 1" as a reset value for the prod/cons page's
                 * indices, to avoid unnecessary page advancing on the first
                 * call to ecore_chain_produce/consume. Instead, the indices
                 * will be advanced to page_cnt and then will be wrapped to 0.
@@ -556,7 +601,7 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
 }
 
 /**
- * @brief ecore_chain_init_params -
+ * @brief ecore_chain_init -
  *
  * Initalizes a basic chain struct
  *
@@ -569,10 +614,12 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
  * @param dp_ctx
  */
 static OSAL_INLINE void
-ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
-                       enum ecore_chain_use_mode intended_use,
-                       enum ecore_chain_mode mode,
-                       enum ecore_chain_cnt_type cnt_type, void *dp_ctx)
+ecore_chain_init(struct ecore_chain *p_chain,
+                u32 page_cnt, u8 elem_size, u32 page_size,
+                enum ecore_chain_use_mode intended_use,
+                enum ecore_chain_mode mode,
+                enum ecore_chain_cnt_type cnt_type,
+                void *dp_ctx)
 {
        /* chain fixed parameters */
        p_chain->p_virt_addr = OSAL_NULL;
@@ -582,20 +629,22 @@ ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
        p_chain->mode = mode;
        p_chain->cnt_type = (u8)cnt_type;
 
-       p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
-       p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
+       p_chain->elem_per_page = ELEMS_PER_PAGE(page_size, elem_size);
+       p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, page_size,
+                                                        mode);
        p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
        p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->next_page_mask = (p_chain->usable_per_page &
                                   p_chain->elem_per_page_mask);
 
+       p_chain->page_size = page_size;
        p_chain->page_cnt = page_cnt;
        p_chain->capacity = p_chain->usable_per_page * page_cnt;
        p_chain->size = p_chain->elem_per_page * page_cnt;
        p_chain->b_external_pbl = false;
        p_chain->pbl_sp.p_phys_table = 0;
        p_chain->pbl_sp.p_virt_table = OSAL_NULL;
-       p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
+       p_chain->pbl.shadow = OSAL_NULL;
 
        p_chain->dp_ctx = dp_ctx;
 }
@@ -636,11 +685,11 @@ static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain,
 static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
                                                 void *p_virt_pbl,
                                                 dma_addr_t p_phys_pbl,
-                                                void **pp_virt_addr_tbl)
+                                                struct addr_shadow *shadow)
 {
        p_chain->pbl_sp.p_phys_table = p_phys_pbl;
        p_chain->pbl_sp.p_virt_table = p_virt_pbl;
-       p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
+       p_chain->pbl.shadow = shadow;
 }
 
 /**
@@ -677,7 +726,7 @@ ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr,
  *
  * @param p_chain
  *
- * @return void*
+ * @return void *
  */
 static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
 {
@@ -695,9 +744,8 @@ static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
                p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size);
                while (p_next->next_virt != p_chain->p_virt_addr) {
                        p_virt_addr = p_next->next_virt;
-                       p_next =
-                           (struct ecore_chain_next *)((u8 *)p_virt_addr +
-                                                       size);
+                       p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr +
+                                                            size);
                }
                break;
        case ECORE_CHAIN_MODE_SINGLE:
@@ -705,12 +753,12 @@ static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
                break;
        case ECORE_CHAIN_MODE_PBL:
                last_page_idx = p_chain->page_cnt - 1;
-               p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+               p_virt_addr = p_chain->pbl.shadow[last_page_idx].virt_addr;
                break;
        }
        /* p_virt_addr points at this stage to the last page of the chain */
        size = p_chain->elem_size * (p_chain->usable_per_page - 1);
-       p_virt_addr = ((u8 *)p_virt_addr + size);
+       p_virt_addr = (u8 *)p_virt_addr + size;
 out:
        return p_virt_addr;
 }
@@ -825,8 +873,8 @@ static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
        page_cnt = ecore_chain_get_page_cnt(p_chain);
 
        for (i = 0; i < page_cnt; i++)
-               OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i],
-                             ECORE_CHAIN_PAGE_SIZE);
+               OSAL_MEM_ZERO(p_chain->pbl.shadow[i].virt_addr,
+                             p_chain->page_size);
 }
 
 int ecore_chain_print(struct ecore_chain *p_chain, char *buffer,
@@ -835,8 +883,7 @@ int ecore_chain_print(struct ecore_chain *p_chain, char *buffer,
                      int (*func_ptr_print_element)(struct ecore_chain *p_chain,
                                                    void *p_element,
                                                    char *buffer),
-                     int (*func_ptr_print_metadata)(struct ecore_chain
-                                                    *p_chain,
+                     int (*func_ptr_print_metadata)(struct ecore_chain *p_chain,
                                                     char *buffer));
 
 #endif /* __ECORE_CHAIN_H__ */
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 63e5d6860..93af8c897 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -7784,43 +7784,40 @@ static void ecore_chain_free_single(struct ecore_dev *p_dev,
                return;
 
        OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
-                              p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
+                              p_chain->p_phys_addr, p_chain->page_size);
 }
 
 static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
                                 struct ecore_chain *p_chain)
 {
-       void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
-       u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
+       struct addr_shadow *shadow = p_chain->pbl.shadow;
        u32 page_cnt = p_chain->page_cnt, i, pbl_size;
 
-       if (!pp_virt_addr_tbl)
+       if (!shadow)
                return;
 
-       if (!p_pbl_virt)
-               goto out;
-
        for (i = 0; i < page_cnt; i++) {
-               if (!pp_virt_addr_tbl[i])
+               if (!shadow[i].virt_addr || !shadow[i].dma_map)
                        break;
 
-               OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
-                                      *(dma_addr_t *)p_pbl_virt,
-                                      ECORE_CHAIN_PAGE_SIZE);
-
-               p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+               OSAL_DMA_FREE_COHERENT(p_dev, shadow[i].virt_addr,
+                                      shadow[i].dma_map,
+                                      p_chain->page_size);
        }
 
        pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
 
-       if (!p_chain->b_external_pbl)
+       if (!p_chain->b_external_pbl) {
                OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
                                       p_chain->pbl_sp.p_phys_table, pbl_size);
-out:
-       OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
+       }
+
+       OSAL_VFREE(p_dev, p_chain->pbl.shadow);
+       p_chain->pbl.shadow = OSAL_NULL;
 }
 
-void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+void ecore_chain_free(struct ecore_dev *p_dev,
+                     struct ecore_chain *p_chain)
 {
        switch (p_chain->mode) {
        case ECORE_CHAIN_MODE_NEXT_PTR:
@@ -7833,14 +7830,18 @@ void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
                ecore_chain_free_pbl(p_dev, p_chain);
                break;
        }
+
+       /* reset chain addresses to avoid double free */
+       ecore_chain_init_mem(p_chain, OSAL_NULL, 0);
 }
 
 static enum _ecore_status_t
 ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
                               enum ecore_chain_cnt_type cnt_type,
-                              osal_size_t elem_size, u32 page_cnt)
+                              osal_size_t elem_size,
+                              u32 page_size, u32 page_cnt)
 {
-       u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+       u64 chain_size = ELEMS_PER_PAGE(page_size, elem_size) * page_cnt;
 
        /* The actual chain size can be larger than the maximal possible value
         * after rounding up the requested elements number to pages, and after
@@ -7853,8 +7854,8 @@ ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
            (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
             chain_size > ECORE_U32_MAX)) {
                DP_NOTICE(p_dev, true,
-                         "The actual chain size (0x%lx) is larger than the maximal possible value\n",
-                         (unsigned long)chain_size);
+                         "The actual chain size (0x%" PRIx64 ") is larger than the maximal possible value\n",
+                         chain_size);
                return ECORE_INVAL;
        }
 
@@ -7870,7 +7871,7 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
 
        for (i = 0; i < p_chain->page_cnt; i++) {
                p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
-                                                ECORE_CHAIN_PAGE_SIZE);
+                                                p_chain->page_size);
                if (!p_virt) {
                        DP_NOTICE(p_dev, false,
                                  "Failed to allocate chain memory\n");
@@ -7903,7 +7904,7 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
        dma_addr_t p_phys = 0;
        void *p_virt = OSAL_NULL;
 
-       p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
+       p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, p_chain->page_size);
        if (!p_virt) {
                DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n");
                return ECORE_NOMEM;
@@ -7922,22 +7923,22 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
 {
        u32 page_cnt = p_chain->page_cnt, size, i;
        dma_addr_t p_phys = 0, p_pbl_phys = 0;
-       void **pp_virt_addr_tbl = OSAL_NULL;
+       struct addr_shadow *shadow = OSAL_NULL;
        u8 *p_pbl_virt = OSAL_NULL;
        void *p_virt = OSAL_NULL;
 
-       size = page_cnt * sizeof(*pp_virt_addr_tbl);
-       pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
-       if (!pp_virt_addr_tbl) {
+       size = page_cnt * sizeof(struct addr_shadow);
+       shadow = (struct addr_shadow *)OSAL_VZALLOC(p_dev, size);
+       if (!shadow) {
                DP_NOTICE(p_dev, false,
-                         "Failed to allocate memory for the chain virtual addresses table\n");
+                         "Failed to allocate memory for the chain virtual/physical addresses table\n");
                return ECORE_NOMEM;
        }
 
        /* The allocation of the PBL table is done with its full size, since it
         * is expected to be successive.
         * ecore_chain_init_pbl_mem() is called even in a case of an allocation
-        * failure, since pp_virt_addr_tbl was previously allocated, and it
+        * failure, since the shadow table was previously allocated, and it
         * should be saved to allow its freeing during the error flow.
         */
        size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
@@ -7950,8 +7951,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
                p_chain->b_external_pbl = true;
        }
 
-       ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
-                                pp_virt_addr_tbl);
+       ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, shadow);
        if (!p_pbl_virt) {
                DP_NOTICE(p_dev, false, "Failed to allocate chain pbl 
memory\n");
                return ECORE_NOMEM;
@@ -7959,7 +7959,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
 
        for (i = 0; i < page_cnt; i++) {
                p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
-                                                ECORE_CHAIN_PAGE_SIZE);
+                                                p_chain->page_size);
                if (!p_virt) {
                        DP_NOTICE(p_dev, false,
                                  "Failed to allocate chain memory\n");
@@ -7974,7 +7974,8 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
                /* Fill the PBL table with the physical address of the page */
                *(dma_addr_t *)p_pbl_virt = p_phys;
                /* Keep the virtual address of the page */
-               p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+               p_chain->pbl.shadow[i].virt_addr = p_virt;
+               p_chain->pbl.shadow[i].dma_map = p_phys;
 
                p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
        }
@@ -7982,36 +7983,58 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
        return ECORE_SUCCESS;
 }
 
+void ecore_chain_params_init(struct ecore_chain_params *p_params,
+                            enum ecore_chain_use_mode intended_use,
+                            enum ecore_chain_mode mode,
+                            enum ecore_chain_cnt_type cnt_type,
+                            u32 num_elems,
+                            osal_size_t elem_size)
+{
+       p_params->intended_use = intended_use;
+       p_params->mode = mode;
+       p_params->cnt_type = cnt_type;
+       p_params->num_elems = num_elems;
+       p_params->elem_size = elem_size;
+
+       /* common values */
+       p_params->page_size = ECORE_CHAIN_PAGE_SIZE;
+       p_params->ext_pbl = OSAL_NULL;
+}
+
 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
-                                      enum ecore_chain_use_mode intended_use,
-                                      enum ecore_chain_mode mode,
-                                      enum ecore_chain_cnt_type cnt_type,
-                                      u32 num_elems, osal_size_t elem_size,
                                       struct ecore_chain *p_chain,
-                                      struct ecore_chain_ext_pbl *ext_pbl)
+                                      struct ecore_chain_params *p_params)
 {
        u32 page_cnt;
        enum _ecore_status_t rc = ECORE_SUCCESS;
 
-       if (mode == ECORE_CHAIN_MODE_SINGLE)
+       if (p_params->mode == ECORE_CHAIN_MODE_SINGLE)
                page_cnt = 1;
        else
-               page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
-
-       rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
+               page_cnt = ECORE_CHAIN_PAGE_CNT(p_params->num_elems,
+                                               p_params->elem_size,
+                                               p_params->page_size,
+                                               p_params->mode);
+
+       rc = ecore_chain_alloc_sanity_check(p_dev, p_params->cnt_type,
+                                           p_params->elem_size,
+                                           p_params->page_size,
                                            page_cnt);
        if (rc) {
                DP_NOTICE(p_dev, false,
                          "Cannot allocate a chain with the given arguments:\n"
-                         "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
-                         intended_use, mode, cnt_type, num_elems, elem_size);
+                         "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n",
+                         p_params->intended_use, p_params->mode,
+                         p_params->intended_use, p_params->mode,
+                         p_params->cnt_type, p_params->num_elems,
+                         p_params->elem_size, p_params->page_size);
                return rc;
        }
 
-       ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
-                               mode, cnt_type, p_dev->dp_ctx);
+       ecore_chain_init(p_chain, page_cnt, (u8)p_params->elem_size,
+                        p_params->page_size, p_params->intended_use,
+                        p_params->mode, p_params->cnt_type, p_dev->dp_ctx);
 
-       switch (mode) {
+       switch (p_params->mode) {
        case ECORE_CHAIN_MODE_NEXT_PTR:
                rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
                break;
@@ -8019,7 +8042,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
                rc = ecore_chain_alloc_single(p_dev, p_chain);
                break;
        case ECORE_CHAIN_MODE_PBL:
-               rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
+               rc = ecore_chain_alloc_pbl(p_dev, p_chain, p_params->ext_pbl);
                break;
        }
        if (rc)
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 1ffe286d7..2e06e77c9 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -482,6 +482,12 @@ struct ecore_eth_stats {
 };
 #endif
 
+void ecore_chain_params_init(struct ecore_chain_params *p_params,
+                            enum ecore_chain_use_mode intended_use,
+                            enum ecore_chain_mode mode,
+                            enum ecore_chain_cnt_type cnt_type,
+                            u32 num_elems,
+                            osal_size_t elem_size);
 /**
  * @brief ecore_chain_alloc - Allocate and initialize a chain
  *
@@ -496,13 +502,8 @@ struct ecore_eth_stats {
  */
 enum _ecore_status_t
 ecore_chain_alloc(struct ecore_dev *p_dev,
-                 enum ecore_chain_use_mode intended_use,
-                 enum ecore_chain_mode mode,
-                 enum ecore_chain_cnt_type cnt_type,
-                 u32 num_elems,
-                 osal_size_t elem_size,
                  struct ecore_chain *p_chain,
-                 struct ecore_chain_ext_pbl *ext_pbl);
+                 struct ecore_chain_params *p_params);
 
 /**
  * @brief ecore_chain_free - Free chain DMA memory
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 47c8a4e90..dda36c995 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -471,7 +471,8 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
 
 enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
 {
-       struct ecore_eq *p_eq;
+       struct ecore_chain_params chain_params;
+       struct ecore_eq *p_eq;
 
        /* Allocate EQ struct */
        p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
@@ -482,13 +483,13 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
        }
 
        /* Allocate and initialize EQ chain*/
-       if (ecore_chain_alloc(p_hwfn->p_dev,
-                             ECORE_CHAIN_USE_TO_PRODUCE,
-                             ECORE_CHAIN_MODE_PBL,
-                             ECORE_CHAIN_CNT_TYPE_U16,
-                             num_elem,
-                             sizeof(union event_ring_element),
-                             &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
+       ecore_chain_params_init(&chain_params,
+                               ECORE_CHAIN_USE_TO_PRODUCE,
+                               ECORE_CHAIN_MODE_PBL,
+                               ECORE_CHAIN_CNT_TYPE_U16,
+                               num_elem,
+                               sizeof(union event_ring_element));
+       if (ecore_chain_alloc(p_hwfn->p_dev, &p_eq->chain, &chain_params)) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }
@@ -626,6 +627,7 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_spq_entry *p_virt = OSAL_NULL;
+       struct ecore_chain_params chain_params;
        struct ecore_spq *p_spq = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;
@@ -638,14 +640,20 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
                return ECORE_NOMEM;
        }
 
+       /* SPQ for VF is needed only for async_comp callbacks */
+       if (IS_VF(p_hwfn->p_dev)) {
+               p_hwfn->p_spq = p_spq;
+               return ECORE_SUCCESS;
+       }
+
        /* SPQ ring  */
-       if (ecore_chain_alloc(p_hwfn->p_dev,
-                             ECORE_CHAIN_USE_TO_PRODUCE,
-                             ECORE_CHAIN_MODE_SINGLE,
-                             ECORE_CHAIN_CNT_TYPE_U16,
-                             0, /* N/A when the mode is SINGLE */
-                             sizeof(struct slow_path_element),
-                             &p_spq->chain, OSAL_NULL)) {
+       ecore_chain_params_init(&chain_params,
+                               ECORE_CHAIN_USE_TO_PRODUCE,
+                               ECORE_CHAIN_MODE_SINGLE,
+                               ECORE_CHAIN_CNT_TYPE_U16,
+                               0, /* N/A when the mode is SINGLE */
+                               sizeof(struct slow_path_element));
+       if (ecore_chain_alloc(p_hwfn->p_dev, &p_spq->chain, &chain_params)) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }
@@ -1120,6 +1128,7 @@ void ecore_spq_drop_next_completion(struct ecore_hwfn *p_hwfn)
 
 enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
 {
+       struct ecore_chain_params chain_params;
        struct ecore_consq *p_consq;
 
        /* Allocate ConsQ struct */
@@ -1130,14 +1139,14 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
                return ECORE_NOMEM;
        }
 
-       /* Allocate and initialize EQ chain */
-       if (ecore_chain_alloc(p_hwfn->p_dev,
-                             ECORE_CHAIN_USE_TO_PRODUCE,
-                             ECORE_CHAIN_MODE_PBL,
-                             ECORE_CHAIN_CNT_TYPE_U16,
-                             ECORE_CHAIN_PAGE_SIZE / 0x80,
-                             0x80,
-                             &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
+       /* Allocate and initialize ConsQ chain */
+       ecore_chain_params_init(&chain_params,
+                               ECORE_CHAIN_USE_TO_PRODUCE,
+                               ECORE_CHAIN_MODE_PBL,
+                               ECORE_CHAIN_CNT_TYPE_U16,
+                               ECORE_CHAIN_PAGE_SIZE / 0x80,
+                               0x80);
+       if (ecore_chain_alloc(p_hwfn->p_dev, &p_consq->chain, &chain_params)) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain");
                goto consq_allocate_fail;
        }
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 1693a243f..daefd4b98 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -141,16 +141,16 @@ struct qed_common_ops {
                     struct rte_pci_device *pci_dev,
                     uint32_t dp_module, uint8_t dp_level, bool is_vf);
        void (*set_name)(struct ecore_dev *edev, char name[]);
-       enum _ecore_status_t
-               (*chain_alloc)(struct ecore_dev *edev,
-                              enum ecore_chain_use_mode
-                              intended_use,
-                              enum ecore_chain_mode mode,
-                              enum ecore_chain_cnt_type cnt_type,
-                              uint32_t num_elems,
-                              osal_size_t elem_size,
-                              struct ecore_chain *p_chain,
-                              struct ecore_chain_ext_pbl *ext_pbl);
+
+       void (*chain_params_init)(struct ecore_chain_params *p_params,
+                                 enum ecore_chain_use_mode intended_use,
+                                 enum ecore_chain_mode mode,
+                                 enum ecore_chain_cnt_type cnt_type,
+                                 u32 num_elems, size_t elem_size);
+
+       int (*chain_alloc)(struct ecore_dev *edev,
+                          struct ecore_chain *p_chain,
+                          struct ecore_chain_params *p_params);
 
        void (*chain_free)(struct ecore_dev *edev,
                           struct ecore_chain *p_chain);
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index e865d988f..4f99ab8b7 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -781,6 +781,7 @@ const struct qed_common_ops qed_common_ops_pass = {
        INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
        INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
        INIT_STRUCT_FIELD(set_name, &qed_set_name),
+       INIT_STRUCT_FIELD(chain_params_init, &ecore_chain_params_init),
        INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
        INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
        INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index c91fcc1fa..b6ff59457 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -135,6 +135,7 @@ qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
        struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_rx_queue *rxq;
+       struct ecore_chain_params chain_params;
        size_t size;
        int rc;
 
@@ -172,43 +173,45 @@ qede_alloc_rx_queue_mem(struct rte_eth_dev *dev,
        }
 
        /* Allocate FW Rx ring  */
-       rc = qdev->ops->common->chain_alloc(edev,
-                                           ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
-                                           ECORE_CHAIN_MODE_NEXT_PTR,
-                                           ECORE_CHAIN_CNT_TYPE_U16,
-                                           rxq->nb_rx_desc,
-                                           sizeof(struct eth_rx_bd),
-                                           &rxq->rx_bd_ring,
-                                           NULL);
+       qdev->ops->common->chain_params_init(&chain_params,
+                                            ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+                                            ECORE_CHAIN_MODE_NEXT_PTR,
+                                            ECORE_CHAIN_CNT_TYPE_U16,
+                                            rxq->nb_rx_desc,
+                                            sizeof(struct eth_rx_bd));
+       rc = qdev->ops->common->chain_alloc(edev, &rxq->rx_bd_ring,
+                                           &chain_params);
 
        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Memory allocation fails for RX BD ring"
                       " on socket %u\n", socket_id);
-               rte_free(rxq->sw_rx_ring);
-               rte_free(rxq);
-               return NULL;
+               goto err1;
        }
 
        /* Allocate FW completion ring */
-       rc = qdev->ops->common->chain_alloc(edev,
-                                           ECORE_CHAIN_USE_TO_CONSUME,
-                                           ECORE_CHAIN_MODE_PBL,
-                                           ECORE_CHAIN_CNT_TYPE_U16,
-                                           rxq->nb_rx_desc,
-                                           sizeof(union eth_rx_cqe),
-                                           &rxq->rx_comp_ring,
-                                           NULL);
+       qdev->ops->common->chain_params_init(&chain_params,
+                                            ECORE_CHAIN_USE_TO_CONSUME,
+                                            ECORE_CHAIN_MODE_PBL,
+                                            ECORE_CHAIN_CNT_TYPE_U16,
+                                            rxq->nb_rx_desc,
+                                            sizeof(union eth_rx_cqe));
+       rc = qdev->ops->common->chain_alloc(edev, &rxq->rx_comp_ring,
+                                           &chain_params);
 
        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev, "Memory allocation fails for RX CQE ring"
                       " on socket %u\n", socket_id);
-               qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
-               rte_free(rxq->sw_rx_ring);
-               rte_free(rxq);
-               return NULL;
+               goto err2;
        }
 
        return rxq;
+
+err2:
+       qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
+err1:
+       rte_free(rxq->sw_rx_ring);
+       rte_free(rxq);
+       return NULL;
 }
 
 int
@@ -392,6 +395,8 @@ qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qede_tx_queue *txq;
+       struct ecore_chain_params chain_params;
+       union eth_tx_bd_types *p_virt;
        int rc;
 
        txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
@@ -408,14 +413,14 @@ qede_alloc_tx_queue_mem(struct rte_eth_dev *dev,
        txq->qdev = qdev;
        txq->port_id = dev->data->port_id;
 
-       rc = qdev->ops->common->chain_alloc(edev,
-                                           ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
-                                           ECORE_CHAIN_MODE_PBL,
-                                           ECORE_CHAIN_CNT_TYPE_U16,
-                                           txq->nb_tx_desc,
-                                           sizeof(union eth_tx_bd_types),
-                                           &txq->tx_pbl,
-                                           NULL);
+       qdev->ops->common->chain_params_init(&chain_params,
+                                            ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
+                                            ECORE_CHAIN_MODE_PBL,
+                                            ECORE_CHAIN_CNT_TYPE_U16,
+                                            txq->nb_tx_desc,
+                                            sizeof(*p_virt));
+       rc = qdev->ops->common->chain_alloc(edev, &txq->tx_pbl,
+                                            &chain_params);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(edev,
                       "Unable to allocate memory for txbd ring on socket %u",
-- 
2.18.0
