From: Harish Patil <harish.pa...@qlogic.com>

Signed-off-by: Harish Patil <harish.patil at qlogic.com>
Signed-off-by: Rasesh Mody <rasesh.mody at qlogic.com>
Signed-off-by: Sony Chacko <sony.chacko at qlogic.com>
---
 drivers/net/qede/ecore/bcm_osal.c            |   130 +
 drivers/net/qede/ecore/bcm_osal.h            |   408 +
 drivers/net/qede/ecore/common_hsi.h          |   714 ++
 drivers/net/qede/ecore/ecore.h               |   785 ++
 drivers/net/qede/ecore/ecore_attn_values.h   | 13287 +++++++++++++++++++++++++
 drivers/net/qede/ecore/ecore_chain.h         |   724 ++
 drivers/net/qede/ecore/ecore_cxt.c           |  2164 ++++
 drivers/net/qede/ecore/ecore_cxt.h           |   173 +
 drivers/net/qede/ecore/ecore_cxt_api.h       |    79 +
 drivers/net/qede/ecore/ecore_dcbx.c          |   950 ++
 drivers/net/qede/ecore/ecore_dcbx.h          |    55 +
 drivers/net/qede/ecore/ecore_dcbx_api.h      |   166 +
 drivers/net/qede/ecore/ecore_dev.c           |  3907 ++++++++
 drivers/net/qede/ecore/ecore_dev_api.h       |   497 +
 drivers/net/qede/ecore/ecore_gtt_reg_addr.h  |    42 +
 drivers/net/qede/ecore/ecore_gtt_values.h    |    33 +
 drivers/net/qede/ecore/ecore_hsi_common.h    |  1912 ++++
 drivers/net/qede/ecore/ecore_hsi_eth.h       |  1912 ++++
 drivers/net/qede/ecore/ecore_hsi_tools.h     |  1081 ++
 drivers/net/qede/ecore/ecore_hw.c            |  1000 ++
 drivers/net/qede/ecore/ecore_hw.h            |   273 +
 drivers/net/qede/ecore/ecore_hw_defs.h       |    49 +
 drivers/net/qede/ecore/ecore_init_fw_funcs.c |  1275 +++
 drivers/net/qede/ecore/ecore_init_fw_funcs.h |   263 +
 drivers/net/qede/ecore/ecore_init_ops.c      |   610 ++
 drivers/net/qede/ecore/ecore_init_ops.h      |   103 +
 drivers/net/qede/ecore/ecore_int.c           |  2234 +++++
 drivers/net/qede/ecore/ecore_int.h           |   234 +
 drivers/net/qede/ecore/ecore_int_api.h       |   277 +
 drivers/net/qede/ecore/ecore_iov_api.h       |   931 ++
 drivers/net/qede/ecore/ecore_iro.h           |   168 +
 drivers/net/qede/ecore/ecore_iro_values.h    |    59 +
 drivers/net/qede/ecore/ecore_l2.c            |  1801 ++++
 drivers/net/qede/ecore/ecore_l2.h            |   151 +
 drivers/net/qede/ecore/ecore_l2_api.h        |   401 +
 drivers/net/qede/ecore/ecore_mcp.c           |  1952 ++++
 drivers/net/qede/ecore/ecore_mcp.h           |   304 +
 drivers/net/qede/ecore/ecore_mcp_api.h       |   629 ++
 drivers/net/qede/ecore/ecore_proto_if.h      |    88 +
 drivers/net/qede/ecore/ecore_rt_defs.h       |   449 +
 drivers/net/qede/ecore/ecore_sp_api.h        |    42 +
 drivers/net/qede/ecore/ecore_sp_commands.c   |   531 +
 drivers/net/qede/ecore/ecore_sp_commands.h   |   137 +
 drivers/net/qede/ecore/ecore_spq.c           |   989 ++
 drivers/net/qede/ecore/ecore_spq.h           |   302 +
 drivers/net/qede/ecore/ecore_sriov.c         |  3422 +++++++
 drivers/net/qede/ecore/ecore_sriov.h         |   390 +
 drivers/net/qede/ecore/ecore_status.h        |    30 +
 drivers/net/qede/ecore/ecore_utils.h         |    31 +
 drivers/net/qede/ecore/ecore_vf.c            |  1319 +++
 drivers/net/qede/ecore/ecore_vf.h            |   415 +
 drivers/net/qede/ecore/ecore_vf_api.h        |   185 +
 drivers/net/qede/ecore/ecore_vfpf_if.h       |   588 ++
 drivers/net/qede/ecore/eth_common.h          |   526 +
 drivers/net/qede/ecore/mcp_public.h          |  1243 +++
 drivers/net/qede/ecore/nvm_cfg.h             |   935 ++
 drivers/net/qede/ecore/reg_addr.h            |  1112 +++
 57 files changed, 54467 insertions(+)
 create mode 100644 drivers/net/qede/ecore/bcm_osal.c
 create mode 100644 drivers/net/qede/ecore/bcm_osal.h
 create mode 100644 drivers/net/qede/ecore/common_hsi.h
 create mode 100644 drivers/net/qede/ecore/ecore.h
 create mode 100644 drivers/net/qede/ecore/ecore_attn_values.h
 create mode 100644 drivers/net/qede/ecore/ecore_chain.h
 create mode 100644 drivers/net/qede/ecore/ecore_cxt.c
 create mode 100644 drivers/net/qede/ecore/ecore_cxt.h
 create mode 100644 drivers/net/qede/ecore/ecore_cxt_api.h
 create mode 100644 drivers/net/qede/ecore/ecore_dcbx.c
 create mode 100644 drivers/net/qede/ecore/ecore_dcbx.h
 create mode 100644 drivers/net/qede/ecore/ecore_dcbx_api.h
 create mode 100644 drivers/net/qede/ecore/ecore_dev.c
 create mode 100644 drivers/net/qede/ecore/ecore_dev_api.h
 create mode 100644 drivers/net/qede/ecore/ecore_gtt_reg_addr.h
 create mode 100644 drivers/net/qede/ecore/ecore_gtt_values.h
 create mode 100644 drivers/net/qede/ecore/ecore_hsi_common.h
 create mode 100644 drivers/net/qede/ecore/ecore_hsi_eth.h
 create mode 100644 drivers/net/qede/ecore/ecore_hsi_tools.h
 create mode 100644 drivers/net/qede/ecore/ecore_hw.c
 create mode 100644 drivers/net/qede/ecore/ecore_hw.h
 create mode 100644 drivers/net/qede/ecore/ecore_hw_defs.h
 create mode 100644 drivers/net/qede/ecore/ecore_init_fw_funcs.c
 create mode 100644 drivers/net/qede/ecore/ecore_init_fw_funcs.h
 create mode 100644 drivers/net/qede/ecore/ecore_init_ops.c
 create mode 100644 drivers/net/qede/ecore/ecore_init_ops.h
 create mode 100644 drivers/net/qede/ecore/ecore_int.c
 create mode 100644 drivers/net/qede/ecore/ecore_int.h
 create mode 100644 drivers/net/qede/ecore/ecore_int_api.h
 create mode 100644 drivers/net/qede/ecore/ecore_iov_api.h
 create mode 100644 drivers/net/qede/ecore/ecore_iro.h
 create mode 100644 drivers/net/qede/ecore/ecore_iro_values.h
 create mode 100644 drivers/net/qede/ecore/ecore_l2.c
 create mode 100644 drivers/net/qede/ecore/ecore_l2.h
 create mode 100644 drivers/net/qede/ecore/ecore_l2_api.h
 create mode 100644 drivers/net/qede/ecore/ecore_mcp.c
 create mode 100644 drivers/net/qede/ecore/ecore_mcp.h
 create mode 100644 drivers/net/qede/ecore/ecore_mcp_api.h
 create mode 100644 drivers/net/qede/ecore/ecore_proto_if.h
 create mode 100644 drivers/net/qede/ecore/ecore_rt_defs.h
 create mode 100644 drivers/net/qede/ecore/ecore_sp_api.h
 create mode 100644 drivers/net/qede/ecore/ecore_sp_commands.c
 create mode 100644 drivers/net/qede/ecore/ecore_sp_commands.h
 create mode 100644 drivers/net/qede/ecore/ecore_spq.c
 create mode 100644 drivers/net/qede/ecore/ecore_spq.h
 create mode 100644 drivers/net/qede/ecore/ecore_sriov.c
 create mode 100644 drivers/net/qede/ecore/ecore_sriov.h
 create mode 100644 drivers/net/qede/ecore/ecore_status.h
 create mode 100644 drivers/net/qede/ecore/ecore_utils.h
 create mode 100644 drivers/net/qede/ecore/ecore_vf.c
 create mode 100644 drivers/net/qede/ecore/ecore_vf.h
 create mode 100644 drivers/net/qede/ecore/ecore_vf_api.h
 create mode 100644 drivers/net/qede/ecore/ecore_vfpf_if.h
 create mode 100644 drivers/net/qede/ecore/eth_common.h
 create mode 100644 drivers/net/qede/ecore/mcp_public.h
 create mode 100644 drivers/net/qede/ecore/nvm_cfg.h
 create mode 100644 drivers/net/qede/ecore/reg_addr.h

diff --git a/drivers/net/qede/ecore/bcm_osal.c 
b/drivers/net/qede/ecore/bcm_osal.c
new file mode 100644
index 0000000..ff97c06
--- /dev/null
+++ b/drivers/net/qede/ecore/bcm_osal.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <zlib.h>
+
+#include <rte_memzone.h>
+#include <rte_errno.h>
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_iov_api.h"
+
+/* Round @n up to the nearest power of two.
+ * Returns 0 for n == 0; an exact power of two is returned unchanged.
+ */
+unsigned long log2_align(unsigned long n)
+{
+       unsigned long ret = n ? 1 : 0;
+       unsigned long _n = n >> 1;
+
+       /* After the loop, ret is the largest power of two <= n */
+       while (_n) {
+               _n >>= 1;
+               ret <<= 1;
+       }
+
+       /* n was not an exact power of two - round up one more step */
+       if (ret < n)
+               ret <<= 1;
+
+       return ret;
+}
+
+/* Integer base-2 logarithm: index of the highest set bit in @val.
+ * Returns 0 for val == 0 or val == 1 (floor(log2) semantics).
+ */
+u32 osal_log2(u32 val)
+{
+       u32 log = 0;
+
+       while (val >>= 1)
+               log++;
+
+       return log;
+}
+
+/* Allocate DMA-able memory via an rte_memzone on the socket of the
+ * current lcore.  The zone name is derived from the timer counter to
+ * keep it unique.  On success returns the virtual address and stores
+ * the physical address in *phys; on failure returns OSAL_NULL and
+ * zeroes *phys.
+ */
+void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
+                             dma_addr_t *phys, size_t size)
+{
+       const struct rte_memzone *mz;
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       uint32_t core_id = rte_lcore_id();
+       unsigned socket_id;
+
+       /* Zero the whole buffer: sizeof(*mz_name) is sizeof(char) == 1
+        * and would clear only the first byte.
+        */
+       OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
+       snprintf(mz_name, sizeof(mz_name) - 1, "%lx", rte_get_timer_cycles());
+       if (core_id == (unsigned)LCORE_ID_ANY)
+               core_id = 0;
+       socket_id = rte_lcore_to_socket_id(core_id);
+       mz = rte_memzone_reserve_aligned(mz_name, size,
+                                        socket_id, 0, RTE_CACHE_LINE_SIZE);
+       if (!mz) {
+               DP_ERR(p_dev, "Unable to allocate DMA memory "
+                      "of size %zu bytes - %s\n",
+                      size, rte_strerror(rte_errno));
+               *phys = 0;
+               return OSAL_NULL;
+       }
+       *phys = mz->phys_addr;
+       DP_INFO(p_dev, "size=%zu phys=0x%lx virt=%p on socket=%u\n",
+               mz->len, mz->phys_addr, mz->addr, socket_id);
+       return mz->addr;
+}
+
+/* Same as osal_dma_alloc_coherent() but with a caller-supplied
+ * alignment instead of RTE_CACHE_LINE_SIZE.  On failure returns
+ * OSAL_NULL and zeroes *phys.
+ */
+void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
+                                     dma_addr_t *phys, size_t size, int align)
+{
+       const struct rte_memzone *mz;
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       uint32_t core_id = rte_lcore_id();
+       unsigned socket_id;
+
+       /* Zero the whole buffer: sizeof(*mz_name) is sizeof(char) == 1
+        * and would clear only the first byte.
+        */
+       OSAL_MEM_ZERO(mz_name, sizeof(mz_name));
+       snprintf(mz_name, sizeof(mz_name) - 1, "%lx", rte_get_timer_cycles());
+       if (core_id == (unsigned)LCORE_ID_ANY)
+               core_id = 0;
+       socket_id = rte_lcore_to_socket_id(core_id);
+       mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align);
+       if (!mz) {
+               DP_ERR(p_dev, "Unable to allocate DMA memory "
+                      "of size %zu bytes - %s\n",
+                      size, rte_strerror(rte_errno));
+               *phys = 0;
+               return OSAL_NULL;
+       }
+       *phys = mz->phys_addr;
+       DP_INFO(p_dev,
+               "aligned memory size=%zu phys=0x%lx virt=%p core=%d\n",
+               mz->len, mz->phys_addr, mz->addr, core_id);
+       return mz->addr;
+}
+
+/* Inflate (zlib) @input_len bytes from @input_buf into @unzip_buf,
+ * which has room for @max_size bytes.  Uses the z_stream pre-allocated
+ * on the hwfn.  Returns the decompressed length in 32-bit dwords
+ * (total_out / 4), or 0 on any zlib error; errors are logged at
+ * ECORE_MSG_PROBE verbosity.
+ */
+u32 qed_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
+                  u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+       int rc;
+
+       p_hwfn->stream->next_in = input_buf;
+       p_hwfn->stream->avail_in = input_len;
+       p_hwfn->stream->next_out = unzip_buf;
+       p_hwfn->stream->avail_out = max_size;
+
+       /* MAX_WBITS selects a plain zlib-wrapped stream with the
+        * largest window size.
+        */
+       rc = inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+       if (rc != Z_OK) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+                          "zlib init failed, rc = %d\n", rc);
+               return 0;
+       }
+
+       /* Z_FINISH: the whole input is available; inflate in one call */
+       rc = inflate(p_hwfn->stream, Z_FINISH);
+       inflateEnd(p_hwfn->stream);
+
+       if (rc != Z_OK && rc != Z_STREAM_END) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+                          "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg,
+                          rc);
+               return 0;
+       }
+
+       return p_hwfn->stream->total_out / 4;
+}
diff --git a/drivers/net/qede/ecore/bcm_osal.h 
b/drivers/net/qede/ecore/bcm_osal.h
new file mode 100644
index 0000000..639f833
--- /dev/null
+++ b/drivers/net/qede/ecore/bcm_osal.h
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __BCM_OSAL_H
+#define __BCM_OSAL_H
+
+#include <rte_byteorder.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_memcpy.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+
+/* Forward declaration */
+struct ecore_dev;
+struct ecore_hwfn;
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#undef __BIG_ENDIAN
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN
+#endif
+#else
+#undef __LITTLE_ENDIAN
+#ifndef __BIG_ENDIAN
+#define __BIG_ENDIAN
+#endif
+#endif
+
+/* Memory Types */
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef int16_t s16;
+typedef int32_t s32;
+
+typedef u16 __le16;
+typedef u32 __le32;
+typedef u32 OSAL_BE32;
+
+#define osal_uintptr_t u64
+
+typedef uint64_t dma_addr_t;
+
+typedef rte_spinlock_t osal_spinlock_t;
+
+typedef void *osal_dpc_t;
+
+typedef size_t osal_size_t;
+
+typedef intptr_t osal_int_ptr_t;
+
+typedef int bool;
+#define true 1
+#define false 0
+
+#define nothing do {} while (0)
+
+/* Delays */
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+#define OSAL_UDELAY(time) usec_delay(time)
+#define OSAL_MSLEEP(time) msec_delay(time)
+
+/* Memory allocations and deallocations */
+
+#define OSAL_NULL ((void *)0)
+#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0)
+#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0)
+#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0)
+#define OSAL_VALLOC(dev, size) rte_malloc("qede", size, 0)
+#define OSAL_FREE(dev, memory) rte_free((void *)memory)
+#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory)
+#define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
+#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size)
+#define OSAL_MEMCMP(s1, s2, size) memcmp(s1, s2, size)
+#define OSAL_MEMSET(dst, val, length) \
+       memset(dst, val, length)
+
+void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t);
+
+void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
+                                     size_t, int);
+
+#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
+       osal_dma_alloc_coherent(dev, phys, size)
+
+#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \
+       osal_dma_alloc_coherent_aligned(dev, phys, size, align)
+
+/* TODO: */
+#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) nothing
+
+/* HW reads/writes */
+
+#define DIRECT_REG_RD(_dev, _reg_addr) \
+       (*((volatile u32 *) (_reg_addr)))
+
+#define REG_RD(_p_hwfn, _reg_offset) \
+       DIRECT_REG_RD(_p_hwfn,          \
+                       ((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset)))
+
+#define DIRECT_REG_WR16(_reg_addr, _val) \
+       (*((volatile u16 *)(_reg_addr)) = _val)
+
+#define DIRECT_REG_WR(_dev, _reg_addr, _val) \
+       (*((volatile u32 *)(_reg_addr)) = _val)
+
+#define REG_WR(_p_hwfn, _reg_offset, _val) \
+       DIRECT_REG_WR(NULL,  \
+       ((u8 *)((uintptr_t)(_p_hwfn->regview)) + (_reg_offset)), (u32)_val)
+
+#define REG_WR16(_p_hwfn, _reg_offset, _val) \
+       DIRECT_REG_WR16(((u8 *)(uintptr_t) (_p_hwfn->regview) + \
+                       (_reg_offset)), (u16)_val)
+
+#define DOORBELL(_p_hwfn, _db_addr, _val) \
+       DIRECT_REG_WR(_p_hwfn, \
+            ((u8 *)(uintptr_t) (_p_hwfn->doorbells) + (_db_addr)), (u32)_val)
+
+/* Mutexes */
+
+typedef pthread_mutex_t osal_mutex_t;
+#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock)
+#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL)
+#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock)
+#define OSAL_MUTEX_ALLOC(hwfn, lock) nothing
+#define OSAL_MUTEX_DEALLOC(lock) nothing
+
+/* Spinlocks */
+
+#define OSAL_SPIN_LOCK_INIT(lock) rte_spinlock_init(lock)
+#define OSAL_SPIN_LOCK(lock) rte_spinlock_lock(lock)
+#define OSAL_SPIN_UNLOCK(lock) rte_spinlock_unlock(lock)
+#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing
+#define OSAL_SPIN_LOCK_DEALLOC(lock) nothing
+
+/* DPC */
+
+#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
+#define OSAL_DPC_INIT(dpc, hwfn) nothing
+#define OSAL_POLL_MODE_DPC(hwfn) nothing
+
+/* Lists */
+
+#define OSAL_LIST_SPLICE_INIT(new_list, list) nothing
+#define OSAL_LIST_SPLICE_TAIL_INIT(new_list, list) nothing
+
+typedef struct _osal_list_entry_t {
+       struct _osal_list_entry_t *next, *prev;
+} osal_list_entry_t;
+
+typedef struct osal_list_t {
+       osal_list_entry_t *head, *tail;
+       unsigned long cnt;
+} osal_list_t;
+
+#define OSAL_LIST_INIT(list) \
+       do {                    \
+               (list)->head = NULL;  \
+               (list)->tail = NULL;  \
+               (list)->cnt  = 0;       \
+       } while (0)
+
+#define OSAL_LIST_PUSH_HEAD(entry, list)               \
+       do {                                            \
+               (entry)->prev = (osal_list_entry_t *)0;         \
+               (entry)->next = (list)->head;                   \
+               if ((list)->tail == (osal_list_entry_t *)0) {   \
+                       (list)->tail = (entry);                 \
+               } else {                                        \
+                       (list)->head->prev = (entry);           \
+               }                                               \
+               (list)->head = (entry);                         \
+               (list)->cnt++;                                  \
+       } while (0)
+
+#define OSAL_LIST_PUSH_TAIL(entry, list)       \
+       do {                                    \
+               (entry)->next = (osal_list_entry_t *)0; \
+               (entry)->prev = (list)->tail;           \
+               if ((list)->tail) {                     \
+                       (list)->tail->next = (entry);   \
+               } else {                                \
+                       (list)->head = (entry);         \
+               }                                       \
+               (list)->tail = (entry);                 \
+               (list)->cnt++;                          \
+       } while (0)
+
+#define OSAL_LIST_FIRST_ENTRY(list, type, field) \
+       (type *)((list)->head)
+
+#define OSAL_LIST_REMOVE_ENTRY(entry, list)                    \
+       do {                                                    \
+               if ((list)->head == (entry)) {                          \
+                       if ((list)->head) {                             \
+                               (list)->head = (list)->head->next;      \
+                       if ((list)->head) {                             \
+                               (list)->head->prev = (osal_list_entry_t *)0;\
+                       } else {                                        \
+                               (list)->tail = (osal_list_entry_t *)0;  \
+                       }                                               \
+                       (list)->cnt--;                                  \
+                       }                                               \
+               } else if ((list)->tail == (entry)) {                   \
+                       if ((list)->tail) {                             \
+                               (list)->tail = (list)->tail->prev;      \
+                       if ((list)->tail) {                             \
+                               (list)->tail->next = (osal_list_entry_t *)0;\
+                       } else {                                        \
+                               (list)->head = (osal_list_entry_t *)0;  \
+                       }                                               \
+                       (list)->cnt--;                                  \
+                       }                                               \
+               } else {                                                \
+                       (entry)->prev->next = (entry)->next;            \
+                       (entry)->next->prev = (entry)->prev;            \
+                       (list)->cnt--;                                  \
+               }                                                       \
+       } while (0)
+
+#define OSAL_LIST_IS_EMPTY(list) \
+       ((list)->cnt == 0)
+
+#define OSAL_LIST_NEXT(entry, field, type) \
+       (type *)((&((entry)->field))->next)
+
+/* TODO: Check field, type order */
+
+#define OSAL_LIST_FOR_EACH_ENTRY(entry, list, field, type) \
+       for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field); \
+               entry;                                          \
+               entry = OSAL_LIST_NEXT(entry, field, type))
+
+#define OSAL_LIST_FOR_EACH_ENTRY_SAFE(entry, tmp_entry, list, field, type) \
+        for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field), \
+         tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL;    \
+         entry != NULL;                                                \
+         entry = (type *)tmp_entry,                                     \
+         tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL)
+
+/* TODO: OSAL_LIST_INSERT_ENTRY_AFTER */
+#define OSAL_LIST_INSERT_ENTRY_AFTER(new_entry, entry, list) \
+       OSAL_LIST_PUSH_HEAD(new_entry, list)
+
+/* PCI config space */
+
+#define OSAL_PCI_READ_CONFIG_BYTE(dev, address, dst) nothing
+#define OSAL_PCI_READ_CONFIG_WORD(dev, address, dst) nothing
+#define OSAL_PCI_READ_CONFIG_DWORD(dev, address, dst) nothing
+#define OSAL_PCI_FIND_EXT_CAPABILITY(dev, pcie_id) 0
+#define OSAL_PCI_FIND_CAPABILITY(dev, pcie_id) 0
+#define OSAL_PCI_WRITE_CONFIG_WORD(dev, address, val) nothing
+#define OSAL_BAR_SIZE(dev, bar_id) 0
+
+/* Barriers */
+
+#define OSAL_MMIOWB(dev) rte_wmb()     /* No user space equivalent */
+#define OSAL_BARRIER(dev) rte_compiler_barrier()
+#define OSAL_SMP_RMB(dev) rte_rmb()
+#define OSAL_SMP_WMB(dev) rte_wmb()
+#define OSAL_RMB(dev) rte_rmb()
+#define OSAL_WMB(dev) rte_wmb()
+#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing
+
+#define OSAL_BITS_PER_BYTE     (8)
+#define OSAL_BITS_PER_UL       (sizeof(unsigned long)*OSAL_BITS_PER_BYTE)
+#define OSAL_BITS_PER_UL_MASK   (OSAL_BITS_PER_UL - 1)
+
+#define OSAL_BUILD_BUG_ON(cond) nothing
+#define ETH_ALEN ETHER_ADDR_LEN
+
+/* Find the index (0-based) of the first zero bit in @word.
+ * Returns OSAL_BITS_PER_UL when every bit of @word is set.
+ */
+static inline u32 osal_ffz(unsigned long word)
+{
+       unsigned long first_zero;
+
+       /* __builtin_ffsl() is 1-based; 0 means ~word has no set bit */
+       first_zero = __builtin_ffsl(~word);
+       return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL;
+}
+
+/* Non-atomic: set bit @nr in the bitmap of unsigned longs at @addr */
+static inline void OSAL_SET_BIT(u32 nr, unsigned long *addr)
+{
+       addr[nr / OSAL_BITS_PER_UL] |= 1UL << (nr & OSAL_BITS_PER_UL_MASK);
+}
+
+/* Non-atomic: clear bit @nr in the bitmap of unsigned longs at @addr */
+static inline void OSAL_CLEAR_BIT(u32 nr, unsigned long *addr)
+{
+       addr[nr / OSAL_BITS_PER_UL] &= ~(1UL << (nr & OSAL_BITS_PER_UL_MASK));
+}
+
+/* Non-atomic: return true iff bit @nr is set in the bitmap at @addr */
+static inline bool OSAL_TEST_BIT(u32 nr, unsigned long *addr)
+{
+       return !!(addr[nr / OSAL_BITS_PER_UL] &
+                  (1UL << (nr & OSAL_BITS_PER_UL_MASK)));
+}
+
+/* Find the first zero bit in the bitmap at @addr, scanning bits
+ * [0, limit).  Returns @limit when no zero bit is found.
+ */
+static inline u32 OSAL_FIND_FIRST_ZERO_BIT(unsigned long *addr, u32 limit)
+{
+       u32 i;
+       u32 nwords = 0;
+       OSAL_BUILD_BUG_ON(!limit);
+       nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
+       for (i = 0; i < nwords; i++)
+               if (~(addr[i]) != 0)    /* word has at least one zero bit */
+                       break;
+       return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + osal_ffz(addr[i]);
+}
+
+/* SR-IOV channel */
+
+#define OSAL_LINK_UPDATE(hwfn) nothing
+#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
+#define OSAL_VF_FLR_UPDATE(hwfn) nothing
+#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
+#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol)        (0)
+#define OSAL_PF_VF_MSG(hwfn, vfid) 0
+#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
+#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
+#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0
+#define OSAL_IOV_VF_CLEANUP(hwfn, vfid) nothing
+#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
+#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) nothing
+#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
+#define OSAL_IOV_GET_OS_TYPE() 0
+
+u32 qed_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
+                  u8 *input_buf, u32 max_size, u8 *unzip_buf);
+
+#define OSAL_UNZIP_DATA(p_hwfn, input_len, buf, max_size, unzip_buf) \
+       qed_unzip_data(p_hwfn, input_len, buf, max_size, unzip_buf)
+
+/* TODO: */
+#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing
+#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing
+
+#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
+#define OSAL_NUM_ACTIVE_CPU()  0
+
+/* Utility functions */
+
+#define RTE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define DIV_ROUND_UP(size, to_what) RTE_DIV_ROUND_UP(size, to_what)
+#define RTE_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define ROUNDUP(value, to_what) RTE_ROUNDUP((value), (to_what))
+
+unsigned long log2_align(unsigned long n);
+#define OSAL_ROUNDUP_POW_OF_TWO(val) \
+       log2_align(val)
+
+u32 osal_log2(u32 val);
+#define OSAL_LOG2(val) \
+       osal_log2(val)
+
+#define PRINT(format, ...) printf
+#define PRINT_ERR(format, ...) PRINT
+
+#define OFFSETOF(str, field) __builtin_offsetof(str, field)
+#define OSAL_ASSERT(is_assert) assert(is_assert)
+#define OSAL_BEFORE_PF_START(file, engine) nothing
+#define OSAL_AFTER_PF_STOP(file, engine) nothing
+
+/* Endian macros */
+#define OSAL_CPU_TO_BE32(val) rte_cpu_to_be_32(val)
+#define OSAL_BE32_TO_CPU(val) rte_be_to_cpu_32(val)
+#define OSAL_CPU_TO_LE32(val) rte_cpu_to_le_32(val)
+#define OSAL_CPU_TO_LE16(val) rte_cpu_to_le_16(val)
+#define OSAL_LE32_TO_CPU(val) rte_le_to_cpu_32(val)
+#define OSAL_LE16_TO_CPU(val) rte_le_to_cpu_16(val)
+#define OSAL_CPU_TO_BE64(val) rte_cpu_to_be_64(val)
+
+#define OSAL_ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
+#define OSAL_SPRINTF(name, pattern, ...) \
+       sprintf(name, pattern, ##__VA_ARGS__)
+#define OSAL_STRLEN(string) strlen(string)
+#define OSAL_STRCPY(dst, string) strcpy(dst, string)
+#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
+#define OSAL_STRCMP(str1, str2) strcmp(str1, str2)
+
+#define OSAL_INLINE inline
+#define OSAL_REG_ADDR(_p_hwfn, _offset) \
+               (void *)((u8 *)(uintptr_t)(_p_hwfn->regview) + (_offset))
+#define OSAL_PAGE_SIZE 4096
+#define OSAL_IOMEM volatile
+#define OSAL_UNLIKELY(x)  __builtin_expect(!!(x), 0)
+#define OSAL_MIN_T(type, __min1, __min2)       \
+       ((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2))
+#define OSAL_MAX_T(type, __max1, __max2)       \
+       ((type)(__max1) > (type)(__max2) ? (type)(__max1) : (type)(__max2))
+
+#define        OSAL_GET_PROTOCOL_STATS(p_hwfn, type, stats) (0)
+#define        OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
+
+#endif /* __BCM_OSAL_H */
diff --git a/drivers/net/qede/ecore/common_hsi.h 
b/drivers/net/qede/ecore/common_hsi.h
new file mode 100644
index 0000000..452c401
--- /dev/null
+++ b/drivers/net/qede/ecore/common_hsi.h
@@ -0,0 +1,714 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __COMMON_HSI__
+#define __COMMON_HSI__
+
+#define CORE_SPQE_PAGE_SIZE_BYTES                       4096
+
+#define FW_MAJOR_VERSION       8
+#define FW_MINOR_VERSION       7
+#define FW_REVISION_VERSION    7
+#define FW_ENGINEERING_VERSION 0
+
+/***********************/
+/* COMMON HW CONSTANTS */
+/***********************/
+
+/* PCI functions */
+#define MAX_NUM_PORTS_K2       (4)
+#define MAX_NUM_PORTS_BB       (2)
+#define MAX_NUM_PORTS          (MAX_NUM_PORTS_K2)
+
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS    (MAX_NUM_PFS_K2)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_K2 (192)
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS    (MAX_NUM_VFS_K2)
+
+#define MAX_NUM_FUNCTIONS_BB   (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS      (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER    (MAX_NUM_PFS + MAX_NUM_VFS)
+
+#define MAX_NUM_VPORTS_K2      (208)
+#define MAX_NUM_VPORTS_BB      (160)
+#define MAX_NUM_VPORTS         (MAX_NUM_VPORTS_K2)
+
+#define MAX_NUM_L2_QUEUES_K2   (320)
+#define MAX_NUM_L2_QUEUES_BB   (256)
+#define MAX_NUM_L2_QUEUES      (MAX_NUM_L2_QUEUES_K2)
+
+/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+#define NUM_PHYS_TCS_4PORT_K2  (4)
+#define NUM_OF_PHYS_TCS                (8)
+
+#define NUM_TCS_4PORT_K2       (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_OF_TCS             (NUM_OF_PHYS_TCS + 1)
+
+#define LB_TC                  (NUM_OF_PHYS_TCS)
+
+/* Num of possible traffic priority values */
+#define NUM_OF_PRIO            (8)
+
+#define MAX_NUM_VOQS_K2                (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
+#define MAX_NUM_VOQS_BB                (NUM_OF_TCS * MAX_NUM_PORTS_BB)
+#define MAX_NUM_VOQS           (MAX_NUM_VOQS_K2)
+#define MAX_PHYS_VOQS          (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+
+/* CIDs */
+#define NUM_OF_CONNECTION_TYPES        (8)
+#define NUM_OF_LCIDS           (320)
+#define NUM_OF_LTIDS           (320)
+
+/*****************/
+/* CDU CONSTANTS */
+/*****************/
+
+#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT              (17)
+#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK             (0x1ffff)
+
+/*****************/
+/* DQ CONSTANTS  */
+/*****************/
+
+/* DEMS */
+#define DQ_DEMS_LEGACY                 0
+
+/* XCM agg val selection */
+#define DQ_XCM_AGG_VAL_SEL_WORD2  0
+#define DQ_XCM_AGG_VAL_SEL_WORD3  1
+#define DQ_XCM_AGG_VAL_SEL_WORD4  2
+#define DQ_XCM_AGG_VAL_SEL_WORD5  3
+#define DQ_XCM_AGG_VAL_SEL_REG3   4
+#define DQ_XCM_AGG_VAL_SEL_REG4   5
+#define DQ_XCM_AGG_VAL_SEL_REG5   6
+#define DQ_XCM_AGG_VAL_SEL_REG6   7
+
+/* XCM agg val selection */
+#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD2
+#define DQ_XCM_ETH_TX_BD_CONS_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_CORE_TX_BD_CONS_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ETH_TX_BD_PROD_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_TX_BD_PROD_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_CORE_SPQ_PROD_CMD \
+       DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD            DQ_XCM_AGG_VAL_SEL_WORD5
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_AGG_FLG_SHIFT_BIT14  0
+#define DQ_XCM_AGG_FLG_SHIFT_BIT15  1
+#define DQ_XCM_AGG_FLG_SHIFT_CF12   2
+#define DQ_XCM_AGG_FLG_SHIFT_CF13   3
+#define DQ_XCM_AGG_FLG_SHIFT_CF18   4
+#define DQ_XCM_AGG_FLG_SHIFT_CF19   5
+#define DQ_XCM_AGG_FLG_SHIFT_CF22   6
+#define DQ_XCM_AGG_FLG_SHIFT_CF23   7
+
+/* XCM agg counter flag selection */
+#define DQ_XCM_ETH_DQ_CF_CMD           (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_CORE_DQ_CF_CMD          (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF18)
+#define DQ_XCM_ETH_TERMINATE_CMD       (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_CORE_TERMINATE_CMD      (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ETH_SLOW_PATH_CMD       (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_CORE_SLOW_PATH_CMD      (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ETH_TPH_EN_CMD          (1 << \
+                                       DQ_XCM_AGG_FLG_SHIFT_CF23)
+
+/*****************/
+/* QM CONSTANTS  */
+/*****************/
+
+/* number of TX queues in the QM */
+#define MAX_QM_TX_QUEUES_K2    512
+#define MAX_QM_TX_QUEUES_BB    448
+#define MAX_QM_TX_QUEUES       MAX_QM_TX_QUEUES_K2
+
+/* number of Other queues in the QM */
+#define MAX_QM_OTHER_QUEUES_BB 64
+#define MAX_QM_OTHER_QUEUES_K2 128
+#define MAX_QM_OTHER_QUEUES    MAX_QM_OTHER_QUEUES_K2
+
+/* number of queues in a PF queue group */
+#define QM_PF_QUEUE_GROUP_SIZE 8
+
+/* base number of Tx PQs in the CM PQ representation.
+ * should be used when storing PQ IDs in CM PQ registers and context
+ */
+#define CM_TX_PQ_BASE  0x200
+
+/* QM registers data - credit register widths and their sign bits.
+ * Note: the WFQ sign-bit macro had been split across two lines by mail
+ * wrapping, which is a syntax error; it is rejoined here. The 32-bit
+ * sign bits use an unsigned literal because (1 << 31) on a 32-bit int
+ * is signed-overflow undefined behavior (CERT INT34-C).
+ */
+#define QM_LINE_CRD_REG_WIDTH          16
+#define QM_LINE_CRD_REG_SIGN_BIT       (1 << (QM_LINE_CRD_REG_WIDTH - 1))
+#define QM_BYTE_CRD_REG_WIDTH          24
+#define QM_BYTE_CRD_REG_SIGN_BIT       (1 << (QM_BYTE_CRD_REG_WIDTH - 1))
+#define QM_WFQ_CRD_REG_WIDTH           32
+#define QM_WFQ_CRD_REG_SIGN_BIT                (1U << (QM_WFQ_CRD_REG_WIDTH - 1))
+#define QM_RL_CRD_REG_WIDTH            32
+#define QM_RL_CRD_REG_SIGN_BIT         (1U << (QM_RL_CRD_REG_WIDTH - 1))
+
+/*****************/
+/* CAU CONSTANTS */
+/*****************/
+
+#define CAU_FSM_ETH_RX  0
+#define CAU_FSM_ETH_TX  1
+
+/* Number of Protocol Indices per Status Block */
+#define PIS_PER_SB    12
+
+#define CAU_HC_STOPPED_STATE   3
+#define CAU_HC_DISABLE_STATE   4
+#define CAU_HC_ENABLE_STATE    0
+
+/*****************/
+/* IGU CONSTANTS */
+/*****************/
+
+#define MAX_SB_PER_PATH_K2     (368)
+#define MAX_SB_PER_PATH_BB     (288)
+#define MAX_TOT_SB_PER_PATH \
+       MAX_SB_PER_PATH_K2
+
+#define MAX_SB_PER_PF_MIMD     129
+#define MAX_SB_PER_PF_SIMD     64
+#define MAX_SB_PER_VF          64
+
+/* Memory addresses on the BAR for the IGU Sub Block */
+#define IGU_MEM_BASE                   0x0000
+
+#define IGU_MEM_MSIX_BASE              0x0000
+#define IGU_MEM_MSIX_UPPER             0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER    0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE          0x0200
+#define IGU_MEM_PBA_MSIX_UPPER         0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER        0x03ff
+
+#define IGU_CMD_INT_ACK_BASE           0x0400
+#define IGU_CMD_INT_ACK_UPPER          (IGU_CMD_INT_ACK_BASE + \
+                                        MAX_TOT_SB_PER_PATH -  \
+                                        1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER     0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER     0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER     0x05f2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER          0x05f3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER      0x05f4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER      0x05f5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER         0x05f6
+
+#define IGU_CMD_PROD_UPD_BASE                  0x0600
+#define IGU_CMD_PROD_UPD_UPPER                 (IGU_CMD_PROD_UPD_BASE +\
+                                                MAX_TOT_SB_PER_PATH - \
+                                                1)
+#define IGU_CMD_PROD_UPD_RESERVED_UPPER                0x07ff
+
+/*****************/
+/* PXP CONSTANTS */
+/*****************/
+
+/* PTT and GTT */
+#define PXP_NUM_PF_WINDOWS             12
+#define PXP_PER_PF_ENTRY_SIZE          8
+#define PXP_NUM_GLOBAL_WINDOWS         243
+#define PXP_GLOBAL_ENTRY_SIZE          4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH        4
+#define PXP_PF_WINDOW_ADMIN_START      0
+#define PXP_PF_WINDOW_ADMIN_LENGTH     0x1000
+#define PXP_PF_WINDOW_ADMIN_END                (PXP_PF_WINDOW_ADMIN_START + \
+                                        PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_START       0
+#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH      (PXP_NUM_PF_WINDOWS * \
+                                                PXP_PER_PF_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+                                        PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_START       0x200
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH      (PXP_NUM_GLOBAL_WINDOWS * \
+                                                PXP_GLOBAL_ENTRY_SIZE)
+#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \
+               (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \
+                PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1)
+#define PXP_PF_GLOBAL_PRETEND_ADDR     0x1f0
+#define PXP_PF_ME_OPAQUE_MASK_ADDR     0xf4
+#define PXP_PF_ME_OPAQUE_ADDR          0x1f8
+#define PXP_PF_ME_CONCRETE_ADDR                0x1fc
+
+#define PXP_EXTERNAL_BAR_PF_WINDOW_START       0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM         PXP_NUM_PF_WINDOWS
+#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
+#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \
+        PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_PF_WINDOW_END \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_START + \
+        PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1)
+
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \
+       (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM             PXP_NUM_GLOBAL_WINDOWS
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE     0x1000
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \
+       (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \
+        PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE)
+#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \
+       (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
+        PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER        1024
+
+/* ILT Records */
+#define PXP_NUM_ILT_RECORDS_BB 7600
+#define PXP_NUM_ILT_RECORDS_K2 11000
+#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+/******************/
+/* PBF CONSTANTS  */
+/******************/
+
+/* Number of PBF command queue lines. Each line is 32B. */
+#define PBF_MAX_CMD_LINES 3328
+
+/* Number of BTB blocks. Each block is 256B. */
+#define BTB_MAX_BLOCKS 1440
+
+/*****************/
+/* PRS CONSTANTS */
+/*****************/
+
+/* Async data KCQ CQE - data attached to an asynchronous completion */
+struct async_data {
+       __le32  cid;            /* Context ID of the connection */
+       __le16  itid;           /* task ID (presumably the faulting task - confirm) */
+       u8      error_code;     /* firmware error code */
+       u8      fw_debug_param; /* opaque firmware debug parameter */
+};
+
+/* 64-bit value carried as two little-endian 32-bit halves */
+struct regpair {
+       __le32 lo /* low word for reg-pair */;
+       __le32 hi /* high word for reg-pair */;
+};
+
+struct vf_pf_channel_eqe_data {
+       struct regpair msg_addr /* VF-PF message address */;
+};
+
+/* iSCSI event queue element data */
+struct iscsi_eqe_data {
+       __le32 cid /* Context ID of the connection */;
+       __le16 conn_id
+           /* connection ID (task ID for an error raised on a task) */;
+       u8 error_code;
+       u8 reserved0;
+};
+
+/*
+ * Event Ring malicious VF data
+ */
+struct malicious_vf_eqe_data {
+       u8 vf_id /* Malicious VF ID */;
+       u8 err_id /* Malicious VF error */;
+       __le16 reserved[3];
+};
+
+/*
+ * Event Ring initial cleanup data
+ */
+struct initial_cleanup_eqe_data {
+       u8 vf_id /* VF ID */;
+       u8 reserved[7];
+};
+
+
+/* Payload of an event ring entry; which member is valid is selected by
+ * the entry's protocol_id/opcode.
+ */
+union event_ring_data {
+       u8 bytes[8] /* Byte Array */;
+       struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
+       struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
+       struct regpair roce_handle
+           /* Dedicated field for RoCE affiliated asynchronous error */;
+       struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
+       struct initial_cleanup_eqe_data vf_init_cleanup
+           /* VF Initial Cleanup data */;
+};
+/* Event Ring Entry */
+struct event_ring_entry {
+       u8                      protocol_id;    /* NOTE(review): likely enum protocol_type - confirm */
+       u8                      opcode;         /* protocol-specific event opcode */
+       __le16                  reserved0;
+       __le16                  echo;           /* value echoed back to the issuer */
+       u8                      fw_return_code; /* firmware completion status */
+       u8                      flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK      0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT     0
+#define EVENT_RING_ENTRY_RESERVED1_MASK  0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+       union event_ring_data   data;
+};
+
+/* Multi function mode */
+enum mf_mode {
+       SF,             /* single function */
+       MF_OVLAN,       /* multi-function, outer-VLAN based */
+       MF_NPAR,        /* multi-function, NIC partitioning */
+       MAX_MF_MODE
+};
+
+/* Per-protocol connection types */
+enum protocol_type {
+       PROTOCOLID_ISCSI,       /* iSCSI */
+       PROTOCOLID_FCOE,        /* FCoE */
+       PROTOCOLID_ROCE,        /* RoCE */
+       PROTOCOLID_CORE,        /* Core (light L2, slow path core) */
+       PROTOCOLID_ETH,         /* Ethernet */
+       PROTOCOLID_IWARP,       /* iWARP */
+       PROTOCOLID_TOE,         /* TOE */
+       PROTOCOLID_PREROCE,     /* Pre (tapeout) RoCE */
+       PROTOCOLID_COMMON,      /* ProtocolCommon */
+       PROTOCOLID_TCP,         /* TCP */
+       MAX_PROTOCOL_TYPE
+};
+
+/* status block structure */
+struct cau_pi_entry {
+       u32 prod;
+#define CAU_PI_ENTRY_PROD_VAL_MASK    0xFFFF
+#define CAU_PI_ENTRY_PROD_VAL_SHIFT   0
+#define CAU_PI_ENTRY_PI_TIMESET_MASK  0x7F
+#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+#define CAU_PI_ENTRY_FSM_SEL_MASK     0x1
+#define CAU_PI_ENTRY_FSM_SEL_SHIFT    23
+#define CAU_PI_ENTRY_RESERVED_MASK    0xFF
+#define CAU_PI_ENTRY_RESERVED_SHIFT   24
+};
+
+/* status block structure */
+struct cau_sb_entry {
+       u32 data;
+#define CAU_SB_ENTRY_SB_PROD_MASK      0xFFFFFF
+#define CAU_SB_ENTRY_SB_PROD_SHIFT     0
+#define CAU_SB_ENTRY_STATE0_MASK       0xF
+#define CAU_SB_ENTRY_STATE0_SHIFT      24
+#define CAU_SB_ENTRY_STATE1_MASK       0xF
+#define CAU_SB_ENTRY_STATE1_SHIFT      28
+       u32 params;
+#define CAU_SB_ENTRY_SB_TIMESET0_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+#define CAU_SB_ENTRY_SB_TIMESET1_MASK  0x7F
+#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+#define CAU_SB_ENTRY_TIMER_RES0_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES0_SHIFT  14
+#define CAU_SB_ENTRY_TIMER_RES1_MASK   0x3
+#define CAU_SB_ENTRY_TIMER_RES1_SHIFT  16
+#define CAU_SB_ENTRY_VF_NUMBER_MASK    0xFF
+#define CAU_SB_ENTRY_VF_NUMBER_SHIFT   18
+#define CAU_SB_ENTRY_VF_VALID_MASK     0x1
+#define CAU_SB_ENTRY_VF_VALID_SHIFT    26
+#define CAU_SB_ENTRY_PF_NUMBER_MASK    0xF
+#define CAU_SB_ENTRY_PF_NUMBER_SHIFT   27
+#define CAU_SB_ENTRY_TPH_MASK          0x1
+#define CAU_SB_ENTRY_TPH_SHIFT         31
+};
+
+/* core doorbell data */
+struct core_db_data {
+       u8 params;
+#define CORE_DB_DATA_DEST_MASK         0x3
+#define CORE_DB_DATA_DEST_SHIFT        0
+#define CORE_DB_DATA_AGG_CMD_MASK      0x3
+#define CORE_DB_DATA_AGG_CMD_SHIFT     2
+#define CORE_DB_DATA_BYPASS_EN_MASK    0x1
+#define CORE_DB_DATA_BYPASS_EN_SHIFT   4
+#define CORE_DB_DATA_RESERVED_MASK     0x1
+#define CORE_DB_DATA_RESERVED_SHIFT    5
+#define CORE_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8      agg_flags;
+       __le16  spq_prod;
+};
+
+/* Enum of doorbell aggregative command selection */
+enum db_agg_cmd_sel {
+       DB_AGG_CMD_NOP,
+       DB_AGG_CMD_SET,
+       DB_AGG_CMD_ADD,
+       DB_AGG_CMD_MAX,
+       MAX_DB_AGG_CMD_SEL
+};
+
+/* Enum of doorbell destination */
+enum db_dest {
+       DB_DEST_XCM,
+       DB_DEST_UCM,
+       DB_DEST_TCM,
+       DB_NUM_DESTINATIONS,
+       MAX_DB_DEST
+};
+
+/* Structure for doorbell address, in legacy mode */
+struct db_legacy_addr {
+       __le32 addr;
+#define DB_LEGACY_ADDR_RESERVED0_MASK  0x3
+#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+#define DB_LEGACY_ADDR_DEMS_MASK       0x7
+#define DB_LEGACY_ADDR_DEMS_SHIFT      2
+#define DB_LEGACY_ADDR_ICID_MASK       0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_SHIFT      5
+};
+
+/* Igu interrupt command */
+enum igu_int_cmd {
+       IGU_INT_ENABLE  = 0,
+       IGU_INT_DISABLE = 1,
+       IGU_INT_NOP     = 2,
+       IGU_INT_NOP2    = 3,
+       MAX_IGU_INT_CMD
+};
+
+/* IGU producer or consumer update command */
+struct igu_prod_cons_update {
+       u32 sb_id_and_flags;
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK        0xFFFFFF
+#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT       0
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK     0x1
+#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT    24
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK      0x3
+#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT     25
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK  0x1
+#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK      0x1
+#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT     28
+#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK       0x3
+#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT      29
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK    0x1
+#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT   31
+       u32 reserved1;
+};
+
+/* Igu segments access for default status block only */
+enum igu_seg_access {
+       IGU_SEG_ACCESS_REG      = 0,
+       IGU_SEG_ACCESS_ATTN     = 1,
+       MAX_IGU_SEG_ACCESS
+};
+
+struct parsing_and_err_flags {
+       __le16 flags;
+#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK                      0x3
+#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT                     0
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK                  0x3
+#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT                 2
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK                    0x1
+#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT                   4
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK               0x1
+#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT              5
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK        0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT       6
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT                7
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK           0x1
+#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT          8
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK                  0x1
+#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT                 9
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK                0x1
+#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT               10
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK                 0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT                11
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK         0x1
+#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT        12
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK            0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT           13
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK  0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK          0x1
+#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT         15
+};
+
+/* Concrete Function ID. */
+struct pxp_concrete_fid {
+       __le16 fid;
+#define PXP_CONCRETE_FID_PFID_MASK     0xF
+#define PXP_CONCRETE_FID_PFID_SHIFT    0
+#define PXP_CONCRETE_FID_PORT_MASK     0x3
+#define PXP_CONCRETE_FID_PORT_SHIFT    4
+#define PXP_CONCRETE_FID_PATH_MASK     0x1
+#define PXP_CONCRETE_FID_PATH_SHIFT    6
+#define PXP_CONCRETE_FID_VFVALID_MASK  0x1
+#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
+#define PXP_CONCRETE_FID_VFID_MASK     0xFF
+#define PXP_CONCRETE_FID_VFID_SHIFT    8
+};
+
+struct pxp_pretend_concrete_fid {
+       __le16 fid;
+#define PXP_PRETEND_CONCRETE_FID_PFID_MASK      0xF
+#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT     0
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK  0x7
+#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK   0x1
+#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT  7
+#define PXP_PRETEND_CONCRETE_FID_VFID_MASK      0xFF
+#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT     8
+};
+
+union pxp_pretend_fid {
+       struct pxp_pretend_concrete_fid concrete_fid;
+       __le16                          opaque_fid;
+};
+
+/* Pxp Pretend Command Register. */
+struct pxp_pretend_cmd {
+       union pxp_pretend_fid   fid;
+       __le16                  control;
+#define PXP_PRETEND_CMD_PATH_MASK              0x1
+#define PXP_PRETEND_CMD_PATH_SHIFT             0
+#define PXP_PRETEND_CMD_USE_PORT_MASK          0x1
+#define PXP_PRETEND_CMD_USE_PORT_SHIFT         1
+#define PXP_PRETEND_CMD_PORT_MASK              0x3
+#define PXP_PRETEND_CMD_PORT_SHIFT             2
+#define PXP_PRETEND_CMD_RESERVED0_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED0_SHIFT        4
+#define PXP_PRETEND_CMD_RESERVED1_MASK         0xF
+#define PXP_PRETEND_CMD_RESERVED1_SHIFT        8
+#define PXP_PRETEND_CMD_PRETEND_PATH_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT     12
+#define PXP_PRETEND_CMD_PRETEND_PORT_MASK      0x1
+#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT     13
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK  0x1
+#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14
+#define PXP_PRETEND_CMD_IS_CONCRETE_MASK       0x1
+#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT      15
+};
+
+/* PTT Record in PXP Admin Window. */
+struct pxp_ptt_entry {
+       __le32                  offset;
+#define PXP_PTT_ENTRY_OFFSET_MASK     0x7FFFFF
+#define PXP_PTT_ENTRY_OFFSET_SHIFT    0
+#define PXP_PTT_ENTRY_RESERVED0_MASK  0x1FF
+#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23
+       struct pxp_pretend_cmd  pretend;
+};
+
+/* RSS hash type */
+enum rss_hash_type {
+       RSS_HASH_TYPE_DEFAULT   = 0,
+       RSS_HASH_TYPE_IPV4      = 1,
+       RSS_HASH_TYPE_TCP_IPV4  = 2,
+       RSS_HASH_TYPE_IPV6      = 3,
+       RSS_HASH_TYPE_TCP_IPV6  = 4,
+       RSS_HASH_TYPE_UDP_IPV4  = 5,
+       RSS_HASH_TYPE_UDP_IPV6  = 6,
+       MAX_RSS_HASH_TYPE
+};
+
+/* status block structure */
+struct status_block {
+       __le16  pi_array[PIS_PER_SB];
+       __le32  sb_num;
+#define STATUS_BLOCK_SB_NUM_MASK      0x1FF
+#define STATUS_BLOCK_SB_NUM_SHIFT     0
+#define STATUS_BLOCK_ZERO_PAD_MASK    0x7F
+#define STATUS_BLOCK_ZERO_PAD_SHIFT   9
+#define STATUS_BLOCK_ZERO_PAD2_MASK   0xFFFF
+#define STATUS_BLOCK_ZERO_PAD2_SHIFT  16
+       __le32 prod_index;
+#define STATUS_BLOCK_PROD_INDEX_MASK  0xFFFFFF
+#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_ZERO_PAD3_MASK   0xFF
+#define STATUS_BLOCK_ZERO_PAD3_SHIFT  24
+};
+
+/* @DPDK */
+#define X_FINAL_CLEANUP_AGG_INT  1
+#define SDM_COMP_TYPE_AGG_INT 2
+#define MAX_NUM_LL2_RX_QUEUES 32
+#define QM_PQ_ELEMENT_SIZE 4
+/* PXP_VF_BAR0_START_IGU is defined in the "VF BAR" section below;
+ * the duplicate definition that used to live here was removed.
+ */
+#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3
+
+#define TSTORM_QZONE_SIZE 8
+#define MSTORM_QZONE_SIZE 16
+#define USTORM_QZONE_SIZE 8
+#define XSTORM_QZONE_SIZE 0
+#define YSTORM_QZONE_SIZE 8
+#define PSTORM_QZONE_SIZE 0
+
+/* VF BAR */
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_GRC          0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH         0x200
+#define PXP_VF_BAR0_END_GRC \
+(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_IGU          0
+#define PXP_VF_BAR0_IGU_LENGTH         0x3000
+#define PXP_VF_BAR0_END_IGU \
+(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ           0x3000
+#define PXP_VF_BAR0_DQ_LENGTH          0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET    0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
+(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
+#define PXP_VF_BAR0_END_DQ \
+(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B   0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B   0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B \
+(PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B   0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B \
+(PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B   0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B \
+(PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B   0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B \
+(PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B   0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B \
+(PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B   0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B \
+(PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A    0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A      0x10000
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH   32
+
+/* PXP_ILT_PAGE_SIZE_NUM_BITS_MIN and PXP_ILT_BLOCK_FACTOR_MULTIPLIER are
+ * defined once in the PXP constants section above; the identical duplicate
+ * definitions that used to live here were removed.
+ */
+
+#endif /* __COMMON_HSI__ */
diff --git a/drivers/net/qede/ecore/ecore.h b/drivers/net/qede/ecore/ecore.h
new file mode 100644
index 0000000..aa2e587
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore.h
@@ -0,0 +1,785 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_H
+#define __ECORE_H
+
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_tools.h"
+#include "ecore_proto_if.h"
+#include "mcp_public.h"
+
+#define MAX_HWFNS_PER_DEVICE   (4)
+#define NAME_SIZE 64           /* @DPDK */
+#define VER_SIZE 16
+/* @DPDK ARRAY_DECL */
+#define ECORE_WFQ_UNIT 100
+#include "../qede_logs.h"      /* @DPDK */
+
+/* Constants */
+#define ECORE_WID_SIZE         (1024)
+
+/* Configurable */
+#define ECORE_PF_DEMS_SIZE     (4)
+
+/* cau states */
+enum ecore_coalescing_mode {
+       ECORE_COAL_MODE_DISABLE,
+       ECORE_COAL_MODE_ENABLE
+};
+
+enum ecore_nvm_cmd {
+       ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
+       ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
+       ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM,
+       ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
+       ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
+       ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
+       ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
+       ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
+       ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
+       ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
+       ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
+};
+
+#ifndef LINUX_REMOVE
+#if !defined(CONFIG_ECORE_L2) && !defined(CONFIG_ECORE_ROCE) && \
+       !defined(CONFIG_ECORE_FCOE) && !defined(CONFIG_ECORE_ISCSI)
+#define CONFIG_ECORE_L2
+#define CONFIG_ECORE_SRIOV
+
+#if 0                          /* @DPDK */
+
+#define CONFIG_ECORE_ROCE
+#define CONFIG_ECORE_FCOE
+#define CONFIG_ECORE_ISCSI
+#define CONFIG_ECORE_LL2
+
+#endif
+
+#endif
+#endif
+
+/* helpers */
+#ifndef __EXTRACT__LINUX__
+#define MASK_FIELD(_name, _value)                                      \
+               ((_value) &= (_name##_MASK))
+
+#define FIELD_VALUE(_name, _value)                                     \
+               ((_value & _name##_MASK) << _name##_SHIFT)
+
+#define SET_FIELD(value, name, flag)                                   \
+do {                                                                   \
+       (value) &= ~(name##_MASK << name##_SHIFT);                      \
+       (value) |= (((u64)flag) << (name##_SHIFT));                     \
+} while (0)
+
+#define GET_FIELD(value, name)                                         \
+       (((value) >> (name##_SHIFT)) & name##_MASK)
+#endif
+
+/* Compute the legacy doorbell offset for a connection: a per-CID slot of
+ * ECORE_PF_DEMS_SIZE bytes, with the DEMS value encoded into the
+ * DB_LEGACY_ADDR_DEMS field of the address.
+ */
+static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
+{
+       return (cid * ECORE_PF_DEMS_SIZE) |
+              FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS);
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)                             \
+       ((sizeof(type_name) + (u32)(1<<(p_hwfn->p_dev->cache_shift))-1) & \
+        ~((1<<(p_hwfn->p_dev->cache_shift))-1))
+
+#ifndef U64_HI
+#define U64_HI(val) ((u32)(((u64)(val))  >> 32))
+#endif
+
+#ifndef U64_LO
+#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+#endif
+
+#ifndef __EXTRACT__LINUX__
+enum DP_LEVEL {
+       ECORE_LEVEL_VERBOSE = 0x0,
+       ECORE_LEVEL_INFO = 0x1,
+       ECORE_LEVEL_NOTICE = 0x2,
+       ECORE_LEVEL_ERR = 0x3,
+};
+
+#define ECORE_LOG_LEVEL_SHIFT  (30)
+#define ECORE_LOG_VERBOSE_MASK (0x3fffffff)
+#define ECORE_LOG_INFO_MASK    (0x40000000)
+#define ECORE_LOG_NOTICE_MASK  (0x80000000)
+
+enum DP_MODULE {
+#ifndef LINUX_REMOVE
+       ECORE_MSG_DRV = 0x0001,
+       ECORE_MSG_PROBE = 0x0002,
+       ECORE_MSG_LINK = 0x0004,
+       ECORE_MSG_TIMER = 0x0008,
+       ECORE_MSG_IFDOWN = 0x0010,
+       ECORE_MSG_IFUP = 0x0020,
+       ECORE_MSG_RX_ERR = 0x0040,
+       ECORE_MSG_TX_ERR = 0x0080,
+       ECORE_MSG_TX_QUEUED = 0x0100,
+       ECORE_MSG_INTR = 0x0200,
+       ECORE_MSG_TX_DONE = 0x0400,
+       ECORE_MSG_RX_STATUS = 0x0800,
+       ECORE_MSG_PKTDATA = 0x1000,
+       ECORE_MSG_HW = 0x2000,
+       ECORE_MSG_WOL = 0x4000,
+#endif
+       ECORE_MSG_SPQ = 0x10000,
+       ECORE_MSG_STATS = 0x20000,
+       ECORE_MSG_DCB = 0x40000,
+       ECORE_MSG_IOV = 0x80000,
+       ECORE_MSG_SP = 0x100000,
+       ECORE_MSG_STORAGE = 0x200000,
+       ECORE_MSG_CXT = 0x800000,
+       ECORE_MSG_LL2 = 0x1000000,
+       ECORE_MSG_ILT = 0x2000000,
+       ECORE_MSG_ROCE = 0x4000000,
+       ECORE_MSG_DEBUG = 0x8000000,
+       /* to be added...up to 0x8000000 */
+};
+#endif
+
+#define for_each_hwfn(p_dev, i)        for (i = 0; i < p_dev->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+       (val == (cond1) ? true1 : \
+        (val == (cond2) ? true2 : def))
+
+/* forward */
+struct ecore_ptt_pool;
+struct ecore_spq;
+struct ecore_sb_info;
+struct ecore_sb_attn_info;
+struct ecore_cxt_mngr;
+struct ecore_dma_mem;
+struct ecore_sb_sp_info;
+struct ecore_ll2_info;
+struct ecore_igu_info;
+struct ecore_mcp_info;
+struct ecore_dcbx_info;
+
+struct ecore_rt_data {
+       u32 *init_val;
+       bool *b_valid;
+};
+
+enum ecore_tunn_mode {
+       ECORE_MODE_L2GENEVE_TUNN,
+       ECORE_MODE_IPGENEVE_TUNN,
+       ECORE_MODE_L2GRE_TUNN,
+       ECORE_MODE_IPGRE_TUNN,
+       ECORE_MODE_VXLAN_TUNN,
+};
+
+enum ecore_tunn_clss {
+       ECORE_TUNN_CLSS_MAC_VLAN,
+       ECORE_TUNN_CLSS_MAC_VNI,
+       ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+       ECORE_TUNN_CLSS_INNER_MAC_VNI,
+       MAX_ECORE_TUNN_CLSS,
+};
+
+struct ecore_tunn_start_params {
+       unsigned long tunn_mode;
+       u16 vxlan_udp_port;
+       u16 geneve_udp_port;
+       u8 update_vxlan_udp_port;
+       u8 update_geneve_udp_port;
+       u8 tunn_clss_vxlan;
+       u8 tunn_clss_l2geneve;
+       u8 tunn_clss_ipgeneve;
+       u8 tunn_clss_l2gre;
+       u8 tunn_clss_ipgre;
+};
+
+struct ecore_tunn_update_params {
+       unsigned long tunn_mode_update_mask;
+       unsigned long tunn_mode;
+       u16 vxlan_udp_port;
+       u16 geneve_udp_port;
+       u8 update_rx_pf_clss;
+       u8 update_tx_pf_clss;
+       u8 update_vxlan_udp_port;
+       u8 update_geneve_udp_port;
+       u8 tunn_clss_vxlan;
+       u8 tunn_clss_l2geneve;
+       u8 tunn_clss_ipgeneve;
+       u8 tunn_clss_l2gre;
+       u8 tunn_clss_ipgre;
+};
+
+struct ecore_hw_sriov_info {
+       /* standard SRIOV capability fields, mostly for debugging */
+       int pos;                /* capability position */
+       int nres;               /* number of resources */
+       u32 cap;                /* SR-IOV Capabilities */
+       u16 ctrl;               /* SR-IOV Control */
+       u16 total_vfs;          /* total VFs associated with the PF */
+       u16 num_vfs;            /* number of vfs that have been started */
+       u64 active_vfs[3];      /* bitfield of active vfs */
+/* Test whether the relative VF id is marked active in the bitfield */
+#define ECORE_IS_VF_ACTIVE(_p_dev, _rel_vf_id) \
+               (!!(_p_dev->sriov_info.active_vfs[_rel_vf_id / 64] & \
+                   (1ULL << (_rel_vf_id % 64))))
+       u16 initial_vfs;        /* initial VFs associated with the PF */
+       u16 nr_virtfn;          /* number of VFs available */
+       u16 offset;             /* first VF Routing ID offset */
+       u16 stride;             /* following VF stride */
+       u16 vf_device_id;       /* VF device id */
+       u32 pgsz;               /* page size for BAR alignment */
+       u8 link;                /* Function Dependency Link */
+
+       bool b_hw_channel;      /* Whether PF uses the HW-channel */
+};
+
+/* The PCI personality is not quite synonymous to protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may support also the RoCE protocol
+ */
+enum ecore_pci_personality {
+       ECORE_PCI_ETH,
+       ECORE_PCI_ISCSI,
+       ECORE_PCI_ETH_ROCE,
+       ECORE_PCI_DEFAULT       /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct ecore_qm_iids {
+       u32 cids;               /* PF connection IDs */
+       u32 vf_cids;            /* VF connection IDs */
+       u32 tids;               /* task IDs */
+};
+
+#define MAX_PF_PER_PORT 8
+
+/*@@@TBD MK RESC: need to remove and use MCP interface instead */
+/* HW / FW resources, output of features supported below, most information
+ * is received from MFW.
+ */
+enum ECORE_RESOURCES {
+       ECORE_SB,
+       ECORE_L2_QUEUE,
+       ECORE_VPORT,
+       ECORE_RSS_ENG,
+       ECORE_PQ,
+       ECORE_RL,
+       ECORE_MAC,
+       ECORE_VLAN,
+       ECORE_ROCE_CNQ_RAM,
+       ECORE_ILT,
+       ECORE_LL2_QUEUE,
+       ECORE_CMDQS_CQS,
+       ECORE_MAX_RESC,
+};
+
+/* Features that require resources, given as input to the resource management
+ * algorithm, the output are the resources above
+ */
+enum ECORE_FEATURE {
+       ECORE_PF_L2_QUE,
+       ECORE_PF_TC,
+       ECORE_VF,
+       ECORE_EXTRA_VF_QUE,
+       ECORE_VMQ,
+       ECORE_ROCE_CNQ,
+       ECORE_ISCSI_CQ,
+       ECORE_MAX_FEATURES,
+};
+
+/* Port configurations (number of ports x speed) */
+enum ECORE_PORT_MODE {
+       ECORE_PORT_MODE_DE_2X40G,
+       ECORE_PORT_MODE_DE_2X50G,
+       ECORE_PORT_MODE_DE_1X100G,
+       ECORE_PORT_MODE_DE_4X10G_F,
+       ECORE_PORT_MODE_DE_4X10G_E,
+       ECORE_PORT_MODE_DE_4X20G,
+       ECORE_PORT_MODE_DE_1X40G,
+       ECORE_PORT_MODE_DE_2X25G,
+       ECORE_PORT_MODE_DE_1X25G
+};
+
+/* Protocol capabilities a device may expose */
+enum ecore_dev_cap {
+       ECORE_DEV_CAP_ETH,
+       ECORE_DEV_CAP_ISCSI,
+       ECORE_DEV_CAP_ROCE,
+       ECORE_DEV_CAP_IWARP
+};
+
+#ifndef __EXTRACT__LINUX__
+/* HW error classes reported to the upper layer */
+enum ecore_hw_err_type {
+       ECORE_HW_ERR_FAN_FAIL,
+       ECORE_HW_ERR_MFW_RESP_FAIL,
+       ECORE_HW_ERR_HW_ATTN,
+       ECORE_HW_ERR_DMAE_FAIL,
+       ECORE_HW_ERR_RAMROD_FAIL,
+       ECORE_HW_ERR_FW_ASSERT,
+};
+#endif
+#endif
+
+/* Per-hwfn HW information, mostly filled during HW-prepare/init */
+struct ecore_hw_info {
+       /* PCI personality */
+       enum ecore_pci_personality personality;
+
+       /* Resource Allocation scheme results */
+       u32 resc_start[ECORE_MAX_RESC];
+       u32 resc_num[ECORE_MAX_RESC];
+       u32 feat_num[ECORE_MAX_FEATURES];
+
+/* Accessors for the resource-allocation results above */
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+                                        RESC_NUM(_p_hwfn, resc))
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+       u8 num_tc;
+       u8 ooo_tc;
+       u8 offload_tc;
+       u8 non_offload_tc;
+
+       u32 concrete_fid;
+       u16 opaque_fid;
+       u16 ovlan;
+       u32 part_num[4];
+
+       unsigned char hw_mac_addr[ETH_ALEN];
+
+       u16 num_iscsi_conns;
+
+       struct ecore_igu_info *p_igu_info;
+       /* Sriov */
+       u32 first_vf_in_pf;
+       u8 max_chains_per_vf;
+
+       u32 port_mode;          /* one of enum ECORE_PORT_MODE */
+       u32 hw_mode;
+       unsigned long device_capabilities;      /* bitmask of ecore_dev_cap */
+};
+
+/* Book-keeping for an acquired HW connection ID (CID) */
+struct ecore_hw_cid_data {
+       u32 cid;
+       bool b_cid_allocated;
+       u8 vfid;                /* 1-based; 0 signals this is for a PF */
+
+       /* Additional identifiers */
+       u16 opaque_fid;
+       u8 vport_id;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE       0x2000
+
+/* Per-hwfn DMAE engine context */
+struct ecore_dmae_info {
+       /* Mutex for synchronizing access to functions */
+       osal_mutex_t mutex;
+
+       u8 channel;
+
+       dma_addr_t completion_word_phys_addr;
+
+       /* The memory location where the DMAE writes the completion
+        * value when an operation is finished on this context.
+        */
+       u32 *p_completion_word;
+
+       dma_addr_t intermediate_buffer_phys_addr;
+
+       /* An intermediate buffer for DMAE operations that use virtual
+        * addresses - data is DMA'd to/from this buffer and then
+        * memcpy'd to/from the virtual address
+        */
+       u32 *p_intermediate_buffer;
+
+       dma_addr_t dmae_cmd_phys_addr;
+       struct dmae_cmd *p_dmae_cmd;
+};
+
+/* Per-vport WFQ (weighted fair queueing) minimum-rate state */
+struct ecore_wfq_data {
+       u32 default_min_speed;  /* When wfq feature is not configured */
+       u32 min_speed;          /* when feature is configured for any 1 vport */
+       bool configured;
+};
+
+/* Queue-manager (QM) configuration derived during init */
+struct ecore_qm_info {
+       struct init_qm_pq_params *qm_pq_params;
+       struct init_qm_vport_params *qm_vport_params;
+       struct init_qm_port_params *qm_port_params;
+       u16 start_pq;
+       u8 start_vport;
+       u8 pure_lb_pq;
+       u8 offload_pq;
+       u8 pure_ack_pq;
+       u8 ooo_pq;
+       u8 vf_queues_offset;
+       u16 num_pqs;
+       u16 num_vf_pqs;
+       u8 num_vports;
+       u8 max_phys_tcs_per_port;
+       bool pf_rl_en;          /* PF rate-limit enabled */
+       bool pf_wfq_en;         /* PF WFQ enabled */
+       bool vport_rl_en;
+       bool vport_wfq_en;
+       u8 pf_wfq;
+       u32 pf_rl;
+       struct ecore_wfq_data *wfq_data;
+};
+
+/* Address/length pair describing a storm statistics region */
+struct storm_stats {
+       u32 address;
+       u32 len;
+};
+
+/* Compile-time firmware handling options */
+#define CONFIG_ECORE_BINARY_FW
+#define CONFIG_ECORE_ZIPPED_FW
+
+/* Parsed firmware image sections */
+struct ecore_fw_data {
+#ifdef CONFIG_ECORE_BINARY_FW
+       struct fw_ver_info *fw_ver_info;
+#endif
+       const u8 *modes_tree_buf;
+       union init_op *init_ops;
+       const u32 *arr_data;
+       u32 init_ops_size;
+};
+
+/* Per-HW-function (PF) state; a device holds one or more of these */
+struct ecore_hwfn {
+       struct ecore_dev *p_dev;
+       u8 my_id;               /* ID inside the PF */
+#define IS_LEAD_HWFN(edev)             (!((edev)->my_id))
+       u8 rel_pf_id;           /* Relative to engine */
+       u8 abs_pf_id;
+#define ECORE_PATH_ID(_p_hwfn) \
+               (ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
+       u8 port_id;
+       bool b_active;
+
+       /* debug-print control */
+       u32 dp_module;
+       u8 dp_level;
+       char name[NAME_SIZE];
+       void *dp_ctx;
+
+       bool first_on_engine;
+       bool hw_init_done;
+
+       u8 num_funcs_on_engine;
+
+       /* BAR access */
+       void OSAL_IOMEM *regview;
+       void OSAL_IOMEM *doorbells;
+       u64 db_phys_addr;
+       unsigned long db_size;
+
+       /* PTT pool */
+       struct ecore_ptt_pool *p_ptt_pool;
+
+       /* HW info */
+       struct ecore_hw_info hw_info;
+
+       /* rt_array (for init-tool) */
+       struct ecore_rt_data rt_data;
+
+       /* SPQ */
+       struct ecore_spq *p_spq;
+
+       /* EQ */
+       struct ecore_eq *p_eq;
+
+       /* Consolidate Q */
+       struct ecore_consq *p_consq;
+
+       /* Slow-Path definitions */
+       osal_dpc_t sp_dpc;
+       bool b_sp_dpc_enabled;
+
+       struct ecore_ptt *p_main_ptt;
+       struct ecore_ptt *p_dpc_ptt;
+
+       struct ecore_sb_sp_info *p_sp_sb;
+       struct ecore_sb_attn_info *p_sb_attn;
+
+       /* Protocol related */
+       bool using_ll2;
+       struct ecore_ll2_info *p_ll2_info;
+       struct ecore_ooo_info *p_ooo_info;
+       struct ecore_iscsi_info *p_iscsi_info;
+       struct ecore_roce_info *p_roce_info;
+       struct ecore_pf_params pf_params;
+
+       bool b_roce_enabled_in_prs;
+
+       /* Array of sb_info of all status blocks */
+       struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+       u16 num_sbs;
+
+       struct ecore_cxt_mngr *p_cxt_mngr;
+
+       /* Flag indicating whether interrupts are enabled or not */
+       bool b_int_enabled;
+       bool b_int_requested;
+
+       /* True if the driver requests for the link */
+       bool b_drv_link_init;
+
+       struct ecore_vf_iov *vf_iov_info;
+       struct ecore_pf_iov *pf_iov_info;
+       struct ecore_mcp_info *mcp_info;
+       struct ecore_dcbx_info *p_dcbx_info;
+
+       struct ecore_hw_cid_data *p_tx_cids;
+       struct ecore_hw_cid_data *p_rx_cids;
+
+       struct ecore_dmae_info dmae_info;
+
+       /* QM init */
+       struct ecore_qm_info qm_info;
+
+       /* Buffer for unzipping firmware data */
+#ifdef CONFIG_ECORE_ZIPPED_FW
+       void *unzip_buf;
+#endif
+
+       struct dbg_tools_data dbg_info;
+
+       struct z_stream_s *stream;
+
+       /* PWM region specific data */
+       u32 dpi_size;
+       u32 dpi_count;
+       u32 dpi_start_offset;   /* this is used to
+                                * calculate the
+                                * doorbell address
+                                */
+
+       /* RoCE */
+       u32 n_roce_qps;
+       u32 n_roce_cqs;
+
+};
+
+#ifndef __EXTRACT__LINUX__
+/* Multi-function operating mode */
+enum ecore_mf_mode {
+       ECORE_MF_DEFAULT,
+       ECORE_MF_OVLAN,
+       ECORE_MF_NPAR,
+};
+#endif
+
+/* Per-device state; holds up to MAX_HWFNS_PER_DEVICE HW functions */
+struct ecore_dev {
+
+       u32 dp_module;
+       u8 dp_level;
+       char name[NAME_SIZE];
+       void *dp_ctx;
+
+       u8 type;
+#define ECORE_DEV_TYPE_BB      (0 << 0)
+#define ECORE_DEV_TYPE_AH      (1 << 0)
+/* Translate type/revision combo into the proper conditions */
+#define ECORE_IS_BB(dev)       ((dev)->type == ECORE_DEV_TYPE_BB)
+#define ECORE_IS_BB_A0(dev)    (ECORE_IS_BB(dev) && \
+                                CHIP_REV_IS_A0(dev))
+#define ECORE_IS_BB_B0(dev)    (ECORE_IS_BB(dev) && \
+                                CHIP_REV_IS_B0(dev))
+#define ECORE_IS_AH(dev)       ((dev)->type == ECORE_DEV_TYPE_AH)
+#define ECORE_IS_K2(dev)       ECORE_IS_AH(dev)
+#define ECORE_GET_TYPE(dev)    (ECORE_IS_BB_A0(dev) ? CHIP_BB_A0 : \
+                                ECORE_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
+
+       u16 vendor_id;
+       u16 device_id;
+
+       u16 chip_num;
+#define CHIP_NUM_MASK                  0xffff
+#define CHIP_NUM_SHIFT                 16
+
+       u16 chip_rev;
+#define CHIP_REV_MASK                  0xf
+#define CHIP_REV_SHIFT                 12
+#ifndef ASIC_ONLY
+#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
+#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
+#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
+#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
+                                         CHIP_REV_IS_EMUL_B0(_p_dev))
+#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
+#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
+#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
+                                         CHIP_REV_IS_FPGA_B0(_p_dev))
+#define CHIP_REV_IS_SLOW(_p_dev) \
+               (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
+#define CHIP_REV_IS_A0(_p_dev) \
+               (CHIP_REV_IS_EMUL_A0(_p_dev) || \
+                CHIP_REV_IS_FPGA_A0(_p_dev) || \
+                !(_p_dev)->chip_rev)
+#define CHIP_REV_IS_B0(_p_dev) \
+               (CHIP_REV_IS_EMUL_B0(_p_dev) || \
+                CHIP_REV_IS_FPGA_B0(_p_dev) || \
+                (_p_dev)->chip_rev == 1)
+#define CHIP_REV_IS_ASIC(_p_dev) (!CHIP_REV_IS_SLOW(_p_dev))
+#else
+#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
+#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
+#endif
+
+       u16 chip_metal;
+#define CHIP_METAL_MASK                        0xff
+#define CHIP_METAL_SHIFT               4
+
+       u16 chip_bond_id;
+#define CHIP_BOND_ID_MASK              0xf
+#define CHIP_BOND_ID_SHIFT             0
+
+       u8 num_engines;
+       u8 num_ports_in_engines;
+       u8 num_funcs_in_port;
+
+       u8 path_id;
+       enum ecore_mf_mode mf_mode;
+#define IS_MF_DEFAULT(_p_hwfn) \
+               (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn)      (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+#define IS_MF_SD(_p_hwfn)      (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+
+       int pcie_width;
+       int pcie_speed;
+       u8 ver_str[VER_SIZE];
+       /* Add MF related configuration */
+       u8 mcp_rev;
+       u8 boot_mode;
+
+       u8 wol;
+
+       u32 int_mode;
+       enum ecore_coalescing_mode int_coalescing_mode;
+       u8 rx_coalesce_usecs;
+       u8 tx_coalesce_usecs;
+
+       /* Start Bar offset of first hwfn */
+       void OSAL_IOMEM *regview;
+       void OSAL_IOMEM *doorbells;
+       u64 db_phys_addr;
+       unsigned long db_size;
+
+       /* PCI */
+       u8 cache_shift;
+
+       /* Init */
+       const struct iro *iro_arr;
+#define IRO (p_hwfn->p_dev->iro_arr)
+
+       /* HW functions */
+       u8 num_hwfns;
+       struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+
+       /* SRIOV */
+       struct ecore_hw_sriov_info sriov_info;
+       unsigned long tunn_mode;
+#define IS_ECORE_SRIOV(edev)           (!!((edev)->sriov_info.total_vfs))
+       bool b_is_vf;
+
+       u32 drv_type;
+
+       u32 roce_max_sge;
+       u32 roce_max_inline;
+
+       struct ecore_eth_stats *reset_stats;
+       struct ecore_fw_data *fw_data;
+
+       u32 mcp_nvm_resp;
+
+       /* Recovery */
+       bool recov_in_prog;
+
+#ifndef ASIC_ONLY
+       bool b_is_emul_full;
+#endif
+
+       void *firmware;
+
+       u64 fw_len;
+
+};
+
+/* Per-chip-family (BB vs. K2) device limits */
+#define NUM_OF_VFS(dev)                (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
+                                                 : MAX_NUM_VFS_K2)
+#define NUM_OF_L2_QUEUES(dev)  (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
+                                                 : MAX_NUM_L2_QUEUES_K2)
+#define NUM_OF_PORTS(dev)      (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
+                                                 : MAX_NUM_PORTS_K2)
+#define NUM_OF_SBS(dev)                (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
+                                                 : MAX_SB_PER_PATH_K2)
+#define NUM_OF_ENG_PFS(dev)    (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
+                                                 : MAX_NUM_PFS_K2)
+
+/* Workaround needed only on BB A0 engine 1 in dual-port modes */
+#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \
+       (ECORE_IS_BB_A0(p_hwfn->p_dev)) && \
+       (ECORE_PATH_ID(p_hwfn) == 1) && \
+       ((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \
+        (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \
+        (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G)))
+
+/**
+ * @brief ecore_concrete_to_sw_fid - get the sw function id from
+ *        the concrete value.
+ *
+ * @param p_dev
+ * @param concrete_fid - concrete FID value as programmed in HW
+ *
+ * @return u8 - sw FID; a PF maps to its pfid, a VF maps to
+ *         MAX_NUM_PFS + vfid
+ */
+static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
+                                              u32 concrete_fid)
+{
+       u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+       u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
+
+       /* VFs occupy the sw-FID range following all possible PFs */
+       return GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID) ?
+              (u8)(vfid + MAX_NUM_PFS) : pfid;
+}
+
+#define PURE_LB_TC 8
+#define OOO_LB_TC 9
+
+/* Return the first active relative VF id at or after rel_vf_id;
+ * returns total_vfs when no further active VF exists.
+ */
+static OSAL_INLINE u16 ecore_sriov_get_next_vf(struct ecore_hwfn *p_hwfn,
+                                              u16 rel_vf_id)
+{
+       u16 i;
+
+       for (i = rel_vf_id; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
+               if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, i))
+                       return i;
+
+       return p_hwfn->p_dev->sriov_info.total_vfs;
+}
+
+/* Vport/PF WFQ and bandwidth configuration API */
+int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
+void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+                                          u32 min_pf_rate);
+
+int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
+int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
+void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+int ecore_device_num_engines(struct ecore_dev *p_dev);
+int ecore_device_num_ports(struct ecore_dev *p_dev);
+
+/* Iterate '_i' over all active VF ids of the hwfn */
+#define ecore_for_each_vf(_p_hwfn, _i)                         \
+       for (_i = ecore_sriov_get_next_vf(_p_hwfn, 0);          \
+            _i < _p_hwfn->p_dev->sriov_info.total_vfs;         \
+            _i = ecore_sriov_get_next_vf(_p_hwfn, _i + 1))
+
+#define ECORE_LEADING_HWFN(dev)        (&dev->hwfns[0])
+
+#endif /* __ECORE_H */
diff --git a/drivers/net/qede/ecore/ecore_attn_values.h 
b/drivers/net/qede/ecore/ecore_attn_values.h
new file mode 100644
index 0000000..8bd2ba7
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_attn_values.h
@@ -0,0 +1,13287 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ATTN_VALUES_H__
+#define __ATTN_VALUES_H__
+
+#ifndef __PREVENT_INT_ATTN__
+
+/* HW Attention register */
+struct attn_hw_reg {
+       u16 reg_idx;            /* Index of this register in its block */
+       u16 num_of_bits;        /* number of valid attention bits */
+       const u16 *bit_attn_idx;        /* attention index per valid bit */
+       u32 sts_addr;           /* Address of the STS register */
+       u32 sts_clr_addr;       /* Address of the STS_CLR register */
+       u32 sts_wr_addr;        /* Address of the STS_WR register */
+       u32 mask_addr;          /* Address of the MASK register */
+};
+
+/* HW block attention registers */
+struct attn_hw_regs {
+       u16 num_of_int_regs;    /* Number of interrupt regs */
+       u16 num_of_prty_regs;   /* Number of parity regs */
+       struct attn_hw_reg **int_regs;  /* interrupt regs */
+       struct attn_hw_reg **prty_regs; /* parity regs */
+};
+
+/* HW block attention registers */
+struct attn_hw_block {
+       const char *name;       /* Block name */
+       const char **int_desc;  /* Array of interrupt attention descriptions */
+       const char **prty_desc; /* Array of parity attention descriptions */
+       struct attn_hw_regs chip_regs[3];       /* attention regs per chip.*/
+};
+
+/* GRC block attention register tables (generated; per chip variant) */
+#ifdef ATTN_DESC
+static const char *grc_int_attn_desc[5] = {
+       "grc_address_error",
+       "grc_timeout_event",
+       "grc_global_reserved_address",
+       "grc_path_isolation_error",
+       "grc_trace_fifo_valid_data",
+};
+#else
+#define grc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 grc_int0_bb_a0_attn_idx[4] = {
+       0, 1, 2, 3,
+};
+
+static struct attn_hw_reg grc_int0_bb_a0 = {
+       0, 4, grc_int0_bb_a0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_bb_a0_regs[1] = {
+       &grc_int0_bb_a0,
+};
+
+static const u16 grc_int0_bb_b0_attn_idx[4] = {
+       0, 1, 2, 3,
+};
+
+static struct attn_hw_reg grc_int0_bb_b0 = {
+       0, 4, grc_int0_bb_b0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_bb_b0_regs[1] = {
+       &grc_int0_bb_b0,
+};
+
+static const u16 grc_int0_k2_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg grc_int0_k2 = {
+       0, 5, grc_int0_k2_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184
+};
+
+static struct attn_hw_reg *grc_int_k2_regs[1] = {
+       &grc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *grc_prty_attn_desc[3] = {
+       "grc_mem003_i_mem_prty",
+       "grc_mem002_i_mem_prty",
+       "grc_mem001_i_mem_prty",
+};
+#else
+#define grc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 grc_prty1_bb_a0_attn_idx[2] = {
+       1, 2,
+};
+
+static struct attn_hw_reg grc_prty1_bb_a0 = {
+       0, 2, grc_prty1_bb_a0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_bb_a0_regs[1] = {
+       &grc_prty1_bb_a0,
+};
+
+static const u16 grc_prty1_bb_b0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg grc_prty1_bb_b0 = {
+       0, 2, grc_prty1_bb_b0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = {
+       &grc_prty1_bb_b0,
+};
+
+static const u16 grc_prty1_k2_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg grc_prty1_k2 = {
+       0, 2, grc_prty1_k2_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204
+};
+
+static struct attn_hw_reg *grc_prty_k2_regs[1] = {
+       &grc_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+/* MISCS block attention register tables (generated; per chip variant) */
+static const char *miscs_int_attn_desc[14] = {
+       "miscs_address_error",
+       "miscs_generic_sw",
+       "miscs_cnig_interrupt",
+       "miscs_opte_dorq_fifo_err_eng1",
+       "miscs_opte_dorq_fifo_err_eng0",
+       "miscs_opte_dbg_fifo_err_eng1",
+       "miscs_opte_dbg_fifo_err_eng0",
+       "miscs_opte_btb_if1_fifo_err_eng1",
+       "miscs_opte_btb_if1_fifo_err_eng0",
+       "miscs_opte_btb_if0_fifo_err_eng1",
+       "miscs_opte_btb_if0_fifo_err_eng0",
+       "miscs_opte_btb_sop_fifo_err_eng1",
+       "miscs_opte_btb_sop_fifo_err_eng0",
+       "miscs_opte_storm_fifo_err_eng0",
+};
+#else
+#define miscs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 miscs_int0_bb_a0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg miscs_int0_bb_a0 = {
+       0, 2, miscs_int0_bb_a0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static const u16 miscs_int1_bb_a0_attn_idx[11] = {
+       3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg miscs_int1_bb_a0 = {
+       1, 11, miscs_int1_bb_a0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194
+};
+
+static struct attn_hw_reg *miscs_int_bb_a0_regs[2] = {
+       &miscs_int0_bb_a0, &miscs_int1_bb_a0,
+};
+
+static const u16 miscs_int0_bb_b0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg miscs_int0_bb_b0 = {
+       0, 3, miscs_int0_bb_b0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static const u16 miscs_int1_bb_b0_attn_idx[11] = {
+       3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg miscs_int1_bb_b0 = {
+       1, 11, miscs_int1_bb_b0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194
+};
+
+static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = {
+       &miscs_int0_bb_b0, &miscs_int1_bb_b0,
+};
+
+static const u16 miscs_int0_k2_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg miscs_int0_k2 = {
+       0, 3, miscs_int0_k2_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184
+};
+
+static struct attn_hw_reg *miscs_int_k2_regs[1] = {
+       &miscs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *miscs_prty_attn_desc[1] = {
+       "miscs_cnig_parity",
+};
+#else
+#define miscs_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 miscs_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg miscs_prty0_bb_b0 = {
+       0, 1, miscs_prty0_bb_b0_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4
+};
+
+static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = {
+       &miscs_prty0_bb_b0,
+};
+
+static const u16 miscs_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg miscs_prty0_k2 = {
+       0, 1, miscs_prty0_k2_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4
+};
+
+static struct attn_hw_reg *miscs_prty_k2_regs[1] = {
+       &miscs_prty0_k2,
+};
+
+/* MISC block attention register tables (generated; per chip variant) */
+#ifdef ATTN_DESC
+static const char *misc_int_attn_desc[1] = {
+       "misc_address_error",
+};
+#else
+#define misc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 misc_int0_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg misc_int0_bb_a0 = {
+       0, 1, misc_int0_bb_a0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_bb_a0_regs[1] = {
+       &misc_int0_bb_a0,
+};
+
+static const u16 misc_int0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg misc_int0_bb_b0 = {
+       0, 1, misc_int0_bb_b0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_bb_b0_regs[1] = {
+       &misc_int0_bb_b0,
+};
+
+static const u16 misc_int0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg misc_int0_k2 = {
+       0, 1, misc_int0_k2_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184
+};
+
+static struct attn_hw_reg *misc_int_k2_regs[1] = {
+       &misc_int0_k2,
+};
+
+/* PGLUE_B block attention register tables (generated; per chip variant) */
+#ifdef ATTN_DESC
+static const char *pglue_b_int_attn_desc[24] = {
+       "pglue_b_address_error",
+       "pglue_b_incorrect_rcv_behavior",
+       "pglue_b_was_error_attn",
+       "pglue_b_vf_length_violation_attn",
+       "pglue_b_vf_grc_space_violation_attn",
+       "pglue_b_tcpl_error_attn",
+       "pglue_b_tcpl_in_two_rcbs_attn",
+       "pglue_b_cssnoop_fifo_overflow",
+       "pglue_b_tcpl_translation_size_different",
+       "pglue_b_pcie_rx_l0s_timeout",
+       "pglue_b_master_zlr_attn",
+       "pglue_b_admin_window_violation_attn",
+       "pglue_b_out_of_range_function_in_pretend",
+       "pglue_b_illegal_address",
+       "pglue_b_pgl_cpl_err",
+       "pglue_b_pgl_txw_of",
+       "pglue_b_pgl_cpl_aft",
+       "pglue_b_pgl_cpl_of",
+       "pglue_b_pgl_cpl_ecrc",
+       "pglue_b_pgl_pcie_attn",
+       "pglue_b_pgl_read_blocked",
+       "pglue_b_pgl_write_blocked",
+       "pglue_b_vf_ilt_err",
+       "pglue_b_rxobffexception_attn",
+};
+#else
+#define pglue_b_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglue_b_int0_bb_a0_attn_idx[23] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22,
+};
+
+static struct attn_hw_reg pglue_b_int0_bb_a0 = {
+       0, 23, pglue_b_int0_bb_a0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188,
+       0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_bb_a0_regs[1] = {
+       &pglue_b_int0_bb_a0,
+};
+
+static const u16 pglue_b_int0_bb_b0_attn_idx[23] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22,
+};
+
+static struct attn_hw_reg pglue_b_int0_bb_b0 = {
+       0, 23, pglue_b_int0_bb_b0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188,
+       0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = {
+       &pglue_b_int0_bb_b0,
+};
+
+static const u16 pglue_b_int0_k2_attn_idx[24] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23,
+};
+
+static struct attn_hw_reg pglue_b_int0_k2 = {
+       0, 24, pglue_b_int0_k2_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184
+};
+
+static struct attn_hw_reg *pglue_b_int_k2_regs[1] = {
+       &pglue_b_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pglue_b_prty_attn_desc[35] = {
+       "pglue_b_datapath_registers",
+       "pglue_b_mem027_i_mem_prty",
+       "pglue_b_mem007_i_mem_prty",
+       "pglue_b_mem009_i_mem_prty",
+       "pglue_b_mem010_i_mem_prty",
+       "pglue_b_mem008_i_mem_prty",
+       "pglue_b_mem022_i_mem_prty",
+       "pglue_b_mem023_i_mem_prty",
+       "pglue_b_mem024_i_mem_prty",
+       "pglue_b_mem025_i_mem_prty",
+       "pglue_b_mem004_i_mem_prty",
+       "pglue_b_mem005_i_mem_prty",
+       "pglue_b_mem011_i_mem_prty",
+       "pglue_b_mem016_i_mem_prty",
+       "pglue_b_mem017_i_mem_prty",
+       "pglue_b_mem012_i_mem_prty",
+       "pglue_b_mem013_i_mem_prty",
+       "pglue_b_mem014_i_mem_prty",
+       "pglue_b_mem015_i_mem_prty",
+       "pglue_b_mem018_i_mem_prty",
+       "pglue_b_mem020_i_mem_prty",
+       "pglue_b_mem021_i_mem_prty",
+       "pglue_b_mem019_i_mem_prty",
+       "pglue_b_mem026_i_mem_prty",
+       "pglue_b_mem006_i_mem_prty",
+       "pglue_b_mem003_i_mem_prty",
+       "pglue_b_mem002_i_mem_prty_0",
+       "pglue_b_mem002_i_mem_prty_1",
+       "pglue_b_mem002_i_mem_prty_2",
+       "pglue_b_mem002_i_mem_prty_3",
+       "pglue_b_mem002_i_mem_prty_4",
+       "pglue_b_mem002_i_mem_prty_5",
+       "pglue_b_mem002_i_mem_prty_6",
+       "pglue_b_mem002_i_mem_prty_7",
+       "pglue_b_mem001_i_mem_prty",
+};
+#else
+#define pglue_b_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglue_b_prty1_bb_a0_attn_idx[22] = {
+       2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31,
+       32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty1_bb_a0 = {
+       0, 22, pglue_b_prty1_bb_a0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+       0x2a8204
+};
+
+static struct attn_hw_reg *pglue_b_prty_bb_a0_regs[1] = {
+       &pglue_b_prty1_bb_a0,
+};
+
+static const u16 pglue_b_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pglue_b_prty0_bb_b0 = {
+       0, 1, pglue_b_prty0_bb_b0_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198,
+       0x2a8194
+};
+
+static const u16 pglue_b_prty1_bb_b0_attn_idx[22] = {
+       2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31,
+       32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty1_bb_b0 = {
+       1, 22, pglue_b_prty1_bb_b0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+       0x2a8204
+};
+
+static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = {
+       &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0,
+};
+
+static const u16 pglue_b_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pglue_b_prty0_k2 = {
+       0, 1, pglue_b_prty0_k2_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194
+};
+
+static const u16 pglue_b_prty1_k2_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pglue_b_prty1_k2 = {
+       1, 31, pglue_b_prty1_k2_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208,
+       0x2a8204
+};
+
+static const u16 pglue_b_prty2_k2_attn_idx[3] = {
+       32, 33, 34,
+};
+
+static struct attn_hw_reg pglue_b_prty2_k2 = {
+       2, 3, pglue_b_prty2_k2_attn_idx, 0x2a8210, 0x2a821c, 0x2a8218, 0x2a8214
+};
+
+static struct attn_hw_reg *pglue_b_prty_k2_regs[3] = {
+       &pglue_b_prty0_k2, &pglue_b_prty1_k2, &pglue_b_prty2_k2,
+};
+
+/* CNIG block attention register tables (generated; per chip variant) */
+#ifdef ATTN_DESC
+static const char *cnig_int_attn_desc[10] = {
+       "cnig_address_error",
+       "cnig_tx_illegal_sop_port0",
+       "cnig_tx_illegal_sop_port1",
+       "cnig_tx_illegal_sop_port2",
+       "cnig_tx_illegal_sop_port3",
+       "cnig_tdm_lane_0_bandwith_exceed",
+       "cnig_tdm_lane_1_bandwith_exceed",
+       "cnig_pmeg_intr",
+       "cnig_pmfc_intr",
+       "cnig_fifo_error",
+};
+#else
+#define cnig_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cnig_int0_bb_a0_attn_idx[4] = {
+       0, 7, 8, 9,
+};
+
+static struct attn_hw_reg cnig_int0_bb_a0 = {
+       0, 4, cnig_int0_bb_a0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec
+};
+
+static struct attn_hw_reg *cnig_int_bb_a0_regs[1] = {
+       &cnig_int0_bb_a0,
+};
+
+static const u16 cnig_int0_bb_b0_attn_idx[6] = {
+       0, 1, 3, 7, 8, 9,
+};
+
+static struct attn_hw_reg cnig_int0_bb_b0 = {
+       0, 6, cnig_int0_bb_b0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec
+};
+
+static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = {
+       &cnig_int0_bb_b0,
+};
+
+static const u16 cnig_int0_k2_attn_idx[7] = {
+       0, 1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg cnig_int0_k2 = {
+       0, 7, cnig_int0_k2_attn_idx, 0x218218, 0x218224, 0x218220, 0x21821c
+};
+
+static struct attn_hw_reg *cnig_int_k2_regs[1] = {
+       &cnig_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *cnig_prty_attn_desc[3] = {
+       "cnig_unused_0",
+       "cnig_datapath_tx",
+       "cnig_datapath_rx",
+};
+#else
+#define cnig_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cnig_prty0_bb_b0_attn_idx[2] = {
+       1, 2,
+};
+
+static struct attn_hw_reg cnig_prty0_bb_b0 = {
+       0, 2, cnig_prty0_bb_b0_attn_idx, 0x218348, 0x218354, 0x218350, 0x21834c
+};
+
+static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = {
+       &cnig_prty0_bb_b0,
+};
+
+static const u16 cnig_prty0_k2_attn_idx[1] = {
+       1,
+};
+
+static struct attn_hw_reg cnig_prty0_k2 = {
+       0, 1, cnig_prty0_k2_attn_idx, 0x21822c, 0x218238, 0x218234, 0x218230
+};
+
+static struct attn_hw_reg *cnig_prty_k2_regs[1] = {
+       &cnig_prty0_k2,
+};
+
+/* CPMU block interrupt attention tables (auto-generated). The same
+ * register addresses are listed for BB A0, BB B0 and K2.
+ */
+#ifdef ATTN_DESC
+static const char *cpmu_int_attn_desc[1] = {
+       "cpmu_address_error",
+};
+#else
+#define cpmu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cpmu_int0_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg cpmu_int0_bb_a0 = {
+       0, 1, cpmu_int0_bb_a0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_bb_a0_regs[1] = {
+       &cpmu_int0_bb_a0,
+};
+
+static const u16 cpmu_int0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg cpmu_int0_bb_b0 = {
+       0, 1, cpmu_int0_bb_b0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = {
+       &cpmu_int0_bb_b0,
+};
+
+static const u16 cpmu_int0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg cpmu_int0_k2 = {
+       0, 1, cpmu_int0_k2_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4
+};
+
+static struct attn_hw_reg *cpmu_int_k2_regs[1] = {
+       &cpmu_int0_k2,
+};
+
+/* NCSI block interrupt and parity attention tables (auto-generated). */
+#ifdef ATTN_DESC
+static const char *ncsi_int_attn_desc[1] = {
+       "ncsi_address_error",
+};
+#else
+#define ncsi_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ncsi_int0_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg ncsi_int0_bb_a0 = {
+       0, 1, ncsi_int0_bb_a0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_bb_a0_regs[1] = {
+       &ncsi_int0_bb_a0,
+};
+
+static const u16 ncsi_int0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg ncsi_int0_bb_b0 = {
+       0, 1, ncsi_int0_bb_b0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = {
+       &ncsi_int0_bb_b0,
+};
+
+static const u16 ncsi_int0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg ncsi_int0_k2 = {
+       0, 1, ncsi_int0_k2_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0
+};
+
+static struct attn_hw_reg *ncsi_int_k2_regs[1] = {
+       &ncsi_int0_k2,
+};
+
+/* NCSI parity attentions. */
+#ifdef ATTN_DESC
+static const char *ncsi_prty_attn_desc[1] = {
+       "ncsi_mem002_i_mem_prty",
+};
+#else
+#define ncsi_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ncsi_prty1_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg ncsi_prty1_bb_a0 = {
+       0, 1, ncsi_prty1_bb_a0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_bb_a0_regs[1] = {
+       &ncsi_prty1_bb_a0,
+};
+
+static const u16 ncsi_prty1_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg ncsi_prty1_bb_b0 = {
+       0, 1, ncsi_prty1_bb_b0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = {
+       &ncsi_prty1_bb_b0,
+};
+
+static const u16 ncsi_prty1_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg ncsi_prty1_k2 = {
+       0, 1, ncsi_prty1_k2_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004
+};
+
+static struct attn_hw_reg *ncsi_prty_k2_regs[1] = {
+       &ncsi_prty1_k2,
+};
+
+/* OPTE block parity attention tables (auto-generated). BB B0 and K2 add
+ * a second register (prty0) carrying only the datapath parity bit (11).
+ */
+#ifdef ATTN_DESC
+static const char *opte_prty_attn_desc[12] = {
+       "opte_mem009_i_mem_prty",
+       "opte_mem010_i_mem_prty",
+       "opte_mem005_i_mem_prty",
+       "opte_mem006_i_mem_prty",
+       "opte_mem007_i_mem_prty",
+       "opte_mem008_i_mem_prty",
+       "opte_mem001_i_mem_prty",
+       "opte_mem002_i_mem_prty",
+       "opte_mem003_i_mem_prty",
+       "opte_mem004_i_mem_prty",
+       "opte_mem011_i_mem_prty",
+       "opte_datapath_parity_error",
+};
+#else
+#define opte_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 opte_prty1_bb_a0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_bb_a0 = {
+       0, 11, opte_prty1_bb_a0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static struct attn_hw_reg *opte_prty_bb_a0_regs[1] = {
+       &opte_prty1_bb_a0,
+};
+
+static const u16 opte_prty1_bb_b0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_bb_b0 = {
+       0, 11, opte_prty1_bb_b0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static const u16 opte_prty0_bb_b0_attn_idx[1] = {
+       11,
+};
+
+static struct attn_hw_reg opte_prty0_bb_b0 = {
+       1, 1, opte_prty0_bb_b0_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c
+};
+
+static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = {
+       &opte_prty1_bb_b0, &opte_prty0_bb_b0,
+};
+
+static const u16 opte_prty1_k2_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg opte_prty1_k2 = {
+       0, 11, opte_prty1_k2_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004
+};
+
+static const u16 opte_prty0_k2_attn_idx[1] = {
+       11,
+};
+
+static struct attn_hw_reg opte_prty0_k2 = {
+       1, 1, opte_prty0_k2_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c
+};
+
+static struct attn_hw_reg *opte_prty_k2_regs[2] = {
+       &opte_prty1_k2, &opte_prty0_k2,
+};
+
+/* BMB interrupt attention description strings (auto-generated). The
+ * bmb_int*_*_attn_idx arrays below presumably index into this table
+ * (all their values are < 297) -- confirm against the attention
+ * reporting code. Compiled out to OSAL_NULL unless ATTN_DESC is set.
+ */
+#ifdef ATTN_DESC
+static const char *bmb_int_attn_desc[297] = {
+       "bmb_address_error",
+       "bmb_rc_pkt0_rls_error",
+       "bmb_unused_0",
+       "bmb_rc_pkt0_protocol_error",
+       "bmb_rc_pkt1_rls_error",
+       "bmb_unused_1",
+       "bmb_rc_pkt1_protocol_error",
+       "bmb_rc_pkt2_rls_error",
+       "bmb_unused_2",
+       "bmb_rc_pkt2_protocol_error",
+       "bmb_rc_pkt3_rls_error",
+       "bmb_unused_3",
+       "bmb_rc_pkt3_protocol_error",
+       "bmb_rc_sop_req_tc_port_error",
+       "bmb_unused_4",
+       "bmb_wc0_protocol_error",
+       "bmb_wc1_protocol_error",
+       "bmb_wc2_protocol_error",
+       "bmb_wc3_protocol_error",
+       "bmb_unused_5",
+       "bmb_ll_blk_error",
+       "bmb_unused_6",
+       "bmb_mac0_fc_cnt_error",
+       "bmb_ll_arb_calc_error",
+       "bmb_wc0_inp_fifo_error",
+       "bmb_wc0_sop_fifo_error",
+       "bmb_wc0_len_fifo_error",
+       "bmb_wc0_queue_fifo_error",
+       "bmb_wc0_free_point_fifo_error",
+       "bmb_wc0_next_point_fifo_error",
+       "bmb_wc0_strt_fifo_error",
+       "bmb_wc0_second_dscr_fifo_error",
+       "bmb_wc0_pkt_avail_fifo_error",
+       "bmb_wc0_cos_cnt_fifo_error",
+       "bmb_wc0_notify_fifo_error",
+       "bmb_wc0_ll_req_fifo_error",
+       "bmb_wc0_ll_pa_cnt_error",
+       "bmb_wc0_bb_pa_cnt_error",
+       "bmb_wc1_inp_fifo_error",
+       "bmb_wc1_sop_fifo_error",
+       "bmb_wc1_queue_fifo_error",
+       "bmb_wc1_free_point_fifo_error",
+       "bmb_wc1_next_point_fifo_error",
+       "bmb_wc1_strt_fifo_error",
+       "bmb_wc1_second_dscr_fifo_error",
+       "bmb_wc1_pkt_avail_fifo_error",
+       "bmb_wc1_cos_cnt_fifo_error",
+       "bmb_wc1_notify_fifo_error",
+       "bmb_wc1_ll_req_fifo_error",
+       "bmb_wc1_ll_pa_cnt_error",
+       "bmb_wc1_bb_pa_cnt_error",
+       "bmb_wc2_inp_fifo_error",
+       "bmb_wc2_sop_fifo_error",
+       "bmb_wc2_queue_fifo_error",
+       "bmb_wc2_free_point_fifo_error",
+       "bmb_wc2_next_point_fifo_error",
+       "bmb_wc2_strt_fifo_error",
+       "bmb_wc2_second_dscr_fifo_error",
+       "bmb_wc2_pkt_avail_fifo_error",
+       "bmb_wc2_cos_cnt_fifo_error",
+       "bmb_wc2_notify_fifo_error",
+       "bmb_wc2_ll_req_fifo_error",
+       "bmb_wc2_ll_pa_cnt_error",
+       "bmb_wc2_bb_pa_cnt_error",
+       "bmb_wc3_inp_fifo_error",
+       "bmb_wc3_sop_fifo_error",
+       "bmb_wc3_queue_fifo_error",
+       "bmb_wc3_free_point_fifo_error",
+       "bmb_wc3_next_point_fifo_error",
+       "bmb_wc3_strt_fifo_error",
+       "bmb_wc3_second_dscr_fifo_error",
+       "bmb_wc3_pkt_avail_fifo_error",
+       "bmb_wc3_cos_cnt_fifo_error",
+       "bmb_wc3_notify_fifo_error",
+       "bmb_wc3_ll_req_fifo_error",
+       "bmb_wc3_ll_pa_cnt_error",
+       "bmb_wc3_bb_pa_cnt_error",
+       "bmb_rc_pkt0_side_fifo_error",
+       "bmb_rc_pkt0_req_fifo_error",
+       "bmb_rc_pkt0_blk_fifo_error",
+       "bmb_rc_pkt0_rls_left_fifo_error",
+       "bmb_rc_pkt0_strt_ptr_fifo_error",
+       "bmb_rc_pkt0_second_ptr_fifo_error",
+       "bmb_rc_pkt0_rsp_fifo_error",
+       "bmb_rc_pkt0_dscr_fifo_error",
+       "bmb_rc_pkt1_side_fifo_error",
+       "bmb_rc_pkt1_req_fifo_error",
+       "bmb_rc_pkt1_blk_fifo_error",
+       "bmb_rc_pkt1_rls_left_fifo_error",
+       "bmb_rc_pkt1_strt_ptr_fifo_error",
+       "bmb_rc_pkt1_second_ptr_fifo_error",
+       "bmb_rc_pkt1_rsp_fifo_error",
+       "bmb_rc_pkt1_dscr_fifo_error",
+       "bmb_rc_pkt2_side_fifo_error",
+       "bmb_rc_pkt2_req_fifo_error",
+       "bmb_rc_pkt2_blk_fifo_error",
+       "bmb_rc_pkt2_rls_left_fifo_error",
+       "bmb_rc_pkt2_strt_ptr_fifo_error",
+       "bmb_rc_pkt2_second_ptr_fifo_error",
+       "bmb_rc_pkt2_rsp_fifo_error",
+       "bmb_rc_pkt2_dscr_fifo_error",
+       "bmb_rc_pkt3_side_fifo_error",
+       "bmb_rc_pkt3_req_fifo_error",
+       "bmb_rc_pkt3_blk_fifo_error",
+       "bmb_rc_pkt3_rls_left_fifo_error",
+       "bmb_rc_pkt3_strt_ptr_fifo_error",
+       "bmb_rc_pkt3_second_ptr_fifo_error",
+       "bmb_rc_pkt3_rsp_fifo_error",
+       "bmb_rc_pkt3_dscr_fifo_error",
+       "bmb_rc_sop_strt_fifo_error",
+       "bmb_rc_sop_req_fifo_error",
+       "bmb_rc_sop_dscr_fifo_error",
+       "bmb_rc_sop_queue_fifo_error",
+       "bmb_ll_arb_rls_fifo_error",
+       "bmb_ll_arb_prefetch_fifo_error",
+       "bmb_rc_pkt0_rls_fifo_error",
+       "bmb_rc_pkt1_rls_fifo_error",
+       "bmb_rc_pkt2_rls_fifo_error",
+       "bmb_rc_pkt3_rls_fifo_error",
+       "bmb_rc_pkt4_rls_fifo_error",
+       "bmb_rc_pkt5_rls_fifo_error",
+       "bmb_rc_pkt6_rls_fifo_error",
+       "bmb_rc_pkt7_rls_fifo_error",
+       "bmb_rc_pkt8_rls_fifo_error",
+       "bmb_rc_pkt9_rls_fifo_error",
+       "bmb_rc_pkt4_rls_error",
+       "bmb_rc_pkt4_protocol_error",
+       "bmb_rc_pkt4_side_fifo_error",
+       "bmb_rc_pkt4_req_fifo_error",
+       "bmb_rc_pkt4_blk_fifo_error",
+       "bmb_rc_pkt4_rls_left_fifo_error",
+       "bmb_rc_pkt4_strt_ptr_fifo_error",
+       "bmb_rc_pkt4_second_ptr_fifo_error",
+       "bmb_rc_pkt4_rsp_fifo_error",
+       "bmb_rc_pkt4_dscr_fifo_error",
+       "bmb_rc_pkt5_rls_error",
+       "bmb_rc_pkt5_protocol_error",
+       "bmb_rc_pkt5_side_fifo_error",
+       "bmb_rc_pkt5_req_fifo_error",
+       "bmb_rc_pkt5_blk_fifo_error",
+       "bmb_rc_pkt5_rls_left_fifo_error",
+       "bmb_rc_pkt5_strt_ptr_fifo_error",
+       "bmb_rc_pkt5_second_ptr_fifo_error",
+       "bmb_rc_pkt5_rsp_fifo_error",
+       "bmb_rc_pkt5_dscr_fifo_error",
+       "bmb_rc_pkt6_rls_error",
+       "bmb_rc_pkt6_protocol_error",
+       "bmb_rc_pkt6_side_fifo_error",
+       "bmb_rc_pkt6_req_fifo_error",
+       "bmb_rc_pkt6_blk_fifo_error",
+       "bmb_rc_pkt6_rls_left_fifo_error",
+       "bmb_rc_pkt6_strt_ptr_fifo_error",
+       "bmb_rc_pkt6_second_ptr_fifo_error",
+       "bmb_rc_pkt6_rsp_fifo_error",
+       "bmb_rc_pkt6_dscr_fifo_error",
+       "bmb_rc_pkt7_rls_error",
+       "bmb_rc_pkt7_protocol_error",
+       "bmb_rc_pkt7_side_fifo_error",
+       "bmb_rc_pkt7_req_fifo_error",
+       "bmb_rc_pkt7_blk_fifo_error",
+       "bmb_rc_pkt7_rls_left_fifo_error",
+       "bmb_rc_pkt7_strt_ptr_fifo_error",
+       "bmb_rc_pkt7_second_ptr_fifo_error",
+       "bmb_rc_pkt7_rsp_fifo_error",
+       /* NOTE(review): the pkt7 dscr entry is not here; it appears as the
+        * final entry of this table instead (index 296).
+        */
+       "bmb_packet_available_sync_fifo_push_error",
+       "bmb_rc_pkt8_rls_error",
+       "bmb_rc_pkt8_protocol_error",
+       "bmb_rc_pkt8_side_fifo_error",
+       "bmb_rc_pkt8_req_fifo_error",
+       "bmb_rc_pkt8_blk_fifo_error",
+       "bmb_rc_pkt8_rls_left_fifo_error",
+       "bmb_rc_pkt8_strt_ptr_fifo_error",
+       "bmb_rc_pkt8_second_ptr_fifo_error",
+       "bmb_rc_pkt8_rsp_fifo_error",
+       "bmb_rc_pkt8_dscr_fifo_error",
+       "bmb_rc_pkt9_rls_error",
+       "bmb_rc_pkt9_protocol_error",
+       "bmb_rc_pkt9_side_fifo_error",
+       "bmb_rc_pkt9_req_fifo_error",
+       "bmb_rc_pkt9_blk_fifo_error",
+       "bmb_rc_pkt9_rls_left_fifo_error",
+       "bmb_rc_pkt9_strt_ptr_fifo_error",
+       "bmb_rc_pkt9_second_ptr_fifo_error",
+       "bmb_rc_pkt9_rsp_fifo_error",
+       "bmb_rc_pkt9_dscr_fifo_error",
+       "bmb_wc4_protocol_error",
+       "bmb_wc5_protocol_error",
+       "bmb_wc6_protocol_error",
+       "bmb_wc7_protocol_error",
+       "bmb_wc8_protocol_error",
+       "bmb_wc9_protocol_error",
+       "bmb_wc4_inp_fifo_error",
+       "bmb_wc4_sop_fifo_error",
+       "bmb_wc4_queue_fifo_error",
+       "bmb_wc4_free_point_fifo_error",
+       "bmb_wc4_next_point_fifo_error",
+       "bmb_wc4_strt_fifo_error",
+       "bmb_wc4_second_dscr_fifo_error",
+       "bmb_wc4_pkt_avail_fifo_error",
+       "bmb_wc4_cos_cnt_fifo_error",
+       "bmb_wc4_notify_fifo_error",
+       "bmb_wc4_ll_req_fifo_error",
+       "bmb_wc4_ll_pa_cnt_error",
+       "bmb_wc4_bb_pa_cnt_error",
+       "bmb_wc5_inp_fifo_error",
+       "bmb_wc5_sop_fifo_error",
+       "bmb_wc5_queue_fifo_error",
+       "bmb_wc5_free_point_fifo_error",
+       "bmb_wc5_next_point_fifo_error",
+       "bmb_wc5_strt_fifo_error",
+       "bmb_wc5_second_dscr_fifo_error",
+       "bmb_wc5_pkt_avail_fifo_error",
+       "bmb_wc5_cos_cnt_fifo_error",
+       "bmb_wc5_notify_fifo_error",
+       "bmb_wc5_ll_req_fifo_error",
+       "bmb_wc5_ll_pa_cnt_error",
+       "bmb_wc5_bb_pa_cnt_error",
+       "bmb_wc6_inp_fifo_error",
+       "bmb_wc6_sop_fifo_error",
+       "bmb_wc6_queue_fifo_error",
+       "bmb_wc6_free_point_fifo_error",
+       "bmb_wc6_next_point_fifo_error",
+       "bmb_wc6_strt_fifo_error",
+       "bmb_wc6_second_dscr_fifo_error",
+       "bmb_wc6_pkt_avail_fifo_error",
+       "bmb_wc6_cos_cnt_fifo_error",
+       "bmb_wc6_notify_fifo_error",
+       "bmb_wc6_ll_req_fifo_error",
+       "bmb_wc6_ll_pa_cnt_error",
+       "bmb_wc6_bb_pa_cnt_error",
+       "bmb_wc7_inp_fifo_error",
+       "bmb_wc7_sop_fifo_error",
+       "bmb_wc7_queue_fifo_error",
+       "bmb_wc7_free_point_fifo_error",
+       "bmb_wc7_next_point_fifo_error",
+       "bmb_wc7_strt_fifo_error",
+       "bmb_wc7_second_dscr_fifo_error",
+       "bmb_wc7_pkt_avail_fifo_error",
+       "bmb_wc7_cos_cnt_fifo_error",
+       "bmb_wc7_notify_fifo_error",
+       "bmb_wc7_ll_req_fifo_error",
+       "bmb_wc7_ll_pa_cnt_error",
+       "bmb_wc7_bb_pa_cnt_error",
+       "bmb_wc8_inp_fifo_error",
+       "bmb_wc8_sop_fifo_error",
+       "bmb_wc8_queue_fifo_error",
+       "bmb_wc8_free_point_fifo_error",
+       "bmb_wc8_next_point_fifo_error",
+       "bmb_wc8_strt_fifo_error",
+       "bmb_wc8_second_dscr_fifo_error",
+       "bmb_wc8_pkt_avail_fifo_error",
+       "bmb_wc8_cos_cnt_fifo_error",
+       "bmb_wc8_notify_fifo_error",
+       "bmb_wc8_ll_req_fifo_error",
+       "bmb_wc8_ll_pa_cnt_error",
+       "bmb_wc8_bb_pa_cnt_error",
+       "bmb_wc9_inp_fifo_error",
+       "bmb_wc9_sop_fifo_error",
+       "bmb_wc9_queue_fifo_error",
+       "bmb_wc9_free_point_fifo_error",
+       "bmb_wc9_next_point_fifo_error",
+       "bmb_wc9_strt_fifo_error",
+       "bmb_wc9_second_dscr_fifo_error",
+       "bmb_wc9_pkt_avail_fifo_error",
+       "bmb_wc9_cos_cnt_fifo_error",
+       "bmb_wc9_notify_fifo_error",
+       "bmb_wc9_ll_req_fifo_error",
+       "bmb_wc9_ll_pa_cnt_error",
+       "bmb_wc9_bb_pa_cnt_error",
+       "bmb_rc9_sop_rc_out_sync_fifo_error",
+       "bmb_rc9_sop_out_sync_fifo_push_error",
+       "bmb_rc0_sop_pend_fifo_error",
+       "bmb_rc1_sop_pend_fifo_error",
+       "bmb_rc2_sop_pend_fifo_error",
+       "bmb_rc3_sop_pend_fifo_error",
+       "bmb_rc4_sop_pend_fifo_error",
+       "bmb_rc5_sop_pend_fifo_error",
+       "bmb_rc6_sop_pend_fifo_error",
+       "bmb_rc7_sop_pend_fifo_error",
+       "bmb_rc0_dscr_pend_fifo_error",
+       "bmb_rc1_dscr_pend_fifo_error",
+       "bmb_rc2_dscr_pend_fifo_error",
+       "bmb_rc3_dscr_pend_fifo_error",
+       "bmb_rc4_dscr_pend_fifo_error",
+       "bmb_rc5_dscr_pend_fifo_error",
+       "bmb_rc6_dscr_pend_fifo_error",
+       "bmb_rc7_dscr_pend_fifo_error",
+       "bmb_rc8_sop_inp_sync_fifo_push_error",
+       "bmb_rc9_sop_inp_sync_fifo_push_error",
+       "bmb_rc8_sop_out_sync_fifo_push_error",
+       "bmb_rc_gnt_pend_fifo_error",
+       "bmb_rc8_out_sync_fifo_push_error",
+       "bmb_rc9_out_sync_fifo_push_error",
+       "bmb_wc8_sync_fifo_push_error",
+       "bmb_wc9_sync_fifo_push_error",
+       "bmb_rc8_sop_rc_out_sync_fifo_error",
+       "bmb_rc_pkt7_dscr_fifo_error",
+};
+#else
+#define bmb_int_attn_desc OSAL_NULL
+#endif
+
+/* BMB interrupt attention registers, twelve per chip (int0..int11),
+ * for BB A0, BB B0 and K2 (auto-generated). The three chip variants
+ * below share identical addresses and bit indices.
+ */
+static const u16 bmb_int0_bb_a0_attn_idx[16] = {
+       0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_bb_a0 = {
+       0, 16, bmb_int0_bb_a0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_bb_a0_attn_idx[28] = {
+       23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+       41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_bb_a0 = {
+       1, 28, bmb_int1_bb_a0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_bb_a0_attn_idx[26] = {
+       51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+       69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_bb_a0 = {
+       2, 26, bmb_int2_bb_a0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_bb_a0_attn_idx[31] = {
+       77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+       95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_bb_a0 = {
+       3, 31, bmb_int3_bb_a0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_bb_a0_attn_idx[27] = {
+       108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+       122,
+       123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_bb_a0 = {
+       4, 27, bmb_int4_bb_a0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_bb_a0_attn_idx[29] = {
+       135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+       149,
+       150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_bb_a0 = {
+       5, 29, bmb_int5_bb_a0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_bb_a0_attn_idx[30] = {
+       164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+       178,
+       179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+           193,
+};
+
+static struct attn_hw_reg bmb_int6_bb_a0 = {
+       6, 30, bmb_int6_bb_a0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_bb_a0_attn_idx[32] = {
+       194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+       208,
+       209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+           223, 224,
+       225,
+};
+
+static struct attn_hw_reg bmb_int7_bb_a0 = {
+       7, 32, bmb_int7_bb_a0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_bb_a0_attn_idx[32] = {
+       226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+       240,
+       241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+           255, 256,
+       257,
+};
+
+static struct attn_hw_reg bmb_int8_bb_a0 = {
+       8, 32, bmb_int8_bb_a0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_bb_a0_attn_idx[32] = {
+       258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+       272,
+       273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+           287, 288,
+       289,
+};
+
+static struct attn_hw_reg bmb_int9_bb_a0 = {
+       9, 32, bmb_int9_bb_a0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_bb_a0_attn_idx[3] = {
+       290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_bb_a0 = {
+       10, 3, bmb_int10_bb_a0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_bb_a0_attn_idx[4] = {
+       293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_bb_a0 = {
+       11, 4, bmb_int11_bb_a0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_bb_a0_regs[12] = {
+       &bmb_int0_bb_a0, &bmb_int1_bb_a0, &bmb_int2_bb_a0, &bmb_int3_bb_a0,
+       &bmb_int4_bb_a0, &bmb_int5_bb_a0, &bmb_int6_bb_a0, &bmb_int7_bb_a0,
+       &bmb_int8_bb_a0, &bmb_int9_bb_a0,
+       &bmb_int10_bb_a0, &bmb_int11_bb_a0,
+};
+
+/* BB rev B0 variant (same data as A0). */
+static const u16 bmb_int0_bb_b0_attn_idx[16] = {
+       0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_bb_b0 = {
+       0, 16, bmb_int0_bb_b0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_bb_b0_attn_idx[28] = {
+       23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+       41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_bb_b0 = {
+       1, 28, bmb_int1_bb_b0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_bb_b0_attn_idx[26] = {
+       51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+       69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_bb_b0 = {
+       2, 26, bmb_int2_bb_b0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_bb_b0_attn_idx[31] = {
+       77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+       95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_bb_b0 = {
+       3, 31, bmb_int3_bb_b0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_bb_b0_attn_idx[27] = {
+       108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+       122,
+       123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_bb_b0 = {
+       4, 27, bmb_int4_bb_b0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_bb_b0_attn_idx[29] = {
+       135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+       149,
+       150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_bb_b0 = {
+       5, 29, bmb_int5_bb_b0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_bb_b0_attn_idx[30] = {
+       164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+       178,
+       179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+           193,
+};
+
+static struct attn_hw_reg bmb_int6_bb_b0 = {
+       6, 30, bmb_int6_bb_b0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_bb_b0_attn_idx[32] = {
+       194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+       208,
+       209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+           223, 224,
+       225,
+};
+
+static struct attn_hw_reg bmb_int7_bb_b0 = {
+       7, 32, bmb_int7_bb_b0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_bb_b0_attn_idx[32] = {
+       226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+       240,
+       241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+           255, 256,
+       257,
+};
+
+static struct attn_hw_reg bmb_int8_bb_b0 = {
+       8, 32, bmb_int8_bb_b0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_bb_b0_attn_idx[32] = {
+       258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+       272,
+       273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+           287, 288,
+       289,
+};
+
+static struct attn_hw_reg bmb_int9_bb_b0 = {
+       9, 32, bmb_int9_bb_b0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_bb_b0_attn_idx[3] = {
+       290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_bb_b0 = {
+       10, 3, bmb_int10_bb_b0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_bb_b0_attn_idx[4] = {
+       293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_bb_b0 = {
+       11, 4, bmb_int11_bb_b0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = {
+       &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0,
+       &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0,
+       &bmb_int8_bb_b0, &bmb_int9_bb_b0,
+       &bmb_int10_bb_b0, &bmb_int11_bb_b0,
+};
+
+/* K2 variant (same data as BB). */
+static const u16 bmb_int0_k2_attn_idx[16] = {
+       0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22,
+};
+
+static struct attn_hw_reg bmb_int0_k2 = {
+       0, 16, bmb_int0_k2_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4
+};
+
+static const u16 bmb_int1_k2_attn_idx[28] = {
+       23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+       41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_int1_k2 = {
+       1, 28, bmb_int1_k2_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc
+};
+
+static const u16 bmb_int2_k2_attn_idx[26] = {
+       51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+       69, 70, 71, 72, 73, 74, 75, 76,
+};
+
+static struct attn_hw_reg bmb_int2_k2 = {
+       2, 26, bmb_int2_k2_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4
+};
+
+static const u16 bmb_int3_k2_attn_idx[31] = {
+       77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+       95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+};
+
+static struct attn_hw_reg bmb_int3_k2 = {
+       3, 31, bmb_int3_k2_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c
+};
+
+static const u16 bmb_int4_k2_attn_idx[27] = {
+       108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+       122,
+       123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+};
+
+static struct attn_hw_reg bmb_int4_k2 = {
+       4, 27, bmb_int4_k2_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124
+};
+
+static const u16 bmb_int5_k2_attn_idx[29] = {
+       135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+       149,
+       150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+};
+
+static struct attn_hw_reg bmb_int5_k2 = {
+       5, 29, bmb_int5_k2_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c
+};
+
+static const u16 bmb_int6_k2_attn_idx[30] = {
+       164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
+       178,
+       179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
+           193,
+};
+
+static struct attn_hw_reg bmb_int6_k2 = {
+       6, 30, bmb_int6_k2_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154
+};
+
+static const u16 bmb_int7_k2_attn_idx[32] = {
+       194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
+       208,
+       209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+           223, 224,
+       225,
+};
+
+static struct attn_hw_reg bmb_int7_k2 = {
+       7, 32, bmb_int7_k2_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c
+};
+
+static const u16 bmb_int8_k2_attn_idx[32] = {
+       226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
+       240,
+       241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
+           255, 256,
+       257,
+};
+
+static struct attn_hw_reg bmb_int8_k2 = {
+       8, 32, bmb_int8_k2_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188
+};
+
+static const u16 bmb_int9_k2_attn_idx[32] = {
+       258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271,
+       272,
+       273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286,
+           287, 288,
+       289,
+};
+
+static struct attn_hw_reg bmb_int9_k2 = {
+       9, 32, bmb_int9_k2_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0
+};
+
+static const u16 bmb_int10_k2_attn_idx[3] = {
+       290, 291, 292,
+};
+
+static struct attn_hw_reg bmb_int10_k2 = {
+       10, 3, bmb_int10_k2_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8
+};
+
+static const u16 bmb_int11_k2_attn_idx[4] = {
+       293, 294, 295, 296,
+};
+
+static struct attn_hw_reg bmb_int11_k2 = {
+       11, 4, bmb_int11_k2_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0
+};
+
+static struct attn_hw_reg *bmb_int_k2_regs[12] = {
+       &bmb_int0_k2, &bmb_int1_k2, &bmb_int2_k2, &bmb_int3_k2, &bmb_int4_k2,
+       &bmb_int5_k2, &bmb_int6_k2, &bmb_int7_k2, &bmb_int8_k2, &bmb_int9_k2,
+       &bmb_int10_k2, &bmb_int11_k2,
+};
+
+/* BMB parity attention tables (auto-generated). BB A0 has two registers
+ * (prty1/prty2, indices 5..60); BB B0 and K2 add a prty0 register for
+ * indices 0..4 and shrink prty2 to indices 36..50.
+ */
+#ifdef ATTN_DESC
+static const char *bmb_prty_attn_desc[61] = {
+       "bmb_ll_bank0_mem_prty",
+       "bmb_ll_bank1_mem_prty",
+       "bmb_ll_bank2_mem_prty",
+       "bmb_ll_bank3_mem_prty",
+       "bmb_datapath_registers",
+       "bmb_mem001_i_ecc_rf_int",
+       "bmb_mem008_i_ecc_rf_int",
+       "bmb_mem009_i_ecc_rf_int",
+       "bmb_mem010_i_ecc_rf_int",
+       "bmb_mem011_i_ecc_rf_int",
+       "bmb_mem012_i_ecc_rf_int",
+       "bmb_mem013_i_ecc_rf_int",
+       "bmb_mem014_i_ecc_rf_int",
+       "bmb_mem015_i_ecc_rf_int",
+       "bmb_mem016_i_ecc_rf_int",
+       "bmb_mem002_i_ecc_rf_int",
+       "bmb_mem003_i_ecc_rf_int",
+       "bmb_mem004_i_ecc_rf_int",
+       "bmb_mem005_i_ecc_rf_int",
+       "bmb_mem006_i_ecc_rf_int",
+       "bmb_mem007_i_ecc_rf_int",
+       "bmb_mem059_i_mem_prty",
+       "bmb_mem060_i_mem_prty",
+       "bmb_mem037_i_mem_prty",
+       "bmb_mem038_i_mem_prty",
+       "bmb_mem039_i_mem_prty",
+       "bmb_mem040_i_mem_prty",
+       "bmb_mem041_i_mem_prty",
+       "bmb_mem042_i_mem_prty",
+       "bmb_mem043_i_mem_prty",
+       "bmb_mem044_i_mem_prty",
+       "bmb_mem045_i_mem_prty",
+       "bmb_mem046_i_mem_prty",
+       "bmb_mem047_i_mem_prty",
+       "bmb_mem048_i_mem_prty",
+       "bmb_mem049_i_mem_prty",
+       "bmb_mem050_i_mem_prty",
+       "bmb_mem051_i_mem_prty",
+       "bmb_mem052_i_mem_prty",
+       "bmb_mem053_i_mem_prty",
+       "bmb_mem054_i_mem_prty",
+       "bmb_mem055_i_mem_prty",
+       "bmb_mem056_i_mem_prty",
+       "bmb_mem057_i_mem_prty",
+       "bmb_mem058_i_mem_prty",
+       "bmb_mem033_i_mem_prty",
+       "bmb_mem034_i_mem_prty",
+       "bmb_mem035_i_mem_prty",
+       "bmb_mem036_i_mem_prty",
+       "bmb_mem021_i_mem_prty",
+       "bmb_mem022_i_mem_prty",
+       "bmb_mem023_i_mem_prty",
+       "bmb_mem024_i_mem_prty",
+       "bmb_mem025_i_mem_prty",
+       "bmb_mem026_i_mem_prty",
+       "bmb_mem027_i_mem_prty",
+       "bmb_mem028_i_mem_prty",
+       "bmb_mem029_i_mem_prty",
+       "bmb_mem030_i_mem_prty",
+       "bmb_mem031_i_mem_prty",
+       "bmb_mem032_i_mem_prty",
+};
+#else
+#define bmb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 bmb_prty1_bb_a0_attn_idx[31] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+       24,
+       25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_bb_a0 = {
+       0, 31, bmb_prty1_bb_a0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_bb_a0_attn_idx[25] = {
+       36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+       54, 55, 56, 57, 58, 59, 60,
+};
+
+static struct attn_hw_reg bmb_prty2_bb_a0 = {
+       1, 25, bmb_prty2_bb_a0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_bb_a0_regs[2] = {
+       &bmb_prty1_bb_a0, &bmb_prty2_bb_a0,
+};
+
+static const u16 bmb_prty0_bb_b0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg bmb_prty0_bb_b0 = {
+       0, 5, bmb_prty0_bb_b0_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0
+};
+
+static const u16 bmb_prty1_bb_b0_attn_idx[31] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+       24,
+       25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_bb_b0 = {
+       1, 31, bmb_prty1_bb_b0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_bb_b0_attn_idx[15] = {
+       36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_prty2_bb_b0 = {
+       2, 15, bmb_prty2_bb_b0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = {
+       &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0,
+};
+
+static const u16 bmb_prty0_k2_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg bmb_prty0_k2 = {
+       0, 5, bmb_prty0_k2_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0
+};
+
+static const u16 bmb_prty1_k2_attn_idx[31] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+       24,
+       25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg bmb_prty1_k2 = {
+       1, 31, bmb_prty1_k2_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404
+};
+
+static const u16 bmb_prty2_k2_attn_idx[15] = {
+       36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+};
+
+static struct attn_hw_reg bmb_prty2_k2 = {
+       2, 15, bmb_prty2_k2_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414
+};
+
+static struct attn_hw_reg *bmb_prty_k2_regs[3] = {
+       &bmb_prty0_k2, &bmb_prty1_k2, &bmb_prty2_k2,
+};
+
+/* PCIe interrupt attention: human-readable bit names (compiled in only when
+ * ATTN_DESC is defined; otherwise the name table collapses to OSAL_NULL),
+ * plus the single K2 register descriptor covering all 17 bits.
+ */
+#ifdef ATTN_DESC
+static const char *pcie_int_attn_desc[17] = {
+       "pcie_address_error",
+       "pcie_link_down_detect",
+       "pcie_link_up_detect",
+       "pcie_cfg_link_eq_req_int",
+       "pcie_pcie_bandwidth_change_detect",
+       "pcie_early_hot_reset_detect",
+       "pcie_hot_reset_detect",
+       "pcie_l1_entry_detect",
+       "pcie_l1_exit_detect",
+       "pcie_ltssm_state_match_detect",
+       "pcie_fc_timeout_detect",
+       "pcie_pme_turnoff_message_detect",
+       "pcie_cfg_send_cor_err",
+       "pcie_cfg_send_nf_err",
+       "pcie_cfg_send_f_err",
+       "pcie_qoverflow_detect",
+       "pcie_vdm_detect",
+};
+#else
+#define pcie_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcie_int0_k2_attn_idx[17] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg pcie_int0_k2 = {
+       0, 17, pcie_int0_k2_attn_idx, 0x547a0, 0x547ac, 0x547a8, 0x547a4
+};
+
+static struct attn_hw_reg *pcie_int_k2_regs[1] = {
+       &pcie_int0_k2,
+};
+
+/* PCIe parity attention names and per-chip register descriptors.  BB A0 and
+ * BB B0 use identical index sets and addresses; K2 splits the bits across a
+ * prty1 register (indices 0-7) and a prty0 register (indices 21-23).
+ */
+#ifdef ATTN_DESC
+static const char *pcie_prty_attn_desc[24] = {
+       "pcie_mem003_i_ecc_rf_int",
+       "pcie_mem004_i_ecc_rf_int",
+       "pcie_mem008_i_mem_prty",
+       "pcie_mem007_i_mem_prty",
+       "pcie_mem005_i_mem_prty",
+       "pcie_mem006_i_mem_prty",
+       "pcie_mem001_i_mem_prty",
+       "pcie_mem002_i_mem_prty",
+       "pcie_mem001_i_ecc_rf_int",
+       "pcie_mem005_i_ecc_rf_int",
+       "pcie_mem010_i_ecc_rf_int",
+       "pcie_mem009_i_ecc_rf_int",
+       "pcie_mem007_i_ecc_rf_int",
+       "pcie_mem004_i_mem_prty_0",
+       "pcie_mem004_i_mem_prty_1",
+       "pcie_mem004_i_mem_prty_2",
+       "pcie_mem004_i_mem_prty_3",
+       "pcie_mem011_i_mem_prty_1",
+       "pcie_mem011_i_mem_prty_2",
+       "pcie_mem012_i_mem_prty_1",
+       "pcie_mem012_i_mem_prty_2",
+       "pcie_app_parity_errs_0",
+       "pcie_app_parity_errs_1",
+       "pcie_app_parity_errs_2",
+};
+#else
+#define pcie_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcie_prty1_bb_a0_attn_idx[17] = {
+       0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pcie_prty1_bb_a0 = {
+       0, 17, pcie_prty1_bb_a0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static struct attn_hw_reg *pcie_prty_bb_a0_regs[1] = {
+       &pcie_prty1_bb_a0,
+};
+
+static const u16 pcie_prty1_bb_b0_attn_idx[17] = {
+       0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+};
+
+static struct attn_hw_reg pcie_prty1_bb_b0 = {
+       0, 17, pcie_prty1_bb_b0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = {
+       &pcie_prty1_bb_b0,
+};
+
+static const u16 pcie_prty1_k2_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg pcie_prty1_k2 = {
+       0, 8, pcie_prty1_k2_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004
+};
+
+static const u16 pcie_prty0_k2_attn_idx[3] = {
+       21, 22, 23,
+};
+
+static struct attn_hw_reg pcie_prty0_k2 = {
+       1, 3, pcie_prty0_k2_attn_idx, 0x547b0, 0x547bc, 0x547b8, 0x547b4
+};
+
+static struct attn_hw_reg *pcie_prty_k2_regs[2] = {
+       &pcie_prty1_k2, &pcie_prty0_k2,
+};
+
+/* MCP2 parity attention: bit 0 is the ROM parity (its own prty0 register);
+ * bits 1-12 are memory ECC/parity faults in the prty1 register.  All three
+ * chip variants (BB A0, BB B0, K2) share identical layouts and addresses.
+ */
+#ifdef ATTN_DESC
+static const char *mcp2_prty_attn_desc[13] = {
+       "mcp2_rom_parity",
+       "mcp2_mem001_i_ecc_rf_int",
+       "mcp2_mem006_i_ecc_0_rf_int",
+       "mcp2_mem006_i_ecc_1_rf_int",
+       "mcp2_mem006_i_ecc_2_rf_int",
+       "mcp2_mem006_i_ecc_3_rf_int",
+       "mcp2_mem007_i_ecc_rf_int",
+       "mcp2_mem004_i_mem_prty",
+       "mcp2_mem003_i_mem_prty",
+       "mcp2_mem002_i_mem_prty",
+       "mcp2_mem009_i_mem_prty",
+       "mcp2_mem008_i_mem_prty",
+       "mcp2_mem005_i_mem_prty",
+};
+#else
+#define mcp2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcp2_prty0_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg mcp2_prty0_bb_a0 = {
+       0, 1, mcp2_prty0_bb_a0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_bb_a0_attn_idx[12] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_bb_a0 = {
+       1, 12, mcp2_prty1_bb_a0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_bb_a0_regs[2] = {
+       &mcp2_prty0_bb_a0, &mcp2_prty1_bb_a0,
+};
+
+static const u16 mcp2_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg mcp2_prty0_bb_b0 = {
+       0, 1, mcp2_prty0_bb_b0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_bb_b0_attn_idx[12] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_bb_b0 = {
+       1, 12, mcp2_prty1_bb_b0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = {
+       &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0,
+};
+
+static const u16 mcp2_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg mcp2_prty0_k2 = {
+       0, 1, mcp2_prty0_k2_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044
+};
+
+static const u16 mcp2_prty1_k2_attn_idx[12] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg mcp2_prty1_k2 = {
+       1, 12, mcp2_prty1_k2_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208
+};
+
+static struct attn_hw_reg *mcp2_prty_k2_regs[2] = {
+       &mcp2_prty0_k2, &mcp2_prty1_k2,
+};
+
+/* PSWHST interrupt attention (host interface): 18 bits covering source-FIFO
+ * errors, sync-FIFO errors, access violations and timeouts.  One register
+ * per chip variant; all variants share the same 18-bit layout and addresses.
+ */
+#ifdef ATTN_DESC
+static const char *pswhst_int_attn_desc[18] = {
+       "pswhst_address_error",
+       "pswhst_hst_src_fifo1_err",
+       "pswhst_hst_src_fifo2_err",
+       "pswhst_hst_src_fifo3_err",
+       "pswhst_hst_src_fifo4_err",
+       "pswhst_hst_src_fifo5_err",
+       "pswhst_hst_hdr_sync_fifo_err",
+       "pswhst_hst_data_sync_fifo_err",
+       "pswhst_hst_cpl_sync_fifo_err",
+       "pswhst_hst_vf_disabled_access",
+       "pswhst_hst_permission_violation",
+       "pswhst_hst_incorrect_access",
+       "pswhst_hst_src_fifo6_err",
+       "pswhst_hst_src_fifo7_err",
+       "pswhst_hst_src_fifo8_err",
+       "pswhst_hst_src_fifo9_err",
+       "pswhst_hst_source_credit_violation",
+       "pswhst_hst_timeout",
+};
+#else
+#define pswhst_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst_int0_bb_a0_attn_idx[18] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_bb_a0 = {
+       0, 18, pswhst_int0_bb_a0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188,
+       0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_bb_a0_regs[1] = {
+       &pswhst_int0_bb_a0,
+};
+
+static const u16 pswhst_int0_bb_b0_attn_idx[18] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_bb_b0 = {
+       0, 18, pswhst_int0_bb_b0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188,
+       0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = {
+       &pswhst_int0_bb_b0,
+};
+
+static const u16 pswhst_int0_k2_attn_idx[18] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_int0_k2 = {
+       0, 18, pswhst_int0_k2_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184
+};
+
+static struct attn_hw_reg *pswhst_int_k2_regs[1] = {
+       &pswhst_int0_k2,
+};
+
+/* PSWHST parity attention.  BB A0 exposes only the prty1 register (desc
+ * indices 1-17); BB B0 and K2 add a prty0 register for the datapath-registers
+ * parity bit (desc index 0).
+ */
+#ifdef ATTN_DESC
+static const char *pswhst_prty_attn_desc[18] = {
+       "pswhst_datapath_registers",
+       "pswhst_mem006_i_mem_prty",
+       "pswhst_mem007_i_mem_prty",
+       "pswhst_mem005_i_mem_prty",
+       "pswhst_mem002_i_mem_prty",
+       "pswhst_mem003_i_mem_prty",
+       "pswhst_mem001_i_mem_prty",
+       "pswhst_mem008_i_mem_prty",
+       "pswhst_mem004_i_mem_prty",
+       "pswhst_mem009_i_mem_prty",
+       "pswhst_mem010_i_mem_prty",
+       "pswhst_mem016_i_mem_prty",
+       "pswhst_mem012_i_mem_prty",
+       "pswhst_mem013_i_mem_prty",
+       "pswhst_mem014_i_mem_prty",
+       "pswhst_mem015_i_mem_prty",
+       "pswhst_mem011_i_mem_prty",
+       "pswhst_mem017_i_mem_prty",
+};
+#else
+#define pswhst_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst_prty1_bb_a0_attn_idx[17] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_bb_a0 = {
+       0, 17, pswhst_prty1_bb_a0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208,
+       0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_bb_a0_regs[1] = {
+       &pswhst_prty1_bb_a0,
+};
+
+static const u16 pswhst_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswhst_prty0_bb_b0 = {
+       0, 1, pswhst_prty0_bb_b0_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198,
+       0x2a0194
+};
+
+static const u16 pswhst_prty1_bb_b0_attn_idx[17] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_bb_b0 = {
+       1, 17, pswhst_prty1_bb_b0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208,
+       0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = {
+       &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0,
+};
+
+static const u16 pswhst_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswhst_prty0_k2 = {
+       0, 1, pswhst_prty0_k2_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194
+};
+
+static const u16 pswhst_prty1_k2_attn_idx[17] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pswhst_prty1_k2 = {
+       1, 17, pswhst_prty1_k2_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204
+};
+
+static struct attn_hw_reg *pswhst_prty_k2_regs[2] = {
+       &pswhst_prty0_k2, &pswhst_prty1_k2,
+};
+
+/* PSWHST2 interrupt attention: 5 bits (address error plus header/data/
+ * completion/ireq FIFO errors).  Same layout and addresses on all variants.
+ */
+#ifdef ATTN_DESC
+static const char *pswhst2_int_attn_desc[5] = {
+       "pswhst2_address_error",
+       "pswhst2_hst_header_fifo_err",
+       "pswhst2_hst_data_fifo_err",
+       "pswhst2_hst_cpl_fifo_err",
+       "pswhst2_hst_ireq_fifo_err",
+};
+#else
+#define pswhst2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst2_int0_bb_a0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_bb_a0 = {
+       0, 5, pswhst2_int0_bb_a0_attn_idx, 0x29e180, 0x29e18c, 0x29e188,
+       0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_bb_a0_regs[1] = {
+       &pswhst2_int0_bb_a0,
+};
+
+static const u16 pswhst2_int0_bb_b0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_bb_b0 = {
+       0, 5, pswhst2_int0_bb_b0_attn_idx, 0x29e180, 0x29e18c, 0x29e188,
+       0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = {
+       &pswhst2_int0_bb_b0,
+};
+
+static const u16 pswhst2_int0_k2_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswhst2_int0_k2 = {
+       0, 5, pswhst2_int0_k2_attn_idx, 0x29e180, 0x29e18c, 0x29e188, 0x29e184
+};
+
+static struct attn_hw_reg *pswhst2_int_k2_regs[1] = {
+       &pswhst2_int0_k2,
+};
+
+/* PSWHST2 parity attention: a single datapath-registers parity bit.
+ * Only BB B0 and K2 variants exist here (no BB A0 table in this group).
+ */
+#ifdef ATTN_DESC
+static const char *pswhst2_prty_attn_desc[1] = {
+       "pswhst2_datapath_registers",
+};
+#else
+#define pswhst2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswhst2_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswhst2_prty0_bb_b0 = {
+       0, 1, pswhst2_prty0_bb_b0_attn_idx, 0x29e190, 0x29e19c, 0x29e198,
+       0x29e194
+};
+
+static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = {
+       &pswhst2_prty0_bb_b0,
+};
+
+static const u16 pswhst2_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswhst2_prty0_k2 = {
+       0, 1, pswhst2_prty0_k2_attn_idx, 0x29e190, 0x29e19c, 0x29e198, 0x29e194
+};
+
+static struct attn_hw_reg *pswhst2_prty_k2_regs[1] = {
+       &pswhst2_prty0_k2,
+};
+
+/* PSWRD interrupt attention: 3 bits (address error and two pop errors).
+ * Same layout and addresses across all chip variants.
+ */
+#ifdef ATTN_DESC
+static const char *pswrd_int_attn_desc[3] = {
+       "pswrd_address_error",
+       "pswrd_pop_error",
+       "pswrd_pop_pbf_error",
+};
+#else
+#define pswrd_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd_int0_bb_a0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_bb_a0 = {
+       0, 3, pswrd_int0_bb_a0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_bb_a0_regs[1] = {
+       &pswrd_int0_bb_a0,
+};
+
+static const u16 pswrd_int0_bb_b0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_bb_b0 = {
+       0, 3, pswrd_int0_bb_b0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = {
+       &pswrd_int0_bb_b0,
+};
+
+static const u16 pswrd_int0_k2_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg pswrd_int0_k2 = {
+       0, 3, pswrd_int0_k2_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184
+};
+
+static struct attn_hw_reg *pswrd_int_k2_regs[1] = {
+       &pswrd_int0_k2,
+};
+
+/* PSWRD parity attention: single datapath-registers parity bit.
+ * Only BB B0 and K2 variants are defined for this group.
+ */
+#ifdef ATTN_DESC
+static const char *pswrd_prty_attn_desc[1] = {
+       "pswrd_datapath_registers",
+};
+#else
+#define pswrd_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswrd_prty0_bb_b0 = {
+       0, 1, pswrd_prty0_bb_b0_attn_idx, 0x29c190, 0x29c19c, 0x29c198,
+       0x29c194
+};
+
+static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = {
+       &pswrd_prty0_bb_b0,
+};
+
+static const u16 pswrd_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswrd_prty0_k2 = {
+       0, 1, pswrd_prty0_k2_attn_idx, 0x29c190, 0x29c19c, 0x29c198, 0x29c194
+};
+
+static struct attn_hw_reg *pswrd_prty_k2_regs[1] = {
+       &pswrd_prty0_k2,
+};
+
+/* PSWRD2 interrupt attention: 5 bits (address error, SR/BLK FIFO errors,
+ * push errors).  Same layout and addresses across all chip variants.
+ */
+#ifdef ATTN_DESC
+static const char *pswrd2_int_attn_desc[5] = {
+       "pswrd2_address_error",
+       "pswrd2_sr_fifo_error",
+       "pswrd2_blk_fifo_error",
+       "pswrd2_push_error",
+       "pswrd2_push_pbf_error",
+};
+#else
+#define pswrd2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd2_int0_bb_a0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_bb_a0 = {
+       0, 5, pswrd2_int0_bb_a0_attn_idx, 0x29d180, 0x29d18c, 0x29d188,
+       0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_bb_a0_regs[1] = {
+       &pswrd2_int0_bb_a0,
+};
+
+static const u16 pswrd2_int0_bb_b0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_bb_b0 = {
+       0, 5, pswrd2_int0_bb_b0_attn_idx, 0x29d180, 0x29d18c, 0x29d188,
+       0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = {
+       &pswrd2_int0_bb_b0,
+};
+
+static const u16 pswrd2_int0_k2_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pswrd2_int0_k2 = {
+       0, 5, pswrd2_int0_k2_attn_idx, 0x29d180, 0x29d18c, 0x29d188, 0x29d184
+};
+
+static struct attn_hw_reg *pswrd2_int_k2_regs[1] = {
+       &pswrd2_int0_k2,
+};
+
+/* PSWRD2 parity attention (36 named bits).  Note the chip differences:
+ * BB A0 has no prty0 register, its prty1 table skips desc index 10
+ * (pswrd2_mem015_i_ecc_rf_int) and its prty2 covers 33-35; BB B0 and K2
+ * add prty0 (datapath registers), use contiguous 1-31 in prty1 and 32-34
+ * in prty2.  Tool-generated values; do not hand-edit.
+ */
+#ifdef ATTN_DESC
+static const char *pswrd2_prty_attn_desc[36] = {
+       "pswrd2_datapath_registers",
+       "pswrd2_mem017_i_ecc_rf_int",
+       "pswrd2_mem018_i_ecc_rf_int",
+       "pswrd2_mem019_i_ecc_rf_int",
+       "pswrd2_mem020_i_ecc_rf_int",
+       "pswrd2_mem021_i_ecc_rf_int",
+       "pswrd2_mem022_i_ecc_rf_int",
+       "pswrd2_mem023_i_ecc_rf_int",
+       "pswrd2_mem024_i_ecc_rf_int",
+       "pswrd2_mem025_i_ecc_rf_int",
+       "pswrd2_mem015_i_ecc_rf_int",
+       "pswrd2_mem034_i_mem_prty",
+       "pswrd2_mem032_i_mem_prty",
+       "pswrd2_mem028_i_mem_prty",
+       "pswrd2_mem033_i_mem_prty",
+       "pswrd2_mem030_i_mem_prty",
+       "pswrd2_mem029_i_mem_prty",
+       "pswrd2_mem031_i_mem_prty",
+       "pswrd2_mem027_i_mem_prty",
+       "pswrd2_mem026_i_mem_prty",
+       "pswrd2_mem001_i_mem_prty",
+       "pswrd2_mem007_i_mem_prty",
+       "pswrd2_mem008_i_mem_prty",
+       "pswrd2_mem009_i_mem_prty",
+       "pswrd2_mem010_i_mem_prty",
+       "pswrd2_mem011_i_mem_prty",
+       "pswrd2_mem012_i_mem_prty",
+       "pswrd2_mem013_i_mem_prty",
+       "pswrd2_mem014_i_mem_prty",
+       "pswrd2_mem002_i_mem_prty",
+       "pswrd2_mem003_i_mem_prty",
+       "pswrd2_mem004_i_mem_prty",
+       "pswrd2_mem005_i_mem_prty",
+       "pswrd2_mem006_i_mem_prty",
+       "pswrd2_mem016_i_mem_prty",
+       "pswrd2_mem015_i_mem_prty",
+};
+#else
+#define pswrd2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrd2_prty1_bb_a0_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+       22,
+       23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+};
+
+static struct attn_hw_reg pswrd2_prty1_bb_a0 = {
+       0, 31, pswrd2_prty1_bb_a0_attn_idx, 0x29d200, 0x29d20c, 0x29d208,
+       0x29d204
+};
+
+static const u16 pswrd2_prty2_bb_a0_attn_idx[3] = {
+       33, 34, 35,
+};
+
+static struct attn_hw_reg pswrd2_prty2_bb_a0 = {
+       1, 3, pswrd2_prty2_bb_a0_attn_idx, 0x29d210, 0x29d21c, 0x29d218,
+       0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_bb_a0_regs[2] = {
+       &pswrd2_prty1_bb_a0, &pswrd2_prty2_bb_a0,
+};
+
+static const u16 pswrd2_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswrd2_prty0_bb_b0 = {
+       0, 1, pswrd2_prty0_bb_b0_attn_idx, 0x29d190, 0x29d19c, 0x29d198,
+       0x29d194
+};
+
+static const u16 pswrd2_prty1_bb_b0_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswrd2_prty1_bb_b0 = {
+       1, 31, pswrd2_prty1_bb_b0_attn_idx, 0x29d200, 0x29d20c, 0x29d208,
+       0x29d204
+};
+
+static const u16 pswrd2_prty2_bb_b0_attn_idx[3] = {
+       32, 33, 34,
+};
+
+static struct attn_hw_reg pswrd2_prty2_bb_b0 = {
+       2, 3, pswrd2_prty2_bb_b0_attn_idx, 0x29d210, 0x29d21c, 0x29d218,
+       0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = {
+       &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0,
+};
+
+static const u16 pswrd2_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswrd2_prty0_k2 = {
+       0, 1, pswrd2_prty0_k2_attn_idx, 0x29d190, 0x29d19c, 0x29d198, 0x29d194
+};
+
+static const u16 pswrd2_prty1_k2_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswrd2_prty1_k2 = {
+       1, 31, pswrd2_prty1_k2_attn_idx, 0x29d200, 0x29d20c, 0x29d208, 0x29d204
+};
+
+static const u16 pswrd2_prty2_k2_attn_idx[3] = {
+       32, 33, 34,
+};
+
+static struct attn_hw_reg pswrd2_prty2_k2 = {
+       2, 3, pswrd2_prty2_k2_attn_idx, 0x29d210, 0x29d21c, 0x29d218, 0x29d214
+};
+
+static struct attn_hw_reg *pswrd2_prty_k2_regs[3] = {
+       &pswrd2_prty0_k2, &pswrd2_prty1_k2, &pswrd2_prty2_k2,
+};
+
+/* PSWWR interrupt attention: 16 FIFO-overflow bits plus address error.
+ * Same layout and addresses across all chip variants.
+ */
+#ifdef ATTN_DESC
+static const char *pswwr_int_attn_desc[16] = {
+       "pswwr_address_error",
+       "pswwr_src_fifo_overflow",
+       "pswwr_qm_fifo_overflow",
+       "pswwr_tm_fifo_overflow",
+       "pswwr_usdm_fifo_overflow",
+       "pswwr_usdmdp_fifo_overflow",
+       "pswwr_xsdm_fifo_overflow",
+       "pswwr_tsdm_fifo_overflow",
+       "pswwr_cduwr_fifo_overflow",
+       "pswwr_dbg_fifo_overflow",
+       "pswwr_dmae_fifo_overflow",
+       "pswwr_hc_fifo_overflow",
+       "pswwr_msdm_fifo_overflow",
+       "pswwr_ysdm_fifo_overflow",
+       "pswwr_psdm_fifo_overflow",
+       "pswwr_m2p_fifo_overflow",
+};
+#else
+#define pswwr_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr_int0_bb_a0_attn_idx[16] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_bb_a0 = {
+       0, 16, pswwr_int0_bb_a0_attn_idx, 0x29a180, 0x29a18c, 0x29a188,
+       0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_bb_a0_regs[1] = {
+       &pswwr_int0_bb_a0,
+};
+
+static const u16 pswwr_int0_bb_b0_attn_idx[16] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_bb_b0 = {
+       0, 16, pswwr_int0_bb_b0_attn_idx, 0x29a180, 0x29a18c, 0x29a188,
+       0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = {
+       &pswwr_int0_bb_b0,
+};
+
+static const u16 pswwr_int0_k2_attn_idx[16] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pswwr_int0_k2 = {
+       0, 16, pswwr_int0_k2_attn_idx, 0x29a180, 0x29a18c, 0x29a188, 0x29a184
+};
+
+static struct attn_hw_reg *pswwr_int_k2_regs[1] = {
+       &pswwr_int0_k2,
+};
+
+/* PSWWR parity attention: single datapath-registers parity bit.
+ * Only BB B0 and K2 variants are defined for this group.
+ */
+#ifdef ATTN_DESC
+static const char *pswwr_prty_attn_desc[1] = {
+       "pswwr_datapath_registers",
+};
+#else
+#define pswwr_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswwr_prty0_bb_b0 = {
+       0, 1, pswwr_prty0_bb_b0_attn_idx, 0x29a190, 0x29a19c, 0x29a198,
+       0x29a194
+};
+
+static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = {
+       &pswwr_prty0_bb_b0,
+};
+
+static const u16 pswwr_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswwr_prty0_k2 = {
+       0, 1, pswwr_prty0_k2_attn_idx, 0x29a190, 0x29a19c, 0x29a198, 0x29a194
+};
+
+static struct attn_hw_reg *pswwr_prty_k2_regs[1] = {
+       &pswwr_prty0_k2,
+};
+
+/* PSWWR2 interrupt attention: 19 bits (pglue/underflow errors).
+ * Same layout and addresses across all chip variants.
+ */
+#ifdef ATTN_DESC
+static const char *pswwr2_int_attn_desc[19] = {
+       "pswwr2_address_error",
+       "pswwr2_pglue_eop_error",
+       "pswwr2_pglue_lsr_error",
+       "pswwr2_tm_underflow",
+       "pswwr2_qm_underflow",
+       "pswwr2_src_underflow",
+       "pswwr2_usdm_underflow",
+       "pswwr2_tsdm_underflow",
+       "pswwr2_xsdm_underflow",
+       "pswwr2_usdmdp_underflow",
+       "pswwr2_cdu_underflow",
+       "pswwr2_dbg_underflow",
+       "pswwr2_dmae_underflow",
+       "pswwr2_hc_underflow",
+       "pswwr2_msdm_underflow",
+       "pswwr2_ysdm_underflow",
+       "pswwr2_psdm_underflow",
+       "pswwr2_m2p_underflow",
+       "pswwr2_pglue_eop_error_in_line",
+};
+#else
+#define pswwr2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr2_int0_bb_a0_attn_idx[19] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_bb_a0 = {
+       0, 19, pswwr2_int0_bb_a0_attn_idx, 0x29b180, 0x29b18c, 0x29b188,
+       0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_bb_a0_regs[1] = {
+       &pswwr2_int0_bb_a0,
+};
+
+static const u16 pswwr2_int0_bb_b0_attn_idx[19] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_bb_b0 = {
+       0, 19, pswwr2_int0_bb_b0_attn_idx, 0x29b180, 0x29b18c, 0x29b188,
+       0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = {
+       &pswwr2_int0_bb_b0,
+};
+
+static const u16 pswwr2_int0_k2_attn_idx[19] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pswwr2_int0_k2 = {
+       0, 19, pswwr2_int0_k2_attn_idx, 0x29b180, 0x29b18c, 0x29b188, 0x29b184
+};
+
+static struct attn_hw_reg *pswwr2_int_k2_regs[1] = {
+       &pswwr2_int0_k2,
+};
+
+/* PSWWR2 parity attention: 114 named bits spread across four (BB A0) or
+ * five (BB B0/K2, which add the prty0 datapath-registers bit) registers.
+ * Desc indices are contiguous across the prty1..prty4 tables (1-31, 32-62,
+ * 63-93, 94-113).  Tool-generated values; do not hand-edit.
+ */
+#ifdef ATTN_DESC
+static const char *pswwr2_prty_attn_desc[114] = {
+       "pswwr2_datapath_registers",
+       "pswwr2_mem008_i_ecc_rf_int",
+       "pswwr2_mem001_i_mem_prty",
+       "pswwr2_mem014_i_mem_prty_0",
+       "pswwr2_mem014_i_mem_prty_1",
+       "pswwr2_mem014_i_mem_prty_2",
+       "pswwr2_mem014_i_mem_prty_3",
+       "pswwr2_mem014_i_mem_prty_4",
+       "pswwr2_mem014_i_mem_prty_5",
+       "pswwr2_mem014_i_mem_prty_6",
+       "pswwr2_mem014_i_mem_prty_7",
+       "pswwr2_mem014_i_mem_prty_8",
+       "pswwr2_mem016_i_mem_prty_0",
+       "pswwr2_mem016_i_mem_prty_1",
+       "pswwr2_mem016_i_mem_prty_2",
+       "pswwr2_mem016_i_mem_prty_3",
+       "pswwr2_mem016_i_mem_prty_4",
+       "pswwr2_mem016_i_mem_prty_5",
+       "pswwr2_mem016_i_mem_prty_6",
+       "pswwr2_mem016_i_mem_prty_7",
+       "pswwr2_mem016_i_mem_prty_8",
+       "pswwr2_mem007_i_mem_prty_0",
+       "pswwr2_mem007_i_mem_prty_1",
+       "pswwr2_mem007_i_mem_prty_2",
+       "pswwr2_mem007_i_mem_prty_3",
+       "pswwr2_mem007_i_mem_prty_4",
+       "pswwr2_mem007_i_mem_prty_5",
+       "pswwr2_mem007_i_mem_prty_6",
+       "pswwr2_mem007_i_mem_prty_7",
+       "pswwr2_mem007_i_mem_prty_8",
+       "pswwr2_mem017_i_mem_prty_0",
+       "pswwr2_mem017_i_mem_prty_1",
+       "pswwr2_mem017_i_mem_prty_2",
+       "pswwr2_mem017_i_mem_prty_3",
+       "pswwr2_mem017_i_mem_prty_4",
+       "pswwr2_mem017_i_mem_prty_5",
+       "pswwr2_mem017_i_mem_prty_6",
+       "pswwr2_mem017_i_mem_prty_7",
+       "pswwr2_mem017_i_mem_prty_8",
+       "pswwr2_mem009_i_mem_prty_0",
+       "pswwr2_mem009_i_mem_prty_1",
+       "pswwr2_mem009_i_mem_prty_2",
+       "pswwr2_mem009_i_mem_prty_3",
+       "pswwr2_mem009_i_mem_prty_4",
+       "pswwr2_mem009_i_mem_prty_5",
+       "pswwr2_mem009_i_mem_prty_6",
+       "pswwr2_mem009_i_mem_prty_7",
+       "pswwr2_mem009_i_mem_prty_8",
+       "pswwr2_mem013_i_mem_prty_0",
+       "pswwr2_mem013_i_mem_prty_1",
+       "pswwr2_mem013_i_mem_prty_2",
+       "pswwr2_mem013_i_mem_prty_3",
+       "pswwr2_mem013_i_mem_prty_4",
+       "pswwr2_mem013_i_mem_prty_5",
+       "pswwr2_mem013_i_mem_prty_6",
+       "pswwr2_mem013_i_mem_prty_7",
+       "pswwr2_mem013_i_mem_prty_8",
+       "pswwr2_mem006_i_mem_prty_0",
+       "pswwr2_mem006_i_mem_prty_1",
+       "pswwr2_mem006_i_mem_prty_2",
+       "pswwr2_mem006_i_mem_prty_3",
+       "pswwr2_mem006_i_mem_prty_4",
+       "pswwr2_mem006_i_mem_prty_5",
+       "pswwr2_mem006_i_mem_prty_6",
+       "pswwr2_mem006_i_mem_prty_7",
+       "pswwr2_mem006_i_mem_prty_8",
+       "pswwr2_mem010_i_mem_prty_0",
+       "pswwr2_mem010_i_mem_prty_1",
+       "pswwr2_mem010_i_mem_prty_2",
+       "pswwr2_mem010_i_mem_prty_3",
+       "pswwr2_mem010_i_mem_prty_4",
+       "pswwr2_mem010_i_mem_prty_5",
+       "pswwr2_mem010_i_mem_prty_6",
+       "pswwr2_mem010_i_mem_prty_7",
+       "pswwr2_mem010_i_mem_prty_8",
+       "pswwr2_mem012_i_mem_prty",
+       "pswwr2_mem011_i_mem_prty_0",
+       "pswwr2_mem011_i_mem_prty_1",
+       "pswwr2_mem011_i_mem_prty_2",
+       "pswwr2_mem011_i_mem_prty_3",
+       "pswwr2_mem011_i_mem_prty_4",
+       "pswwr2_mem011_i_mem_prty_5",
+       "pswwr2_mem011_i_mem_prty_6",
+       "pswwr2_mem011_i_mem_prty_7",
+       "pswwr2_mem011_i_mem_prty_8",
+       "pswwr2_mem004_i_mem_prty_0",
+       "pswwr2_mem004_i_mem_prty_1",
+       "pswwr2_mem004_i_mem_prty_2",
+       "pswwr2_mem004_i_mem_prty_3",
+       "pswwr2_mem004_i_mem_prty_4",
+       "pswwr2_mem004_i_mem_prty_5",
+       "pswwr2_mem004_i_mem_prty_6",
+       "pswwr2_mem004_i_mem_prty_7",
+       "pswwr2_mem004_i_mem_prty_8",
+       "pswwr2_mem015_i_mem_prty_0",
+       "pswwr2_mem015_i_mem_prty_1",
+       "pswwr2_mem015_i_mem_prty_2",
+       "pswwr2_mem005_i_mem_prty_0",
+       "pswwr2_mem005_i_mem_prty_1",
+       "pswwr2_mem005_i_mem_prty_2",
+       "pswwr2_mem005_i_mem_prty_3",
+       "pswwr2_mem005_i_mem_prty_4",
+       "pswwr2_mem005_i_mem_prty_5",
+       "pswwr2_mem005_i_mem_prty_6",
+       "pswwr2_mem005_i_mem_prty_7",
+       "pswwr2_mem005_i_mem_prty_8",
+       "pswwr2_mem002_i_mem_prty_0",
+       "pswwr2_mem002_i_mem_prty_1",
+       "pswwr2_mem002_i_mem_prty_2",
+       "pswwr2_mem002_i_mem_prty_3",
+       "pswwr2_mem002_i_mem_prty_4",
+       "pswwr2_mem003_i_mem_prty_0",
+       "pswwr2_mem003_i_mem_prty_1",
+       "pswwr2_mem003_i_mem_prty_2",
+};
+#else
+#define pswwr2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswwr2_prty1_bb_a0_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_bb_a0 = {
+       0, 31, pswwr2_prty1_bb_a0_attn_idx, 0x29b200, 0x29b20c, 0x29b208,
+       0x29b204
+};
+
+static const u16 pswwr2_prty2_bb_a0_attn_idx[31] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+       50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_bb_a0 = {
+       1, 31, pswwr2_prty2_bb_a0_attn_idx, 0x29b210, 0x29b21c, 0x29b218,
+       0x29b214
+};
+
+static const u16 pswwr2_prty3_bb_a0_attn_idx[31] = {
+       63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+       81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_bb_a0 = {
+       2, 31, pswwr2_prty3_bb_a0_attn_idx, 0x29b220, 0x29b22c, 0x29b228,
+       0x29b224
+};
+
+static const u16 pswwr2_prty4_bb_a0_attn_idx[20] = {
+       94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+       109,
+       110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_bb_a0 = {
+       3, 20, pswwr2_prty4_bb_a0_attn_idx, 0x29b230, 0x29b23c, 0x29b238,
+       0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_bb_a0_regs[4] = {
+       &pswwr2_prty1_bb_a0, &pswwr2_prty2_bb_a0, &pswwr2_prty3_bb_a0,
+       &pswwr2_prty4_bb_a0,
+};
+
+static const u16 pswwr2_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswwr2_prty0_bb_b0 = {
+       0, 1, pswwr2_prty0_bb_b0_attn_idx, 0x29b190, 0x29b19c, 0x29b198,
+       0x29b194
+};
+
+static const u16 pswwr2_prty1_bb_b0_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_bb_b0 = {
+       1, 31, pswwr2_prty1_bb_b0_attn_idx, 0x29b200, 0x29b20c, 0x29b208,
+       0x29b204
+};
+
+static const u16 pswwr2_prty2_bb_b0_attn_idx[31] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+       50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_bb_b0 = {
+       2, 31, pswwr2_prty2_bb_b0_attn_idx, 0x29b210, 0x29b21c, 0x29b218,
+       0x29b214
+};
+
+static const u16 pswwr2_prty3_bb_b0_attn_idx[31] = {
+       63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+       81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_bb_b0 = {
+       3, 31, pswwr2_prty3_bb_b0_attn_idx, 0x29b220, 0x29b22c, 0x29b228,
+       0x29b224
+};
+
+static const u16 pswwr2_prty4_bb_b0_attn_idx[20] = {
+       94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+       109,
+       110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_bb_b0 = {
+       4, 20, pswwr2_prty4_bb_b0_attn_idx, 0x29b230, 0x29b23c, 0x29b238,
+       0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = {
+       &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0,
+       &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0,
+};
+
+static const u16 pswwr2_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswwr2_prty0_k2 = {
+       0, 1, pswwr2_prty0_k2_attn_idx, 0x29b190, 0x29b19c, 0x29b198, 0x29b194
+};
+
+static const u16 pswwr2_prty1_k2_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pswwr2_prty1_k2 = {
+       1, 31, pswwr2_prty1_k2_attn_idx, 0x29b200, 0x29b20c, 0x29b208, 0x29b204
+};
+
+static const u16 pswwr2_prty2_k2_attn_idx[31] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+       50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+};
+
+static struct attn_hw_reg pswwr2_prty2_k2 = {
+       2, 31, pswwr2_prty2_k2_attn_idx, 0x29b210, 0x29b21c, 0x29b218, 0x29b214
+};
+
+static const u16 pswwr2_prty3_k2_attn_idx[31] = {
+       63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+       81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93,
+};
+
+static struct attn_hw_reg pswwr2_prty3_k2 = {
+       3, 31, pswwr2_prty3_k2_attn_idx, 0x29b220, 0x29b22c, 0x29b228, 0x29b224
+};
+
+static const u16 pswwr2_prty4_k2_attn_idx[20] = {
+       94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+       109,
+       110, 111, 112, 113,
+};
+
+static struct attn_hw_reg pswwr2_prty4_k2 = {
+       4, 20, pswwr2_prty4_k2_attn_idx, 0x29b230, 0x29b23c, 0x29b238, 0x29b234
+};
+
+static struct attn_hw_reg *pswwr2_prty_k2_regs[5] = {
+       &pswwr2_prty0_k2, &pswwr2_prty1_k2, &pswwr2_prty2_k2, &pswwr2_prty3_k2,
+       &pswwr2_prty4_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq_int_attn_desc[21] = {
+       "pswrq_address_error",
+       "pswrq_pbf_fifo_overflow",
+       "pswrq_src_fifo_overflow",
+       "pswrq_qm_fifo_overflow",
+       "pswrq_tm_fifo_overflow",
+       "pswrq_usdm_fifo_overflow",
+       "pswrq_m2p_fifo_overflow",
+       "pswrq_xsdm_fifo_overflow",
+       "pswrq_tsdm_fifo_overflow",
+       "pswrq_ptu_fifo_overflow",
+       "pswrq_cduwr_fifo_overflow",
+       "pswrq_cdurd_fifo_overflow",
+       "pswrq_dmae_fifo_overflow",
+       "pswrq_hc_fifo_overflow",
+       "pswrq_dbg_fifo_overflow",
+       "pswrq_msdm_fifo_overflow",
+       "pswrq_ysdm_fifo_overflow",
+       "pswrq_psdm_fifo_overflow",
+       "pswrq_prm_fifo_overflow",
+       "pswrq_muld_fifo_overflow",
+       "pswrq_xyld_fifo_overflow",
+};
+#else
+#define pswrq_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq_int0_bb_a0_attn_idx[21] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+};
+
+static struct attn_hw_reg pswrq_int0_bb_a0 = {
+       0, 21, pswrq_int0_bb_a0_attn_idx, 0x280180, 0x28018c, 0x280188,
+       0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_bb_a0_regs[1] = {
+       &pswrq_int0_bb_a0,
+};
+
+static const u16 pswrq_int0_bb_b0_attn_idx[21] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+};
+
+static struct attn_hw_reg pswrq_int0_bb_b0 = {
+       0, 21, pswrq_int0_bb_b0_attn_idx, 0x280180, 0x28018c, 0x280188,
+       0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = {
+       &pswrq_int0_bb_b0,
+};
+
+static const u16 pswrq_int0_k2_attn_idx[21] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+};
+
+static struct attn_hw_reg pswrq_int0_k2 = {
+       0, 21, pswrq_int0_k2_attn_idx, 0x280180, 0x28018c, 0x280188, 0x280184
+};
+
+static struct attn_hw_reg *pswrq_int_k2_regs[1] = {
+       &pswrq_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq_prty_attn_desc[1] = {
+       "pswrq_pxp_busip_parity",
+};
+#else
+#define pswrq_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswrq_prty0_bb_b0 = {
+       0, 1, pswrq_prty0_bb_b0_attn_idx, 0x280190, 0x28019c, 0x280198,
+       0x280194
+};
+
+static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = {
+       &pswrq_prty0_bb_b0,
+};
+
+static const u16 pswrq_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pswrq_prty0_k2 = {
+       0, 1, pswrq_prty0_k2_attn_idx, 0x280190, 0x28019c, 0x280198, 0x280194
+};
+
+static struct attn_hw_reg *pswrq_prty_k2_regs[1] = {
+       &pswrq_prty0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq2_int_attn_desc[15] = {
+       "pswrq2_address_error",
+       "pswrq2_l2p_fifo_overflow",
+       "pswrq2_wdfifo_overflow",
+       "pswrq2_phyaddr_fifo_of",
+       "pswrq2_l2p_violation_1",
+       "pswrq2_l2p_violation_2",
+       "pswrq2_free_list_empty",
+       "pswrq2_elt_addr",
+       "pswrq2_l2p_vf_err",
+       "pswrq2_core_wdone_overflow",
+       "pswrq2_treq_fifo_underflow",
+       "pswrq2_treq_fifo_overflow",
+       "pswrq2_icpl_fifo_underflow",
+       "pswrq2_icpl_fifo_overflow",
+       "pswrq2_back2back_atc_response",
+};
+#else
+#define pswrq2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq2_int0_bb_a0_attn_idx[15] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_bb_a0 = {
+       0, 15, pswrq2_int0_bb_a0_attn_idx, 0x240180, 0x24018c, 0x240188,
+       0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_bb_a0_regs[1] = {
+       &pswrq2_int0_bb_a0,
+};
+
+static const u16 pswrq2_int0_bb_b0_attn_idx[15] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_bb_b0 = {
+       0, 15, pswrq2_int0_bb_b0_attn_idx, 0x240180, 0x24018c, 0x240188,
+       0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = {
+       &pswrq2_int0_bb_b0,
+};
+
+static const u16 pswrq2_int0_k2_attn_idx[15] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg pswrq2_int0_k2 = {
+       0, 15, pswrq2_int0_k2_attn_idx, 0x240180, 0x24018c, 0x240188, 0x240184
+};
+
+static struct attn_hw_reg *pswrq2_int_k2_regs[1] = {
+       &pswrq2_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pswrq2_prty_attn_desc[11] = {
+       "pswrq2_mem004_i_ecc_rf_int",
+       "pswrq2_mem005_i_ecc_rf_int",
+       "pswrq2_mem001_i_ecc_rf_int",
+       "pswrq2_mem006_i_mem_prty",
+       "pswrq2_mem008_i_mem_prty",
+       "pswrq2_mem009_i_mem_prty",
+       "pswrq2_mem003_i_mem_prty",
+       "pswrq2_mem002_i_mem_prty",
+       "pswrq2_mem010_i_mem_prty",
+       "pswrq2_mem007_i_mem_prty",
+       "pswrq2_mem005_i_mem_prty",
+};
+#else
+#define pswrq2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pswrq2_prty1_bb_a0_attn_idx[9] = {
+       0, 2, 3, 4, 5, 6, 7, 9, 10,
+};
+
+static struct attn_hw_reg pswrq2_prty1_bb_a0 = {
+       0, 9, pswrq2_prty1_bb_a0_attn_idx, 0x240200, 0x24020c, 0x240208,
+       0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_bb_a0_regs[1] = {
+       &pswrq2_prty1_bb_a0,
+};
+
+static const u16 pswrq2_prty1_bb_b0_attn_idx[9] = {
+       0, 2, 3, 4, 5, 6, 7, 9, 10,
+};
+
+static struct attn_hw_reg pswrq2_prty1_bb_b0 = {
+       0, 9, pswrq2_prty1_bb_b0_attn_idx, 0x240200, 0x24020c, 0x240208,
+       0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = {
+       &pswrq2_prty1_bb_b0,
+};
+
+static const u16 pswrq2_prty1_k2_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg pswrq2_prty1_k2 = {
+       0, 10, pswrq2_prty1_k2_attn_idx, 0x240200, 0x24020c, 0x240208, 0x240204
+};
+
+static struct attn_hw_reg *pswrq2_prty_k2_regs[1] = {
+       &pswrq2_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *pglcs_int_attn_desc[2] = {
+       "pglcs_address_error",
+       "pglcs_rasdp_error",
+};
+#else
+#define pglcs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pglcs_int0_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pglcs_int0_bb_a0 = {
+       0, 1, pglcs_int0_bb_a0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_bb_a0_regs[1] = {
+       &pglcs_int0_bb_a0,
+};
+
+static const u16 pglcs_int0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pglcs_int0_bb_b0 = {
+       0, 1, pglcs_int0_bb_b0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = {
+       &pglcs_int0_bb_b0,
+};
+
+static const u16 pglcs_int0_k2_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg pglcs_int0_k2 = {
+       0, 2, pglcs_int0_k2_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04
+};
+
+static struct attn_hw_reg *pglcs_int_k2_regs[1] = {
+       &pglcs_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dmae_int_attn_desc[2] = {
+       "dmae_address_error",
+       "dmae_pci_rd_buf_err",
+};
+#else
+#define dmae_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dmae_int0_bb_a0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_bb_a0 = {
+       0, 2, dmae_int0_bb_a0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_bb_a0_regs[1] = {
+       &dmae_int0_bb_a0,
+};
+
+static const u16 dmae_int0_bb_b0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_bb_b0 = {
+       0, 2, dmae_int0_bb_b0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = {
+       &dmae_int0_bb_b0,
+};
+
+static const u16 dmae_int0_k2_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg dmae_int0_k2 = {
+       0, 2, dmae_int0_k2_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184
+};
+
+static struct attn_hw_reg *dmae_int_k2_regs[1] = {
+       &dmae_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dmae_prty_attn_desc[3] = {
+       "dmae_mem002_i_mem_prty",
+       "dmae_mem001_i_mem_prty",
+       "dmae_mem003_i_mem_prty",
+};
+#else
+#define dmae_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dmae_prty1_bb_a0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_bb_a0 = {
+       0, 3, dmae_prty1_bb_a0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_bb_a0_regs[1] = {
+       &dmae_prty1_bb_a0,
+};
+
+static const u16 dmae_prty1_bb_b0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_bb_b0 = {
+       0, 3, dmae_prty1_bb_b0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = {
+       &dmae_prty1_bb_b0,
+};
+
+static const u16 dmae_prty1_k2_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg dmae_prty1_k2 = {
+       0, 3, dmae_prty1_k2_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204
+};
+
+static struct attn_hw_reg *dmae_prty_k2_regs[1] = {
+       &dmae_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ptu_int_attn_desc[8] = {
+       "ptu_address_error",
+       "ptu_atc_tcpl_to_not_pend",
+       "ptu_atc_gpa_multiple_hits",
+       "ptu_atc_rcpl_to_empty_cnt",
+       "ptu_atc_tcpl_error",
+       "ptu_atc_inv_halt",
+       "ptu_atc_reuse_transpend",
+       "ptu_atc_ireq_less_than_stu",
+};
+#else
+#define ptu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ptu_int0_bb_a0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_bb_a0 = {
+       0, 8, ptu_int0_bb_a0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_bb_a0_regs[1] = {
+       &ptu_int0_bb_a0,
+};
+
+static const u16 ptu_int0_bb_b0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_bb_b0 = {
+       0, 8, ptu_int0_bb_b0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = {
+       &ptu_int0_bb_b0,
+};
+
+static const u16 ptu_int0_k2_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg ptu_int0_k2 = {
+       0, 8, ptu_int0_k2_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184
+};
+
+static struct attn_hw_reg *ptu_int_k2_regs[1] = {
+       &ptu_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ptu_prty_attn_desc[18] = {
+       "ptu_mem017_i_ecc_rf_int",
+       "ptu_mem018_i_mem_prty",
+       "ptu_mem006_i_mem_prty",
+       "ptu_mem001_i_mem_prty",
+       "ptu_mem002_i_mem_prty",
+       "ptu_mem003_i_mem_prty",
+       "ptu_mem004_i_mem_prty",
+       "ptu_mem005_i_mem_prty",
+       "ptu_mem009_i_mem_prty",
+       "ptu_mem010_i_mem_prty",
+       "ptu_mem016_i_mem_prty",
+       "ptu_mem007_i_mem_prty",
+       "ptu_mem015_i_mem_prty",
+       "ptu_mem013_i_mem_prty",
+       "ptu_mem012_i_mem_prty",
+       "ptu_mem014_i_mem_prty",
+       "ptu_mem011_i_mem_prty",
+       "ptu_mem008_i_mem_prty",
+};
+#else
+#define ptu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ptu_prty1_bb_a0_attn_idx[18] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_bb_a0 = {
+       0, 18, ptu_prty1_bb_a0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_bb_a0_regs[1] = {
+       &ptu_prty1_bb_a0,
+};
+
+static const u16 ptu_prty1_bb_b0_attn_idx[18] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_bb_b0 = {
+       0, 18, ptu_prty1_bb_b0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = {
+       &ptu_prty1_bb_b0,
+};
+
+static const u16 ptu_prty1_k2_attn_idx[18] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg ptu_prty1_k2 = {
+       0, 18, ptu_prty1_k2_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204
+};
+
+static struct attn_hw_reg *ptu_prty_k2_regs[1] = {
+       &ptu_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcm_int_attn_desc[41] = {
+       "tcm_address_error",
+       "tcm_is_storm_ovfl_err",
+       "tcm_is_storm_under_err",
+       "tcm_is_tsdm_ovfl_err",
+       "tcm_is_tsdm_under_err",
+       "tcm_is_msem_ovfl_err",
+       "tcm_is_msem_under_err",
+       "tcm_is_ysem_ovfl_err",
+       "tcm_is_ysem_under_err",
+       "tcm_is_dorq_ovfl_err",
+       "tcm_is_dorq_under_err",
+       "tcm_is_pbf_ovfl_err",
+       "tcm_is_pbf_under_err",
+       "tcm_is_prs_ovfl_err",
+       "tcm_is_prs_under_err",
+       "tcm_is_tm_ovfl_err",
+       "tcm_is_tm_under_err",
+       "tcm_is_qm_p_ovfl_err",
+       "tcm_is_qm_p_under_err",
+       "tcm_is_qm_s_ovfl_err",
+       "tcm_is_qm_s_under_err",
+       "tcm_is_grc_ovfl_err0",
+       "tcm_is_grc_under_err0",
+       "tcm_is_grc_ovfl_err1",
+       "tcm_is_grc_under_err1",
+       "tcm_is_grc_ovfl_err2",
+       "tcm_is_grc_under_err2",
+       "tcm_is_grc_ovfl_err3",
+       "tcm_is_grc_under_err3",
+       "tcm_in_prcs_tbl_ovfl",
+       "tcm_agg_con_data_buf_ovfl",
+       "tcm_agg_con_cmd_buf_ovfl",
+       "tcm_sm_con_data_buf_ovfl",
+       "tcm_sm_con_cmd_buf_ovfl",
+       "tcm_agg_task_data_buf_ovfl",
+       "tcm_agg_task_cmd_buf_ovfl",
+       "tcm_sm_task_data_buf_ovfl",
+       "tcm_sm_task_cmd_buf_ovfl",
+       "tcm_fi_desc_input_violate",
+       "tcm_se_desc_input_violate",
+       "tcm_qmreg_more4",
+};
+#else
+#define tcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcm_int0_bb_a0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_bb_a0 = {
+       0, 8, tcm_int0_bb_a0_attn_idx, 0x1180180, 0x118018c, 0x1180188,
+       0x1180184
+};
+
+static const u16 tcm_int1_bb_a0_attn_idx[32] = {
+       8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+       26,
+       27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_bb_a0 = {
+       1, 32, tcm_int1_bb_a0_attn_idx, 0x1180190, 0x118019c, 0x1180198,
+       0x1180194
+};
+
+static const u16 tcm_int2_bb_a0_attn_idx[1] = {
+       40,
+};
+
+static struct attn_hw_reg tcm_int2_bb_a0 = {
+       2, 1, tcm_int2_bb_a0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8,
+       0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_bb_a0_regs[3] = {
+       &tcm_int0_bb_a0, &tcm_int1_bb_a0, &tcm_int2_bb_a0,
+};
+
+static const u16 tcm_int0_bb_b0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_bb_b0 = {
+       0, 8, tcm_int0_bb_b0_attn_idx, 0x1180180, 0x118018c, 0x1180188,
+       0x1180184
+};
+
+static const u16 tcm_int1_bb_b0_attn_idx[32] = {
+       8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+       26,
+       27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_bb_b0 = {
+       1, 32, tcm_int1_bb_b0_attn_idx, 0x1180190, 0x118019c, 0x1180198,
+       0x1180194
+};
+
+static const u16 tcm_int2_bb_b0_attn_idx[1] = {
+       40,
+};
+
+static struct attn_hw_reg tcm_int2_bb_b0 = {
+       2, 1, tcm_int2_bb_b0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8,
+       0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = {
+       &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0,
+};
+
+static const u16 tcm_int0_k2_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tcm_int0_k2 = {
+       0, 8, tcm_int0_k2_attn_idx, 0x1180180, 0x118018c, 0x1180188, 0x1180184
+};
+
+static const u16 tcm_int1_k2_attn_idx[32] = {
+       8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+       26,
+       27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_int1_k2 = {
+       1, 32, tcm_int1_k2_attn_idx, 0x1180190, 0x118019c, 0x1180198, 0x1180194
+};
+
+static const u16 tcm_int2_k2_attn_idx[1] = {
+       40,
+};
+
+static struct attn_hw_reg tcm_int2_k2 = {
+       2, 1, tcm_int2_k2_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4
+};
+
+static struct attn_hw_reg *tcm_int_k2_regs[3] = {
+       &tcm_int0_k2, &tcm_int1_k2, &tcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tcm_prty_attn_desc[51] = {
+       "tcm_mem026_i_ecc_rf_int",
+       "tcm_mem003_i_ecc_0_rf_int",
+       "tcm_mem003_i_ecc_1_rf_int",
+       "tcm_mem022_i_ecc_0_rf_int",
+       "tcm_mem022_i_ecc_1_rf_int",
+       "tcm_mem005_i_ecc_0_rf_int",
+       "tcm_mem005_i_ecc_1_rf_int",
+       "tcm_mem024_i_ecc_0_rf_int",
+       "tcm_mem024_i_ecc_1_rf_int",
+       "tcm_mem018_i_mem_prty",
+       "tcm_mem019_i_mem_prty",
+       "tcm_mem015_i_mem_prty",
+       "tcm_mem016_i_mem_prty",
+       "tcm_mem017_i_mem_prty",
+       "tcm_mem010_i_mem_prty",
+       "tcm_mem020_i_mem_prty",
+       "tcm_mem011_i_mem_prty",
+       "tcm_mem012_i_mem_prty",
+       "tcm_mem013_i_mem_prty",
+       "tcm_mem014_i_mem_prty",
+       "tcm_mem029_i_mem_prty",
+       "tcm_mem028_i_mem_prty",
+       "tcm_mem027_i_mem_prty",
+       "tcm_mem004_i_mem_prty",
+       "tcm_mem023_i_mem_prty",
+       "tcm_mem006_i_mem_prty",
+       "tcm_mem025_i_mem_prty",
+       "tcm_mem021_i_mem_prty",
+       "tcm_mem007_i_mem_prty_0",
+       "tcm_mem007_i_mem_prty_1",
+       "tcm_mem008_i_mem_prty",
+       "tcm_mem025_i_ecc_rf_int",
+       "tcm_mem021_i_ecc_0_rf_int",
+       "tcm_mem021_i_ecc_1_rf_int",
+       "tcm_mem023_i_ecc_0_rf_int",
+       "tcm_mem023_i_ecc_1_rf_int",
+       "tcm_mem026_i_mem_prty",
+       "tcm_mem022_i_mem_prty",
+       "tcm_mem024_i_mem_prty",
+       "tcm_mem009_i_mem_prty",
+       "tcm_mem024_i_ecc_rf_int",
+       "tcm_mem001_i_ecc_0_rf_int",
+       "tcm_mem001_i_ecc_1_rf_int",
+       "tcm_mem019_i_ecc_0_rf_int",
+       "tcm_mem019_i_ecc_1_rf_int",
+       "tcm_mem022_i_ecc_rf_int",
+       "tcm_mem002_i_mem_prty",
+       "tcm_mem005_i_mem_prty_0",
+       "tcm_mem005_i_mem_prty_1",
+       "tcm_mem001_i_mem_prty",
+       "tcm_mem007_i_mem_prty",
+};
+#else
+#define tcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcm_prty1_bb_a0_attn_idx[31] = {
+       1, 2, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 26, 30, 32,
+       33, 36, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg tcm_prty1_bb_a0 = {
+       0, 31, tcm_prty1_bb_a0_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+       0x1180204
+};
+
+static const u16 tcm_prty2_bb_a0_attn_idx[3] = {
+       50, 21, 20,
+};
+
+static struct attn_hw_reg tcm_prty2_bb_a0 = {
+       1, 3, tcm_prty2_bb_a0_attn_idx, 0x1180210, 0x118021c, 0x1180218,
+       0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_bb_a0_regs[2] = {
+       &tcm_prty1_bb_a0, &tcm_prty2_bb_a0,
+};
+
+static const u16 tcm_prty1_bb_b0_attn_idx[31] = {
+       1, 2, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25,
+       28,
+       29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg tcm_prty1_bb_b0 = {
+       0, 31, tcm_prty1_bb_b0_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+       0x1180204
+};
+
+static const u16 tcm_prty2_bb_b0_attn_idx[2] = {
+       49, 46,
+};
+
+static struct attn_hw_reg tcm_prty2_bb_b0 = {
+       1, 2, tcm_prty2_bb_b0_attn_idx, 0x1180210, 0x118021c, 0x1180218,
+       0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = {
+       &tcm_prty1_bb_b0, &tcm_prty2_bb_b0,
+};
+
+static const u16 tcm_prty1_k2_attn_idx[31] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg tcm_prty1_k2 = {
+       0, 31, tcm_prty1_k2_attn_idx, 0x1180200, 0x118020c, 0x1180208,
+       0x1180204
+};
+
+static const u16 tcm_prty2_k2_attn_idx[3] = {
+       39, 49, 46,
+};
+
+static struct attn_hw_reg tcm_prty2_k2 = {
+       1, 3, tcm_prty2_k2_attn_idx, 0x1180210, 0x118021c, 0x1180218, 0x1180214
+};
+
+static struct attn_hw_reg *tcm_prty_k2_regs[2] = {
+       &tcm_prty1_k2, &tcm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *mcm_int_attn_desc[41] = {
+       "mcm_address_error",
+       "mcm_is_storm_ovfl_err",
+       "mcm_is_storm_under_err",
+       "mcm_is_msdm_ovfl_err",
+       "mcm_is_msdm_under_err",
+       "mcm_is_ysdm_ovfl_err",
+       "mcm_is_ysdm_under_err",
+       "mcm_is_usdm_ovfl_err",
+       "mcm_is_usdm_under_err",
+       "mcm_is_tmld_ovfl_err",
+       "mcm_is_tmld_under_err",
+       "mcm_is_usem_ovfl_err",
+       "mcm_is_usem_under_err",
+       "mcm_is_ysem_ovfl_err",
+       "mcm_is_ysem_under_err",
+       "mcm_is_pbf_ovfl_err",
+       "mcm_is_pbf_under_err",
+       "mcm_is_qm_p_ovfl_err",
+       "mcm_is_qm_p_under_err",
+       "mcm_is_qm_s_ovfl_err",
+       "mcm_is_qm_s_under_err",
+       "mcm_is_grc_ovfl_err0",
+       "mcm_is_grc_under_err0",
+       "mcm_is_grc_ovfl_err1",
+       "mcm_is_grc_under_err1",
+       "mcm_is_grc_ovfl_err2",
+       "mcm_is_grc_under_err2",
+       "mcm_is_grc_ovfl_err3",
+       "mcm_is_grc_under_err3",
+       "mcm_in_prcs_tbl_ovfl",
+       "mcm_agg_con_data_buf_ovfl",
+       "mcm_agg_con_cmd_buf_ovfl",
+       "mcm_sm_con_data_buf_ovfl",
+       "mcm_sm_con_cmd_buf_ovfl",
+       "mcm_agg_task_data_buf_ovfl",
+       "mcm_agg_task_cmd_buf_ovfl",
+       "mcm_sm_task_data_buf_ovfl",
+       "mcm_sm_task_cmd_buf_ovfl",
+       "mcm_fi_desc_input_violate",
+       "mcm_se_desc_input_violate",
+       "mcm_qmreg_more4",
+};
+#else
+#define mcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcm_int0_bb_a0_attn_idx[14] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_bb_a0 = {
+       0, 14, mcm_int0_bb_a0_attn_idx, 0x1200180, 0x120018c, 0x1200188,
+       0x1200184
+};
+
+static const u16 mcm_int1_bb_a0_attn_idx[26] = {
+       14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+       32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_bb_a0 = {
+       1, 26, mcm_int1_bb_a0_attn_idx, 0x1200190, 0x120019c, 0x1200198,
+       0x1200194
+};
+
+static const u16 mcm_int2_bb_a0_attn_idx[1] = {
+       40,
+};
+
+static struct attn_hw_reg mcm_int2_bb_a0 = {
+       2, 1, mcm_int2_bb_a0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8,
+       0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_bb_a0_regs[3] = {
+       &mcm_int0_bb_a0, &mcm_int1_bb_a0, &mcm_int2_bb_a0,
+};
+
+static const u16 mcm_int0_bb_b0_attn_idx[14] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_bb_b0 = {
+       0, 14, mcm_int0_bb_b0_attn_idx, 0x1200180, 0x120018c, 0x1200188,
+       0x1200184
+};
+
+static const u16 mcm_int1_bb_b0_attn_idx[26] = {
+       14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+       32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_bb_b0 = {
+       1, 26, mcm_int1_bb_b0_attn_idx, 0x1200190, 0x120019c, 0x1200198,
+       0x1200194
+};
+
+static const u16 mcm_int2_bb_b0_attn_idx[1] = {
+       40,
+};
+
+static struct attn_hw_reg mcm_int2_bb_b0 = {
+       2, 1, mcm_int2_bb_b0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8,
+       0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = {
+       &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0,
+};
+
+static const u16 mcm_int0_k2_attn_idx[14] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg mcm_int0_k2 = {
+       0, 14, mcm_int0_k2_attn_idx, 0x1200180, 0x120018c, 0x1200188, 0x1200184
+};
+
+static const u16 mcm_int1_k2_attn_idx[26] = {
+       14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+       32, 33, 34, 35, 36, 37, 38, 39,
+};
+
+static struct attn_hw_reg mcm_int1_k2 = {
+       1, 26, mcm_int1_k2_attn_idx, 0x1200190, 0x120019c, 0x1200198, 0x1200194
+};
+
+static const u16 mcm_int2_k2_attn_idx[1] = {
+       40,
+};
+
+static struct attn_hw_reg mcm_int2_k2 = {
+       2, 1, mcm_int2_k2_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4
+};
+
+static struct attn_hw_reg *mcm_int_k2_regs[3] = {
+       &mcm_int0_k2, &mcm_int1_k2, &mcm_int2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *mcm_prty_attn_desc[46] = {
+       "mcm_mem028_i_ecc_rf_int",
+       "mcm_mem003_i_ecc_rf_int",
+       "mcm_mem023_i_ecc_0_rf_int",
+       "mcm_mem023_i_ecc_1_rf_int",
+       "mcm_mem005_i_ecc_0_rf_int",
+       "mcm_mem005_i_ecc_1_rf_int",
+       "mcm_mem025_i_ecc_0_rf_int",
+       "mcm_mem025_i_ecc_1_rf_int",
+       "mcm_mem026_i_ecc_rf_int",
+       "mcm_mem017_i_mem_prty",
+       "mcm_mem019_i_mem_prty",
+       "mcm_mem016_i_mem_prty",
+       "mcm_mem015_i_mem_prty",
+       "mcm_mem020_i_mem_prty",
+       "mcm_mem021_i_mem_prty",
+       "mcm_mem018_i_mem_prty",
+       "mcm_mem011_i_mem_prty",
+       "mcm_mem012_i_mem_prty",
+       "mcm_mem013_i_mem_prty",
+       "mcm_mem014_i_mem_prty",
+       "mcm_mem031_i_mem_prty",
+       "mcm_mem030_i_mem_prty",
+       "mcm_mem029_i_mem_prty",
+       "mcm_mem004_i_mem_prty",
+       "mcm_mem024_i_mem_prty",
+       "mcm_mem006_i_mem_prty",
+       "mcm_mem027_i_mem_prty",
+       "mcm_mem022_i_mem_prty",
+       "mcm_mem007_i_mem_prty_0",
+       "mcm_mem007_i_mem_prty_1",
+       "mcm_mem008_i_mem_prty",
+       "mcm_mem001_i_ecc_rf_int",
+       "mcm_mem021_i_ecc_0_rf_int",
+       "mcm_mem021_i_ecc_1_rf_int",
+       "mcm_mem003_i_ecc_0_rf_int",
+       "mcm_mem003_i_ecc_1_rf_int",
+       "mcm_mem024_i_ecc_rf_int",
+       "mcm_mem009_i_mem_prty",
+       "mcm_mem010_i_mem_prty",
+       "mcm_mem028_i_mem_prty",
+       "mcm_mem002_i_mem_prty",
+       "mcm_mem025_i_mem_prty",
+       "mcm_mem005_i_mem_prty_0",
+       "mcm_mem005_i_mem_prty_1",
+       "mcm_mem001_i_mem_prty",
+       "mcm_mem007_i_mem_prty",
+};
+#else
+#define mcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 mcm_prty1_bb_a0_attn_idx[31] = {
+       2, 3, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 22, 23, 25, 26, 27, 31,
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg mcm_prty1_bb_a0 = {
+       0, 31, mcm_prty1_bb_a0_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+       0x1200204
+};
+
+static const u16 mcm_prty2_bb_a0_attn_idx[4] = {
+       45, 30, 21, 20,
+};
+
+static struct attn_hw_reg mcm_prty2_bb_a0 = {
+       1, 4, mcm_prty2_bb_a0_attn_idx, 0x1200210, 0x120021c, 0x1200218,
+       0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_bb_a0_regs[2] = {
+       &mcm_prty1_bb_a0, &mcm_prty2_bb_a0,
+};
+
+static const u16 mcm_prty1_bb_b0_attn_idx[31] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg mcm_prty1_bb_b0 = {
+       0, 31, mcm_prty1_bb_b0_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+       0x1200204
+};
+
+static const u16 mcm_prty2_bb_b0_attn_idx[4] = {
+       37, 38, 44, 40,
+};
+
+static struct attn_hw_reg mcm_prty2_bb_b0 = {
+       1, 4, mcm_prty2_bb_b0_attn_idx, 0x1200210, 0x120021c, 0x1200218,
+       0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = {
+       &mcm_prty1_bb_b0, &mcm_prty2_bb_b0,
+};
+
+static const u16 mcm_prty1_k2_attn_idx[31] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg mcm_prty1_k2 = {
+       0, 31, mcm_prty1_k2_attn_idx, 0x1200200, 0x120020c, 0x1200208,
+       0x1200204
+};
+
+static const u16 mcm_prty2_k2_attn_idx[4] = {
+       37, 38, 44, 40,
+};
+
+static struct attn_hw_reg mcm_prty2_k2 = {
+       1, 4, mcm_prty2_k2_attn_idx, 0x1200210, 0x120021c, 0x1200218, 0x1200214
+};
+
+static struct attn_hw_reg *mcm_prty_k2_regs[2] = {
+       &mcm_prty1_k2, &mcm_prty2_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ucm_int_attn_desc[47] = {
+       "ucm_address_error",
+       "ucm_is_storm_ovfl_err",
+       "ucm_is_storm_under_err",
+       "ucm_is_xsdm_ovfl_err",
+       "ucm_is_xsdm_under_err",
+       "ucm_is_ysdm_ovfl_err",
+       "ucm_is_ysdm_under_err",
+       "ucm_is_usdm_ovfl_err",
+       "ucm_is_usdm_under_err",
+       "ucm_is_rdif_ovfl_err",
+       "ucm_is_rdif_under_err",
+       "ucm_is_tdif_ovfl_err",
+       "ucm_is_tdif_under_err",
+       "ucm_is_muld_ovfl_err",
+       "ucm_is_muld_under_err",
+       "ucm_is_yuld_ovfl_err",
+       "ucm_is_yuld_under_err",
+       "ucm_is_dorq_ovfl_err",
+       "ucm_is_dorq_under_err",
+       "ucm_is_pbf_ovfl_err",
+       "ucm_is_pbf_under_err",
+       "ucm_is_tm_ovfl_err",
+       "ucm_is_tm_under_err",
+       "ucm_is_qm_p_ovfl_err",
+       "ucm_is_qm_p_under_err",
+       "ucm_is_qm_s_ovfl_err",
+       "ucm_is_qm_s_under_err",
+       "ucm_is_grc_ovfl_err0",
+       "ucm_is_grc_under_err0",
+       "ucm_is_grc_ovfl_err1",
+       "ucm_is_grc_under_err1",
+       "ucm_is_grc_ovfl_err2",
+       "ucm_is_grc_under_err2",
+       "ucm_is_grc_ovfl_err3",
+       "ucm_is_grc_under_err3",
+       "ucm_in_prcs_tbl_ovfl",
+       "ucm_agg_con_data_buf_ovfl",
+       "ucm_agg_con_cmd_buf_ovfl",
+       "ucm_sm_con_data_buf_ovfl",
+       "ucm_sm_con_cmd_buf_ovfl",
+       "ucm_agg_task_data_buf_ovfl",
+       "ucm_agg_task_cmd_buf_ovfl",
+       "ucm_sm_task_data_buf_ovfl",
+       "ucm_sm_task_cmd_buf_ovfl",
+       "ucm_fi_desc_input_violate",
+       "ucm_se_desc_input_violate",
+       "ucm_qmreg_more4",
+};
+#else
+#define ucm_int_attn_desc OSAL_NULL
+#endif
+
+/* UCM interrupt attention registers, one set per chip revision
+ * (BB A0, BB B0, K2).  Each *_attn_idx[] maps a register bit to an
+ * entry in ucm_int_attn_desc[]; each attn_hw_reg initializer is
+ * { reg_idx, num_of_bits, attn_idx, <4 GRC register addresses> }.
+ * NOTE(review): the four addresses look like status/clear/write/mask
+ * at offsets +0x0/+0xc/+0x8/+0x4 -- confirm the exact field order
+ * against the struct attn_hw_reg declaration.
+ */
+static const u16 ucm_int0_bb_a0_attn_idx[17] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_bb_a0 = {
+       0, 17, ucm_int0_bb_a0_attn_idx, 0x1280180, 0x128018c, 0x1280188,
+       0x1280184
+};
+
+static const u16 ucm_int1_bb_a0_attn_idx[29] = {
+       17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+       35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_bb_a0 = {
+       1, 29, ucm_int1_bb_a0_attn_idx, 0x1280190, 0x128019c, 0x1280198,
+       0x1280194
+};
+
+static const u16 ucm_int2_bb_a0_attn_idx[1] = {
+       46,
+};
+
+static struct attn_hw_reg ucm_int2_bb_a0 = {
+       2, 1, ucm_int2_bb_a0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8,
+       0x12801a4
+};
+
+/* All three UCM interrupt status registers for BB A0. */
+static struct attn_hw_reg *ucm_int_bb_a0_regs[3] = {
+       &ucm_int0_bb_a0, &ucm_int1_bb_a0, &ucm_int2_bb_a0,
+};
+
+/* BB B0: same register layout and bit mapping as BB A0. */
+static const u16 ucm_int0_bb_b0_attn_idx[17] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_bb_b0 = {
+       0, 17, ucm_int0_bb_b0_attn_idx, 0x1280180, 0x128018c, 0x1280188,
+       0x1280184
+};
+
+static const u16 ucm_int1_bb_b0_attn_idx[29] = {
+       17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+       35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_bb_b0 = {
+       1, 29, ucm_int1_bb_b0_attn_idx, 0x1280190, 0x128019c, 0x1280198,
+       0x1280194
+};
+
+static const u16 ucm_int2_bb_b0_attn_idx[1] = {
+       46,
+};
+
+static struct attn_hw_reg ucm_int2_bb_b0 = {
+       2, 1, ucm_int2_bb_b0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8,
+       0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = {
+       &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0,
+};
+
+/* K2: same UCM interrupt layout as BB. */
+static const u16 ucm_int0_k2_attn_idx[17] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ucm_int0_k2 = {
+       0, 17, ucm_int0_k2_attn_idx, 0x1280180, 0x128018c, 0x1280188, 0x1280184
+};
+
+static const u16 ucm_int1_k2_attn_idx[29] = {
+       17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+       35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+};
+
+static struct attn_hw_reg ucm_int1_k2 = {
+       1, 29, ucm_int1_k2_attn_idx, 0x1280190, 0x128019c, 0x1280198, 0x1280194
+};
+
+static const u16 ucm_int2_k2_attn_idx[1] = {
+       46,
+};
+
+static struct attn_hw_reg ucm_int2_k2 = {
+       2, 1, ucm_int2_k2_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4
+};
+
+static struct attn_hw_reg *ucm_int_k2_regs[3] = {
+       &ucm_int0_k2, &ucm_int1_k2, &ucm_int2_k2,
+};
+
+/* UCM parity attention: human-readable bit names (compiled in only
+ * when ATTN_DESC is defined; OSAL_NULL otherwise) followed by the
+ * per-chip parity register tables.  Note BB A0/B0 and K2 map the
+ * same registers onto different description indices.
+ */
+#ifdef ATTN_DESC
+static const char *ucm_prty_attn_desc[54] = {
+       "ucm_mem030_i_ecc_rf_int",
+       "ucm_mem005_i_ecc_0_rf_int",
+       "ucm_mem005_i_ecc_1_rf_int",
+       "ucm_mem024_i_ecc_0_rf_int",
+       "ucm_mem024_i_ecc_1_rf_int",
+       "ucm_mem025_i_ecc_rf_int",
+       "ucm_mem007_i_ecc_0_rf_int",
+       "ucm_mem007_i_ecc_1_rf_int",
+       "ucm_mem008_i_ecc_rf_int",
+       "ucm_mem027_i_ecc_0_rf_int",
+       "ucm_mem027_i_ecc_1_rf_int",
+       "ucm_mem028_i_ecc_rf_int",
+       "ucm_mem020_i_mem_prty",
+       "ucm_mem021_i_mem_prty",
+       "ucm_mem019_i_mem_prty",
+       "ucm_mem013_i_mem_prty",
+       "ucm_mem018_i_mem_prty",
+       "ucm_mem022_i_mem_prty",
+       "ucm_mem014_i_mem_prty",
+       "ucm_mem015_i_mem_prty",
+       "ucm_mem016_i_mem_prty",
+       "ucm_mem017_i_mem_prty",
+       "ucm_mem033_i_mem_prty",
+       "ucm_mem032_i_mem_prty",
+       "ucm_mem031_i_mem_prty",
+       "ucm_mem006_i_mem_prty",
+       "ucm_mem026_i_mem_prty",
+       "ucm_mem009_i_mem_prty",
+       "ucm_mem029_i_mem_prty",
+       "ucm_mem023_i_mem_prty",
+       "ucm_mem010_i_mem_prty_0",
+       "ucm_mem003_i_ecc_0_rf_int",
+       "ucm_mem003_i_ecc_1_rf_int",
+       "ucm_mem022_i_ecc_0_rf_int",
+       "ucm_mem022_i_ecc_1_rf_int",
+       "ucm_mem023_i_ecc_rf_int",
+       "ucm_mem006_i_ecc_rf_int",
+       "ucm_mem025_i_ecc_0_rf_int",
+       "ucm_mem025_i_ecc_1_rf_int",
+       "ucm_mem026_i_ecc_rf_int",
+       "ucm_mem011_i_mem_prty",
+       "ucm_mem012_i_mem_prty",
+       "ucm_mem030_i_mem_prty",
+       "ucm_mem004_i_mem_prty",
+       "ucm_mem024_i_mem_prty",
+       "ucm_mem007_i_mem_prty",
+       "ucm_mem027_i_mem_prty",
+       "ucm_mem008_i_mem_prty_0",
+       "ucm_mem010_i_mem_prty_1",
+       "ucm_mem003_i_mem_prty",
+       "ucm_mem001_i_mem_prty",
+       "ucm_mem002_i_mem_prty",
+       "ucm_mem008_i_mem_prty_1",
+       "ucm_mem010_i_mem_prty",
+};
+#else
+#define ucm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ucm_prty1_bb_a0_attn_idx[31] = {
+       1, 2, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 24, 28, 31, 32, 33, 34,
+       35,
+       36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+};
+
+static struct attn_hw_reg ucm_prty1_bb_a0 = {
+       0, 31, ucm_prty1_bb_a0_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+       0x1280204
+};
+
+static const u16 ucm_prty2_bb_a0_attn_idx[7] = {
+       50, 51, 52, 27, 53, 23, 22,
+};
+
+static struct attn_hw_reg ucm_prty2_bb_a0 = {
+       1, 7, ucm_prty2_bb_a0_attn_idx, 0x1280210, 0x128021c, 0x1280218,
+       0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_bb_a0_regs[2] = {
+       &ucm_prty1_bb_a0, &ucm_prty2_bb_a0,
+};
+
+static const u16 ucm_prty1_bb_b0_attn_idx[31] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ucm_prty1_bb_b0 = {
+       0, 31, ucm_prty1_bb_b0_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+       0x1280204
+};
+
+static const u16 ucm_prty2_bb_b0_attn_idx[7] = {
+       48, 40, 41, 49, 43, 50, 51,
+};
+
+static struct attn_hw_reg ucm_prty2_bb_b0 = {
+       1, 7, ucm_prty2_bb_b0_attn_idx, 0x1280210, 0x128021c, 0x1280218,
+       0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = {
+       &ucm_prty1_bb_b0, &ucm_prty2_bb_b0,
+};
+
+static const u16 ucm_prty1_k2_attn_idx[31] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ucm_prty1_k2 = {
+       0, 31, ucm_prty1_k2_attn_idx, 0x1280200, 0x128020c, 0x1280208,
+       0x1280204
+};
+
+static const u16 ucm_prty2_k2_attn_idx[7] = {
+       48, 40, 41, 49, 43, 50, 51,
+};
+
+static struct attn_hw_reg ucm_prty2_k2 = {
+       1, 7, ucm_prty2_k2_attn_idx, 0x1280210, 0x128021c, 0x1280218, 0x1280214
+};
+
+static struct attn_hw_reg *ucm_prty_k2_regs[2] = {
+       &ucm_prty1_k2, &ucm_prty2_k2,
+};
+
+/* XCM interrupt attention: bit-name strings (ATTN_DESC builds only)
+ * and the per-chip interrupt register tables.  The bit-to-name
+ * mapping is identical across BB A0, BB B0 and K2 for this block.
+ */
+#ifdef ATTN_DESC
+static const char *xcm_int_attn_desc[49] = {
+       "xcm_address_error",
+       "xcm_is_storm_ovfl_err",
+       "xcm_is_storm_under_err",
+       "xcm_is_msdm_ovfl_err",
+       "xcm_is_msdm_under_err",
+       "xcm_is_xsdm_ovfl_err",
+       "xcm_is_xsdm_under_err",
+       "xcm_is_ysdm_ovfl_err",
+       "xcm_is_ysdm_under_err",
+       "xcm_is_usdm_ovfl_err",
+       "xcm_is_usdm_under_err",
+       "xcm_is_msem_ovfl_err",
+       "xcm_is_msem_under_err",
+       "xcm_is_usem_ovfl_err",
+       "xcm_is_usem_under_err",
+       "xcm_is_ysem_ovfl_err",
+       "xcm_is_ysem_under_err",
+       "xcm_is_dorq_ovfl_err",
+       "xcm_is_dorq_under_err",
+       "xcm_is_pbf_ovfl_err",
+       "xcm_is_pbf_under_err",
+       "xcm_is_tm_ovfl_err",
+       "xcm_is_tm_under_err",
+       "xcm_is_qm_p_ovfl_err",
+       "xcm_is_qm_p_under_err",
+       "xcm_is_qm_s_ovfl_err",
+       "xcm_is_qm_s_under_err",
+       "xcm_is_grc_ovfl_err0",
+       "xcm_is_grc_under_err0",
+       "xcm_is_grc_ovfl_err1",
+       "xcm_is_grc_under_err1",
+       "xcm_is_grc_ovfl_err2",
+       "xcm_is_grc_under_err2",
+       "xcm_is_grc_ovfl_err3",
+       "xcm_is_grc_under_err3",
+       "xcm_in_prcs_tbl_ovfl",
+       "xcm_agg_con_data_buf_ovfl",
+       "xcm_agg_con_cmd_buf_ovfl",
+       "xcm_sm_con_data_buf_ovfl",
+       "xcm_sm_con_cmd_buf_ovfl",
+       "xcm_fi_desc_input_violate",
+       "xcm_qm_act_st_cnt_msg_prcs_under",
+       "xcm_qm_act_st_cnt_msg_prcs_ovfl",
+       "xcm_qm_act_st_cnt_ext_ld_under",
+       "xcm_qm_act_st_cnt_ext_ld_ovfl",
+       "xcm_qm_act_st_cnt_rbc_under",
+       "xcm_qm_act_st_cnt_rbc_ovfl",
+       "xcm_qm_act_st_cnt_drop_under",
+       "xcm_qm_act_st_cnt_illeg_pqnum",
+};
+#else
+#define xcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xcm_int0_bb_a0_attn_idx[16] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_bb_a0 = {
+       0, 16, xcm_int0_bb_a0_attn_idx, 0x1000180, 0x100018c, 0x1000188,
+       0x1000184
+};
+
+static const u16 xcm_int1_bb_a0_attn_idx[25] = {
+       16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+       34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_bb_a0 = {
+       1, 25, xcm_int1_bb_a0_attn_idx, 0x1000190, 0x100019c, 0x1000198,
+       0x1000194
+};
+
+static const u16 xcm_int2_bb_a0_attn_idx[8] = {
+       41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_bb_a0 = {
+       2, 8, xcm_int2_bb_a0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8,
+       0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_bb_a0_regs[3] = {
+       &xcm_int0_bb_a0, &xcm_int1_bb_a0, &xcm_int2_bb_a0,
+};
+
+static const u16 xcm_int0_bb_b0_attn_idx[16] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_bb_b0 = {
+       0, 16, xcm_int0_bb_b0_attn_idx, 0x1000180, 0x100018c, 0x1000188,
+       0x1000184
+};
+
+static const u16 xcm_int1_bb_b0_attn_idx[25] = {
+       16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+       34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_bb_b0 = {
+       1, 25, xcm_int1_bb_b0_attn_idx, 0x1000190, 0x100019c, 0x1000198,
+       0x1000194
+};
+
+static const u16 xcm_int2_bb_b0_attn_idx[8] = {
+       41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_bb_b0 = {
+       2, 8, xcm_int2_bb_b0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8,
+       0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = {
+       &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0,
+};
+
+static const u16 xcm_int0_k2_attn_idx[16] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg xcm_int0_k2 = {
+       0, 16, xcm_int0_k2_attn_idx, 0x1000180, 0x100018c, 0x1000188, 0x1000184
+};
+
+static const u16 xcm_int1_k2_attn_idx[25] = {
+       16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
+       34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg xcm_int1_k2 = {
+       1, 25, xcm_int1_k2_attn_idx, 0x1000190, 0x100019c, 0x1000198, 0x1000194
+};
+
+static const u16 xcm_int2_k2_attn_idx[8] = {
+       41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_int2_k2 = {
+       2, 8, xcm_int2_k2_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4
+};
+
+static struct attn_hw_reg *xcm_int_k2_regs[3] = {
+       &xcm_int0_k2, &xcm_int1_k2, &xcm_int2_k2,
+};
+
+/* XCM parity attention: bit-name strings (ATTN_DESC builds only) and
+ * per-chip parity register tables.  Unlike the interrupt tables, the
+ * bit-to-description mapping differs between BB A0, BB B0 and K2,
+ * and K2 exposes one extra bit in its second parity register.
+ */
+#ifdef ATTN_DESC
+static const char *xcm_prty_attn_desc[59] = {
+       "xcm_mem036_i_ecc_rf_int",
+       "xcm_mem003_i_ecc_0_rf_int",
+       "xcm_mem003_i_ecc_1_rf_int",
+       "xcm_mem003_i_ecc_2_rf_int",
+       "xcm_mem003_i_ecc_3_rf_int",
+       "xcm_mem004_i_ecc_rf_int",
+       "xcm_mem033_i_ecc_0_rf_int",
+       "xcm_mem033_i_ecc_1_rf_int",
+       "xcm_mem034_i_ecc_rf_int",
+       "xcm_mem026_i_mem_prty",
+       "xcm_mem025_i_mem_prty",
+       "xcm_mem022_i_mem_prty",
+       "xcm_mem029_i_mem_prty",
+       "xcm_mem023_i_mem_prty",
+       "xcm_mem028_i_mem_prty",
+       "xcm_mem030_i_mem_prty",
+       "xcm_mem017_i_mem_prty",
+       "xcm_mem024_i_mem_prty",
+       "xcm_mem027_i_mem_prty",
+       "xcm_mem018_i_mem_prty",
+       "xcm_mem019_i_mem_prty",
+       "xcm_mem020_i_mem_prty",
+       "xcm_mem021_i_mem_prty",
+       "xcm_mem039_i_mem_prty",
+       "xcm_mem038_i_mem_prty",
+       "xcm_mem037_i_mem_prty",
+       "xcm_mem005_i_mem_prty",
+       "xcm_mem035_i_mem_prty",
+       "xcm_mem031_i_mem_prty",
+       "xcm_mem006_i_mem_prty",
+       "xcm_mem015_i_mem_prty",
+       "xcm_mem035_i_ecc_rf_int",
+       "xcm_mem032_i_ecc_0_rf_int",
+       "xcm_mem032_i_ecc_1_rf_int",
+       "xcm_mem033_i_ecc_rf_int",
+       "xcm_mem036_i_mem_prty",
+       "xcm_mem034_i_mem_prty",
+       "xcm_mem016_i_mem_prty",
+       "xcm_mem002_i_ecc_0_rf_int",
+       "xcm_mem002_i_ecc_1_rf_int",
+       "xcm_mem002_i_ecc_2_rf_int",
+       "xcm_mem002_i_ecc_3_rf_int",
+       "xcm_mem003_i_ecc_rf_int",
+       "xcm_mem031_i_ecc_0_rf_int",
+       "xcm_mem031_i_ecc_1_rf_int",
+       "xcm_mem032_i_ecc_rf_int",
+       "xcm_mem004_i_mem_prty",
+       "xcm_mem033_i_mem_prty",
+       "xcm_mem014_i_mem_prty",
+       "xcm_mem032_i_mem_prty",
+       "xcm_mem007_i_mem_prty",
+       "xcm_mem008_i_mem_prty",
+       "xcm_mem009_i_mem_prty",
+       "xcm_mem010_i_mem_prty",
+       "xcm_mem011_i_mem_prty",
+       "xcm_mem012_i_mem_prty",
+       "xcm_mem013_i_mem_prty",
+       "xcm_mem001_i_mem_prty",
+       "xcm_mem002_i_mem_prty",
+};
+#else
+#define xcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xcm_prty1_bb_a0_attn_idx[31] = {
+       8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 25, 26, 27, 30,
+       35,
+       37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg xcm_prty1_bb_a0 = {
+       0, 31, xcm_prty1_bb_a0_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+       0x1000204
+};
+
+static const u16 xcm_prty2_bb_a0_attn_idx[11] = {
+       50, 51, 52, 53, 54, 55, 56, 57, 15, 29, 24,
+};
+
+static struct attn_hw_reg xcm_prty2_bb_a0 = {
+       1, 11, xcm_prty2_bb_a0_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+       0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_bb_a0_regs[2] = {
+       &xcm_prty1_bb_a0, &xcm_prty2_bb_a0,
+};
+
+static const u16 xcm_prty1_bb_b0_attn_idx[31] = {
+       1, 2, 3, 4, 5, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+       24,
+       25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+};
+
+static struct attn_hw_reg xcm_prty1_bb_b0 = {
+       0, 31, xcm_prty1_bb_b0_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+       0x1000204
+};
+
+static const u16 xcm_prty2_bb_b0_attn_idx[11] = {
+       50, 51, 52, 53, 54, 55, 56, 48, 57, 58, 28,
+};
+
+static struct attn_hw_reg xcm_prty2_bb_b0 = {
+       1, 11, xcm_prty2_bb_b0_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+       0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = {
+       &xcm_prty1_bb_b0, &xcm_prty2_bb_b0,
+};
+
+static const u16 xcm_prty1_k2_attn_idx[31] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg xcm_prty1_k2 = {
+       0, 31, xcm_prty1_k2_attn_idx, 0x1000200, 0x100020c, 0x1000208,
+       0x1000204
+};
+
+/* K2 has 12 valid bits here vs. 11 on BB. */
+static const u16 xcm_prty2_k2_attn_idx[12] = {
+       37, 49, 50, 51, 52, 53, 54, 55, 56, 48, 57, 58,
+};
+
+static struct attn_hw_reg xcm_prty2_k2 = {
+       1, 12, xcm_prty2_k2_attn_idx, 0x1000210, 0x100021c, 0x1000218,
+       0x1000214
+};
+
+static struct attn_hw_reg *xcm_prty_k2_regs[2] = {
+       &xcm_prty1_k2, &xcm_prty2_k2,
+};
+
+/* YCM interrupt attention: bit-name strings (ATTN_DESC builds only)
+ * and per-chip interrupt register tables.  The mapping is identical
+ * across BB A0, BB B0 and K2 for this block.
+ */
+#ifdef ATTN_DESC
+static const char *ycm_int_attn_desc[37] = {
+       "ycm_address_error",
+       "ycm_is_storm_ovfl_err",
+       "ycm_is_storm_under_err",
+       "ycm_is_msdm_ovfl_err",
+       "ycm_is_msdm_under_err",
+       "ycm_is_ysdm_ovfl_err",
+       "ycm_is_ysdm_under_err",
+       "ycm_is_xyld_ovfl_err",
+       "ycm_is_xyld_under_err",
+       "ycm_is_msem_ovfl_err",
+       "ycm_is_msem_under_err",
+       "ycm_is_usem_ovfl_err",
+       "ycm_is_usem_under_err",
+       "ycm_is_pbf_ovfl_err",
+       "ycm_is_pbf_under_err",
+       "ycm_is_qm_p_ovfl_err",
+       "ycm_is_qm_p_under_err",
+       "ycm_is_qm_s_ovfl_err",
+       "ycm_is_qm_s_under_err",
+       "ycm_is_grc_ovfl_err0",
+       "ycm_is_grc_under_err0",
+       "ycm_is_grc_ovfl_err1",
+       "ycm_is_grc_under_err1",
+       "ycm_is_grc_ovfl_err2",
+       "ycm_is_grc_under_err2",
+       "ycm_is_grc_ovfl_err3",
+       "ycm_is_grc_under_err3",
+       "ycm_in_prcs_tbl_ovfl",
+       "ycm_sm_con_data_buf_ovfl",
+       "ycm_sm_con_cmd_buf_ovfl",
+       "ycm_agg_task_data_buf_ovfl",
+       "ycm_agg_task_cmd_buf_ovfl",
+       "ycm_sm_task_data_buf_ovfl",
+       "ycm_sm_task_cmd_buf_ovfl",
+       "ycm_fi_desc_input_violate",
+       "ycm_se_desc_input_violate",
+       "ycm_qmreg_more4",
+};
+#else
+#define ycm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ycm_int0_bb_a0_attn_idx[13] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_bb_a0 = {
+       0, 13, ycm_int0_bb_a0_attn_idx, 0x1080180, 0x108018c, 0x1080188,
+       0x1080184
+};
+
+static const u16 ycm_int1_bb_a0_attn_idx[23] = {
+       13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+       31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_bb_a0 = {
+       1, 23, ycm_int1_bb_a0_attn_idx, 0x1080190, 0x108019c, 0x1080198,
+       0x1080194
+};
+
+static const u16 ycm_int2_bb_a0_attn_idx[1] = {
+       36,
+};
+
+static struct attn_hw_reg ycm_int2_bb_a0 = {
+       2, 1, ycm_int2_bb_a0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8,
+       0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_bb_a0_regs[3] = {
+       &ycm_int0_bb_a0, &ycm_int1_bb_a0, &ycm_int2_bb_a0,
+};
+
+static const u16 ycm_int0_bb_b0_attn_idx[13] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_bb_b0 = {
+       0, 13, ycm_int0_bb_b0_attn_idx, 0x1080180, 0x108018c, 0x1080188,
+       0x1080184
+};
+
+static const u16 ycm_int1_bb_b0_attn_idx[23] = {
+       13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+       31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_bb_b0 = {
+       1, 23, ycm_int1_bb_b0_attn_idx, 0x1080190, 0x108019c, 0x1080198,
+       0x1080194
+};
+
+static const u16 ycm_int2_bb_b0_attn_idx[1] = {
+       36,
+};
+
+static struct attn_hw_reg ycm_int2_bb_b0 = {
+       2, 1, ycm_int2_bb_b0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8,
+       0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = {
+       &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0,
+};
+
+static const u16 ycm_int0_k2_attn_idx[13] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg ycm_int0_k2 = {
+       0, 13, ycm_int0_k2_attn_idx, 0x1080180, 0x108018c, 0x1080188, 0x1080184
+};
+
+static const u16 ycm_int1_k2_attn_idx[23] = {
+       13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+       31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg ycm_int1_k2 = {
+       1, 23, ycm_int1_k2_attn_idx, 0x1080190, 0x108019c, 0x1080198, 0x1080194
+};
+
+static const u16 ycm_int2_k2_attn_idx[1] = {
+       36,
+};
+
+static struct attn_hw_reg ycm_int2_k2 = {
+       2, 1, ycm_int2_k2_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4
+};
+
+static struct attn_hw_reg *ycm_int_k2_regs[3] = {
+       &ycm_int0_k2, &ycm_int1_k2, &ycm_int2_k2,
+};
+
+/* YCM parity attention: bit-name strings (ATTN_DESC builds only) and
+ * per-chip parity register tables.  BB A0 and BB B0 share one mapping;
+ * K2 uses a different one and exposes an extra bit in prty2.
+ */
+#ifdef ATTN_DESC
+static const char *ycm_prty_attn_desc[44] = {
+       "ycm_mem027_i_ecc_rf_int",
+       "ycm_mem003_i_ecc_0_rf_int",
+       "ycm_mem003_i_ecc_1_rf_int",
+       "ycm_mem022_i_ecc_0_rf_int",
+       "ycm_mem022_i_ecc_1_rf_int",
+       "ycm_mem023_i_ecc_rf_int",
+       "ycm_mem005_i_ecc_0_rf_int",
+       "ycm_mem005_i_ecc_1_rf_int",
+       "ycm_mem025_i_ecc_0_rf_int",
+       "ycm_mem025_i_ecc_1_rf_int",
+       "ycm_mem018_i_mem_prty",
+       "ycm_mem020_i_mem_prty",
+       "ycm_mem017_i_mem_prty",
+       "ycm_mem016_i_mem_prty",
+       "ycm_mem019_i_mem_prty",
+       "ycm_mem015_i_mem_prty",
+       "ycm_mem011_i_mem_prty",
+       "ycm_mem012_i_mem_prty",
+       "ycm_mem013_i_mem_prty",
+       "ycm_mem014_i_mem_prty",
+       "ycm_mem030_i_mem_prty",
+       "ycm_mem029_i_mem_prty",
+       "ycm_mem028_i_mem_prty",
+       "ycm_mem004_i_mem_prty",
+       "ycm_mem024_i_mem_prty",
+       "ycm_mem006_i_mem_prty",
+       "ycm_mem026_i_mem_prty",
+       "ycm_mem021_i_mem_prty",
+       "ycm_mem007_i_mem_prty_0",
+       "ycm_mem007_i_mem_prty_1",
+       "ycm_mem008_i_mem_prty",
+       "ycm_mem026_i_ecc_rf_int",
+       "ycm_mem021_i_ecc_0_rf_int",
+       "ycm_mem021_i_ecc_1_rf_int",
+       "ycm_mem022_i_ecc_rf_int",
+       "ycm_mem024_i_ecc_0_rf_int",
+       "ycm_mem024_i_ecc_1_rf_int",
+       "ycm_mem027_i_mem_prty",
+       "ycm_mem023_i_mem_prty",
+       "ycm_mem025_i_mem_prty",
+       "ycm_mem009_i_mem_prty",
+       "ycm_mem010_i_mem_prty",
+       "ycm_mem001_i_mem_prty",
+       "ycm_mem002_i_mem_prty",
+};
+#else
+#define ycm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ycm_prty1_bb_a0_attn_idx[31] = {
+       1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28,
+       29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg ycm_prty1_bb_a0 = {
+       0, 31, ycm_prty1_bb_a0_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+       0x1080204
+};
+
+static const u16 ycm_prty2_bb_a0_attn_idx[3] = {
+       41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_bb_a0 = {
+       1, 3, ycm_prty2_bb_a0_attn_idx, 0x1080210, 0x108021c, 0x1080218,
+       0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_bb_a0_regs[2] = {
+       &ycm_prty1_bb_a0, &ycm_prty2_bb_a0,
+};
+
+static const u16 ycm_prty1_bb_b0_attn_idx[31] = {
+       1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28,
+       29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+};
+
+static struct attn_hw_reg ycm_prty1_bb_b0 = {
+       0, 31, ycm_prty1_bb_b0_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+       0x1080204
+};
+
+static const u16 ycm_prty2_bb_b0_attn_idx[3] = {
+       41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_bb_b0 = {
+       1, 3, ycm_prty2_bb_b0_attn_idx, 0x1080210, 0x108021c, 0x1080218,
+       0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = {
+       &ycm_prty1_bb_b0, &ycm_prty2_bb_b0,
+};
+
+static const u16 ycm_prty1_k2_attn_idx[31] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg ycm_prty1_k2 = {
+       0, 31, ycm_prty1_k2_attn_idx, 0x1080200, 0x108020c, 0x1080208,
+       0x1080204
+};
+
+/* K2 has 4 valid bits here vs. 3 on BB. */
+static const u16 ycm_prty2_k2_attn_idx[4] = {
+       40, 41, 42, 43,
+};
+
+static struct attn_hw_reg ycm_prty2_k2 = {
+       1, 4, ycm_prty2_k2_attn_idx, 0x1080210, 0x108021c, 0x1080218, 0x1080214
+};
+
+static struct attn_hw_reg *ycm_prty_k2_regs[2] = {
+       &ycm_prty1_k2, &ycm_prty2_k2,
+};
+
+/* PCM interrupt attention: bit-name strings (ATTN_DESC builds only)
+ * and per-chip interrupt register tables; identical mapping across
+ * BB A0, BB B0 and K2.
+ */
+#ifdef ATTN_DESC
+static const char *pcm_int_attn_desc[20] = {
+       "pcm_address_error",
+       "pcm_is_storm_ovfl_err",
+       "pcm_is_storm_under_err",
+       "pcm_is_psdm_ovfl_err",
+       "pcm_is_psdm_under_err",
+       "pcm_is_pbf_ovfl_err",
+       "pcm_is_pbf_under_err",
+       "pcm_is_grc_ovfl_err0",
+       "pcm_is_grc_under_err0",
+       "pcm_is_grc_ovfl_err1",
+       "pcm_is_grc_under_err1",
+       "pcm_is_grc_ovfl_err2",
+       "pcm_is_grc_under_err2",
+       "pcm_is_grc_ovfl_err3",
+       "pcm_is_grc_under_err3",
+       "pcm_in_prcs_tbl_ovfl",
+       "pcm_sm_con_data_buf_ovfl",
+       "pcm_sm_con_cmd_buf_ovfl",
+       "pcm_fi_desc_input_violate",
+       "pcm_qmreg_more4",
+};
+#else
+#define pcm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcm_int0_bb_a0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_bb_a0 = {
+       0, 5, pcm_int0_bb_a0_attn_idx, 0x1100180, 0x110018c, 0x1100188,
+       0x1100184
+};
+
+static const u16 pcm_int1_bb_a0_attn_idx[14] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_bb_a0 = {
+       1, 14, pcm_int1_bb_a0_attn_idx, 0x1100190, 0x110019c, 0x1100198,
+       0x1100194
+};
+
+static const u16 pcm_int2_bb_a0_attn_idx[1] = {
+       19,
+};
+
+static struct attn_hw_reg pcm_int2_bb_a0 = {
+       2, 1, pcm_int2_bb_a0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8,
+       0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_bb_a0_regs[3] = {
+       &pcm_int0_bb_a0, &pcm_int1_bb_a0, &pcm_int2_bb_a0,
+};
+
+static const u16 pcm_int0_bb_b0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_bb_b0 = {
+       0, 5, pcm_int0_bb_b0_attn_idx, 0x1100180, 0x110018c, 0x1100188,
+       0x1100184
+};
+
+static const u16 pcm_int1_bb_b0_attn_idx[14] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_bb_b0 = {
+       1, 14, pcm_int1_bb_b0_attn_idx, 0x1100190, 0x110019c, 0x1100198,
+       0x1100194
+};
+
+static const u16 pcm_int2_bb_b0_attn_idx[1] = {
+       19,
+};
+
+static struct attn_hw_reg pcm_int2_bb_b0 = {
+       2, 1, pcm_int2_bb_b0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8,
+       0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = {
+       &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0,
+};
+
+static const u16 pcm_int0_k2_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg pcm_int0_k2 = {
+       0, 5, pcm_int0_k2_attn_idx, 0x1100180, 0x110018c, 0x1100188, 0x1100184
+};
+
+static const u16 pcm_int1_k2_attn_idx[14] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+};
+
+static struct attn_hw_reg pcm_int1_k2 = {
+       1, 14, pcm_int1_k2_attn_idx, 0x1100190, 0x110019c, 0x1100198, 0x1100194
+};
+
+static const u16 pcm_int2_k2_attn_idx[1] = {
+       19,
+};
+
+static struct attn_hw_reg pcm_int2_k2 = {
+       2, 1, pcm_int2_k2_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4
+};
+
+static struct attn_hw_reg *pcm_int_k2_regs[3] = {
+       &pcm_int0_k2, &pcm_int1_k2, &pcm_int2_k2,
+};
+
+/* PCM parity attention: bit-name strings (ATTN_DESC builds only) and
+ * per-chip parity register tables.  Each chip revision exposes a
+ * different subset of valid bits in the single parity register
+ * (14 on BB A0, 11 on BB B0, 12 on K2).
+ */
+#ifdef ATTN_DESC
+static const char *pcm_prty_attn_desc[18] = {
+       "pcm_mem012_i_ecc_rf_int",
+       "pcm_mem010_i_ecc_0_rf_int",
+       "pcm_mem010_i_ecc_1_rf_int",
+       "pcm_mem008_i_mem_prty",
+       "pcm_mem007_i_mem_prty",
+       "pcm_mem006_i_mem_prty",
+       "pcm_mem002_i_mem_prty",
+       "pcm_mem003_i_mem_prty",
+       "pcm_mem004_i_mem_prty",
+       "pcm_mem005_i_mem_prty",
+       "pcm_mem011_i_mem_prty",
+       "pcm_mem001_i_mem_prty",
+       "pcm_mem011_i_ecc_rf_int",
+       "pcm_mem009_i_ecc_0_rf_int",
+       "pcm_mem009_i_ecc_1_rf_int",
+       "pcm_mem010_i_mem_prty",
+       "pcm_mem013_i_mem_prty",
+       "pcm_mem012_i_mem_prty",
+};
+#else
+#define pcm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pcm_prty1_bb_a0_attn_idx[14] = {
+       3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg pcm_prty1_bb_a0 = {
+       0, 14, pcm_prty1_bb_a0_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+       0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_bb_a0_regs[1] = {
+       &pcm_prty1_bb_a0,
+};
+
+static const u16 pcm_prty1_bb_b0_attn_idx[11] = {
+       4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg pcm_prty1_bb_b0 = {
+       0, 11, pcm_prty1_bb_b0_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+       0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = {
+       &pcm_prty1_bb_b0,
+};
+
+static const u16 pcm_prty1_k2_attn_idx[12] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg pcm_prty1_k2 = {
+       0, 12, pcm_prty1_k2_attn_idx, 0x1100200, 0x110020c, 0x1100208,
+       0x1100204
+};
+
+static struct attn_hw_reg *pcm_prty_k2_regs[1] = {
+       &pcm_prty1_k2,
+};
+
+/* QM interrupt attention: bit-name strings (ATTN_DESC builds only)
+ * and per-chip interrupt register tables.  BB A0 only implements the
+ * first 16 bits; BB B0 and K2 implement all 22.
+ */
+#ifdef ATTN_DESC
+static const char *qm_int_attn_desc[22] = {
+       "qm_address_error",
+       "qm_ovf_err_tx",
+       "qm_ovf_err_other",
+       "qm_pf_usg_cnt_err",
+       "qm_vf_usg_cnt_err",
+       "qm_voq_crd_inc_err",
+       "qm_voq_crd_dec_err",
+       "qm_byte_crd_inc_err",
+       "qm_byte_crd_dec_err",
+       "qm_err_incdec_rlglblcrd",
+       "qm_err_incdec_rlpfcrd",
+       "qm_err_incdec_wfqpfcrd",
+       "qm_err_incdec_wfqvpcrd",
+       "qm_err_incdec_voqlinecrd",
+       "qm_err_incdec_voqbytecrd",
+       "qm_fifos_error",
+       "qm_qm_rl_dc_exp_pf_controler_pop_error",
+       "qm_qm_rl_dc_exp_pf_controler_push_error",
+       "qm_qm_rl_dc_rf_req_controler_pop_error",
+       "qm_qm_rl_dc_rf_req_controler_push_error",
+       "qm_qm_rl_dc_rf_res_controler_pop_error",
+       "qm_qm_rl_dc_rf_res_controler_push_error",
+};
+#else
+#define qm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 qm_int0_bb_a0_attn_idx[16] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg qm_int0_bb_a0 = {
+       0, 16, qm_int0_bb_a0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_bb_a0_regs[1] = {
+       &qm_int0_bb_a0,
+};
+
+static const u16 qm_int0_bb_b0_attn_idx[22] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21,
+};
+
+static struct attn_hw_reg qm_int0_bb_b0 = {
+       0, 22, qm_int0_bb_b0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_bb_b0_regs[1] = {
+       &qm_int0_bb_b0,
+};
+
+static const u16 qm_int0_k2_attn_idx[22] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21,
+};
+
+static struct attn_hw_reg qm_int0_k2 = {
+       0, 22, qm_int0_k2_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184
+};
+
+static struct attn_hw_reg *qm_int_k2_regs[1] = {
+       &qm_int0_k2,
+};
+
+/* QM parity attention: bit-name strings (ATTN_DESC builds only) and
+ * the BB A0 parity register tables (prty0..prty3; the per-chip regs
+ * arrays for the remaining chips follow later in the file).
+ */
+#ifdef ATTN_DESC
+static const char *qm_prty_attn_desc[109] = {
+       "qm_xcm_wrc_fifo",
+       "qm_ucm_wrc_fifo",
+       "qm_tcm_wrc_fifo",
+       "qm_ccm_wrc_fifo",
+       "qm_bigramhigh",
+       "qm_bigramlow",
+       "qm_base_address",
+       "qm_wrbuff",
+       "qm_bigramhigh_ext_a",
+       "qm_bigramlow_ext_a",
+       "qm_base_address_ext_a",
+       "qm_mem006_i_ecc_0_rf_int",
+       "qm_mem006_i_ecc_1_rf_int",
+       "qm_mem005_i_ecc_0_rf_int",
+       "qm_mem005_i_ecc_1_rf_int",
+       "qm_mem012_i_ecc_rf_int",
+       "qm_mem037_i_mem_prty",
+       "qm_mem036_i_mem_prty",
+       "qm_mem039_i_mem_prty",
+       "qm_mem038_i_mem_prty",
+       "qm_mem040_i_mem_prty",
+       "qm_mem042_i_mem_prty",
+       "qm_mem041_i_mem_prty",
+       "qm_mem056_i_mem_prty",
+       "qm_mem055_i_mem_prty",
+       "qm_mem053_i_mem_prty",
+       "qm_mem054_i_mem_prty",
+       "qm_mem057_i_mem_prty",
+       "qm_mem058_i_mem_prty",
+       "qm_mem062_i_mem_prty",
+       "qm_mem061_i_mem_prty",
+       "qm_mem059_i_mem_prty",
+       "qm_mem060_i_mem_prty",
+       "qm_mem063_i_mem_prty",
+       "qm_mem064_i_mem_prty",
+       "qm_mem033_i_mem_prty",
+       "qm_mem032_i_mem_prty",
+       "qm_mem030_i_mem_prty",
+       "qm_mem031_i_mem_prty",
+       "qm_mem034_i_mem_prty",
+       "qm_mem035_i_mem_prty",
+       "qm_mem051_i_mem_prty",
+       "qm_mem042_i_ecc_0_rf_int",
+       "qm_mem042_i_ecc_1_rf_int",
+       "qm_mem041_i_ecc_0_rf_int",
+       "qm_mem041_i_ecc_1_rf_int",
+       "qm_mem048_i_ecc_rf_int",
+       "qm_mem009_i_mem_prty",
+       "qm_mem008_i_mem_prty",
+       "qm_mem011_i_mem_prty",
+       "qm_mem010_i_mem_prty",
+       "qm_mem012_i_mem_prty",
+       "qm_mem014_i_mem_prty",
+       "qm_mem013_i_mem_prty",
+       "qm_mem028_i_mem_prty",
+       "qm_mem027_i_mem_prty",
+       "qm_mem025_i_mem_prty",
+       "qm_mem026_i_mem_prty",
+       "qm_mem029_i_mem_prty",
+       "qm_mem005_i_mem_prty",
+       "qm_mem004_i_mem_prty",
+       "qm_mem002_i_mem_prty",
+       "qm_mem003_i_mem_prty",
+       "qm_mem006_i_mem_prty",
+       "qm_mem007_i_mem_prty",
+       "qm_mem023_i_mem_prty",
+       "qm_mem047_i_mem_prty",
+       "qm_mem049_i_mem_prty",
+       "qm_mem048_i_mem_prty",
+       "qm_mem052_i_mem_prty",
+       "qm_mem050_i_mem_prty",
+       "qm_mem045_i_mem_prty",
+       "qm_mem046_i_mem_prty",
+       "qm_mem043_i_mem_prty",
+       "qm_mem044_i_mem_prty",
+       "qm_mem017_i_mem_prty",
+       "qm_mem016_i_mem_prty",
+       "qm_mem021_i_mem_prty",
+       "qm_mem024_i_mem_prty",
+       "qm_mem019_i_mem_prty",
+       "qm_mem018_i_mem_prty",
+       "qm_mem015_i_mem_prty",
+       "qm_mem022_i_mem_prty",
+       "qm_mem020_i_mem_prty",
+       "qm_mem007_i_mem_prty_0",
+       "qm_mem007_i_mem_prty_1",
+       "qm_mem007_i_mem_prty_2",
+       "qm_mem001_i_mem_prty",
+       "qm_mem043_i_mem_prty_0",
+       "qm_mem043_i_mem_prty_1",
+       "qm_mem043_i_mem_prty_2",
+       "qm_mem007_i_mem_prty_3",
+       "qm_mem007_i_mem_prty_4",
+       "qm_mem007_i_mem_prty_5",
+       "qm_mem007_i_mem_prty_6",
+       "qm_mem007_i_mem_prty_7",
+       "qm_mem007_i_mem_prty_8",
+       "qm_mem007_i_mem_prty_9",
+       "qm_mem007_i_mem_prty_10",
+       "qm_mem007_i_mem_prty_11",
+       "qm_mem007_i_mem_prty_12",
+       "qm_mem007_i_mem_prty_13",
+       "qm_mem007_i_mem_prty_14",
+       "qm_mem007_i_mem_prty_15",
+       "qm_mem043_i_mem_prty_3",
+       "qm_mem043_i_mem_prty_4",
+       "qm_mem043_i_mem_prty_5",
+       "qm_mem043_i_mem_prty_6",
+       "qm_mem043_i_mem_prty_7",
+};
+#else
+#define qm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 qm_prty0_bb_a0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_bb_a0 = {
+       0, 11, qm_prty0_bb_a0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_bb_a0_attn_idx[31] = {
+       17, 35, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52,
+       53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
+};
+
+static struct attn_hw_reg qm_prty1_bb_a0 = {
+       1, 31, qm_prty1_bb_a0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_bb_a0_attn_idx[31] = {
+       66, 67, 69, 70, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 87, 20, 18, 25,
+       27, 32, 24, 26, 41, 31, 29, 28, 30, 23, 88, 89, 90,
+};
+
+static struct attn_hw_reg qm_prty2_bb_a0 = {
+       2, 31, qm_prty2_bb_a0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_bb_a0_attn_idx[11] = {
+       104, 105, 106, 107, 108, 33, 16, 34, 19, 72, 71,
+};
+
+static struct attn_hw_reg qm_prty3_bb_a0 = {
+       3, 11, qm_prty3_bb_a0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_bb_a0_regs[4] = {
+       &qm_prty0_bb_a0, &qm_prty1_bb_a0, &qm_prty2_bb_a0, &qm_prty3_bb_a0,
+};
+
+static const u16 qm_prty0_bb_b0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_bb_b0 = {
+       0, 11, qm_prty0_bb_b0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_bb_b0_attn_idx[31] = {
+       11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+       29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg qm_prty1_bb_b0 = {
+       1, 31, qm_prty1_bb_b0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_bb_b0_attn_idx[31] = {
+       66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78,
+       79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86,
+};
+
+static struct attn_hw_reg qm_prty2_bb_b0 = {
+       2, 31, qm_prty2_bb_b0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_bb_b0_attn_idx[11] = {
+       91, 92, 93, 94, 95, 55, 87, 54, 61, 50, 47,
+};
+
+static struct attn_hw_reg qm_prty3_bb_b0 = {
+       3, 11, qm_prty3_bb_b0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = {
+       &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0,
+};
+
+static const u16 qm_prty0_k2_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg qm_prty0_k2 = {
+       0, 11, qm_prty0_k2_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194
+};
+
+static const u16 qm_prty1_k2_attn_idx[31] = {
+       11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+       29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg qm_prty1_k2 = {
+       1, 31, qm_prty1_k2_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204
+};
+
+static const u16 qm_prty2_k2_attn_idx[31] = {
+       66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78,
+       79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86,
+};
+
+static struct attn_hw_reg qm_prty2_k2 = {
+       2, 31, qm_prty2_k2_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214
+};
+
+static const u16 qm_prty3_k2_attn_idx[19] = {
+       91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 55, 87, 54, 61,
+       50, 47,
+};
+
+static struct attn_hw_reg qm_prty3_k2 = {
+       3, 19, qm_prty3_k2_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224
+};
+
+static struct attn_hw_reg *qm_prty_k2_regs[4] = {
+       &qm_prty0_k2, &qm_prty1_k2, &qm_prty2_k2, &qm_prty3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tm_int_attn_desc[43] = {
+       "tm_address_error",
+       "tm_pxp_read_data_fifo_ov",
+       "tm_pxp_read_data_fifo_un",
+       "tm_pxp_read_ctrl_fifo_ov",
+       "tm_pxp_read_ctrl_fifo_un",
+       "tm_cfc_load_command_fifo_ov",
+       "tm_cfc_load_command_fifo_un",
+       "tm_cfc_load_echo_fifo_ov",
+       "tm_cfc_load_echo_fifo_un",
+       "tm_client_out_fifo_ov",
+       "tm_client_out_fifo_un",
+       "tm_ac_command_fifo_ov",
+       "tm_ac_command_fifo_un",
+       "tm_client_in_pbf_fifo_ov",
+       "tm_client_in_pbf_fifo_un",
+       "tm_client_in_ucm_fifo_ov",
+       "tm_client_in_ucm_fifo_un",
+       "tm_client_in_tcm_fifo_ov",
+       "tm_client_in_tcm_fifo_un",
+       "tm_client_in_xcm_fifo_ov",
+       "tm_client_in_xcm_fifo_un",
+       "tm_expiration_cmd_fifo_ov",
+       "tm_expiration_cmd_fifo_un",
+       "tm_stop_all_lc_invalid",
+       "tm_command_lc_invalid_0",
+       "tm_command_lc_invalid_1",
+       "tm_init_command_lc_valid",
+       "tm_stop_all_exp_lc_valid",
+       "tm_command_cid_invalid_0",
+       "tm_reserved_command",
+       "tm_command_cid_invalid_1",
+       "tm_cload_res_loaderr_conn",
+       "tm_cload_res_loadcancel_conn",
+       "tm_cload_res_validerr_conn",
+       "tm_context_rd_last",
+       "tm_context_wr_last",
+       "tm_pxp_rd_data_eop_bvalid",
+       "tm_pend_conn_scan",
+       "tm_pend_task_scan",
+       "tm_pxp_rd_data_eop_error",
+       "tm_cload_res_loaderr_task",
+       "tm_cload_res_loadcancel_task",
+       "tm_cload_res_validerr_task",
+};
+#else
+#define tm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tm_int0_bb_a0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_bb_a0 = {
+       0, 32, tm_int0_bb_a0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_bb_a0_attn_idx[11] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_bb_a0 = {
+       1, 11, tm_int1_bb_a0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_bb_a0_regs[2] = {
+       &tm_int0_bb_a0, &tm_int1_bb_a0,
+};
+
+static const u16 tm_int0_bb_b0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_bb_b0 = {
+       0, 32, tm_int0_bb_b0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_bb_b0_attn_idx[11] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_bb_b0 = {
+       1, 11, tm_int1_bb_b0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_bb_b0_regs[2] = {
+       &tm_int0_bb_b0, &tm_int1_bb_b0,
+};
+
+static const u16 tm_int0_k2_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tm_int0_k2 = {
+       0, 32, tm_int0_k2_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184
+};
+
+static const u16 tm_int1_k2_attn_idx[11] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+};
+
+static struct attn_hw_reg tm_int1_k2 = {
+       1, 11, tm_int1_k2_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194
+};
+
+static struct attn_hw_reg *tm_int_k2_regs[2] = {
+       &tm_int0_k2, &tm_int1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *tm_prty_attn_desc[17] = {
+       "tm_mem012_i_ecc_0_rf_int",
+       "tm_mem012_i_ecc_1_rf_int",
+       "tm_mem003_i_ecc_rf_int",
+       "tm_mem016_i_mem_prty",
+       "tm_mem007_i_mem_prty",
+       "tm_mem010_i_mem_prty",
+       "tm_mem008_i_mem_prty",
+       "tm_mem009_i_mem_prty",
+       "tm_mem013_i_mem_prty",
+       "tm_mem015_i_mem_prty",
+       "tm_mem014_i_mem_prty",
+       "tm_mem004_i_mem_prty",
+       "tm_mem005_i_mem_prty",
+       "tm_mem006_i_mem_prty",
+       "tm_mem011_i_mem_prty",
+       "tm_mem001_i_mem_prty",
+       "tm_mem002_i_mem_prty",
+};
+#else
+#define tm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tm_prty1_bb_a0_attn_idx[17] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_bb_a0 = {
+       0, 17, tm_prty1_bb_a0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_bb_a0_regs[1] = {
+       &tm_prty1_bb_a0,
+};
+
+static const u16 tm_prty1_bb_b0_attn_idx[17] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_bb_b0 = {
+       0, 17, tm_prty1_bb_b0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = {
+       &tm_prty1_bb_b0,
+};
+
+static const u16 tm_prty1_k2_attn_idx[17] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg tm_prty1_k2 = {
+       0, 17, tm_prty1_k2_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204
+};
+
+static struct attn_hw_reg *tm_prty_k2_regs[1] = {
+       &tm_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dorq_int_attn_desc[9] = {
+       "dorq_address_error",
+       "dorq_db_drop",
+       "dorq_dorq_fifo_ovfl_err",
+       "dorq_dorq_fifo_afull",
+       "dorq_cfc_byp_validation_err",
+       "dorq_cfc_ld_resp_err",
+       "dorq_xcm_done_cnt_err",
+       "dorq_cfc_ld_req_fifo_ovfl_err",
+       "dorq_cfc_ld_req_fifo_under_err",
+};
+#else
+#define dorq_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dorq_int0_bb_a0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_bb_a0 = {
+       0, 9, dorq_int0_bb_a0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_bb_a0_regs[1] = {
+       &dorq_int0_bb_a0,
+};
+
+static const u16 dorq_int0_bb_b0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_bb_b0 = {
+       0, 9, dorq_int0_bb_b0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = {
+       &dorq_int0_bb_b0,
+};
+
+static const u16 dorq_int0_k2_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg dorq_int0_k2 = {
+       0, 9, dorq_int0_k2_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184
+};
+
+static struct attn_hw_reg *dorq_int_k2_regs[1] = {
+       &dorq_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *dorq_prty_attn_desc[7] = {
+       "dorq_datapath_registers",
+       "dorq_mem002_i_ecc_rf_int",
+       "dorq_mem001_i_mem_prty",
+       "dorq_mem003_i_mem_prty",
+       "dorq_mem004_i_mem_prty",
+       "dorq_mem005_i_mem_prty",
+       "dorq_mem006_i_mem_prty",
+};
+#else
+#define dorq_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dorq_prty1_bb_a0_attn_idx[6] = {
+       1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_bb_a0 = {
+       0, 6, dorq_prty1_bb_a0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_bb_a0_regs[1] = {
+       &dorq_prty1_bb_a0,
+};
+
+static const u16 dorq_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg dorq_prty0_bb_b0 = {
+       0, 1, dorq_prty0_bb_b0_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194
+};
+
+static const u16 dorq_prty1_bb_b0_attn_idx[6] = {
+       1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_bb_b0 = {
+       1, 6, dorq_prty1_bb_b0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = {
+       &dorq_prty0_bb_b0, &dorq_prty1_bb_b0,
+};
+
+static const u16 dorq_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg dorq_prty0_k2 = {
+       0, 1, dorq_prty0_k2_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194
+};
+
+static const u16 dorq_prty1_k2_attn_idx[6] = {
+       1, 2, 3, 4, 5, 6,
+};
+
+static struct attn_hw_reg dorq_prty1_k2 = {
+       1, 6, dorq_prty1_k2_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204
+};
+
+static struct attn_hw_reg *dorq_prty_k2_regs[2] = {
+       &dorq_prty0_k2, &dorq_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *brb_int_attn_desc[237] = {
+       "brb_address_error",
+       "brb_rc_pkt0_rls_error",
+       "brb_rc_pkt0_1st_error",
+       "brb_rc_pkt0_len_error",
+       "brb_rc_pkt0_middle_error",
+       "brb_rc_pkt0_protocol_error",
+       "brb_rc_pkt1_rls_error",
+       "brb_rc_pkt1_1st_error",
+       "brb_rc_pkt1_len_error",
+       "brb_rc_pkt1_middle_error",
+       "brb_rc_pkt1_protocol_error",
+       "brb_rc_pkt2_rls_error",
+       "brb_rc_pkt2_1st_error",
+       "brb_rc_pkt2_len_error",
+       "brb_rc_pkt2_middle_error",
+       "brb_rc_pkt2_protocol_error",
+       "brb_rc_pkt3_rls_error",
+       "brb_rc_pkt3_1st_error",
+       "brb_rc_pkt3_len_error",
+       "brb_rc_pkt3_middle_error",
+       "brb_rc_pkt3_protocol_error",
+       "brb_rc_sop_req_tc_port_error",
+       "brb_uncomplient_lossless_error",
+       "brb_wc0_protocol_error",
+       "brb_wc1_protocol_error",
+       "brb_wc2_protocol_error",
+       "brb_wc3_protocol_error",
+       "brb_ll_arb_prefetch_sop_error",
+       "brb_ll_blk_error",
+       "brb_packet_counter_error",
+       "brb_byte_counter_error",
+       "brb_mac0_fc_cnt_error",
+       "brb_mac1_fc_cnt_error",
+       "brb_ll_arb_calc_error",
+       "brb_unused_0",
+       "brb_wc0_inp_fifo_error",
+       "brb_wc0_sop_fifo_error",
+       "brb_unused_1",
+       "brb_wc0_eop_fifo_error",
+       "brb_wc0_queue_fifo_error",
+       "brb_wc0_free_point_fifo_error",
+       "brb_wc0_next_point_fifo_error",
+       "brb_wc0_strt_fifo_error",
+       "brb_wc0_second_dscr_fifo_error",
+       "brb_wc0_pkt_avail_fifo_error",
+       "brb_wc0_cos_cnt_fifo_error",
+       "brb_wc0_notify_fifo_error",
+       "brb_wc0_ll_req_fifo_error",
+       "brb_wc0_ll_pa_cnt_error",
+       "brb_wc0_bb_pa_cnt_error",
+       "brb_wc1_inp_fifo_error",
+       "brb_wc1_sop_fifo_error",
+       "brb_wc1_eop_fifo_error",
+       "brb_wc1_queue_fifo_error",
+       "brb_wc1_free_point_fifo_error",
+       "brb_wc1_next_point_fifo_error",
+       "brb_wc1_strt_fifo_error",
+       "brb_wc1_second_dscr_fifo_error",
+       "brb_wc1_pkt_avail_fifo_error",
+       "brb_wc1_cos_cnt_fifo_error",
+       "brb_wc1_notify_fifo_error",
+       "brb_wc1_ll_req_fifo_error",
+       "brb_wc1_ll_pa_cnt_error",
+       "brb_wc1_bb_pa_cnt_error",
+       "brb_wc2_inp_fifo_error",
+       "brb_wc2_sop_fifo_error",
+       "brb_wc2_eop_fifo_error",
+       "brb_wc2_queue_fifo_error",
+       "brb_wc2_free_point_fifo_error",
+       "brb_wc2_next_point_fifo_error",
+       "brb_wc2_strt_fifo_error",
+       "brb_wc2_second_dscr_fifo_error",
+       "brb_wc2_pkt_avail_fifo_error",
+       "brb_wc2_cos_cnt_fifo_error",
+       "brb_wc2_notify_fifo_error",
+       "brb_wc2_ll_req_fifo_error",
+       "brb_wc2_ll_pa_cnt_error",
+       "brb_wc2_bb_pa_cnt_error",
+       "brb_wc3_inp_fifo_error",
+       "brb_wc3_sop_fifo_error",
+       "brb_wc3_eop_fifo_error",
+       "brb_wc3_queue_fifo_error",
+       "brb_wc3_free_point_fifo_error",
+       "brb_wc3_next_point_fifo_error",
+       "brb_wc3_strt_fifo_error",
+       "brb_wc3_second_dscr_fifo_error",
+       "brb_wc3_pkt_avail_fifo_error",
+       "brb_wc3_cos_cnt_fifo_error",
+       "brb_wc3_notify_fifo_error",
+       "brb_wc3_ll_req_fifo_error",
+       "brb_wc3_ll_pa_cnt_error",
+       "brb_wc3_bb_pa_cnt_error",
+       "brb_rc_pkt0_side_fifo_error",
+       "brb_rc_pkt0_req_fifo_error",
+       "brb_rc_pkt0_blk_fifo_error",
+       "brb_rc_pkt0_rls_left_fifo_error",
+       "brb_rc_pkt0_strt_ptr_fifo_error",
+       "brb_rc_pkt0_second_ptr_fifo_error",
+       "brb_rc_pkt0_rsp_fifo_error",
+       "brb_rc_pkt0_dscr_fifo_error",
+       "brb_rc_pkt1_side_fifo_error",
+       "brb_rc_pkt1_req_fifo_error",
+       "brb_rc_pkt1_blk_fifo_error",
+       "brb_rc_pkt1_rls_left_fifo_error",
+       "brb_rc_pkt1_strt_ptr_fifo_error",
+       "brb_rc_pkt1_second_ptr_fifo_error",
+       "brb_rc_pkt1_rsp_fifo_error",
+       "brb_rc_pkt1_dscr_fifo_error",
+       "brb_rc_pkt2_side_fifo_error",
+       "brb_rc_pkt2_req_fifo_error",
+       "brb_rc_pkt2_blk_fifo_error",
+       "brb_rc_pkt2_rls_left_fifo_error",
+       "brb_rc_pkt2_strt_ptr_fifo_error",
+       "brb_rc_pkt2_second_ptr_fifo_error",
+       "brb_rc_pkt2_rsp_fifo_error",
+       "brb_rc_pkt2_dscr_fifo_error",
+       "brb_rc_pkt3_side_fifo_error",
+       "brb_rc_pkt3_req_fifo_error",
+       "brb_rc_pkt3_blk_fifo_error",
+       "brb_rc_pkt3_rls_left_fifo_error",
+       "brb_rc_pkt3_strt_ptr_fifo_error",
+       "brb_rc_pkt3_second_ptr_fifo_error",
+       "brb_rc_pkt3_rsp_fifo_error",
+       "brb_rc_pkt3_dscr_fifo_error",
+       "brb_rc_sop_strt_fifo_error",
+       "brb_rc_sop_req_fifo_error",
+       "brb_rc_sop_dscr_fifo_error",
+       "brb_rc_sop_queue_fifo_error",
+       "brb_rc0_eop_error",
+       "brb_rc1_eop_error",
+       "brb_ll_arb_rls_fifo_error",
+       "brb_ll_arb_prefetch_fifo_error",
+       "brb_rc_pkt0_rls_fifo_error",
+       "brb_rc_pkt1_rls_fifo_error",
+       "brb_rc_pkt2_rls_fifo_error",
+       "brb_rc_pkt3_rls_fifo_error",
+       "brb_rc_pkt4_rls_fifo_error",
+       "brb_rc_pkt4_rls_error",
+       "brb_rc_pkt4_1st_error",
+       "brb_rc_pkt4_len_error",
+       "brb_rc_pkt4_middle_error",
+       "brb_rc_pkt4_protocol_error",
+       "brb_rc_pkt4_side_fifo_error",
+       "brb_rc_pkt4_req_fifo_error",
+       "brb_rc_pkt4_blk_fifo_error",
+       "brb_rc_pkt4_rls_left_fifo_error",
+       "brb_rc_pkt4_strt_ptr_fifo_error",
+       "brb_rc_pkt4_second_ptr_fifo_error",
+       "brb_rc_pkt4_rsp_fifo_error",
+       "brb_rc_pkt4_dscr_fifo_error",
+       "brb_rc_pkt5_rls_error",
+       "brb_packet_available_sync_fifo_push_error",
+       "brb_wc4_protocol_error",
+       "brb_wc5_protocol_error",
+       "brb_wc6_protocol_error",
+       "brb_wc7_protocol_error",
+       "brb_wc4_inp_fifo_error",
+       "brb_wc4_sop_fifo_error",
+       "brb_wc4_queue_fifo_error",
+       "brb_wc4_free_point_fifo_error",
+       "brb_wc4_next_point_fifo_error",
+       "brb_wc4_strt_fifo_error",
+       "brb_wc4_second_dscr_fifo_error",
+       "brb_wc4_pkt_avail_fifo_error",
+       "brb_wc4_cos_cnt_fifo_error",
+       "brb_wc4_notify_fifo_error",
+       "brb_wc4_ll_req_fifo_error",
+       "brb_wc4_ll_pa_cnt_error",
+       "brb_wc4_bb_pa_cnt_error",
+       "brb_wc5_inp_fifo_error",
+       "brb_wc5_sop_fifo_error",
+       "brb_wc5_queue_fifo_error",
+       "brb_wc5_free_point_fifo_error",
+       "brb_wc5_next_point_fifo_error",
+       "brb_wc5_strt_fifo_error",
+       "brb_wc5_second_dscr_fifo_error",
+       "brb_wc5_pkt_avail_fifo_error",
+       "brb_wc5_cos_cnt_fifo_error",
+       "brb_wc5_notify_fifo_error",
+       "brb_wc5_ll_req_fifo_error",
+       "brb_wc5_ll_pa_cnt_error",
+       "brb_wc5_bb_pa_cnt_error",
+       "brb_wc6_inp_fifo_error",
+       "brb_wc6_sop_fifo_error",
+       "brb_wc6_queue_fifo_error",
+       "brb_wc6_free_point_fifo_error",
+       "brb_wc6_next_point_fifo_error",
+       "brb_wc6_strt_fifo_error",
+       "brb_wc6_second_dscr_fifo_error",
+       "brb_wc6_pkt_avail_fifo_error",
+       "brb_wc6_cos_cnt_fifo_error",
+       "brb_wc6_notify_fifo_error",
+       "brb_wc6_ll_req_fifo_error",
+       "brb_wc6_ll_pa_cnt_error",
+       "brb_wc6_bb_pa_cnt_error",
+       "brb_wc7_inp_fifo_error",
+       "brb_wc7_sop_fifo_error",
+       "brb_wc7_queue_fifo_error",
+       "brb_wc7_free_point_fifo_error",
+       "brb_wc7_next_point_fifo_error",
+       "brb_wc7_strt_fifo_error",
+       "brb_wc7_second_dscr_fifo_error",
+       "brb_wc7_pkt_avail_fifo_error",
+       "brb_wc7_cos_cnt_fifo_error",
+       "brb_wc7_notify_fifo_error",
+       "brb_wc7_ll_req_fifo_error",
+       "brb_wc7_ll_pa_cnt_error",
+       "brb_wc7_bb_pa_cnt_error",
+       "brb_wc9_queue_fifo_error",
+       "brb_rc_sop_inp_sync_fifo_push_error",
+       "brb_rc0_inp_sync_fifo_push_error",
+       "brb_rc1_inp_sync_fifo_push_error",
+       "brb_rc2_inp_sync_fifo_push_error",
+       "brb_rc3_inp_sync_fifo_push_error",
+       "brb_rc0_out_sync_fifo_push_error",
+       "brb_rc1_out_sync_fifo_push_error",
+       "brb_rc2_out_sync_fifo_push_error",
+       "brb_rc3_out_sync_fifo_push_error",
+       "brb_rc4_out_sync_fifo_push_error",
+       "brb_unused_2",
+       "brb_rc0_eop_inp_sync_fifo_push_error",
+       "brb_rc1_eop_inp_sync_fifo_push_error",
+       "brb_rc2_eop_inp_sync_fifo_push_error",
+       "brb_rc3_eop_inp_sync_fifo_push_error",
+       "brb_rc0_eop_out_sync_fifo_push_error",
+       "brb_rc1_eop_out_sync_fifo_push_error",
+       "brb_rc2_eop_out_sync_fifo_push_error",
+       "brb_rc3_eop_out_sync_fifo_push_error",
+       "brb_unused_3",
+       "brb_rc2_eop_error",
+       "brb_rc3_eop_error",
+       "brb_mac2_fc_cnt_error",
+       "brb_mac3_fc_cnt_error",
+       "brb_wc4_eop_fifo_error",
+       "brb_wc5_eop_fifo_error",
+       "brb_wc6_eop_fifo_error",
+       "brb_wc7_eop_fifo_error",
+};
+#else
+#define brb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 brb_int0_bb_a0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_bb_a0 = {
+       0, 32, brb_int0_bb_a0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_bb_a0_attn_idx[30] = {
+       32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+       52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_bb_a0 = {
+       1, 30, brb_int1_bb_a0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_bb_a0_attn_idx[28] = {
+       64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+       82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_bb_a0 = {
+       2, 28, brb_int2_bb_a0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_bb_a0_attn_idx[31] = {
+       92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+       108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+           122,
+};
+
+static struct attn_hw_reg brb_int3_bb_a0 = {
+       3, 31, brb_int3_bb_a0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_bb_a0_attn_idx[27] = {
+       123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+       137,
+       138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_bb_a0 = {
+       4, 27, brb_int4_bb_a0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_bb_a0_attn_idx[1] = {
+       150,
+};
+
+static struct attn_hw_reg brb_int5_bb_a0 = {
+       5, 1, brb_int5_bb_a0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_bb_a0_attn_idx[8] = {
+       151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_bb_a0 = {
+       6, 8, brb_int6_bb_a0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_bb_a0_attn_idx[32] = {
+       159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+       173,
+       174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+           188, 189,
+       190,
+};
+
+static struct attn_hw_reg brb_int7_bb_a0 = {
+       7, 32, brb_int7_bb_a0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_bb_a0_attn_idx[17] = {
+       191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+       205,
+       206, 207,
+};
+
+static struct attn_hw_reg brb_int8_bb_a0 = {
+       8, 17, brb_int8_bb_a0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_bb_a0_attn_idx[1] = {
+       208,
+};
+
+static struct attn_hw_reg brb_int9_bb_a0 = {
+       9, 1, brb_int9_bb_a0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_bb_a0_attn_idx[14] = {
+       209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225,
+};
+
+static struct attn_hw_reg brb_int10_bb_a0 = {
+       10, 14, brb_int10_bb_a0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc,
+       0x3401b8
+};
+
+static const u16 brb_int11_bb_a0_attn_idx[8] = {
+       229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_bb_a0 = {
+       11, 8, brb_int11_bb_a0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_bb_a0_regs[12] = {
+       &brb_int0_bb_a0, &brb_int1_bb_a0, &brb_int2_bb_a0, &brb_int3_bb_a0,
+       &brb_int4_bb_a0, &brb_int5_bb_a0, &brb_int6_bb_a0, &brb_int7_bb_a0,
+       &brb_int8_bb_a0, &brb_int9_bb_a0,
+       &brb_int10_bb_a0, &brb_int11_bb_a0,
+};
+
+static const u16 brb_int0_bb_b0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_bb_b0 = {
+       0, 32, brb_int0_bb_b0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_bb_b0_attn_idx[30] = {
+       32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+       52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+static struct attn_hw_reg brb_int1_bb_b0 = {
+       1, 30, brb_int1_bb_b0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_bb_b0_attn_idx[28] = {
+       64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+       82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_bb_b0 = {
+       2, 28, brb_int2_bb_b0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_bb_b0_attn_idx[31] = {
+       92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+       108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+           122,
+};
+
+static struct attn_hw_reg brb_int3_bb_b0 = {
+       3, 31, brb_int3_bb_b0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_bb_b0_attn_idx[27] = {
+       123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+       137,
+       138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_bb_b0 = {
+       4, 27, brb_int4_bb_b0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_bb_b0_attn_idx[1] = {
+       150,
+};
+
+static struct attn_hw_reg brb_int5_bb_b0 = {
+       5, 1, brb_int5_bb_b0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_bb_b0_attn_idx[8] = {
+       151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_bb_b0 = {
+       6, 8, brb_int6_bb_b0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_bb_b0_attn_idx[32] = {
+       159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+       173,
+       174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+           188, 189,
+       190,
+};
+
+static struct attn_hw_reg brb_int7_bb_b0 = {
+       7, 32, brb_int7_bb_b0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_bb_b0_attn_idx[17] = {
+       191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+       205,
+       206, 207,
+};
+
+static struct attn_hw_reg brb_int8_bb_b0 = {
+       8, 17, brb_int8_bb_b0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_bb_b0_attn_idx[1] = {
+       208,
+};
+
+static struct attn_hw_reg brb_int9_bb_b0 = {
+       9, 1, brb_int9_bb_b0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+static const u16 brb_int10_bb_b0_attn_idx[14] = {
+       209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225,
+};
+
+static struct attn_hw_reg brb_int10_bb_b0 = {
+       10, 14, brb_int10_bb_b0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc,
+       0x3401b8
+};
+
+static const u16 brb_int11_bb_b0_attn_idx[8] = {
+       229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_bb_b0 = {
+       11, 8, brb_int11_bb_b0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+static struct attn_hw_reg *brb_int_bb_b0_regs[12] = {
+       &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0,
+       &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0,
+       &brb_int8_bb_b0, &brb_int9_bb_b0,
+       &brb_int10_bb_b0, &brb_int11_bb_b0,
+};
+
+static const u16 brb_int0_k2_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg brb_int0_k2 = {
+       0, 32, brb_int0_k2_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4
+};
+
+static const u16 brb_int1_k2_attn_idx[30] = {
+       32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+       52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+};
+
+/* BRB interrupt attention registers, K2 chip variant.
+ *
+ * Each brb_intN_k2_attn_idx[] array lists indices into the shared BRB
+ * interrupt description table defined earlier in this file.  Each
+ * attn_hw_reg initializer carries an id, the number of valid attention
+ * bits (always equal to the index-array length), the index array, and
+ * four hardware register addresses -- presumably the mask/status/clear
+ * register set; confirm field names against the struct attn_hw_reg
+ * declaration.  Tables are generator-produced: do not edit by hand.
+ */
+static struct attn_hw_reg brb_int1_k2 = {
+       1, 30, brb_int1_k2_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc
+};
+
+static const u16 brb_int2_k2_attn_idx[28] = {
+       64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+       82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
+};
+
+static struct attn_hw_reg brb_int2_k2 = {
+       2, 28, brb_int2_k2_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4
+};
+
+static const u16 brb_int3_k2_attn_idx[31] = {
+       92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+       108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
+           122,
+};
+
+static struct attn_hw_reg brb_int3_k2 = {
+       3, 31, brb_int3_k2_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c
+};
+
+static const u16 brb_int4_k2_attn_idx[27] = {
+       123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136,
+       137,
+       138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+};
+
+static struct attn_hw_reg brb_int4_k2 = {
+       4, 27, brb_int4_k2_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124
+};
+
+static const u16 brb_int5_k2_attn_idx[1] = {
+       150,
+};
+
+static struct attn_hw_reg brb_int5_k2 = {
+       5, 1, brb_int5_k2_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c
+};
+
+static const u16 brb_int6_k2_attn_idx[8] = {
+       151, 152, 153, 154, 155, 156, 157, 158,
+};
+
+static struct attn_hw_reg brb_int6_k2 = {
+       6, 8, brb_int6_k2_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154
+};
+
+static const u16 brb_int7_k2_attn_idx[32] = {
+       159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+       173,
+       174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
+           188, 189,
+       190,
+};
+
+static struct attn_hw_reg brb_int7_k2 = {
+       7, 32, brb_int7_k2_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c
+};
+
+static const u16 brb_int8_k2_attn_idx[17] = {
+       191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204,
+       205,
+       206, 207,
+};
+
+static struct attn_hw_reg brb_int8_k2 = {
+       8, 17, brb_int8_k2_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188
+};
+
+static const u16 brb_int9_k2_attn_idx[1] = {
+       208,
+};
+
+static struct attn_hw_reg brb_int9_k2 = {
+       9, 1, brb_int9_k2_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0
+};
+
+/* NOTE(review): index 219 is skipped here and 228 below -- presumably
+ * reserved attention bits in the K2 BRB; the gaps come from the table
+ * generator, so do not "repair" them by hand.
+ */
+static const u16 brb_int10_k2_attn_idx[18] = {
+       209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 222, 223,
+       224,
+       225, 226, 227,
+};
+
+static struct attn_hw_reg brb_int10_k2 = {
+       10, 18, brb_int10_k2_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8
+};
+
+static const u16 brb_int11_k2_attn_idx[8] = {
+       229, 230, 231, 232, 233, 234, 235, 236,
+};
+
+static struct attn_hw_reg brb_int11_k2 = {
+       11, 8, brb_int11_k2_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0
+};
+
+/* All twelve K2 BRB interrupt registers, in register-id order. */
+static struct attn_hw_reg *brb_int_k2_regs[12] = {
+       &brb_int0_k2, &brb_int1_k2, &brb_int2_k2, &brb_int3_k2, &brb_int4_k2,
+       &brb_int5_k2, &brb_int6_k2, &brb_int7_k2, &brb_int8_k2, &brb_int9_k2,
+       &brb_int10_k2, &brb_int11_k2,
+};
+
+/* Human-readable names for the BRB parity attention bits.  The attn_idx
+ * arrays below index into this table.  Compiled in only when ATTN_DESC
+ * is defined; otherwise the symbol is OSAL_NULL to save footprint.
+ */
+#ifdef ATTN_DESC
+static const char *brb_prty_attn_desc[75] = {
+       "brb_ll_bank0_mem_prty",
+       "brb_ll_bank1_mem_prty",
+       "brb_ll_bank2_mem_prty",
+       "brb_ll_bank3_mem_prty",
+       "brb_datapath_registers",
+       "brb_mem001_i_ecc_rf_int",
+       "brb_mem008_i_ecc_rf_int",
+       "brb_mem009_i_ecc_rf_int",
+       "brb_mem010_i_ecc_rf_int",
+       "brb_mem011_i_ecc_rf_int",
+       "brb_mem012_i_ecc_rf_int",
+       "brb_mem013_i_ecc_rf_int",
+       "brb_mem014_i_ecc_rf_int",
+       "brb_mem015_i_ecc_rf_int",
+       "brb_mem016_i_ecc_rf_int",
+       "brb_mem002_i_ecc_rf_int",
+       "brb_mem003_i_ecc_rf_int",
+       "brb_mem004_i_ecc_rf_int",
+       "brb_mem005_i_ecc_rf_int",
+       "brb_mem006_i_ecc_rf_int",
+       "brb_mem007_i_ecc_rf_int",
+       "brb_mem070_i_mem_prty",
+       "brb_mem069_i_mem_prty",
+       "brb_mem053_i_mem_prty",
+       "brb_mem054_i_mem_prty",
+       "brb_mem055_i_mem_prty",
+       "brb_mem056_i_mem_prty",
+       "brb_mem057_i_mem_prty",
+       "brb_mem058_i_mem_prty",
+       "brb_mem059_i_mem_prty",
+       "brb_mem060_i_mem_prty",
+       "brb_mem061_i_mem_prty",
+       "brb_mem062_i_mem_prty",
+       "brb_mem063_i_mem_prty",
+       "brb_mem064_i_mem_prty",
+       "brb_mem065_i_mem_prty",
+       "brb_mem045_i_mem_prty",
+       "brb_mem046_i_mem_prty",
+       "brb_mem047_i_mem_prty",
+       "brb_mem048_i_mem_prty",
+       "brb_mem049_i_mem_prty",
+       "brb_mem050_i_mem_prty",
+       "brb_mem051_i_mem_prty",
+       "brb_mem052_i_mem_prty",
+       "brb_mem041_i_mem_prty",
+       "brb_mem042_i_mem_prty",
+       "brb_mem043_i_mem_prty",
+       "brb_mem044_i_mem_prty",
+       "brb_mem040_i_mem_prty",
+       "brb_mem035_i_mem_prty",
+       "brb_mem066_i_mem_prty",
+       "brb_mem067_i_mem_prty",
+       "brb_mem068_i_mem_prty",
+       "brb_mem030_i_mem_prty",
+       "brb_mem031_i_mem_prty",
+       "brb_mem032_i_mem_prty",
+       "brb_mem033_i_mem_prty",
+       "brb_mem037_i_mem_prty",
+       "brb_mem038_i_mem_prty",
+       "brb_mem034_i_mem_prty",
+       "brb_mem036_i_mem_prty",
+       "brb_mem017_i_mem_prty",
+       "brb_mem018_i_mem_prty",
+       "brb_mem019_i_mem_prty",
+       "brb_mem020_i_mem_prty",
+       "brb_mem021_i_mem_prty",
+       "brb_mem022_i_mem_prty",
+       "brb_mem023_i_mem_prty",
+       "brb_mem024_i_mem_prty",
+       "brb_mem029_i_mem_prty",
+       "brb_mem026_i_mem_prty",
+       "brb_mem027_i_mem_prty",
+       "brb_mem028_i_mem_prty",
+       "brb_mem025_i_mem_prty",
+       "brb_mem039_i_mem_prty",
+};
+#else
+#define brb_prty_attn_desc OSAL_NULL
+#endif
+
+/* BRB parity attention registers, one table set per chip variant
+ * (bb_a0, bb_b0, k2).  Indices refer to brb_prty_attn_desc above.
+ * The variants select different subsets/orderings of the same 75
+ * description entries; the register addresses mostly coincide.
+ */
+static const u16 brb_prty1_bb_a0_attn_idx[31] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36,
+       37,
+       38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 49,
+};
+
+static struct attn_hw_reg brb_prty1_bb_a0 = {
+       0, 31, brb_prty1_bb_a0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_bb_a0_attn_idx[19] = {
+       53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 69, 70, 71, 72, 73, 74,
+       48,
+};
+
+static struct attn_hw_reg brb_prty2_bb_a0 = {
+       1, 19, brb_prty2_bb_a0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_bb_a0_regs[2] = {
+       &brb_prty1_bb_a0, &brb_prty2_bb_a0,
+};
+
+/* bb_b0 adds a prty0 register (descriptions 0-4) absent on bb_a0. */
+static const u16 brb_prty0_bb_b0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg brb_prty0_bb_b0 = {
+       0, 5, brb_prty0_bb_b0_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0
+};
+
+static const u16 brb_prty1_bb_b0_attn_idx[31] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36,
+       37,
+       38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+};
+
+static struct attn_hw_reg brb_prty1_bb_b0 = {
+       1, 31, brb_prty1_bb_b0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_bb_b0_attn_idx[14] = {
+       53, 54, 55, 56, 59, 61, 62, 63, 64, 69, 70, 71, 72, 73,
+};
+
+static struct attn_hw_reg brb_prty2_bb_b0 = {
+       2, 14, brb_prty2_bb_b0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = {
+       &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0,
+};
+
+static const u16 brb_prty0_k2_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg brb_prty0_k2 = {
+       0, 5, brb_prty0_k2_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0
+};
+
+static const u16 brb_prty1_k2_attn_idx[31] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+       24,
+       25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg brb_prty1_k2 = {
+       1, 31, brb_prty1_k2_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404
+};
+
+static const u16 brb_prty2_k2_attn_idx[30] = {
+       50, 51, 52, 36, 37, 38, 39, 40, 41, 42, 43, 47, 53, 54, 55, 56, 57, 58,
+       59, 49, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+};
+
+static struct attn_hw_reg brb_prty2_k2 = {
+       2, 30, brb_prty2_k2_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414
+};
+
+static struct attn_hw_reg *brb_prty_k2_regs[3] = {
+       &brb_prty0_k2, &brb_prty1_k2, &brb_prty2_k2,
+};
+
+/* SRC block interrupt attentions: a single "address error" bit, with
+ * identical index tables and register addresses across all three chip
+ * variants (bb_a0, bb_b0, k2).
+ */
+#ifdef ATTN_DESC
+static const char *src_int_attn_desc[1] = {
+       "src_address_error",
+};
+#else
+#define src_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 src_int0_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg src_int0_bb_a0 = {
+       0, 1, src_int0_bb_a0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_bb_a0_regs[1] = {
+       &src_int0_bb_a0,
+};
+
+static const u16 src_int0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg src_int0_bb_b0 = {
+       0, 1, src_int0_bb_b0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_bb_b0_regs[1] = {
+       &src_int0_bb_b0,
+};
+
+static const u16 src_int0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg src_int0_k2 = {
+       0, 1, src_int0_k2_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4
+};
+
+static struct attn_hw_reg *src_int_k2_regs[1] = {
+       &src_int0_k2,
+};
+
+/* PRS (parser) block interrupt attentions: two bits, identical across
+ * all three chip variants.
+ */
+#ifdef ATTN_DESC
+static const char *prs_int_attn_desc[2] = {
+       "prs_address_error",
+       "prs_lcid_validation_err",
+};
+#else
+#define prs_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 prs_int0_bb_a0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg prs_int0_bb_a0 = {
+       0, 2, prs_int0_bb_a0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_bb_a0_regs[1] = {
+       &prs_int0_bb_a0,
+};
+
+static const u16 prs_int0_bb_b0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg prs_int0_bb_b0 = {
+       0, 2, prs_int0_bb_b0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_bb_b0_regs[1] = {
+       &prs_int0_bb_b0,
+};
+
+static const u16 prs_int0_k2_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg prs_int0_k2 = {
+       0, 2, prs_int0_k2_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044
+};
+
+static struct attn_hw_reg *prs_int_k2_regs[1] = {
+       &prs_int0_k2,
+};
+
+/* Human-readable names for the PRS parity attention bits (indexed by
+ * the prs_prty*_attn_idx tables below).  Only built when ATTN_DESC is
+ * defined; OSAL_NULL otherwise.
+ */
+#ifdef ATTN_DESC
+static const char *prs_prty_attn_desc[75] = {
+       "prs_cam_parity",
+       "prs_gft_cam_parity",
+       "prs_mem011_i_ecc_rf_int",
+       "prs_mem012_i_ecc_rf_int",
+       "prs_mem016_i_ecc_rf_int",
+       "prs_mem017_i_ecc_rf_int",
+       "prs_mem021_i_ecc_rf_int",
+       "prs_mem022_i_ecc_rf_int",
+       "prs_mem026_i_ecc_rf_int",
+       "prs_mem027_i_ecc_rf_int",
+       "prs_mem064_i_mem_prty",
+       "prs_mem044_i_mem_prty",
+       "prs_mem043_i_mem_prty",
+       "prs_mem037_i_mem_prty",
+       "prs_mem033_i_mem_prty",
+       "prs_mem034_i_mem_prty",
+       "prs_mem035_i_mem_prty",
+       "prs_mem036_i_mem_prty",
+       "prs_mem029_i_mem_prty",
+       "prs_mem030_i_mem_prty",
+       "prs_mem031_i_mem_prty",
+       "prs_mem032_i_mem_prty",
+       "prs_mem007_i_mem_prty",
+       "prs_mem028_i_mem_prty",
+       "prs_mem039_i_mem_prty",
+       "prs_mem040_i_mem_prty",
+       "prs_mem058_i_mem_prty",
+       "prs_mem059_i_mem_prty",
+       "prs_mem041_i_mem_prty",
+       "prs_mem042_i_mem_prty",
+       "prs_mem060_i_mem_prty",
+       "prs_mem061_i_mem_prty",
+       "prs_mem009_i_mem_prty",
+       "prs_mem009_i_ecc_rf_int",
+       "prs_mem010_i_ecc_rf_int",
+       "prs_mem014_i_ecc_rf_int",
+       "prs_mem015_i_ecc_rf_int",
+       "prs_mem026_i_mem_prty",
+       "prs_mem025_i_mem_prty",
+       "prs_mem021_i_mem_prty",
+       "prs_mem019_i_mem_prty",
+       "prs_mem020_i_mem_prty",
+       "prs_mem017_i_mem_prty",
+       "prs_mem018_i_mem_prty",
+       "prs_mem005_i_mem_prty",
+       "prs_mem016_i_mem_prty",
+       "prs_mem023_i_mem_prty",
+       "prs_mem024_i_mem_prty",
+       "prs_mem008_i_mem_prty",
+       "prs_mem012_i_mem_prty",
+       "prs_mem013_i_mem_prty",
+       "prs_mem006_i_mem_prty",
+       "prs_mem011_i_mem_prty",
+       "prs_mem003_i_mem_prty",
+       "prs_mem004_i_mem_prty",
+       "prs_mem027_i_mem_prty",
+       "prs_mem010_i_mem_prty",
+       "prs_mem014_i_mem_prty",
+       "prs_mem015_i_mem_prty",
+       "prs_mem054_i_mem_prty",
+       "prs_mem055_i_mem_prty",
+       "prs_mem056_i_mem_prty",
+       "prs_mem057_i_mem_prty",
+       "prs_mem046_i_mem_prty",
+       "prs_mem047_i_mem_prty",
+       "prs_mem048_i_mem_prty",
+       "prs_mem049_i_mem_prty",
+       "prs_mem050_i_mem_prty",
+       "prs_mem051_i_mem_prty",
+       "prs_mem052_i_mem_prty",
+       "prs_mem053_i_mem_prty",
+       "prs_mem062_i_mem_prty",
+       "prs_mem045_i_mem_prty",
+       "prs_mem002_i_mem_prty",
+       "prs_mem001_i_mem_prty",
+};
+#else
+#define prs_prty_attn_desc OSAL_NULL
+#endif
+
+/* PRS parity attention registers per chip variant.  Note bb_a0 exposes
+ * only one prty0 bit (no gft_cam_parity), while bb_b0 and k2 expose
+ * two; the prty1/prty2 index subsets also differ between variants.
+ */
+static const u16 prs_prty0_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg prs_prty0_bb_a0 = {
+       0, 1, prs_prty0_bb_a0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_bb_a0_attn_idx[31] = {
+       13, 14, 15, 16, 18, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+       43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+};
+
+static struct attn_hw_reg prs_prty1_bb_a0 = {
+       1, 31, prs_prty1_bb_a0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_bb_a0_attn_idx[5] = {
+       73, 74, 20, 17, 19,
+};
+
+static struct attn_hw_reg prs_prty2_bb_a0 = {
+       2, 5, prs_prty2_bb_a0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_bb_a0_regs[3] = {
+       &prs_prty0_bb_a0, &prs_prty1_bb_a0, &prs_prty2_bb_a0,
+};
+
+static const u16 prs_prty0_bb_b0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg prs_prty0_bb_b0 = {
+       0, 2, prs_prty0_bb_b0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_bb_b0_attn_idx[31] = {
+       13, 14, 15, 16, 18, 19, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+       42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+};
+
+static struct attn_hw_reg prs_prty1_bb_b0 = {
+       1, 31, prs_prty1_bb_b0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_bb_b0_attn_idx[5] = {
+       73, 74, 20, 17, 55,
+};
+
+static struct attn_hw_reg prs_prty2_bb_b0 = {
+       2, 5, prs_prty2_bb_b0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = {
+       &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0,
+};
+
+static const u16 prs_prty0_k2_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg prs_prty0_k2 = {
+       0, 2, prs_prty0_k2_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054
+};
+
+static const u16 prs_prty1_k2_attn_idx[31] = {
+       2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+};
+
+static struct attn_hw_reg prs_prty1_k2 = {
+       1, 31, prs_prty1_k2_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208
+};
+
+static const u16 prs_prty2_k2_attn_idx[31] = {
+       56, 57, 58, 40, 41, 47, 38, 48, 50, 43, 46, 59, 60, 61, 62, 53, 54, 44,
+       51, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+};
+
+static struct attn_hw_reg prs_prty2_k2 = {
+       2, 31, prs_prty2_k2_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218
+};
+
+static struct attn_hw_reg *prs_prty_k2_regs[3] = {
+       &prs_prty0_k2, &prs_prty1_k2, &prs_prty2_k2,
+};
+
+/* TSDM interrupt attentions.  BB (a0/b0) uses bits 0-25; K2 adds the
+ * two xcm/ycm remote-buffer error bits (26, 27).
+ */
+#ifdef ATTN_DESC
+static const char *tsdm_int_attn_desc[28] = {
+       "tsdm_address_error",
+       "tsdm_inp_queue_error",
+       "tsdm_delay_fifo_error",
+       "tsdm_async_host_error",
+       "tsdm_prm_fifo_error",
+       "tsdm_ccfc_load_pend_error",
+       "tsdm_tcfc_load_pend_error",
+       "tsdm_dst_int_ram_wait_error",
+       "tsdm_dst_pas_buf_wait_error",
+       "tsdm_dst_pxp_immed_error",
+       "tsdm_dst_pxp_dst_pend_error",
+       "tsdm_dst_brb_src_pend_error",
+       "tsdm_dst_brb_src_addr_error",
+       "tsdm_rsp_brb_pend_error",
+       "tsdm_rsp_int_ram_pend_error",
+       "tsdm_rsp_brb_rd_data_error",
+       "tsdm_rsp_int_ram_rd_data_error",
+       "tsdm_rsp_pxp_rd_data_error",
+       "tsdm_cm_delay_error",
+       "tsdm_sh_delay_error",
+       "tsdm_cmpl_pend_error",
+       "tsdm_cprm_pend_error",
+       "tsdm_timer_addr_error",
+       "tsdm_timer_pend_error",
+       "tsdm_dorq_dpm_error",
+       "tsdm_dst_pxp_done_error",
+       "tsdm_xcm_rmt_buffer_error",
+       "tsdm_ycm_rmt_buffer_error",
+};
+#else
+#define tsdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsdm_int0_bb_a0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg tsdm_int0_bb_a0 = {
+       0, 26, tsdm_int0_bb_a0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_bb_a0_regs[1] = {
+       &tsdm_int0_bb_a0,
+};
+
+static const u16 tsdm_int0_bb_b0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg tsdm_int0_bb_b0 = {
+       0, 26, tsdm_int0_bb_b0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = {
+       &tsdm_int0_bb_b0,
+};
+
+static const u16 tsdm_int0_k2_attn_idx[28] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg tsdm_int0_k2 = {
+       0, 28, tsdm_int0_k2_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044
+};
+
+static struct attn_hw_reg *tsdm_int_k2_regs[1] = {
+       &tsdm_int0_k2,
+};
+
+/* TSDM parity attentions: ten memory-parity bits, identical index
+ * tables and register addresses on all three chip variants.
+ */
+#ifdef ATTN_DESC
+static const char *tsdm_prty_attn_desc[10] = {
+       "tsdm_mem009_i_mem_prty",
+       "tsdm_mem008_i_mem_prty",
+       "tsdm_mem007_i_mem_prty",
+       "tsdm_mem006_i_mem_prty",
+       "tsdm_mem005_i_mem_prty",
+       "tsdm_mem002_i_mem_prty",
+       "tsdm_mem010_i_mem_prty",
+       "tsdm_mem001_i_mem_prty",
+       "tsdm_mem003_i_mem_prty",
+       "tsdm_mem004_i_mem_prty",
+};
+#else
+#define tsdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsdm_prty1_bb_a0_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_bb_a0 = {
+       0, 10, tsdm_prty1_bb_a0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208,
+       0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_bb_a0_regs[1] = {
+       &tsdm_prty1_bb_a0,
+};
+
+static const u16 tsdm_prty1_bb_b0_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_bb_b0 = {
+       0, 10, tsdm_prty1_bb_b0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208,
+       0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = {
+       &tsdm_prty1_bb_b0,
+};
+
+static const u16 tsdm_prty1_k2_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tsdm_prty1_k2 = {
+       0, 10, tsdm_prty1_k2_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204
+};
+
+static struct attn_hw_reg *tsdm_prty_k2_regs[1] = {
+       &tsdm_prty1_k2,
+};
+
+/* MSDM interrupt attentions -- same layout as TSDM: bits 0-25 on BB,
+ * 0-27 on K2, different register base (0xfc0000 range).
+ */
+#ifdef ATTN_DESC
+static const char *msdm_int_attn_desc[28] = {
+       "msdm_address_error",
+       "msdm_inp_queue_error",
+       "msdm_delay_fifo_error",
+       "msdm_async_host_error",
+       "msdm_prm_fifo_error",
+       "msdm_ccfc_load_pend_error",
+       "msdm_tcfc_load_pend_error",
+       "msdm_dst_int_ram_wait_error",
+       "msdm_dst_pas_buf_wait_error",
+       "msdm_dst_pxp_immed_error",
+       "msdm_dst_pxp_dst_pend_error",
+       "msdm_dst_brb_src_pend_error",
+       "msdm_dst_brb_src_addr_error",
+       "msdm_rsp_brb_pend_error",
+       "msdm_rsp_int_ram_pend_error",
+       "msdm_rsp_brb_rd_data_error",
+       "msdm_rsp_int_ram_rd_data_error",
+       "msdm_rsp_pxp_rd_data_error",
+       "msdm_cm_delay_error",
+       "msdm_sh_delay_error",
+       "msdm_cmpl_pend_error",
+       "msdm_cprm_pend_error",
+       "msdm_timer_addr_error",
+       "msdm_timer_pend_error",
+       "msdm_dorq_dpm_error",
+       "msdm_dst_pxp_done_error",
+       "msdm_xcm_rmt_buffer_error",
+       "msdm_ycm_rmt_buffer_error",
+};
+#else
+#define msdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 msdm_int0_bb_a0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg msdm_int0_bb_a0 = {
+       0, 26, msdm_int0_bb_a0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_bb_a0_regs[1] = {
+       &msdm_int0_bb_a0,
+};
+
+static const u16 msdm_int0_bb_b0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg msdm_int0_bb_b0 = {
+       0, 26, msdm_int0_bb_b0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = {
+       &msdm_int0_bb_b0,
+};
+
+static const u16 msdm_int0_k2_attn_idx[28] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg msdm_int0_k2 = {
+       0, 28, msdm_int0_k2_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044
+};
+
+static struct attn_hw_reg *msdm_int_k2_regs[1] = {
+       &msdm_int0_k2,
+};
+
+/* MSDM parity attentions: eleven memory-parity bits, identical across
+ * chip variants.
+ */
+#ifdef ATTN_DESC
+static const char *msdm_prty_attn_desc[11] = {
+       "msdm_mem009_i_mem_prty",
+       "msdm_mem008_i_mem_prty",
+       "msdm_mem007_i_mem_prty",
+       "msdm_mem006_i_mem_prty",
+       "msdm_mem005_i_mem_prty",
+       "msdm_mem002_i_mem_prty",
+       "msdm_mem011_i_mem_prty",
+       "msdm_mem001_i_mem_prty",
+       "msdm_mem003_i_mem_prty",
+       "msdm_mem004_i_mem_prty",
+       "msdm_mem010_i_mem_prty",
+};
+#else
+#define msdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 msdm_prty1_bb_a0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_bb_a0 = {
+       0, 11, msdm_prty1_bb_a0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208,
+       0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_bb_a0_regs[1] = {
+       &msdm_prty1_bb_a0,
+};
+
+static const u16 msdm_prty1_bb_b0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_bb_b0 = {
+       0, 11, msdm_prty1_bb_b0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208,
+       0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = {
+       &msdm_prty1_bb_b0,
+};
+
+static const u16 msdm_prty1_k2_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg msdm_prty1_k2 = {
+       0, 11, msdm_prty1_k2_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204
+};
+
+static struct attn_hw_reg *msdm_prty_k2_regs[1] = {
+       &msdm_prty1_k2,
+};
+
+/* USDM interrupt attentions -- same layout as TSDM/MSDM: bits 0-25 on
+ * BB, 0-27 on K2, register base in the 0xfd0000 range.
+ */
+#ifdef ATTN_DESC
+static const char *usdm_int_attn_desc[28] = {
+       "usdm_address_error",
+       "usdm_inp_queue_error",
+       "usdm_delay_fifo_error",
+       "usdm_async_host_error",
+       "usdm_prm_fifo_error",
+       "usdm_ccfc_load_pend_error",
+       "usdm_tcfc_load_pend_error",
+       "usdm_dst_int_ram_wait_error",
+       "usdm_dst_pas_buf_wait_error",
+       "usdm_dst_pxp_immed_error",
+       "usdm_dst_pxp_dst_pend_error",
+       "usdm_dst_brb_src_pend_error",
+       "usdm_dst_brb_src_addr_error",
+       "usdm_rsp_brb_pend_error",
+       "usdm_rsp_int_ram_pend_error",
+       "usdm_rsp_brb_rd_data_error",
+       "usdm_rsp_int_ram_rd_data_error",
+       "usdm_rsp_pxp_rd_data_error",
+       "usdm_cm_delay_error",
+       "usdm_sh_delay_error",
+       "usdm_cmpl_pend_error",
+       "usdm_cprm_pend_error",
+       "usdm_timer_addr_error",
+       "usdm_timer_pend_error",
+       "usdm_dorq_dpm_error",
+       "usdm_dst_pxp_done_error",
+       "usdm_xcm_rmt_buffer_error",
+       "usdm_ycm_rmt_buffer_error",
+};
+#else
+#define usdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 usdm_int0_bb_a0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg usdm_int0_bb_a0 = {
+       0, 26, usdm_int0_bb_a0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_bb_a0_regs[1] = {
+       &usdm_int0_bb_a0,
+};
+
+static const u16 usdm_int0_bb_b0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg usdm_int0_bb_b0 = {
+       0, 26, usdm_int0_bb_b0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = {
+       &usdm_int0_bb_b0,
+};
+
+static const u16 usdm_int0_k2_attn_idx[28] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg usdm_int0_k2 = {
+       0, 28, usdm_int0_k2_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044
+};
+
+static struct attn_hw_reg *usdm_int_k2_regs[1] = {
+       &usdm_int0_k2,
+};
+
+/* USDM parity attentions: ten memory-parity bits, identical across
+ * chip variants.
+ */
+#ifdef ATTN_DESC
+static const char *usdm_prty_attn_desc[10] = {
+       "usdm_mem008_i_mem_prty",
+       "usdm_mem007_i_mem_prty",
+       "usdm_mem006_i_mem_prty",
+       "usdm_mem005_i_mem_prty",
+       "usdm_mem002_i_mem_prty",
+       "usdm_mem010_i_mem_prty",
+       "usdm_mem001_i_mem_prty",
+       "usdm_mem003_i_mem_prty",
+       "usdm_mem004_i_mem_prty",
+       "usdm_mem009_i_mem_prty",
+};
+#else
+#define usdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 usdm_prty1_bb_a0_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_bb_a0 = {
+       0, 10, usdm_prty1_bb_a0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208,
+       0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_bb_a0_regs[1] = {
+       &usdm_prty1_bb_a0,
+};
+
+static const u16 usdm_prty1_bb_b0_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_bb_b0 = {
+       0, 10, usdm_prty1_bb_b0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208,
+       0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = {
+       &usdm_prty1_bb_b0,
+};
+
+static const u16 usdm_prty1_k2_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg usdm_prty1_k2 = {
+       0, 10, usdm_prty1_k2_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204
+};
+
+static struct attn_hw_reg *usdm_prty_k2_regs[1] = {
+       &usdm_prty1_k2,
+};
+
+/* XSDM interrupt attentions -- same layout as the other SDM blocks:
+ * bits 0-25 on BB, 0-27 on K2, register base in the 0xf80000 range.
+ */
+#ifdef ATTN_DESC
+static const char *xsdm_int_attn_desc[28] = {
+       "xsdm_address_error",
+       "xsdm_inp_queue_error",
+       "xsdm_delay_fifo_error",
+       "xsdm_async_host_error",
+       "xsdm_prm_fifo_error",
+       "xsdm_ccfc_load_pend_error",
+       "xsdm_tcfc_load_pend_error",
+       "xsdm_dst_int_ram_wait_error",
+       "xsdm_dst_pas_buf_wait_error",
+       "xsdm_dst_pxp_immed_error",
+       "xsdm_dst_pxp_dst_pend_error",
+       "xsdm_dst_brb_src_pend_error",
+       "xsdm_dst_brb_src_addr_error",
+       "xsdm_rsp_brb_pend_error",
+       "xsdm_rsp_int_ram_pend_error",
+       "xsdm_rsp_brb_rd_data_error",
+       "xsdm_rsp_int_ram_rd_data_error",
+       "xsdm_rsp_pxp_rd_data_error",
+       "xsdm_cm_delay_error",
+       "xsdm_sh_delay_error",
+       "xsdm_cmpl_pend_error",
+       "xsdm_cprm_pend_error",
+       "xsdm_timer_addr_error",
+       "xsdm_timer_pend_error",
+       "xsdm_dorq_dpm_error",
+       "xsdm_dst_pxp_done_error",
+       "xsdm_xcm_rmt_buffer_error",
+       "xsdm_ycm_rmt_buffer_error",
+};
+#else
+#define xsdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsdm_int0_bb_a0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg xsdm_int0_bb_a0 = {
+       0, 26, xsdm_int0_bb_a0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_bb_a0_regs[1] = {
+       &xsdm_int0_bb_a0,
+};
+
+static const u16 xsdm_int0_bb_b0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg xsdm_int0_bb_b0 = {
+       0, 26, xsdm_int0_bb_b0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = {
+       &xsdm_int0_bb_b0,
+};
+
+static const u16 xsdm_int0_k2_attn_idx[28] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg xsdm_int0_k2 = {
+       0, 28, xsdm_int0_k2_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044
+};
+
+static struct attn_hw_reg *xsdm_int_k2_regs[1] = {
+       &xsdm_int0_k2,
+};
+
+/* XSDM parity attentions: ten memory-parity bits, identical across
+ * chip variants.
+ */
+#ifdef ATTN_DESC
+static const char *xsdm_prty_attn_desc[10] = {
+       "xsdm_mem009_i_mem_prty",
+       "xsdm_mem008_i_mem_prty",
+       "xsdm_mem007_i_mem_prty",
+       "xsdm_mem006_i_mem_prty",
+       "xsdm_mem003_i_mem_prty",
+       "xsdm_mem010_i_mem_prty",
+       "xsdm_mem002_i_mem_prty",
+       "xsdm_mem004_i_mem_prty",
+       "xsdm_mem005_i_mem_prty",
+       "xsdm_mem001_i_mem_prty",
+};
+#else
+#define xsdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsdm_prty1_bb_a0_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_bb_a0 = {
+       0, 10, xsdm_prty1_bb_a0_attn_idx, 0xf80200, 0xf8020c, 0xf80208,
+       0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_bb_a0_regs[1] = {
+       &xsdm_prty1_bb_a0,
+};
+
+static const u16 xsdm_prty1_bb_b0_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_bb_b0 = {
+       0, 10, xsdm_prty1_bb_b0_attn_idx, 0xf80200, 0xf8020c, 0xf80208,
+       0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = {
+       &xsdm_prty1_bb_b0,
+};
+
+static const u16 xsdm_prty1_k2_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsdm_prty1_k2 = {
+       0, 10, xsdm_prty1_k2_attn_idx, 0xf80200, 0xf8020c, 0xf80208, 0xf80204
+};
+
+static struct attn_hw_reg *xsdm_prty_k2_regs[1] = {
+       &xsdm_prty1_k2,
+};
+
+/* YSDM interrupt attentions -- same layout as the other SDM blocks:
+ * bits 0-25 on BB, 0-27 on K2, register base in the 0xf90000 range.
+ */
+#ifdef ATTN_DESC
+static const char *ysdm_int_attn_desc[28] = {
+       "ysdm_address_error",
+       "ysdm_inp_queue_error",
+       "ysdm_delay_fifo_error",
+       "ysdm_async_host_error",
+       "ysdm_prm_fifo_error",
+       "ysdm_ccfc_load_pend_error",
+       "ysdm_tcfc_load_pend_error",
+       "ysdm_dst_int_ram_wait_error",
+       "ysdm_dst_pas_buf_wait_error",
+       "ysdm_dst_pxp_immed_error",
+       "ysdm_dst_pxp_dst_pend_error",
+       "ysdm_dst_brb_src_pend_error",
+       "ysdm_dst_brb_src_addr_error",
+       "ysdm_rsp_brb_pend_error",
+       "ysdm_rsp_int_ram_pend_error",
+       "ysdm_rsp_brb_rd_data_error",
+       "ysdm_rsp_int_ram_rd_data_error",
+       "ysdm_rsp_pxp_rd_data_error",
+       "ysdm_cm_delay_error",
+       "ysdm_sh_delay_error",
+       "ysdm_cmpl_pend_error",
+       "ysdm_cprm_pend_error",
+       "ysdm_timer_addr_error",
+       "ysdm_timer_pend_error",
+       "ysdm_dorq_dpm_error",
+       "ysdm_dst_pxp_done_error",
+       "ysdm_xcm_rmt_buffer_error",
+       "ysdm_ycm_rmt_buffer_error",
+};
+#else
+#define ysdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysdm_int0_bb_a0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg ysdm_int0_bb_a0 = {
+       0, 26, ysdm_int0_bb_a0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_bb_a0_regs[1] = {
+       &ysdm_int0_bb_a0,
+};
+
+static const u16 ysdm_int0_bb_b0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg ysdm_int0_bb_b0 = {
+       0, 26, ysdm_int0_bb_b0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = {
+       &ysdm_int0_bb_b0,
+};
+
+static const u16 ysdm_int0_k2_attn_idx[28] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg ysdm_int0_k2 = {
+       0, 28, ysdm_int0_k2_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044
+};
+
+static struct attn_hw_reg *ysdm_int_k2_regs[1] = {
+       &ysdm_int0_k2,
+};
+
+/* YSDM parity attentions: nine memory-parity bits (one fewer than the
+ * other SDM blocks), identical across chip variants.
+ */
+#ifdef ATTN_DESC
+static const char *ysdm_prty_attn_desc[9] = {
+       "ysdm_mem008_i_mem_prty",
+       "ysdm_mem007_i_mem_prty",
+       "ysdm_mem006_i_mem_prty",
+       "ysdm_mem005_i_mem_prty",
+       "ysdm_mem002_i_mem_prty",
+       "ysdm_mem009_i_mem_prty",
+       "ysdm_mem001_i_mem_prty",
+       "ysdm_mem003_i_mem_prty",
+       "ysdm_mem004_i_mem_prty",
+};
+#else
+#define ysdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysdm_prty1_bb_a0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_bb_a0 = {
+       0, 9, ysdm_prty1_bb_a0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_bb_a0_regs[1] = {
+       &ysdm_prty1_bb_a0,
+};
+
+static const u16 ysdm_prty1_bb_b0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_bb_b0 = {
+       0, 9, ysdm_prty1_bb_b0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = {
+       &ysdm_prty1_bb_b0,
+};
+
+static const u16 ysdm_prty1_k2_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg ysdm_prty1_k2 = {
+       0, 9, ysdm_prty1_k2_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204
+};
+
+static struct attn_hw_reg *ysdm_prty_k2_regs[1] = {
+       &ysdm_prty1_k2,
+};
+
+/* PSDM interrupt-attention tables (names + per-revision registers).
+ * Bit-name strings are built only under ATTN_DESC.  BB A0/B0 expose the
+ * first 26 bits; K2 additionally exposes bits 26-27
+ * (psdm_xcm_rmt_buffer_error / psdm_ycm_rmt_buffer_error).
+ * Auto-generated data -- do not edit by hand.
+ */
+#ifdef ATTN_DESC
+static const char *psdm_int_attn_desc[28] = {
+       "psdm_address_error",
+       "psdm_inp_queue_error",
+       "psdm_delay_fifo_error",
+       "psdm_async_host_error",
+       "psdm_prm_fifo_error",
+       "psdm_ccfc_load_pend_error",
+       "psdm_tcfc_load_pend_error",
+       "psdm_dst_int_ram_wait_error",
+       "psdm_dst_pas_buf_wait_error",
+       "psdm_dst_pxp_immed_error",
+       "psdm_dst_pxp_dst_pend_error",
+       "psdm_dst_brb_src_pend_error",
+       "psdm_dst_brb_src_addr_error",
+       "psdm_rsp_brb_pend_error",
+       "psdm_rsp_int_ram_pend_error",
+       "psdm_rsp_brb_rd_data_error",
+       "psdm_rsp_int_ram_rd_data_error",
+       "psdm_rsp_pxp_rd_data_error",
+       "psdm_cm_delay_error",
+       "psdm_sh_delay_error",
+       "psdm_cmpl_pend_error",
+       "psdm_cprm_pend_error",
+       "psdm_timer_addr_error",
+       "psdm_timer_pend_error",
+       "psdm_dorq_dpm_error",
+       "psdm_dst_pxp_done_error",
+       "psdm_xcm_rmt_buffer_error",
+       "psdm_ycm_rmt_buffer_error",
+};
+#else
+#define psdm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 psdm_int0_bb_a0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg psdm_int0_bb_a0 = {
+       0, 26, psdm_int0_bb_a0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_bb_a0_regs[1] = {
+       &psdm_int0_bb_a0,
+};
+
+static const u16 psdm_int0_bb_b0_attn_idx[26] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25,
+};
+
+static struct attn_hw_reg psdm_int0_bb_b0 = {
+       0, 26, psdm_int0_bb_b0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = {
+       &psdm_int0_bb_b0,
+};
+
+static const u16 psdm_int0_k2_attn_idx[28] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27,
+};
+
+static struct attn_hw_reg psdm_int0_k2 = {
+       0, 28, psdm_int0_k2_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044
+};
+
+static struct attn_hw_reg *psdm_int_k2_regs[1] = {
+       &psdm_int0_k2,
+};
+
+/* PSDM parity-attention tables.  Same 9 parity bits on all three chip
+ * revisions; name strings compiled only under ATTN_DESC.
+ * Auto-generated data -- do not edit by hand.
+ */
+#ifdef ATTN_DESC
+static const char *psdm_prty_attn_desc[9] = {
+       "psdm_mem008_i_mem_prty",
+       "psdm_mem007_i_mem_prty",
+       "psdm_mem006_i_mem_prty",
+       "psdm_mem005_i_mem_prty",
+       "psdm_mem002_i_mem_prty",
+       "psdm_mem009_i_mem_prty",
+       "psdm_mem001_i_mem_prty",
+       "psdm_mem003_i_mem_prty",
+       "psdm_mem004_i_mem_prty",
+};
+#else
+#define psdm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 psdm_prty1_bb_a0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_bb_a0 = {
+       0, 9, psdm_prty1_bb_a0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_bb_a0_regs[1] = {
+       &psdm_prty1_bb_a0,
+};
+
+static const u16 psdm_prty1_bb_b0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_bb_b0 = {
+       0, 9, psdm_prty1_bb_b0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = {
+       &psdm_prty1_bb_b0,
+};
+
+static const u16 psdm_prty1_k2_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psdm_prty1_k2 = {
+       0, 9, psdm_prty1_k2_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204
+};
+
+static struct attn_hw_reg *psdm_prty_k2_regs[1] = {
+       &psdm_prty1_k2,
+};
+
+/* TSEM (Tstorm semaphore block) interrupt-attention tables.  46 possible
+ * bits split across three registers per chip revision: int0 (bits 0-31),
+ * int1 (bits 32-44) and a separate fast-memory int0 (bit 45, register
+ * block at 0x174xxxx).  Name strings compiled only under ATTN_DESC.
+ * Auto-generated data -- do not edit by hand.
+ */
+#ifdef ATTN_DESC
+static const char *tsem_int_attn_desc[46] = {
+       "tsem_address_error",
+       "tsem_fic_last_error",
+       "tsem_fic_length_error",
+       "tsem_fic_fifo_error",
+       "tsem_pas_buf_fifo_error",
+       "tsem_sync_fin_pop_error",
+       "tsem_sync_dra_wr_push_error",
+       "tsem_sync_dra_wr_pop_error",
+       "tsem_sync_dra_rd_push_error",
+       "tsem_sync_dra_rd_pop_error",
+       "tsem_sync_fin_push_error",
+       "tsem_sem_fast_address_error",
+       "tsem_cam_lsb_inp_fifo",
+       "tsem_cam_msb_inp_fifo",
+       "tsem_cam_out_fifo",
+       "tsem_fin_fifo",
+       "tsem_thread_fifo_error",
+       "tsem_thread_overrun",
+       "tsem_sync_ext_store_push_error",
+       "tsem_sync_ext_store_pop_error",
+       "tsem_sync_ext_load_push_error",
+       "tsem_sync_ext_load_pop_error",
+       "tsem_sync_ram_rd_push_error",
+       "tsem_sync_ram_rd_pop_error",
+       "tsem_sync_ram_wr_pop_error",
+       "tsem_sync_ram_wr_push_error",
+       "tsem_sync_dbg_push_error",
+       "tsem_sync_dbg_pop_error",
+       "tsem_dbg_fifo_error",
+       "tsem_cam_msb2_inp_fifo",
+       "tsem_vfc_interrupt",
+       "tsem_vfc_out_fifo_error",
+       "tsem_storm_stack_uf_attn",
+       "tsem_storm_stack_of_attn",
+       "tsem_storm_runtime_error",
+       "tsem_ext_load_pend_wr_error",
+       "tsem_thread_rls_orun_error",
+       "tsem_thread_rls_aloc_error",
+       "tsem_thread_rls_vld_error",
+       "tsem_ext_thread_oor_error",
+       "tsem_ord_id_fifo_error",
+       "tsem_invld_foc_error",
+       "tsem_ext_ld_len_error",
+       "tsem_thrd_ord_fifo_error",
+       "tsem_invld_thrd_ord_error",
+       "tsem_fast_memory_address_error",
+};
+#else
+#define tsem_int_attn_desc OSAL_NULL
+#endif
+
+/* BB A0 */
+static const u16 tsem_int0_bb_a0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_bb_a0 = {
+       0, 32, tsem_int0_bb_a0_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+       0x1700044
+};
+
+static const u16 tsem_int1_bb_a0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_bb_a0 = {
+       1, 13, tsem_int1_bb_a0_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+       0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_bb_a0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_a0 = {
+       2, 1, tsem_fast_memory_int0_bb_a0_attn_idx, 0x1740040, 0x174004c,
+       0x1740048, 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_bb_a0_regs[3] = {
+       &tsem_int0_bb_a0, &tsem_int1_bb_a0, &tsem_fast_memory_int0_bb_a0,
+};
+
+/* BB B0 */
+static const u16 tsem_int0_bb_b0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_bb_b0 = {
+       0, 32, tsem_int0_bb_b0_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+       0x1700044
+};
+
+static const u16 tsem_int1_bb_b0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_bb_b0 = {
+       1, 13, tsem_int1_bb_b0_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+       0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_bb_b0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = {
+       2, 1, tsem_fast_memory_int0_bb_b0_attn_idx, 0x1740040, 0x174004c,
+       0x1740048, 0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = {
+       &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0,
+};
+
+/* K2 */
+static const u16 tsem_int0_k2_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg tsem_int0_k2 = {
+       0, 32, tsem_int0_k2_attn_idx, 0x1700040, 0x170004c, 0x1700048,
+       0x1700044
+};
+
+static const u16 tsem_int1_k2_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg tsem_int1_k2 = {
+       1, 13, tsem_int1_k2_attn_idx, 0x1700050, 0x170005c, 0x1700058,
+       0x1700054
+};
+
+static const u16 tsem_fast_memory_int0_k2_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg tsem_fast_memory_int0_k2 = {
+       2, 1, tsem_fast_memory_int0_k2_attn_idx, 0x1740040, 0x174004c,
+       0x1740048,
+       0x1740044
+};
+
+static struct attn_hw_reg *tsem_int_k2_regs[3] = {
+       &tsem_int0_k2, &tsem_int1_k2, &tsem_fast_memory_int0_k2,
+};
+
+/* TSEM parity-attention tables.  Per-revision register sets differ:
+ * BB A0/B0 have 3 registers (prty0, prty1, fast-memory VFC config prty1)
+ * while K2 has 4 (adds a fast-memory prty1 covering desc bits 9-15).
+ * NOTE(review): the BB vfc_config index lists skip desc index 18
+ * (tsem_fast_memory_vfc_config_mem006_i_mem_prty) and include 22, while
+ * K2 uses 16-21 -- generated per-chip bit mapping, presumably
+ * intentional; confirm against the generation tool if touched.
+ * Auto-generated data -- do not edit by hand.
+ */
+#ifdef ATTN_DESC
+static const char *tsem_prty_attn_desc[23] = {
+       "tsem_vfc_rbc_parity_error",
+       "tsem_storm_rf_parity_error",
+       "tsem_reg_gen_parity_error",
+       "tsem_mem005_i_ecc_0_rf_int",
+       "tsem_mem005_i_ecc_1_rf_int",
+       "tsem_mem004_i_mem_prty",
+       "tsem_mem002_i_mem_prty",
+       "tsem_mem003_i_mem_prty",
+       "tsem_mem001_i_mem_prty",
+       "tsem_fast_memory_mem024_i_mem_prty",
+       "tsem_fast_memory_mem023_i_mem_prty",
+       "tsem_fast_memory_mem022_i_mem_prty",
+       "tsem_fast_memory_mem021_i_mem_prty",
+       "tsem_fast_memory_mem020_i_mem_prty",
+       "tsem_fast_memory_mem019_i_mem_prty",
+       "tsem_fast_memory_mem018_i_mem_prty",
+       "tsem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+       "tsem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+       "tsem_fast_memory_vfc_config_mem006_i_mem_prty",
+       "tsem_fast_memory_vfc_config_mem001_i_mem_prty",
+       "tsem_fast_memory_vfc_config_mem004_i_mem_prty",
+       "tsem_fast_memory_vfc_config_mem003_i_mem_prty",
+       "tsem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define tsem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tsem_prty0_bb_a0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_bb_a0 = {
+       0, 3, tsem_prty0_bb_a0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+       0x17000cc
+};
+
+static const u16 tsem_prty1_bb_a0_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_bb_a0 = {
+       1, 6, tsem_prty1_bb_a0_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+       0x1700204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = {
+       16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_a0 = {
+       2, 6, tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x174a200,
+       0x174a20c, 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_bb_a0_regs[3] = {
+       &tsem_prty0_bb_a0, &tsem_prty1_bb_a0,
+       &tsem_fast_memory_vfc_config_prty1_bb_a0,
+};
+
+static const u16 tsem_prty0_bb_b0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_bb_b0 = {
+       0, 3, tsem_prty0_bb_b0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+       0x17000cc
+};
+
+static const u16 tsem_prty1_bb_b0_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_bb_b0 = {
+       1, 6, tsem_prty1_bb_b0_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+       0x1700204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = {
+       16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = {
+       2, 6, tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x174a200,
+       0x174a20c, 0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = {
+       &tsem_prty0_bb_b0, &tsem_prty1_bb_b0,
+       &tsem_fast_memory_vfc_config_prty1_bb_b0,
+};
+
+static const u16 tsem_prty0_k2_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg tsem_prty0_k2 = {
+       0, 3, tsem_prty0_k2_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0,
+       0x17000cc
+};
+
+static const u16 tsem_prty1_k2_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tsem_prty1_k2 = {
+       1, 6, tsem_prty1_k2_attn_idx, 0x1700200, 0x170020c, 0x1700208,
+       0x1700204
+};
+
+static const u16 tsem_fast_memory_prty1_k2_attn_idx[7] = {
+       9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg tsem_fast_memory_prty1_k2 = {
+       2, 7, tsem_fast_memory_prty1_k2_attn_idx, 0x1740200, 0x174020c,
+       0x1740208,
+       0x1740204
+};
+
+static const u16 tsem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = {
+       16, 17, 18, 19, 20, 21,
+};
+
+static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_k2 = {
+       3, 6, tsem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x174a200,
+       0x174a20c,
+       0x174a208, 0x174a204
+};
+
+static struct attn_hw_reg *tsem_prty_k2_regs[4] = {
+       &tsem_prty0_k2, &tsem_prty1_k2, &tsem_fast_memory_prty1_k2,
+       &tsem_fast_memory_vfc_config_prty1_k2,
+};
+
+/* MSEM (Mstorm semaphore block) interrupt-attention tables.  Same layout
+ * as the other SEM blocks: 46 bits across int0 (0-31), int1 (32-44) and
+ * the fast-memory int0 (bit 45, register block at 0x184xxxx), per chip
+ * revision (BB A0, BB B0, K2).  Name strings compiled only under
+ * ATTN_DESC.  Auto-generated data -- do not edit by hand.
+ */
+#ifdef ATTN_DESC
+static const char *msem_int_attn_desc[46] = {
+       "msem_address_error",
+       "msem_fic_last_error",
+       "msem_fic_length_error",
+       "msem_fic_fifo_error",
+       "msem_pas_buf_fifo_error",
+       "msem_sync_fin_pop_error",
+       "msem_sync_dra_wr_push_error",
+       "msem_sync_dra_wr_pop_error",
+       "msem_sync_dra_rd_push_error",
+       "msem_sync_dra_rd_pop_error",
+       "msem_sync_fin_push_error",
+       "msem_sem_fast_address_error",
+       "msem_cam_lsb_inp_fifo",
+       "msem_cam_msb_inp_fifo",
+       "msem_cam_out_fifo",
+       "msem_fin_fifo",
+       "msem_thread_fifo_error",
+       "msem_thread_overrun",
+       "msem_sync_ext_store_push_error",
+       "msem_sync_ext_store_pop_error",
+       "msem_sync_ext_load_push_error",
+       "msem_sync_ext_load_pop_error",
+       "msem_sync_ram_rd_push_error",
+       "msem_sync_ram_rd_pop_error",
+       "msem_sync_ram_wr_pop_error",
+       "msem_sync_ram_wr_push_error",
+       "msem_sync_dbg_push_error",
+       "msem_sync_dbg_pop_error",
+       "msem_dbg_fifo_error",
+       "msem_cam_msb2_inp_fifo",
+       "msem_vfc_interrupt",
+       "msem_vfc_out_fifo_error",
+       "msem_storm_stack_uf_attn",
+       "msem_storm_stack_of_attn",
+       "msem_storm_runtime_error",
+       "msem_ext_load_pend_wr_error",
+       "msem_thread_rls_orun_error",
+       "msem_thread_rls_aloc_error",
+       "msem_thread_rls_vld_error",
+       "msem_ext_thread_oor_error",
+       "msem_ord_id_fifo_error",
+       "msem_invld_foc_error",
+       "msem_ext_ld_len_error",
+       "msem_thrd_ord_fifo_error",
+       "msem_invld_thrd_ord_error",
+       "msem_fast_memory_address_error",
+};
+#else
+#define msem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 msem_int0_bb_a0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_bb_a0 = {
+       0, 32, msem_int0_bb_a0_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+       0x1800044
+};
+
+static const u16 msem_int1_bb_a0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_bb_a0 = {
+       1, 13, msem_int1_bb_a0_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+       0x1800054
+};
+
+static const u16 msem_fast_memory_int0_bb_a0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_a0 = {
+       2, 1, msem_fast_memory_int0_bb_a0_attn_idx, 0x1840040, 0x184004c,
+       0x1840048, 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_bb_a0_regs[3] = {
+       &msem_int0_bb_a0, &msem_int1_bb_a0, &msem_fast_memory_int0_bb_a0,
+};
+
+static const u16 msem_int0_bb_b0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_bb_b0 = {
+       0, 32, msem_int0_bb_b0_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+       0x1800044
+};
+
+static const u16 msem_int1_bb_b0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_bb_b0 = {
+       1, 13, msem_int1_bb_b0_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+       0x1800054
+};
+
+static const u16 msem_fast_memory_int0_bb_b0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = {
+       2, 1, msem_fast_memory_int0_bb_b0_attn_idx, 0x1840040, 0x184004c,
+       0x1840048, 0x1840044
+};
+
+static struct attn_hw_reg *msem_int_bb_b0_regs[3] = {
+       &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0,
+};
+
+static const u16 msem_int0_k2_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg msem_int0_k2 = {
+       0, 32, msem_int0_k2_attn_idx, 0x1800040, 0x180004c, 0x1800048,
+       0x1800044
+};
+
+static const u16 msem_int1_k2_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg msem_int1_k2 = {
+       1, 13, msem_int1_k2_attn_idx, 0x1800050, 0x180005c, 0x1800058,
+       0x1800054
+};
+
+static const u16 msem_fast_memory_int0_k2_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg msem_fast_memory_int0_k2 = {
+       2, 1, msem_fast_memory_int0_k2_attn_idx, 0x1840040, 0x184004c,
+       0x1840048,
+       0x1840044
+};
+
+static struct attn_hw_reg *msem_int_k2_regs[3] = {
+       &msem_int0_k2, &msem_int1_k2, &msem_fast_memory_int0_k2,
+};
+
+/* MSEM parity-attention tables.  BB A0/B0 expose only prty0 (bits 0-2)
+ * and prty1 (bits 3-8); K2 additionally exposes a fast-memory prty1
+ * register covering desc bits 9-15.  Desc entries 16-22 (VFC config) are
+ * not referenced by any register in this chunk.  Name strings compiled
+ * only under ATTN_DESC.  Auto-generated data -- do not edit by hand.
+ */
+#ifdef ATTN_DESC
+static const char *msem_prty_attn_desc[23] = {
+       "msem_vfc_rbc_parity_error",
+       "msem_storm_rf_parity_error",
+       "msem_reg_gen_parity_error",
+       "msem_mem005_i_ecc_0_rf_int",
+       "msem_mem005_i_ecc_1_rf_int",
+       "msem_mem004_i_mem_prty",
+       "msem_mem002_i_mem_prty",
+       "msem_mem003_i_mem_prty",
+       "msem_mem001_i_mem_prty",
+       "msem_fast_memory_mem024_i_mem_prty",
+       "msem_fast_memory_mem023_i_mem_prty",
+       "msem_fast_memory_mem022_i_mem_prty",
+       "msem_fast_memory_mem021_i_mem_prty",
+       "msem_fast_memory_mem020_i_mem_prty",
+       "msem_fast_memory_mem019_i_mem_prty",
+       "msem_fast_memory_mem018_i_mem_prty",
+       "msem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+       "msem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+       "msem_fast_memory_vfc_config_mem006_i_mem_prty",
+       "msem_fast_memory_vfc_config_mem001_i_mem_prty",
+       "msem_fast_memory_vfc_config_mem004_i_mem_prty",
+       "msem_fast_memory_vfc_config_mem003_i_mem_prty",
+       "msem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define msem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 msem_prty0_bb_a0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_bb_a0 = {
+       0, 3, msem_prty0_bb_a0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+       0x18000cc
+};
+
+static const u16 msem_prty1_bb_a0_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_bb_a0 = {
+       1, 6, msem_prty1_bb_a0_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+       0x1800204
+};
+
+static struct attn_hw_reg *msem_prty_bb_a0_regs[2] = {
+       &msem_prty0_bb_a0, &msem_prty1_bb_a0,
+};
+
+static const u16 msem_prty0_bb_b0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_bb_b0 = {
+       0, 3, msem_prty0_bb_b0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+       0x18000cc
+};
+
+static const u16 msem_prty1_bb_b0_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_bb_b0 = {
+       1, 6, msem_prty1_bb_b0_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+       0x1800204
+};
+
+static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = {
+       &msem_prty0_bb_b0, &msem_prty1_bb_b0,
+};
+
+static const u16 msem_prty0_k2_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg msem_prty0_k2 = {
+       0, 3, msem_prty0_k2_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0,
+       0x18000cc
+};
+
+static const u16 msem_prty1_k2_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg msem_prty1_k2 = {
+       1, 6, msem_prty1_k2_attn_idx, 0x1800200, 0x180020c, 0x1800208,
+       0x1800204
+};
+
+static const u16 msem_fast_memory_prty1_k2_attn_idx[7] = {
+       9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg msem_fast_memory_prty1_k2 = {
+       2, 7, msem_fast_memory_prty1_k2_attn_idx, 0x1840200, 0x184020c,
+       0x1840208,
+       0x1840204
+};
+
+static struct attn_hw_reg *msem_prty_k2_regs[3] = {
+       &msem_prty0_k2, &msem_prty1_k2, &msem_fast_memory_prty1_k2,
+};
+
+/* USEM (Ustorm semaphore block) interrupt-attention tables.  46 bits
+ * across int0 (0-31), int1 (32-44) and the fast-memory int0 (bit 45,
+ * register block at 0x194xxxx), per chip revision (BB A0, BB B0, K2).
+ * Name strings compiled only under ATTN_DESC.  Auto-generated data --
+ * do not edit by hand.
+ */
+#ifdef ATTN_DESC
+static const char *usem_int_attn_desc[46] = {
+       "usem_address_error",
+       "usem_fic_last_error",
+       "usem_fic_length_error",
+       "usem_fic_fifo_error",
+       "usem_pas_buf_fifo_error",
+       "usem_sync_fin_pop_error",
+       "usem_sync_dra_wr_push_error",
+       "usem_sync_dra_wr_pop_error",
+       "usem_sync_dra_rd_push_error",
+       "usem_sync_dra_rd_pop_error",
+       "usem_sync_fin_push_error",
+       "usem_sem_fast_address_error",
+       "usem_cam_lsb_inp_fifo",
+       "usem_cam_msb_inp_fifo",
+       "usem_cam_out_fifo",
+       "usem_fin_fifo",
+       "usem_thread_fifo_error",
+       "usem_thread_overrun",
+       "usem_sync_ext_store_push_error",
+       "usem_sync_ext_store_pop_error",
+       "usem_sync_ext_load_push_error",
+       "usem_sync_ext_load_pop_error",
+       "usem_sync_ram_rd_push_error",
+       "usem_sync_ram_rd_pop_error",
+       "usem_sync_ram_wr_pop_error",
+       "usem_sync_ram_wr_push_error",
+       "usem_sync_dbg_push_error",
+       "usem_sync_dbg_pop_error",
+       "usem_dbg_fifo_error",
+       "usem_cam_msb2_inp_fifo",
+       "usem_vfc_interrupt",
+       "usem_vfc_out_fifo_error",
+       "usem_storm_stack_uf_attn",
+       "usem_storm_stack_of_attn",
+       "usem_storm_runtime_error",
+       "usem_ext_load_pend_wr_error",
+       "usem_thread_rls_orun_error",
+       "usem_thread_rls_aloc_error",
+       "usem_thread_rls_vld_error",
+       "usem_ext_thread_oor_error",
+       "usem_ord_id_fifo_error",
+       "usem_invld_foc_error",
+       "usem_ext_ld_len_error",
+       "usem_thrd_ord_fifo_error",
+       "usem_invld_thrd_ord_error",
+       "usem_fast_memory_address_error",
+};
+#else
+#define usem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 usem_int0_bb_a0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_bb_a0 = {
+       0, 32, usem_int0_bb_a0_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+       0x1900044
+};
+
+static const u16 usem_int1_bb_a0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_bb_a0 = {
+       1, 13, usem_int1_bb_a0_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+       0x1900054
+};
+
+static const u16 usem_fast_memory_int0_bb_a0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_a0 = {
+       2, 1, usem_fast_memory_int0_bb_a0_attn_idx, 0x1940040, 0x194004c,
+       0x1940048, 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_bb_a0_regs[3] = {
+       &usem_int0_bb_a0, &usem_int1_bb_a0, &usem_fast_memory_int0_bb_a0,
+};
+
+static const u16 usem_int0_bb_b0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_bb_b0 = {
+       0, 32, usem_int0_bb_b0_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+       0x1900044
+};
+
+static const u16 usem_int1_bb_b0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_bb_b0 = {
+       1, 13, usem_int1_bb_b0_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+       0x1900054
+};
+
+static const u16 usem_fast_memory_int0_bb_b0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = {
+       2, 1, usem_fast_memory_int0_bb_b0_attn_idx, 0x1940040, 0x194004c,
+       0x1940048, 0x1940044
+};
+
+static struct attn_hw_reg *usem_int_bb_b0_regs[3] = {
+       &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0,
+};
+
+static const u16 usem_int0_k2_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg usem_int0_k2 = {
+       0, 32, usem_int0_k2_attn_idx, 0x1900040, 0x190004c, 0x1900048,
+       0x1900044
+};
+
+static const u16 usem_int1_k2_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg usem_int1_k2 = {
+       1, 13, usem_int1_k2_attn_idx, 0x1900050, 0x190005c, 0x1900058,
+       0x1900054
+};
+
+static const u16 usem_fast_memory_int0_k2_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg usem_fast_memory_int0_k2 = {
+       2, 1, usem_fast_memory_int0_k2_attn_idx, 0x1940040, 0x194004c,
+       0x1940048,
+       0x1940044
+};
+
+static struct attn_hw_reg *usem_int_k2_regs[3] = {
+       &usem_int0_k2, &usem_int1_k2, &usem_fast_memory_int0_k2,
+};
+
+/* USEM parity-attention tables.  BB A0/B0 expose prty0 (bits 0-2) and
+ * prty1 (bits 3-8); K2 additionally exposes a fast-memory prty1 register
+ * covering desc bits 9-15.  Desc entries 16-22 (VFC config) are not
+ * referenced by any register in this chunk.  Name strings compiled only
+ * under ATTN_DESC.  Auto-generated data -- do not edit by hand.
+ */
+#ifdef ATTN_DESC
+static const char *usem_prty_attn_desc[23] = {
+       "usem_vfc_rbc_parity_error",
+       "usem_storm_rf_parity_error",
+       "usem_reg_gen_parity_error",
+       "usem_mem005_i_ecc_0_rf_int",
+       "usem_mem005_i_ecc_1_rf_int",
+       "usem_mem004_i_mem_prty",
+       "usem_mem002_i_mem_prty",
+       "usem_mem003_i_mem_prty",
+       "usem_mem001_i_mem_prty",
+       "usem_fast_memory_mem024_i_mem_prty",
+       "usem_fast_memory_mem023_i_mem_prty",
+       "usem_fast_memory_mem022_i_mem_prty",
+       "usem_fast_memory_mem021_i_mem_prty",
+       "usem_fast_memory_mem020_i_mem_prty",
+       "usem_fast_memory_mem019_i_mem_prty",
+       "usem_fast_memory_mem018_i_mem_prty",
+       "usem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+       "usem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+       "usem_fast_memory_vfc_config_mem006_i_mem_prty",
+       "usem_fast_memory_vfc_config_mem001_i_mem_prty",
+       "usem_fast_memory_vfc_config_mem004_i_mem_prty",
+       "usem_fast_memory_vfc_config_mem003_i_mem_prty",
+       "usem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define usem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 usem_prty0_bb_a0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_bb_a0 = {
+       0, 3, usem_prty0_bb_a0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+       0x19000cc
+};
+
+static const u16 usem_prty1_bb_a0_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_bb_a0 = {
+       1, 6, usem_prty1_bb_a0_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+       0x1900204
+};
+
+static struct attn_hw_reg *usem_prty_bb_a0_regs[2] = {
+       &usem_prty0_bb_a0, &usem_prty1_bb_a0,
+};
+
+static const u16 usem_prty0_bb_b0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_bb_b0 = {
+       0, 3, usem_prty0_bb_b0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+       0x19000cc
+};
+
+static const u16 usem_prty1_bb_b0_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_bb_b0 = {
+       1, 6, usem_prty1_bb_b0_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+       0x1900204
+};
+
+static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = {
+       &usem_prty0_bb_b0, &usem_prty1_bb_b0,
+};
+
+static const u16 usem_prty0_k2_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg usem_prty0_k2 = {
+       0, 3, usem_prty0_k2_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0,
+       0x19000cc
+};
+
+static const u16 usem_prty1_k2_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg usem_prty1_k2 = {
+       1, 6, usem_prty1_k2_attn_idx, 0x1900200, 0x190020c, 0x1900208,
+       0x1900204
+};
+
+static const u16 usem_fast_memory_prty1_k2_attn_idx[7] = {
+       9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg usem_fast_memory_prty1_k2 = {
+       2, 7, usem_fast_memory_prty1_k2_attn_idx, 0x1940200, 0x194020c,
+       0x1940208,
+       0x1940204
+};
+
+static struct attn_hw_reg *usem_prty_k2_regs[3] = {
+       &usem_prty0_k2, &usem_prty1_k2, &usem_fast_memory_prty1_k2,
+};
+
+/* XSEM (Xstorm semaphore block) interrupt-attention tables.  46 bits
+ * across int0 (0-31), int1 (32-44) and the fast-memory int0 (bit 45,
+ * register block at 0x144xxxx), per chip revision (BB A0, BB B0, K2).
+ * Name strings compiled only under ATTN_DESC.  Auto-generated data --
+ * do not edit by hand.
+ */
+#ifdef ATTN_DESC
+static const char *xsem_int_attn_desc[46] = {
+       "xsem_address_error",
+       "xsem_fic_last_error",
+       "xsem_fic_length_error",
+       "xsem_fic_fifo_error",
+       "xsem_pas_buf_fifo_error",
+       "xsem_sync_fin_pop_error",
+       "xsem_sync_dra_wr_push_error",
+       "xsem_sync_dra_wr_pop_error",
+       "xsem_sync_dra_rd_push_error",
+       "xsem_sync_dra_rd_pop_error",
+       "xsem_sync_fin_push_error",
+       "xsem_sem_fast_address_error",
+       "xsem_cam_lsb_inp_fifo",
+       "xsem_cam_msb_inp_fifo",
+       "xsem_cam_out_fifo",
+       "xsem_fin_fifo",
+       "xsem_thread_fifo_error",
+       "xsem_thread_overrun",
+       "xsem_sync_ext_store_push_error",
+       "xsem_sync_ext_store_pop_error",
+       "xsem_sync_ext_load_push_error",
+       "xsem_sync_ext_load_pop_error",
+       "xsem_sync_ram_rd_push_error",
+       "xsem_sync_ram_rd_pop_error",
+       "xsem_sync_ram_wr_pop_error",
+       "xsem_sync_ram_wr_push_error",
+       "xsem_sync_dbg_push_error",
+       "xsem_sync_dbg_pop_error",
+       "xsem_dbg_fifo_error",
+       "xsem_cam_msb2_inp_fifo",
+       "xsem_vfc_interrupt",
+       "xsem_vfc_out_fifo_error",
+       "xsem_storm_stack_uf_attn",
+       "xsem_storm_stack_of_attn",
+       "xsem_storm_runtime_error",
+       "xsem_ext_load_pend_wr_error",
+       "xsem_thread_rls_orun_error",
+       "xsem_thread_rls_aloc_error",
+       "xsem_thread_rls_vld_error",
+       "xsem_ext_thread_oor_error",
+       "xsem_ord_id_fifo_error",
+       "xsem_invld_foc_error",
+       "xsem_ext_ld_len_error",
+       "xsem_thrd_ord_fifo_error",
+       "xsem_invld_thrd_ord_error",
+       "xsem_fast_memory_address_error",
+};
+#else
+#define xsem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsem_int0_bb_a0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_bb_a0 = {
+       0, 32, xsem_int0_bb_a0_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+       0x1400044
+};
+
+static const u16 xsem_int1_bb_a0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_bb_a0 = {
+       1, 13, xsem_int1_bb_a0_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+       0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_bb_a0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_a0 = {
+       2, 1, xsem_fast_memory_int0_bb_a0_attn_idx, 0x1440040, 0x144004c,
+       0x1440048, 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_bb_a0_regs[3] = {
+       &xsem_int0_bb_a0, &xsem_int1_bb_a0, &xsem_fast_memory_int0_bb_a0,
+};
+
+static const u16 xsem_int0_bb_b0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_bb_b0 = {
+       0, 32, xsem_int0_bb_b0_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+       0x1400044
+};
+
+static const u16 xsem_int1_bb_b0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_bb_b0 = {
+       1, 13, xsem_int1_bb_b0_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+       0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_bb_b0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = {
+       2, 1, xsem_fast_memory_int0_bb_b0_attn_idx, 0x1440040, 0x144004c,
+       0x1440048, 0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = {
+       &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0,
+};
+
+static const u16 xsem_int0_k2_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg xsem_int0_k2 = {
+       0, 32, xsem_int0_k2_attn_idx, 0x1400040, 0x140004c, 0x1400048,
+       0x1400044
+};
+
+static const u16 xsem_int1_k2_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg xsem_int1_k2 = {
+       1, 13, xsem_int1_k2_attn_idx, 0x1400050, 0x140005c, 0x1400058,
+       0x1400054
+};
+
+static const u16 xsem_fast_memory_int0_k2_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg xsem_fast_memory_int0_k2 = {
+       2, 1, xsem_fast_memory_int0_k2_attn_idx, 0x1440040, 0x144004c,
+       0x1440048,
+       0x1440044
+};
+
+static struct attn_hw_reg *xsem_int_k2_regs[3] = {
+       &xsem_int0_k2, &xsem_int1_k2, &xsem_fast_memory_int0_k2,
+};
+
+/* XSEM parity attention: human-readable bit descriptions (only when
+ * ATTN_DESC is defined, to save footprint otherwise) followed by the
+ * per-variant register tables.  Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *xsem_prty_attn_desc[24] = {
+       "xsem_vfc_rbc_parity_error",
+       "xsem_storm_rf_parity_error",
+       "xsem_reg_gen_parity_error",
+       "xsem_mem006_i_ecc_0_rf_int",
+       "xsem_mem006_i_ecc_1_rf_int",
+       "xsem_mem005_i_mem_prty",
+       "xsem_mem002_i_mem_prty",
+       "xsem_mem004_i_mem_prty",
+       "xsem_mem003_i_mem_prty",
+       "xsem_mem001_i_mem_prty",
+       "xsem_fast_memory_mem024_i_mem_prty",
+       "xsem_fast_memory_mem023_i_mem_prty",
+       "xsem_fast_memory_mem022_i_mem_prty",
+       "xsem_fast_memory_mem021_i_mem_prty",
+       "xsem_fast_memory_mem020_i_mem_prty",
+       "xsem_fast_memory_mem019_i_mem_prty",
+       "xsem_fast_memory_mem018_i_mem_prty",
+       "xsem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+       "xsem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+       "xsem_fast_memory_vfc_config_mem006_i_mem_prty",
+       "xsem_fast_memory_vfc_config_mem001_i_mem_prty",
+       "xsem_fast_memory_vfc_config_mem004_i_mem_prty",
+       "xsem_fast_memory_vfc_config_mem003_i_mem_prty",
+       "xsem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define xsem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xsem_prty0_bb_a0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_bb_a0 = {
+       0, 3, xsem_prty0_bb_a0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+       0x14000cc
+};
+
+static const u16 xsem_prty1_bb_a0_attn_idx[7] = {
+       3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_bb_a0 = {
+       1, 7, xsem_prty1_bb_a0_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+       0x1400204
+};
+
+static struct attn_hw_reg *xsem_prty_bb_a0_regs[2] = {
+       &xsem_prty0_bb_a0, &xsem_prty1_bb_a0,
+};
+
+static const u16 xsem_prty0_bb_b0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_bb_b0 = {
+       0, 3, xsem_prty0_bb_b0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+       0x14000cc
+};
+
+static const u16 xsem_prty1_bb_b0_attn_idx[7] = {
+       3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_bb_b0 = {
+       1, 7, xsem_prty1_bb_b0_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+       0x1400204
+};
+
+static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = {
+       &xsem_prty0_bb_b0, &xsem_prty1_bb_b0,
+};
+
+static const u16 xsem_prty0_k2_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg xsem_prty0_k2 = {
+       0, 3, xsem_prty0_k2_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0,
+       0x14000cc
+};
+
+static const u16 xsem_prty1_k2_attn_idx[7] = {
+       3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg xsem_prty1_k2 = {
+       1, 7, xsem_prty1_k2_attn_idx, 0x1400200, 0x140020c, 0x1400208,
+       0x1400204
+};
+
+/* Fast-memory parity register exists only in the K2 table set below;
+ * the BB A0/B0 sets above have two registers each.
+ */
+static const u16 xsem_fast_memory_prty1_k2_attn_idx[7] = {
+       10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg xsem_fast_memory_prty1_k2 = {
+       2, 7, xsem_fast_memory_prty1_k2_attn_idx, 0x1440200, 0x144020c,
+       0x1440208,
+       0x1440204
+};
+
+static struct attn_hw_reg *xsem_prty_k2_regs[3] = {
+       &xsem_prty0_k2, &xsem_prty1_k2, &xsem_fast_memory_prty1_k2,
+};
+
+/* YSEM (Ystorm semaphore block) interrupt attention: bit descriptions
+ * (compiled in only under ATTN_DESC) and per-variant register tables
+ * (BB A0, BB B0, K2).  Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *ysem_int_attn_desc[46] = {
+       "ysem_address_error",
+       "ysem_fic_last_error",
+       "ysem_fic_length_error",
+       "ysem_fic_fifo_error",
+       "ysem_pas_buf_fifo_error",
+       "ysem_sync_fin_pop_error",
+       "ysem_sync_dra_wr_push_error",
+       "ysem_sync_dra_wr_pop_error",
+       "ysem_sync_dra_rd_push_error",
+       "ysem_sync_dra_rd_pop_error",
+       "ysem_sync_fin_push_error",
+       "ysem_sem_fast_address_error",
+       "ysem_cam_lsb_inp_fifo",
+       "ysem_cam_msb_inp_fifo",
+       "ysem_cam_out_fifo",
+       "ysem_fin_fifo",
+       "ysem_thread_fifo_error",
+       "ysem_thread_overrun",
+       "ysem_sync_ext_store_push_error",
+       "ysem_sync_ext_store_pop_error",
+       "ysem_sync_ext_load_push_error",
+       "ysem_sync_ext_load_pop_error",
+       "ysem_sync_ram_rd_push_error",
+       "ysem_sync_ram_rd_pop_error",
+       "ysem_sync_ram_wr_pop_error",
+       "ysem_sync_ram_wr_push_error",
+       "ysem_sync_dbg_push_error",
+       "ysem_sync_dbg_pop_error",
+       "ysem_dbg_fifo_error",
+       "ysem_cam_msb2_inp_fifo",
+       "ysem_vfc_interrupt",
+       "ysem_vfc_out_fifo_error",
+       "ysem_storm_stack_uf_attn",
+       "ysem_storm_stack_of_attn",
+       "ysem_storm_runtime_error",
+       "ysem_ext_load_pend_wr_error",
+       "ysem_thread_rls_orun_error",
+       "ysem_thread_rls_aloc_error",
+       "ysem_thread_rls_vld_error",
+       "ysem_ext_thread_oor_error",
+       "ysem_ord_id_fifo_error",
+       "ysem_invld_foc_error",
+       "ysem_ext_ld_len_error",
+       "ysem_thrd_ord_fifo_error",
+       "ysem_invld_thrd_ord_error",
+       "ysem_fast_memory_address_error",
+};
+#else
+#define ysem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysem_int0_bb_a0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_bb_a0 = {
+       0, 32, ysem_int0_bb_a0_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+       0x1500044
+};
+
+static const u16 ysem_int1_bb_a0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_bb_a0 = {
+       1, 13, ysem_int1_bb_a0_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+       0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_bb_a0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_a0 = {
+       2, 1, ysem_fast_memory_int0_bb_a0_attn_idx, 0x1540040, 0x154004c,
+       0x1540048, 0x1540044
+};
+
+/* All YSEM interrupt attention registers for BB A0. */
+static struct attn_hw_reg *ysem_int_bb_a0_regs[3] = {
+       &ysem_int0_bb_a0, &ysem_int1_bb_a0, &ysem_fast_memory_int0_bb_a0,
+};
+
+static const u16 ysem_int0_bb_b0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_bb_b0 = {
+       0, 32, ysem_int0_bb_b0_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+       0x1500044
+};
+
+static const u16 ysem_int1_bb_b0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_bb_b0 = {
+       1, 13, ysem_int1_bb_b0_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+       0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_bb_b0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = {
+       2, 1, ysem_fast_memory_int0_bb_b0_attn_idx, 0x1540040, 0x154004c,
+       0x1540048, 0x1540044
+};
+
+/* All YSEM interrupt attention registers for BB B0. */
+static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = {
+       &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0,
+};
+
+static const u16 ysem_int0_k2_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg ysem_int0_k2 = {
+       0, 32, ysem_int0_k2_attn_idx, 0x1500040, 0x150004c, 0x1500048,
+       0x1500044
+};
+
+static const u16 ysem_int1_k2_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg ysem_int1_k2 = {
+       1, 13, ysem_int1_k2_attn_idx, 0x1500050, 0x150005c, 0x1500058,
+       0x1500054
+};
+
+static const u16 ysem_fast_memory_int0_k2_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg ysem_fast_memory_int0_k2 = {
+       2, 1, ysem_fast_memory_int0_k2_attn_idx, 0x1540040, 0x154004c,
+       0x1540048,
+       0x1540044
+};
+
+/* All YSEM interrupt attention registers for K2. */
+static struct attn_hw_reg *ysem_int_k2_regs[3] = {
+       &ysem_int0_k2, &ysem_int1_k2, &ysem_fast_memory_int0_k2,
+};
+
+/* YSEM parity attention: bit descriptions (under ATTN_DESC only) and the
+ * per-variant register tables.  Mirrors the XSEM parity layout with YSEM
+ * base addresses.  Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *ysem_prty_attn_desc[24] = {
+       "ysem_vfc_rbc_parity_error",
+       "ysem_storm_rf_parity_error",
+       "ysem_reg_gen_parity_error",
+       "ysem_mem006_i_ecc_0_rf_int",
+       "ysem_mem006_i_ecc_1_rf_int",
+       "ysem_mem005_i_mem_prty",
+       "ysem_mem002_i_mem_prty",
+       "ysem_mem004_i_mem_prty",
+       "ysem_mem003_i_mem_prty",
+       "ysem_mem001_i_mem_prty",
+       "ysem_fast_memory_mem024_i_mem_prty",
+       "ysem_fast_memory_mem023_i_mem_prty",
+       "ysem_fast_memory_mem022_i_mem_prty",
+       "ysem_fast_memory_mem021_i_mem_prty",
+       "ysem_fast_memory_mem020_i_mem_prty",
+       "ysem_fast_memory_mem019_i_mem_prty",
+       "ysem_fast_memory_mem018_i_mem_prty",
+       "ysem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+       "ysem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+       "ysem_fast_memory_vfc_config_mem006_i_mem_prty",
+       "ysem_fast_memory_vfc_config_mem001_i_mem_prty",
+       "ysem_fast_memory_vfc_config_mem004_i_mem_prty",
+       "ysem_fast_memory_vfc_config_mem003_i_mem_prty",
+       "ysem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define ysem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ysem_prty0_bb_a0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_bb_a0 = {
+       0, 3, ysem_prty0_bb_a0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+       0x15000cc
+};
+
+static const u16 ysem_prty1_bb_a0_attn_idx[7] = {
+       3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_bb_a0 = {
+       1, 7, ysem_prty1_bb_a0_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+       0x1500204
+};
+
+static struct attn_hw_reg *ysem_prty_bb_a0_regs[2] = {
+       &ysem_prty0_bb_a0, &ysem_prty1_bb_a0,
+};
+
+static const u16 ysem_prty0_bb_b0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_bb_b0 = {
+       0, 3, ysem_prty0_bb_b0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+       0x15000cc
+};
+
+static const u16 ysem_prty1_bb_b0_attn_idx[7] = {
+       3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_bb_b0 = {
+       1, 7, ysem_prty1_bb_b0_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+       0x1500204
+};
+
+static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = {
+       &ysem_prty0_bb_b0, &ysem_prty1_bb_b0,
+};
+
+static const u16 ysem_prty0_k2_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg ysem_prty0_k2 = {
+       0, 3, ysem_prty0_k2_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0,
+       0x15000cc
+};
+
+static const u16 ysem_prty1_k2_attn_idx[7] = {
+       3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ysem_prty1_k2 = {
+       1, 7, ysem_prty1_k2_attn_idx, 0x1500200, 0x150020c, 0x1500208,
+       0x1500204
+};
+
+/* Fast-memory parity register is present only on K2. */
+static const u16 ysem_fast_memory_prty1_k2_attn_idx[7] = {
+       10, 11, 12, 13, 14, 15, 16,
+};
+
+static struct attn_hw_reg ysem_fast_memory_prty1_k2 = {
+       2, 7, ysem_fast_memory_prty1_k2_attn_idx, 0x1540200, 0x154020c,
+       0x1540208,
+       0x1540204
+};
+
+static struct attn_hw_reg *ysem_prty_k2_regs[3] = {
+       &ysem_prty0_k2, &ysem_prty1_k2, &ysem_fast_memory_prty1_k2,
+};
+
+/* PSEM (Pstorm semaphore block) interrupt attention: bit descriptions
+ * (under ATTN_DESC only) and per-variant register tables (BB A0, BB B0,
+ * K2).  Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *psem_int_attn_desc[46] = {
+       "psem_address_error",
+       "psem_fic_last_error",
+       "psem_fic_length_error",
+       "psem_fic_fifo_error",
+       "psem_pas_buf_fifo_error",
+       "psem_sync_fin_pop_error",
+       "psem_sync_dra_wr_push_error",
+       "psem_sync_dra_wr_pop_error",
+       "psem_sync_dra_rd_push_error",
+       "psem_sync_dra_rd_pop_error",
+       "psem_sync_fin_push_error",
+       "psem_sem_fast_address_error",
+       "psem_cam_lsb_inp_fifo",
+       "psem_cam_msb_inp_fifo",
+       "psem_cam_out_fifo",
+       "psem_fin_fifo",
+       "psem_thread_fifo_error",
+       "psem_thread_overrun",
+       "psem_sync_ext_store_push_error",
+       "psem_sync_ext_store_pop_error",
+       "psem_sync_ext_load_push_error",
+       "psem_sync_ext_load_pop_error",
+       "psem_sync_ram_rd_push_error",
+       "psem_sync_ram_rd_pop_error",
+       "psem_sync_ram_wr_pop_error",
+       "psem_sync_ram_wr_push_error",
+       "psem_sync_dbg_push_error",
+       "psem_sync_dbg_pop_error",
+       "psem_dbg_fifo_error",
+       "psem_cam_msb2_inp_fifo",
+       "psem_vfc_interrupt",
+       "psem_vfc_out_fifo_error",
+       "psem_storm_stack_uf_attn",
+       "psem_storm_stack_of_attn",
+       "psem_storm_runtime_error",
+       "psem_ext_load_pend_wr_error",
+       "psem_thread_rls_orun_error",
+       "psem_thread_rls_aloc_error",
+       "psem_thread_rls_vld_error",
+       "psem_ext_thread_oor_error",
+       "psem_ord_id_fifo_error",
+       "psem_invld_foc_error",
+       "psem_ext_ld_len_error",
+       "psem_thrd_ord_fifo_error",
+       "psem_invld_thrd_ord_error",
+       "psem_fast_memory_address_error",
+};
+#else
+#define psem_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 psem_int0_bb_a0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_bb_a0 = {
+       0, 32, psem_int0_bb_a0_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+       0x1600044
+};
+
+static const u16 psem_int1_bb_a0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_bb_a0 = {
+       1, 13, psem_int1_bb_a0_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+       0x1600054
+};
+
+static const u16 psem_fast_memory_int0_bb_a0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_a0 = {
+       2, 1, psem_fast_memory_int0_bb_a0_attn_idx, 0x1640040, 0x164004c,
+       0x1640048, 0x1640044
+};
+
+/* All PSEM interrupt attention registers for BB A0. */
+static struct attn_hw_reg *psem_int_bb_a0_regs[3] = {
+       &psem_int0_bb_a0, &psem_int1_bb_a0, &psem_fast_memory_int0_bb_a0,
+};
+
+static const u16 psem_int0_bb_b0_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_bb_b0 = {
+       0, 32, psem_int0_bb_b0_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+       0x1600044
+};
+
+static const u16 psem_int1_bb_b0_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_bb_b0 = {
+       1, 13, psem_int1_bb_b0_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+       0x1600054
+};
+
+static const u16 psem_fast_memory_int0_bb_b0_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = {
+       2, 1, psem_fast_memory_int0_bb_b0_attn_idx, 0x1640040, 0x164004c,
+       0x1640048, 0x1640044
+};
+
+/* All PSEM interrupt attention registers for BB B0. */
+static struct attn_hw_reg *psem_int_bb_b0_regs[3] = {
+       &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0,
+};
+
+static const u16 psem_int0_k2_attn_idx[32] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg psem_int0_k2 = {
+       0, 32, psem_int0_k2_attn_idx, 0x1600040, 0x160004c, 0x1600048,
+       0x1600044
+};
+
+static const u16 psem_int1_k2_attn_idx[13] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+};
+
+static struct attn_hw_reg psem_int1_k2 = {
+       1, 13, psem_int1_k2_attn_idx, 0x1600050, 0x160005c, 0x1600058,
+       0x1600054
+};
+
+static const u16 psem_fast_memory_int0_k2_attn_idx[1] = {
+       45,
+};
+
+static struct attn_hw_reg psem_fast_memory_int0_k2 = {
+       2, 1, psem_fast_memory_int0_k2_attn_idx, 0x1640040, 0x164004c,
+       0x1640048,
+       0x1640044
+};
+
+/* All PSEM interrupt attention registers for K2. */
+static struct attn_hw_reg *psem_int_k2_regs[3] = {
+       &psem_int0_k2, &psem_int1_k2, &psem_fast_memory_int0_k2,
+};
+
+/* PSEM parity attention: bit descriptions (under ATTN_DESC only) and
+ * per-variant register tables.  Unlike XSEM/YSEM, PSEM has 23 parity
+ * bits and the BB variants carry a vfc_config parity register while the
+ * fast-memory parity register exists only on K2.
+ * Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *psem_prty_attn_desc[23] = {
+       "psem_vfc_rbc_parity_error",
+       "psem_storm_rf_parity_error",
+       "psem_reg_gen_parity_error",
+       "psem_mem005_i_ecc_0_rf_int",
+       "psem_mem005_i_ecc_1_rf_int",
+       "psem_mem004_i_mem_prty",
+       "psem_mem002_i_mem_prty",
+       "psem_mem003_i_mem_prty",
+       "psem_mem001_i_mem_prty",
+       "psem_fast_memory_mem024_i_mem_prty",
+       "psem_fast_memory_mem023_i_mem_prty",
+       "psem_fast_memory_mem022_i_mem_prty",
+       "psem_fast_memory_mem021_i_mem_prty",
+       "psem_fast_memory_mem020_i_mem_prty",
+       "psem_fast_memory_mem019_i_mem_prty",
+       "psem_fast_memory_mem018_i_mem_prty",
+       "psem_fast_memory_vfc_config_mem005_i_ecc_rf_int",
+       "psem_fast_memory_vfc_config_mem002_i_ecc_rf_int",
+       "psem_fast_memory_vfc_config_mem006_i_mem_prty",
+       "psem_fast_memory_vfc_config_mem001_i_mem_prty",
+       "psem_fast_memory_vfc_config_mem004_i_mem_prty",
+       "psem_fast_memory_vfc_config_mem003_i_mem_prty",
+       "psem_fast_memory_vfc_config_mem007_i_mem_prty",
+};
+#else
+#define psem_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 psem_prty0_bb_a0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_bb_a0 = {
+       0, 3, psem_prty0_bb_a0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+       0x16000cc
+};
+
+static const u16 psem_prty1_bb_a0_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_bb_a0 = {
+       1, 6, psem_prty1_bb_a0_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+       0x1600204
+};
+
+/* NOTE(review): description index 18 (vfc_config_mem006) is skipped on
+ * BB, presumably because that memory is absent on BB silicon -- confirm
+ * against the table generator; the K2 table below uses 16..21 instead.
+ */
+static const u16 psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = {
+       16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_a0 = {
+       2, 6, psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x164a200,
+       0x164a20c, 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_bb_a0_regs[3] = {
+       &psem_prty0_bb_a0, &psem_prty1_bb_a0,
+       &psem_fast_memory_vfc_config_prty1_bb_a0,
+};
+
+static const u16 psem_prty0_bb_b0_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_bb_b0 = {
+       0, 3, psem_prty0_bb_b0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+       0x16000cc
+};
+
+static const u16 psem_prty1_bb_b0_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_bb_b0 = {
+       1, 6, psem_prty1_bb_b0_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+       0x1600204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = {
+       16, 17, 19, 20, 21, 22,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = {
+       2, 6, psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x164a200,
+       0x164a20c, 0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = {
+       &psem_prty0_bb_b0, &psem_prty1_bb_b0,
+       &psem_fast_memory_vfc_config_prty1_bb_b0,
+};
+
+static const u16 psem_prty0_k2_attn_idx[3] = {
+       0, 1, 2,
+};
+
+static struct attn_hw_reg psem_prty0_k2 = {
+       0, 3, psem_prty0_k2_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0,
+       0x16000cc
+};
+
+static const u16 psem_prty1_k2_attn_idx[6] = {
+       3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg psem_prty1_k2 = {
+       1, 6, psem_prty1_k2_attn_idx, 0x1600200, 0x160020c, 0x1600208,
+       0x1600204
+};
+
+static const u16 psem_fast_memory_prty1_k2_attn_idx[7] = {
+       9, 10, 11, 12, 13, 14, 15,
+};
+
+static struct attn_hw_reg psem_fast_memory_prty1_k2 = {
+       2, 7, psem_fast_memory_prty1_k2_attn_idx, 0x1640200, 0x164020c,
+       0x1640208,
+       0x1640204
+};
+
+static const u16 psem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = {
+       16, 17, 18, 19, 20, 21,
+};
+
+static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_k2 = {
+       3, 6, psem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x164a200,
+       0x164a20c,
+       0x164a208, 0x164a204
+};
+
+static struct attn_hw_reg *psem_prty_k2_regs[4] = {
+       &psem_prty0_k2, &psem_prty1_k2, &psem_fast_memory_prty1_k2,
+       &psem_fast_memory_vfc_config_prty1_k2,
+};
+
+/* RSS (receive-side scaling block) interrupt attention: one 12-bit
+ * status register, identical addresses across BB A0/B0 and K2.
+ * Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *rss_int_attn_desc[12] = {
+       "rss_address_error",
+       "rss_msg_inp_cnt_error",
+       "rss_msg_out_cnt_error",
+       "rss_inp_state_error",
+       "rss_out_state_error",
+       "rss_main_state_error",
+       "rss_calc_state_error",
+       "rss_inp_fifo_error",
+       "rss_cmd_fifo_error",
+       "rss_msg_fifo_error",
+       "rss_rsp_fifo_error",
+       "rss_hdr_fifo_error",
+};
+#else
+#define rss_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rss_int0_bb_a0_attn_idx[12] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_bb_a0 = {
+       0, 12, rss_int0_bb_a0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_bb_a0_regs[1] = {
+       &rss_int0_bb_a0,
+};
+
+static const u16 rss_int0_bb_b0_attn_idx[12] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_bb_b0 = {
+       0, 12, rss_int0_bb_b0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_bb_b0_regs[1] = {
+       &rss_int0_bb_b0,
+};
+
+static const u16 rss_int0_k2_attn_idx[12] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg rss_int0_k2 = {
+       0, 12, rss_int0_k2_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984
+};
+
+static struct attn_hw_reg *rss_int_k2_regs[1] = {
+       &rss_int0_k2,
+};
+
+/* RSS parity attention: single 4-bit ECC/parity register, identical on
+ * all chip variants.  Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *rss_prty_attn_desc[4] = {
+       "rss_mem002_i_ecc_rf_int",
+       "rss_mem001_i_ecc_rf_int",
+       "rss_mem003_i_mem_prty",
+       "rss_mem004_i_mem_prty",
+};
+#else
+#define rss_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rss_prty1_bb_a0_attn_idx[4] = {
+       0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_bb_a0 = {
+       0, 4, rss_prty1_bb_a0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_bb_a0_regs[1] = {
+       &rss_prty1_bb_a0,
+};
+
+static const u16 rss_prty1_bb_b0_attn_idx[4] = {
+       0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_bb_b0 = {
+       0, 4, rss_prty1_bb_b0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = {
+       &rss_prty1_bb_b0,
+};
+
+static const u16 rss_prty1_k2_attn_idx[4] = {
+       0, 1, 2, 3,
+};
+
+static struct attn_hw_reg rss_prty1_k2 = {
+       0, 4, rss_prty1_k2_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04
+};
+
+static struct attn_hw_reg *rss_prty_k2_regs[1] = {
+       &rss_prty1_k2,
+};
+
+/* TMLD (Tstorm/mstorm loader block) interrupt attention: one 6-bit
+ * register, identical addresses on all chip variants.
+ * Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *tmld_int_attn_desc[6] = {
+       "tmld_address_error",
+       "tmld_ld_hdr_err",
+       "tmld_ld_seg_msg_err",
+       "tmld_ld_tid_mini_cache_err",
+       "tmld_ld_cid_mini_cache_err",
+       "tmld_ld_long_message",
+};
+#else
+#define tmld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tmld_int0_bb_a0_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_bb_a0 = {
+       0, 6, tmld_int0_bb_a0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_bb_a0_regs[1] = {
+       &tmld_int0_bb_a0,
+};
+
+static const u16 tmld_int0_bb_b0_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_bb_b0 = {
+       0, 6, tmld_int0_bb_b0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = {
+       &tmld_int0_bb_b0,
+};
+
+static const u16 tmld_int0_k2_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg tmld_int0_k2 = {
+       0, 6, tmld_int0_k2_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184
+};
+
+static struct attn_hw_reg *tmld_int_k2_regs[1] = {
+       &tmld_int0_k2,
+};
+
+/* TMLD parity attention: single 8-bit ECC/parity register, identical on
+ * all chip variants.  Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *tmld_prty_attn_desc[8] = {
+       "tmld_mem006_i_ecc_rf_int",
+       "tmld_mem002_i_ecc_rf_int",
+       "tmld_mem003_i_mem_prty",
+       "tmld_mem004_i_mem_prty",
+       "tmld_mem007_i_mem_prty",
+       "tmld_mem008_i_mem_prty",
+       "tmld_mem005_i_mem_prty",
+       "tmld_mem001_i_mem_prty",
+};
+#else
+#define tmld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tmld_prty1_bb_a0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_bb_a0 = {
+       0, 8, tmld_prty1_bb_a0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_bb_a0_regs[1] = {
+       &tmld_prty1_bb_a0,
+};
+
+static const u16 tmld_prty1_bb_b0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_bb_b0 = {
+       0, 8, tmld_prty1_bb_b0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = {
+       &tmld_prty1_bb_b0,
+};
+
+static const u16 tmld_prty1_k2_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tmld_prty1_k2 = {
+       0, 8, tmld_prty1_k2_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204
+};
+
+static struct attn_hw_reg *tmld_prty_k2_regs[1] = {
+       &tmld_prty1_k2,
+};
+
+/* MULD (mstorm/ustorm loader block) interrupt attention: one 6-bit
+ * register, identical addresses on all chip variants.
+ * Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *muld_int_attn_desc[6] = {
+       "muld_address_error",
+       "muld_ld_hdr_err",
+       "muld_ld_seg_msg_err",
+       "muld_ld_tid_mini_cache_err",
+       "muld_ld_cid_mini_cache_err",
+       "muld_ld_long_message",
+};
+#else
+#define muld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 muld_int0_bb_a0_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_bb_a0 = {
+       0, 6, muld_int0_bb_a0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_bb_a0_regs[1] = {
+       &muld_int0_bb_a0,
+};
+
+static const u16 muld_int0_bb_b0_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_bb_b0 = {
+       0, 6, muld_int0_bb_b0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_bb_b0_regs[1] = {
+       &muld_int0_bb_b0,
+};
+
+static const u16 muld_int0_k2_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg muld_int0_k2 = {
+       0, 6, muld_int0_k2_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184
+};
+
+static struct attn_hw_reg *muld_int_k2_regs[1] = {
+       &muld_int0_k2,
+};
+
+/* MULD parity attention: single 10-bit ECC/parity register, identical on
+ * all chip variants.  Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *muld_prty_attn_desc[10] = {
+       "muld_mem005_i_ecc_rf_int",
+       "muld_mem001_i_ecc_rf_int",
+       "muld_mem008_i_ecc_rf_int",
+       "muld_mem007_i_ecc_rf_int",
+       "muld_mem002_i_mem_prty",
+       "muld_mem003_i_mem_prty",
+       "muld_mem009_i_mem_prty",
+       "muld_mem010_i_mem_prty",
+       "muld_mem004_i_mem_prty",
+       "muld_mem006_i_mem_prty",
+};
+#else
+#define muld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 muld_prty1_bb_a0_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_bb_a0 = {
+       0, 10, muld_prty1_bb_a0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208,
+       0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_bb_a0_regs[1] = {
+       &muld_prty1_bb_a0,
+};
+
+static const u16 muld_prty1_bb_b0_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_bb_b0 = {
+       0, 10, muld_prty1_bb_b0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208,
+       0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = {
+       &muld_prty1_bb_b0,
+};
+
+static const u16 muld_prty1_k2_attn_idx[10] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg muld_prty1_k2 = {
+       0, 10, muld_prty1_k2_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204
+};
+
+static struct attn_hw_reg *muld_prty_k2_regs[1] = {
+       &muld_prty1_k2,
+};
+
+/* YULD (ystorm/ustorm loader block) interrupt attention: one 6-bit
+ * register, identical addresses on all chip variants.
+ * Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *yuld_int_attn_desc[6] = {
+       "yuld_address_error",
+       "yuld_ld_hdr_err",
+       "yuld_ld_seg_msg_err",
+       "yuld_ld_tid_mini_cache_err",
+       "yuld_ld_cid_mini_cache_err",
+       "yuld_ld_long_message",
+};
+#else
+#define yuld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 yuld_int0_bb_a0_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_bb_a0 = {
+       0, 6, yuld_int0_bb_a0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_bb_a0_regs[1] = {
+       &yuld_int0_bb_a0,
+};
+
+static const u16 yuld_int0_bb_b0_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_bb_b0 = {
+       0, 6, yuld_int0_bb_b0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = {
+       &yuld_int0_bb_b0,
+};
+
+static const u16 yuld_int0_k2_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_int0_k2 = {
+       0, 6, yuld_int0_k2_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184
+};
+
+static struct attn_hw_reg *yuld_int_k2_regs[1] = {
+       &yuld_int0_k2,
+};
+
+/* YULD parity attention: single 6-bit parity register, identical on all
+ * chip variants.  Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *yuld_prty_attn_desc[6] = {
+       "yuld_mem001_i_mem_prty",
+       "yuld_mem002_i_mem_prty",
+       "yuld_mem005_i_mem_prty",
+       "yuld_mem006_i_mem_prty",
+       "yuld_mem004_i_mem_prty",
+       "yuld_mem003_i_mem_prty",
+};
+#else
+#define yuld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 yuld_prty1_bb_a0_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_bb_a0 = {
+       0, 6, yuld_prty1_bb_a0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_bb_a0_regs[1] = {
+       &yuld_prty1_bb_a0,
+};
+
+static const u16 yuld_prty1_bb_b0_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_bb_b0 = {
+       0, 6, yuld_prty1_bb_b0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = {
+       &yuld_prty1_bb_b0,
+};
+
+static const u16 yuld_prty1_k2_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg yuld_prty1_k2 = {
+       0, 6, yuld_prty1_k2_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204
+};
+
+static struct attn_hw_reg *yuld_prty_k2_regs[1] = {
+       &yuld_prty1_k2,
+};
+
+/* XYLD (xstorm/ystorm loader block) interrupt attention: one 6-bit
+ * register, identical addresses on all chip variants.
+ * Auto-generated; do not hand-edit values.
+ */
+#ifdef ATTN_DESC
+static const char *xyld_int_attn_desc[6] = {
+       "xyld_address_error",
+       "xyld_ld_hdr_err",
+       "xyld_ld_seg_msg_err",
+       "xyld_ld_tid_mini_cache_err",
+       "xyld_ld_cid_mini_cache_err",
+       "xyld_ld_long_message",
+};
+#else
+#define xyld_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 xyld_int0_bb_a0_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_bb_a0 = {
+       0, 6, xyld_int0_bb_a0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_bb_a0_regs[1] = {
+       &xyld_int0_bb_a0,
+};
+
+static const u16 xyld_int0_bb_b0_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_bb_b0 = {
+       0, 6, xyld_int0_bb_b0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = {
+       &xyld_int0_bb_b0,
+};
+
+static const u16 xyld_int0_k2_attn_idx[6] = {
+       0, 1, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg xyld_int0_k2 = {
+       0, 6, xyld_int0_k2_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184
+};
+
+static struct attn_hw_reg *xyld_int_k2_regs[1] = {
+       &xyld_int0_k2,
+};
+
+/* XYLD parity attention tables (ECC and memory-parity events). Description
+ * strings exist only under ATTN_DESC, else OSAL_NULL. Per-chip bit-index
+ * maps and attn_hw_reg descriptors follow. Auto-generated HW data -- do not
+ * hand-edit; trailing initializer values are register addresses (presumably
+ * status/clear/write/mask -- confirm against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *xyld_prty_attn_desc[9] = {
+       "xyld_mem004_i_ecc_rf_int",
+       "xyld_mem006_i_ecc_rf_int",
+       "xyld_mem001_i_mem_prty",
+       "xyld_mem002_i_mem_prty",
+       "xyld_mem008_i_mem_prty",
+       "xyld_mem009_i_mem_prty",
+       "xyld_mem003_i_mem_prty",
+       "xyld_mem005_i_mem_prty",
+       "xyld_mem007_i_mem_prty",
+};
+#else
+#define xyld_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 xyld_prty1_bb_a0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_bb_a0 = {
+       0, 9, xyld_prty1_bb_a0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_bb_a0_regs[1] = {
+       &xyld_prty1_bb_a0,
+};
+
+static const u16 xyld_prty1_bb_b0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_bb_b0 = {
+       0, 9, xyld_prty1_bb_b0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = {
+       &xyld_prty1_bb_b0,
+};
+
+static const u16 xyld_prty1_k2_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg xyld_prty1_k2 = {
+       0, 9, xyld_prty1_k2_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204
+};
+
+static struct attn_hw_reg *xyld_prty_k2_regs[1] = {
+       &xyld_prty1_k2,
+};
+
+/* PRM interrupt attention tables. Description strings only under ATTN_DESC,
+ * else OSAL_NULL. All three chip variants (BB A0, BB B0, K2) expose the
+ * same 11 bits at the same addresses here. Auto-generated HW data -- do not
+ * hand-edit; trailing initializer values are register addresses (presumably
+ * status/clear/write/mask -- confirm against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *prm_int_attn_desc[11] = {
+       "prm_address_error",
+       "prm_ififo_error",
+       "prm_immed_fifo_error",
+       "prm_ofst_pend_error",
+       "prm_pad_pend_error",
+       "prm_pbinp_pend_error",
+       "prm_tag_pend_error",
+       "prm_mstorm_eop_err",
+       "prm_ustorm_eop_err",
+       "prm_mstorm_que_err",
+       "prm_ustorm_que_err",
+};
+#else
+#define prm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 prm_int0_bb_a0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_bb_a0 = {
+       0, 11, prm_int0_bb_a0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_bb_a0_regs[1] = {
+       &prm_int0_bb_a0,
+};
+
+static const u16 prm_int0_bb_b0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_bb_b0 = {
+       0, 11, prm_int0_bb_b0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_bb_b0_regs[1] = {
+       &prm_int0_bb_b0,
+};
+
+static const u16 prm_int0_k2_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg prm_int0_k2 = {
+       0, 11, prm_int0_k2_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044
+};
+
+static struct attn_hw_reg *prm_int_k2_regs[1] = {
+       &prm_int0_k2,
+};
+
+/* PRM parity attention tables. Note the per-chip differences: BB A0 has a
+ * single prty1 register with 25 mapped bits; BB B0 and K2 add a prty0
+ * register and map different bit subsets into prm_prty_attn_desc (the idx
+ * arrays are sparse/non-contiguous by design). Auto-generated HW data --
+ * do not hand-edit; the four trailing initializer values are register
+ * addresses (presumably status/clear/write/mask -- confirm against struct
+ * attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *prm_prty_attn_desc[30] = {
+       "prm_datapath_registers",
+       "prm_mem012_i_ecc_rf_int",
+       "prm_mem013_i_ecc_rf_int",
+       "prm_mem014_i_ecc_rf_int",
+       "prm_mem020_i_ecc_rf_int",
+       "prm_mem004_i_mem_prty",
+       "prm_mem024_i_mem_prty",
+       "prm_mem016_i_mem_prty",
+       "prm_mem017_i_mem_prty",
+       "prm_mem008_i_mem_prty",
+       "prm_mem009_i_mem_prty",
+       "prm_mem010_i_mem_prty",
+       "prm_mem015_i_mem_prty",
+       "prm_mem011_i_mem_prty",
+       "prm_mem003_i_mem_prty",
+       "prm_mem002_i_mem_prty",
+       "prm_mem005_i_mem_prty",
+       "prm_mem023_i_mem_prty",
+       "prm_mem006_i_mem_prty",
+       "prm_mem007_i_mem_prty",
+       "prm_mem001_i_mem_prty",
+       "prm_mem022_i_mem_prty",
+       "prm_mem021_i_mem_prty",
+       "prm_mem019_i_mem_prty",
+       "prm_mem015_i_ecc_rf_int",
+       "prm_mem021_i_ecc_rf_int",
+       "prm_mem025_i_mem_prty",
+       "prm_mem018_i_mem_prty",
+       "prm_mem012_i_mem_prty",
+       "prm_mem020_i_mem_prty",
+};
+#else
+#define prm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 prm_prty1_bb_a0_attn_idx[25] = {
+       2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24,
+       25, 26, 27, 28, 29,
+};
+
+static struct attn_hw_reg prm_prty1_bb_a0 = {
+       0, 25, prm_prty1_bb_a0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_bb_a0_regs[1] = {
+       &prm_prty1_bb_a0,
+};
+
+static const u16 prm_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg prm_prty0_bb_b0 = {
+       0, 1, prm_prty0_bb_b0_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054
+};
+
+static const u16 prm_prty1_bb_b0_attn_idx[24] = {
+       2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 24, 25,
+       26, 27, 28, 29,
+};
+
+static struct attn_hw_reg prm_prty1_bb_b0 = {
+       1, 24, prm_prty1_bb_b0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = {
+       &prm_prty0_bb_b0, &prm_prty1_bb_b0,
+};
+
+static const u16 prm_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg prm_prty0_k2 = {
+       0, 1, prm_prty0_k2_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054
+};
+
+static const u16 prm_prty1_k2_attn_idx[23] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23,
+};
+
+static struct attn_hw_reg prm_prty1_k2 = {
+       1, 23, prm_prty1_k2_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204
+};
+
+static struct attn_hw_reg *prm_prty_k2_regs[2] = {
+       &prm_prty0_k2, &prm_prty1_k2,
+};
+
+/* PBF PB1 interrupt attention tables. Description strings only under
+ * ATTN_DESC, else OSAL_NULL. Same 9 bits and addresses across BB A0,
+ * BB B0 and K2. Auto-generated HW data -- do not hand-edit; trailing
+ * initializer values are register addresses (presumably
+ * status/clear/write/mask -- confirm against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *pbf_pb1_int_attn_desc[9] = {
+       "pbf_pb1_address_error",
+       "pbf_pb1_eop_error",
+       "pbf_pb1_ififo_error",
+       "pbf_pb1_pfifo_error",
+       "pbf_pb1_db_buf_error",
+       "pbf_pb1_th_exec_error",
+       "pbf_pb1_tq_error_wr",
+       "pbf_pb1_tq_error_rd_th",
+       "pbf_pb1_tq_error_rd_ih",
+};
+#else
+#define pbf_pb1_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb1_int0_bb_a0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_a0 = {
+       0, 9, pbf_pb1_int0_bb_a0_attn_idx, 0xda0040, 0xda004c, 0xda0048,
+       0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_a0_regs[1] = {
+       &pbf_pb1_int0_bb_a0,
+};
+
+static const u16 pbf_pb1_int0_bb_b0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_bb_b0 = {
+       0, 9, pbf_pb1_int0_bb_b0_attn_idx, 0xda0040, 0xda004c, 0xda0048,
+       0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = {
+       &pbf_pb1_int0_bb_b0,
+};
+
+static const u16 pbf_pb1_int0_k2_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb1_int0_k2 = {
+       0, 9, pbf_pb1_int0_k2_attn_idx, 0xda0040, 0xda004c, 0xda0048, 0xda0044
+};
+
+static struct attn_hw_reg *pbf_pb1_int_k2_regs[1] = {
+       &pbf_pb1_int0_k2,
+};
+
+/* PBF PB1 parity attention tables. Only BB B0 and K2 variants are defined
+ * here (no BB A0 parity register for this block in the visible tables).
+ * Auto-generated HW data -- do not hand-edit; trailing initializer values
+ * are register addresses (presumably status/clear/write/mask -- confirm
+ * against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *pbf_pb1_prty_attn_desc[1] = {
+       "pbf_pb1_datapath_registers",
+};
+#else
+#define pbf_pb1_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb1_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = {
+       0, 1, pbf_pb1_prty0_bb_b0_attn_idx, 0xda0050, 0xda005c, 0xda0058,
+       0xda0054
+};
+
+static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = {
+       &pbf_pb1_prty0_bb_b0,
+};
+
+static const u16 pbf_pb1_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pbf_pb1_prty0_k2 = {
+       0, 1, pbf_pb1_prty0_k2_attn_idx, 0xda0050, 0xda005c, 0xda0058, 0xda0054
+};
+
+static struct attn_hw_reg *pbf_pb1_prty_k2_regs[1] = {
+       &pbf_pb1_prty0_k2,
+};
+
+/* PBF PB2 interrupt attention tables -- structurally identical to the PB1
+ * tables but at the 0xda4xxx address range. Description strings only under
+ * ATTN_DESC, else OSAL_NULL. Auto-generated HW data -- do not hand-edit;
+ * trailing initializer values are register addresses (presumably
+ * status/clear/write/mask -- confirm against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *pbf_pb2_int_attn_desc[9] = {
+       "pbf_pb2_address_error",
+       "pbf_pb2_eop_error",
+       "pbf_pb2_ififo_error",
+       "pbf_pb2_pfifo_error",
+       "pbf_pb2_db_buf_error",
+       "pbf_pb2_th_exec_error",
+       "pbf_pb2_tq_error_wr",
+       "pbf_pb2_tq_error_rd_th",
+       "pbf_pb2_tq_error_rd_ih",
+};
+#else
+#define pbf_pb2_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb2_int0_bb_a0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_a0 = {
+       0, 9, pbf_pb2_int0_bb_a0_attn_idx, 0xda4040, 0xda404c, 0xda4048,
+       0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_a0_regs[1] = {
+       &pbf_pb2_int0_bb_a0,
+};
+
+static const u16 pbf_pb2_int0_bb_b0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_bb_b0 = {
+       0, 9, pbf_pb2_int0_bb_b0_attn_idx, 0xda4040, 0xda404c, 0xda4048,
+       0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = {
+       &pbf_pb2_int0_bb_b0,
+};
+
+static const u16 pbf_pb2_int0_k2_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg pbf_pb2_int0_k2 = {
+       0, 9, pbf_pb2_int0_k2_attn_idx, 0xda4040, 0xda404c, 0xda4048, 0xda4044
+};
+
+static struct attn_hw_reg *pbf_pb2_int_k2_regs[1] = {
+       &pbf_pb2_int0_k2,
+};
+
+/* PBF PB2 parity attention tables. As with PB1, only BB B0 and K2 variants
+ * are defined in the visible tables. Auto-generated HW data -- do not
+ * hand-edit; trailing initializer values are register addresses (presumably
+ * status/clear/write/mask -- confirm against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *pbf_pb2_prty_attn_desc[1] = {
+       "pbf_pb2_datapath_registers",
+};
+#else
+#define pbf_pb2_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_pb2_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = {
+       0, 1, pbf_pb2_prty0_bb_b0_attn_idx, 0xda4050, 0xda405c, 0xda4058,
+       0xda4054
+};
+
+static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = {
+       &pbf_pb2_prty0_bb_b0,
+};
+
+static const u16 pbf_pb2_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pbf_pb2_prty0_k2 = {
+       0, 1, pbf_pb2_prty0_k2_attn_idx, 0xda4050, 0xda405c, 0xda4058, 0xda4054
+};
+
+static struct attn_hw_reg *pbf_pb2_prty_k2_regs[1] = {
+       &pbf_pb2_prty0_k2,
+};
+
+/* RPB interrupt attention tables. Description strings only under ATTN_DESC,
+ * else OSAL_NULL. Same 9 bits and addresses across BB A0, BB B0 and K2.
+ * Auto-generated HW data -- do not hand-edit; trailing initializer values
+ * are register addresses (presumably status/clear/write/mask -- confirm
+ * against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *rpb_int_attn_desc[9] = {
+       "rpb_address_error",
+       "rpb_eop_error",
+       "rpb_ififo_error",
+       "rpb_pfifo_error",
+       "rpb_db_buf_error",
+       "rpb_th_exec_error",
+       "rpb_tq_error_wr",
+       "rpb_tq_error_rd_th",
+       "rpb_tq_error_rd_ih",
+};
+#else
+#define rpb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rpb_int0_bb_a0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_bb_a0 = {
+       0, 9, rpb_int0_bb_a0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_bb_a0_regs[1] = {
+       &rpb_int0_bb_a0,
+};
+
+static const u16 rpb_int0_bb_b0_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_bb_b0 = {
+       0, 9, rpb_int0_bb_b0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = {
+       &rpb_int0_bb_b0,
+};
+
+static const u16 rpb_int0_k2_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rpb_int0_k2 = {
+       0, 9, rpb_int0_k2_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044
+};
+
+static struct attn_hw_reg *rpb_int_k2_regs[1] = {
+       &rpb_int0_k2,
+};
+
+/* RPB parity attention tables. Only BB B0 and K2 variants are defined in
+ * the visible tables. Auto-generated HW data -- do not hand-edit; trailing
+ * initializer values are register addresses (presumably
+ * status/clear/write/mask -- confirm against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *rpb_prty_attn_desc[1] = {
+       "rpb_datapath_registers",
+};
+#else
+#define rpb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rpb_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg rpb_prty0_bb_b0 = {
+       0, 1, rpb_prty0_bb_b0_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054
+};
+
+static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = {
+       &rpb_prty0_bb_b0,
+};
+
+static const u16 rpb_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg rpb_prty0_k2 = {
+       0, 1, rpb_prty0_k2_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054
+};
+
+static struct attn_hw_reg *rpb_prty_k2_regs[1] = {
+       &rpb_prty0_k2,
+};
+
+/* BTB interrupt attention tables: 139 description strings and, per chip
+ * (BB A0, BB B0, K2), eleven attn_hw_reg descriptors (int0..int6 and
+ * int8..int11 -- there is no int7 register; the descriptor "reg index"
+ * field still runs 0..10 consecutively). The attn_idx arrays map each
+ * register bit to an entry in btb_int_attn_desc; gaps in the int0 map
+ * (e.g. 2, 4, 7...) correspond to the "btb_unused_N" placeholder names.
+ * NOTE(review): "btb_rc_pkt7_dscr_fifo_error" appears twice in the desc
+ * array (mid-table and as the final entry); this matches the generated
+ * upstream table and must not be "deduplicated" by hand.
+ * Auto-generated HW data -- do not hand-edit; the four trailing
+ * initializer values are register addresses (presumably
+ * status/clear/write/mask -- confirm against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *btb_int_attn_desc[139] = {
+       "btb_address_error",
+       "btb_rc_pkt0_rls_error",
+       "btb_unused_0",
+       "btb_rc_pkt0_len_error",
+       "btb_unused_1",
+       "btb_rc_pkt0_protocol_error",
+       "btb_rc_pkt1_rls_error",
+       "btb_unused_2",
+       "btb_rc_pkt1_len_error",
+       "btb_unused_3",
+       "btb_rc_pkt1_protocol_error",
+       "btb_rc_pkt2_rls_error",
+       "btb_unused_4",
+       "btb_rc_pkt2_len_error",
+       "btb_unused_5",
+       "btb_rc_pkt2_protocol_error",
+       "btb_rc_pkt3_rls_error",
+       "btb_unused_6",
+       "btb_rc_pkt3_len_error",
+       "btb_unused_7",
+       "btb_rc_pkt3_protocol_error",
+       "btb_rc_sop_req_tc_port_error",
+       "btb_unused_8",
+       "btb_wc0_protocol_error",
+       "btb_unused_9",
+       "btb_ll_blk_error",
+       "btb_ll_arb_calc_error",
+       "btb_fc_alm_calc_error",
+       "btb_wc0_inp_fifo_error",
+       "btb_wc0_sop_fifo_error",
+       "btb_wc0_len_fifo_error",
+       "btb_wc0_eop_fifo_error",
+       "btb_wc0_queue_fifo_error",
+       "btb_wc0_free_point_fifo_error",
+       "btb_wc0_next_point_fifo_error",
+       "btb_wc0_strt_fifo_error",
+       "btb_wc0_second_dscr_fifo_error",
+       "btb_wc0_pkt_avail_fifo_error",
+       "btb_wc0_notify_fifo_error",
+       "btb_wc0_ll_req_fifo_error",
+       "btb_wc0_ll_pa_cnt_error",
+       "btb_wc0_bb_pa_cnt_error",
+       "btb_wc_dup_upd_data_fifo_error",
+       "btb_wc_dup_rsp_dscr_fifo_error",
+       "btb_wc_dup_upd_point_fifo_error",
+       "btb_wc_dup_pkt_avail_fifo_error",
+       "btb_wc_dup_pkt_avail_cnt_error",
+       "btb_rc_pkt0_side_fifo_error",
+       "btb_rc_pkt0_req_fifo_error",
+       "btb_rc_pkt0_blk_fifo_error",
+       "btb_rc_pkt0_rls_left_fifo_error",
+       "btb_rc_pkt0_strt_ptr_fifo_error",
+       "btb_rc_pkt0_second_ptr_fifo_error",
+       "btb_rc_pkt0_rsp_fifo_error",
+       "btb_rc_pkt0_dscr_fifo_error",
+       "btb_rc_pkt1_side_fifo_error",
+       "btb_rc_pkt1_req_fifo_error",
+       "btb_rc_pkt1_blk_fifo_error",
+       "btb_rc_pkt1_rls_left_fifo_error",
+       "btb_rc_pkt1_strt_ptr_fifo_error",
+       "btb_rc_pkt1_second_ptr_fifo_error",
+       "btb_rc_pkt1_rsp_fifo_error",
+       "btb_rc_pkt1_dscr_fifo_error",
+       "btb_rc_pkt2_side_fifo_error",
+       "btb_rc_pkt2_req_fifo_error",
+       "btb_rc_pkt2_blk_fifo_error",
+       "btb_rc_pkt2_rls_left_fifo_error",
+       "btb_rc_pkt2_strt_ptr_fifo_error",
+       "btb_rc_pkt2_second_ptr_fifo_error",
+       "btb_rc_pkt2_rsp_fifo_error",
+       "btb_rc_pkt2_dscr_fifo_error",
+       "btb_rc_pkt3_side_fifo_error",
+       "btb_rc_pkt3_req_fifo_error",
+       "btb_rc_pkt3_blk_fifo_error",
+       "btb_rc_pkt3_rls_left_fifo_error",
+       "btb_rc_pkt3_strt_ptr_fifo_error",
+       "btb_rc_pkt3_second_ptr_fifo_error",
+       "btb_rc_pkt3_rsp_fifo_error",
+       "btb_rc_pkt3_dscr_fifo_error",
+       "btb_rc_sop_queue_fifo_error",
+       "btb_ll_arb_rls_fifo_error",
+       "btb_ll_arb_prefetch_fifo_error",
+       "btb_rc_pkt0_rls_fifo_error",
+       "btb_rc_pkt1_rls_fifo_error",
+       "btb_rc_pkt2_rls_fifo_error",
+       "btb_rc_pkt3_rls_fifo_error",
+       "btb_rc_pkt4_rls_fifo_error",
+       "btb_rc_pkt5_rls_fifo_error",
+       "btb_rc_pkt6_rls_fifo_error",
+       "btb_rc_pkt7_rls_fifo_error",
+       "btb_rc_pkt4_rls_error",
+       "btb_rc_pkt4_len_error",
+       "btb_rc_pkt4_protocol_error",
+       "btb_rc_pkt4_side_fifo_error",
+       "btb_rc_pkt4_req_fifo_error",
+       "btb_rc_pkt4_blk_fifo_error",
+       "btb_rc_pkt4_rls_left_fifo_error",
+       "btb_rc_pkt4_strt_ptr_fifo_error",
+       "btb_rc_pkt4_second_ptr_fifo_error",
+       "btb_rc_pkt4_rsp_fifo_error",
+       "btb_rc_pkt4_dscr_fifo_error",
+       "btb_rc_pkt5_rls_error",
+       "btb_rc_pkt5_len_error",
+       "btb_rc_pkt5_protocol_error",
+       "btb_rc_pkt5_side_fifo_error",
+       "btb_rc_pkt5_req_fifo_error",
+       "btb_rc_pkt5_blk_fifo_error",
+       "btb_rc_pkt5_rls_left_fifo_error",
+       "btb_rc_pkt5_strt_ptr_fifo_error",
+       "btb_rc_pkt5_second_ptr_fifo_error",
+       "btb_rc_pkt5_rsp_fifo_error",
+       "btb_rc_pkt5_dscr_fifo_error",
+       "btb_rc_pkt6_rls_error",
+       "btb_rc_pkt6_len_error",
+       "btb_rc_pkt6_protocol_error",
+       "btb_rc_pkt6_side_fifo_error",
+       "btb_rc_pkt6_req_fifo_error",
+       "btb_rc_pkt6_blk_fifo_error",
+       "btb_rc_pkt6_rls_left_fifo_error",
+       "btb_rc_pkt6_strt_ptr_fifo_error",
+       "btb_rc_pkt6_second_ptr_fifo_error",
+       "btb_rc_pkt6_rsp_fifo_error",
+       "btb_rc_pkt6_dscr_fifo_error",
+       "btb_rc_pkt7_rls_error",
+       "btb_rc_pkt7_len_error",
+       "btb_rc_pkt7_protocol_error",
+       "btb_rc_pkt7_side_fifo_error",
+       "btb_rc_pkt7_req_fifo_error",
+       "btb_rc_pkt7_blk_fifo_error",
+       "btb_rc_pkt7_rls_left_fifo_error",
+       "btb_rc_pkt7_strt_ptr_fifo_error",
+       "btb_rc_pkt7_second_ptr_fifo_error",
+       "btb_rc_pkt7_rsp_fifo_error",
+       "btb_packet_available_sync_fifo_push_error",
+       "btb_wc6_notify_fifo_error",
+       "btb_wc9_queue_fifo_error",
+       "btb_wc0_sync_fifo_push_error",
+       "btb_rls_sync_fifo_push_error",
+       "btb_rc_pkt7_dscr_fifo_error",
+};
+#else
+#define btb_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 btb_int0_bb_a0_attn_idx[16] = {
+       0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_bb_a0 = {
+       0, 16, btb_int0_bb_a0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_bb_a0_attn_idx[16] = {
+       26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_bb_a0 = {
+       1, 16, btb_int1_bb_a0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_bb_a0_attn_idx[4] = {
+       42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_bb_a0 = {
+       2, 4, btb_int2_bb_a0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_bb_a0_attn_idx[32] = {
+       46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+       64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_bb_a0 = {
+       3, 32, btb_int3_bb_a0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_bb_a0_attn_idx[23] = {
+       78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+       96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_bb_a0 = {
+       4, 23, btb_int4_bb_a0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_bb_a0_attn_idx[32] = {
+       101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+       115,
+       116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+           130, 131,
+       132,
+};
+
+static struct attn_hw_reg btb_int5_bb_a0 = {
+       5, 32, btb_int5_bb_a0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_bb_a0_attn_idx[1] = {
+       133,
+};
+
+static struct attn_hw_reg btb_int6_bb_a0 = {
+       6, 1, btb_int6_bb_a0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_bb_a0_attn_idx[1] = {
+       134,
+};
+
+static struct attn_hw_reg btb_int8_bb_a0 = {
+       7, 1, btb_int8_bb_a0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_bb_a0_attn_idx[1] = {
+       135,
+};
+
+static struct attn_hw_reg btb_int9_bb_a0 = {
+       8, 1, btb_int9_bb_a0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_bb_a0_attn_idx[1] = {
+       136,
+};
+
+static struct attn_hw_reg btb_int10_bb_a0 = {
+       9, 1, btb_int10_bb_a0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_bb_a0_attn_idx[2] = {
+       137, 138,
+};
+
+static struct attn_hw_reg btb_int11_bb_a0 = {
+       10, 2, btb_int11_bb_a0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_bb_a0_regs[11] = {
+       &btb_int0_bb_a0, &btb_int1_bb_a0, &btb_int2_bb_a0, &btb_int3_bb_a0,
+       &btb_int4_bb_a0, &btb_int5_bb_a0, &btb_int6_bb_a0, &btb_int8_bb_a0,
+       &btb_int9_bb_a0, &btb_int10_bb_a0,
+       &btb_int11_bb_a0,
+};
+
+static const u16 btb_int0_bb_b0_attn_idx[16] = {
+       0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_bb_b0 = {
+       0, 16, btb_int0_bb_b0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_bb_b0_attn_idx[16] = {
+       26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_bb_b0 = {
+       1, 16, btb_int1_bb_b0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_bb_b0_attn_idx[4] = {
+       42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_bb_b0 = {
+       2, 4, btb_int2_bb_b0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_bb_b0_attn_idx[32] = {
+       46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+       64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_bb_b0 = {
+       3, 32, btb_int3_bb_b0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_bb_b0_attn_idx[23] = {
+       78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+       96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_bb_b0 = {
+       4, 23, btb_int4_bb_b0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_bb_b0_attn_idx[32] = {
+       101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+       115,
+       116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+           130, 131,
+       132,
+};
+
+static struct attn_hw_reg btb_int5_bb_b0 = {
+       5, 32, btb_int5_bb_b0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_bb_b0_attn_idx[1] = {
+       133,
+};
+
+static struct attn_hw_reg btb_int6_bb_b0 = {
+       6, 1, btb_int6_bb_b0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_bb_b0_attn_idx[1] = {
+       134,
+};
+
+static struct attn_hw_reg btb_int8_bb_b0 = {
+       7, 1, btb_int8_bb_b0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_bb_b0_attn_idx[1] = {
+       135,
+};
+
+static struct attn_hw_reg btb_int9_bb_b0 = {
+       8, 1, btb_int9_bb_b0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_bb_b0_attn_idx[1] = {
+       136,
+};
+
+static struct attn_hw_reg btb_int10_bb_b0 = {
+       9, 1, btb_int10_bb_b0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_bb_b0_attn_idx[2] = {
+       137, 138,
+};
+
+static struct attn_hw_reg btb_int11_bb_b0 = {
+       10, 2, btb_int11_bb_b0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_bb_b0_regs[11] = {
+       &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0,
+       &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0,
+       &btb_int9_bb_b0, &btb_int10_bb_b0,
+       &btb_int11_bb_b0,
+};
+
+static const u16 btb_int0_k2_attn_idx[16] = {
+       0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25,
+};
+
+static struct attn_hw_reg btb_int0_k2 = {
+       0, 16, btb_int0_k2_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4
+};
+
+static const u16 btb_int1_k2_attn_idx[16] = {
+       26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg btb_int1_k2 = {
+       1, 16, btb_int1_k2_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc
+};
+
+static const u16 btb_int2_k2_attn_idx[4] = {
+       42, 43, 44, 45,
+};
+
+static struct attn_hw_reg btb_int2_k2 = {
+       2, 4, btb_int2_k2_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4
+};
+
+static const u16 btb_int3_k2_attn_idx[32] = {
+       46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+       64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+};
+
+static struct attn_hw_reg btb_int3_k2 = {
+       3, 32, btb_int3_k2_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c
+};
+
+static const u16 btb_int4_k2_attn_idx[23] = {
+       78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+       96, 97, 98, 99, 100,
+};
+
+static struct attn_hw_reg btb_int4_k2 = {
+       4, 23, btb_int4_k2_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124
+};
+
+static const u16 btb_int5_k2_attn_idx[32] = {
+       101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+       115,
+       116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+           130, 131,
+       132,
+};
+
+static struct attn_hw_reg btb_int5_k2 = {
+       5, 32, btb_int5_k2_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c
+};
+
+static const u16 btb_int6_k2_attn_idx[1] = {
+       133,
+};
+
+static struct attn_hw_reg btb_int6_k2 = {
+       6, 1, btb_int6_k2_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154
+};
+
+static const u16 btb_int8_k2_attn_idx[1] = {
+       134,
+};
+
+static struct attn_hw_reg btb_int8_k2 = {
+       7, 1, btb_int8_k2_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188
+};
+
+static const u16 btb_int9_k2_attn_idx[1] = {
+       135,
+};
+
+static struct attn_hw_reg btb_int9_k2 = {
+       8, 1, btb_int9_k2_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0
+};
+
+static const u16 btb_int10_k2_attn_idx[1] = {
+       136,
+};
+
+static struct attn_hw_reg btb_int10_k2 = {
+       9, 1, btb_int10_k2_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8
+};
+
+static const u16 btb_int11_k2_attn_idx[2] = {
+       137, 138,
+};
+
+static struct attn_hw_reg btb_int11_k2 = {
+       10, 2, btb_int11_k2_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0
+};
+
+static struct attn_hw_reg *btb_int_k2_regs[11] = {
+       &btb_int0_k2, &btb_int1_k2, &btb_int2_k2, &btb_int3_k2, &btb_int4_k2,
+       &btb_int5_k2, &btb_int6_k2, &btb_int8_k2, &btb_int9_k2, &btb_int10_k2,
+       &btb_int11_k2,
+};
+
+/* BTB parity attention tables. Per-chip differences: BB A0 has only a
+ * prty1 register (27 bits); BB B0 and K2 add a prty0 register (bits 0-4)
+ * and map 23 / 31 bits respectively through prty1 -- the idx arrays select
+ * which entries of btb_prty_attn_desc each chip exposes. Auto-generated HW
+ * data -- do not hand-edit; the four trailing initializer values are
+ * register addresses (presumably status/clear/write/mask -- confirm
+ * against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *btb_prty_attn_desc[36] = {
+       "btb_ll_bank0_mem_prty",
+       "btb_ll_bank1_mem_prty",
+       "btb_ll_bank2_mem_prty",
+       "btb_ll_bank3_mem_prty",
+       "btb_datapath_registers",
+       "btb_mem001_i_ecc_rf_int",
+       "btb_mem008_i_ecc_rf_int",
+       "btb_mem009_i_ecc_rf_int",
+       "btb_mem010_i_ecc_rf_int",
+       "btb_mem011_i_ecc_rf_int",
+       "btb_mem012_i_ecc_rf_int",
+       "btb_mem013_i_ecc_rf_int",
+       "btb_mem014_i_ecc_rf_int",
+       "btb_mem015_i_ecc_rf_int",
+       "btb_mem016_i_ecc_rf_int",
+       "btb_mem002_i_ecc_rf_int",
+       "btb_mem003_i_ecc_rf_int",
+       "btb_mem004_i_ecc_rf_int",
+       "btb_mem005_i_ecc_rf_int",
+       "btb_mem006_i_ecc_rf_int",
+       "btb_mem007_i_ecc_rf_int",
+       "btb_mem033_i_mem_prty",
+       "btb_mem035_i_mem_prty",
+       "btb_mem034_i_mem_prty",
+       "btb_mem032_i_mem_prty",
+       "btb_mem031_i_mem_prty",
+       "btb_mem021_i_mem_prty",
+       "btb_mem022_i_mem_prty",
+       "btb_mem023_i_mem_prty",
+       "btb_mem024_i_mem_prty",
+       "btb_mem025_i_mem_prty",
+       "btb_mem026_i_mem_prty",
+       "btb_mem027_i_mem_prty",
+       "btb_mem028_i_mem_prty",
+       "btb_mem030_i_mem_prty",
+       "btb_mem029_i_mem_prty",
+};
+#else
+#define btb_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 btb_prty1_bb_a0_attn_idx[27] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 26, 27,
+       28,
+       29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_bb_a0 = {
+       0, 27, btb_prty1_bb_a0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_bb_a0_regs[1] = {
+       &btb_prty1_bb_a0,
+};
+
+static const u16 btb_prty0_bb_b0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg btb_prty0_bb_b0 = {
+       0, 5, btb_prty0_bb_b0_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0
+};
+
+static const u16 btb_prty1_bb_b0_attn_idx[23] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 30, 31,
+       32,
+       33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_bb_b0 = {
+       1, 23, btb_prty1_bb_b0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = {
+       &btb_prty0_bb_b0, &btb_prty1_bb_b0,
+};
+
+static const u16 btb_prty0_k2_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg btb_prty0_k2 = {
+       0, 5, btb_prty0_k2_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0
+};
+
+static const u16 btb_prty1_k2_attn_idx[31] = {
+       5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+       24,
+       25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
+};
+
+static struct attn_hw_reg btb_prty1_k2 = {
+       1, 31, btb_prty1_k2_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404
+};
+
+static struct attn_hw_reg *btb_prty_k2_regs[2] = {
+       &btb_prty0_k2, &btb_prty1_k2,
+};
+
+/* PBF interrupt attention tables: a single address-error bit, identical
+ * across BB A0, BB B0 and K2. Description string only under ATTN_DESC,
+ * else OSAL_NULL. Auto-generated HW data -- do not hand-edit; trailing
+ * initializer values are register addresses (presumably
+ * status/clear/write/mask -- confirm against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *pbf_int_attn_desc[1] = {
+       "pbf_address_error",
+};
+#else
+#define pbf_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_int0_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pbf_int0_bb_a0 = {
+       0, 1, pbf_int0_bb_a0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_bb_a0_regs[1] = {
+       &pbf_int0_bb_a0,
+};
+
+static const u16 pbf_int0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pbf_int0_bb_b0 = {
+       0, 1, pbf_int0_bb_b0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = {
+       &pbf_int0_bb_b0,
+};
+
+static const u16 pbf_int0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pbf_int0_k2 = {
+       0, 1, pbf_int0_k2_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184
+};
+
+static struct attn_hw_reg *pbf_int_k2_regs[1] = {
+       &pbf_int0_k2,
+};
+
+/* PBF parity attention tables: 59 description strings plus per-chip
+ * bit-index maps and attn_hw_reg descriptors. BB A0 uses prty1/prty2 only;
+ * BB B0 adds a prty0 register for the datapath-registers bit. (The K2 /
+ * remaining BB B0 descriptors continue past this section of the file.)
+ * Auto-generated HW data -- do not hand-edit; the four trailing
+ * initializer values are register addresses (presumably
+ * status/clear/write/mask -- confirm against struct attn_hw_reg).
+ */
+#ifdef ATTN_DESC
+static const char *pbf_prty_attn_desc[59] = {
+       "pbf_datapath_registers",
+       "pbf_mem041_i_ecc_rf_int",
+       "pbf_mem042_i_ecc_rf_int",
+       "pbf_mem033_i_ecc_rf_int",
+       "pbf_mem003_i_ecc_rf_int",
+       "pbf_mem018_i_ecc_rf_int",
+       "pbf_mem009_i_ecc_0_rf_int",
+       "pbf_mem009_i_ecc_1_rf_int",
+       "pbf_mem012_i_ecc_0_rf_int",
+       "pbf_mem012_i_ecc_1_rf_int",
+       "pbf_mem012_i_ecc_2_rf_int",
+       "pbf_mem012_i_ecc_3_rf_int",
+       "pbf_mem012_i_ecc_4_rf_int",
+       "pbf_mem012_i_ecc_5_rf_int",
+       "pbf_mem012_i_ecc_6_rf_int",
+       "pbf_mem012_i_ecc_7_rf_int",
+       "pbf_mem012_i_ecc_8_rf_int",
+       "pbf_mem012_i_ecc_9_rf_int",
+       "pbf_mem012_i_ecc_10_rf_int",
+       "pbf_mem012_i_ecc_11_rf_int",
+       "pbf_mem012_i_ecc_12_rf_int",
+       "pbf_mem012_i_ecc_13_rf_int",
+       "pbf_mem012_i_ecc_14_rf_int",
+       "pbf_mem012_i_ecc_15_rf_int",
+       "pbf_mem040_i_mem_prty",
+       "pbf_mem039_i_mem_prty",
+       "pbf_mem038_i_mem_prty",
+       "pbf_mem034_i_mem_prty",
+       "pbf_mem032_i_mem_prty",
+       "pbf_mem031_i_mem_prty",
+       "pbf_mem030_i_mem_prty",
+       "pbf_mem029_i_mem_prty",
+       "pbf_mem022_i_mem_prty",
+       "pbf_mem023_i_mem_prty",
+       "pbf_mem021_i_mem_prty",
+       "pbf_mem020_i_mem_prty",
+       "pbf_mem001_i_mem_prty",
+       "pbf_mem002_i_mem_prty",
+       "pbf_mem006_i_mem_prty",
+       "pbf_mem007_i_mem_prty",
+       "pbf_mem005_i_mem_prty",
+       "pbf_mem004_i_mem_prty",
+       "pbf_mem028_i_mem_prty",
+       "pbf_mem026_i_mem_prty",
+       "pbf_mem027_i_mem_prty",
+       "pbf_mem019_i_mem_prty",
+       "pbf_mem016_i_mem_prty",
+       "pbf_mem017_i_mem_prty",
+       "pbf_mem008_i_mem_prty",
+       "pbf_mem011_i_mem_prty",
+       "pbf_mem010_i_mem_prty",
+       "pbf_mem024_i_mem_prty",
+       "pbf_mem025_i_mem_prty",
+       "pbf_mem037_i_mem_prty",
+       "pbf_mem036_i_mem_prty",
+       "pbf_mem035_i_mem_prty",
+       "pbf_mem014_i_mem_prty",
+       "pbf_mem015_i_mem_prty",
+       "pbf_mem013_i_mem_prty",
+};
+#else
+#define pbf_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 pbf_prty1_bb_a0_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_bb_a0 = {
+       0, 31, pbf_prty1_bb_a0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_bb_a0_attn_idx[27] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+       50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_bb_a0 = {
+       1, 27, pbf_prty2_bb_a0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_bb_a0_regs[2] = {
+       &pbf_prty1_bb_a0, &pbf_prty2_bb_a0,
+};
+
+static const u16 pbf_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pbf_prty0_bb_b0 = {
+       0, 1, pbf_prty0_bb_b0_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194
+};
+
+static const u16 pbf_prty1_bb_b0_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_bb_b0 = {
+       1, 31, pbf_prty1_bb_b0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_bb_b0_attn_idx[27] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+       50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_bb_b0 = {
+       2, 27, pbf_prty2_bb_b0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = {
+       &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0,
+};
+
+static const u16 pbf_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg pbf_prty0_k2 = {
+       0, 1, pbf_prty0_k2_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194
+};
+
+static const u16 pbf_prty1_k2_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg pbf_prty1_k2 = {
+       1, 31, pbf_prty1_k2_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204
+};
+
+static const u16 pbf_prty2_k2_attn_idx[27] = {
+       32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+       50, 51, 52, 53, 54, 55, 56, 57, 58,
+};
+
+static struct attn_hw_reg pbf_prty2_k2 = {
+       2, 27, pbf_prty2_k2_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214
+};
+
+static struct attn_hw_reg *pbf_prty_k2_regs[3] = {
+       &pbf_prty0_k2, &pbf_prty1_k2, &pbf_prty2_k2,
+};
+
+/* RDIF (receive DIF engine) attention tables.  Note bb_a0/bb_b0 expose
+ * 8 interrupt bits while k2 exposes 9 ("rdif_l1_dirty_bit" is the k2-only
+ * extra); the description array is sized for the larger variant.
+ */
+#ifdef ATTN_DESC
+static const char *rdif_int_attn_desc[9] = {
+       "rdif_address_error",
+       "rdif_fatal_dix_err",
+       "rdif_fatal_config_err",
+       "rdif_cmd_fifo_err",
+       "rdif_order_fifo_err",
+       "rdif_rdata_fifo_err",
+       "rdif_dif_stop_err",
+       "rdif_partial_dif_w_eob",
+       "rdif_l1_dirty_bit",
+};
+#else
+#define rdif_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 rdif_int0_bb_a0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg rdif_int0_bb_a0 = {
+       0, 8, rdif_int0_bb_a0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_bb_a0_regs[1] = {
+       &rdif_int0_bb_a0,
+};
+
+static const u16 rdif_int0_bb_b0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg rdif_int0_bb_b0 = {
+       0, 8, rdif_int0_bb_b0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = {
+       &rdif_int0_bb_b0,
+};
+
+static const u16 rdif_int0_k2_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg rdif_int0_k2 = {
+       0, 9, rdif_int0_k2_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184
+};
+
+static struct attn_hw_reg *rdif_int_k2_regs[1] = {
+       &rdif_int0_k2,
+};
+
+/* RDIF parity names; index 0 is unused padding so that bit maps can
+ * start at index 1 ("rdif_datapath_registers").
+ */
+#ifdef ATTN_DESC
+static const char *rdif_prty_attn_desc[2] = {
+       "rdif_unused_0",
+       "rdif_datapath_registers",
+};
+#else
+#define rdif_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 rdif_prty0_bb_b0_attn_idx[1] = {
+       1,
+};
+
+static struct attn_hw_reg rdif_prty0_bb_b0 = {
+       0, 1, rdif_prty0_bb_b0_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194
+};
+
+static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = {
+       &rdif_prty0_bb_b0,
+};
+
+static const u16 rdif_prty0_k2_attn_idx[1] = {
+       1,
+};
+
+static struct attn_hw_reg rdif_prty0_k2 = {
+       0, 1, rdif_prty0_k2_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194
+};
+
+static struct attn_hw_reg *rdif_prty_k2_regs[1] = {
+       &rdif_prty0_k2,
+};
+
+/* TDIF (transmit DIF engine) attention tables.  Mirrors the RDIF layout
+ * at base address 0x310000 instead of 0x300000; k2 again carries one
+ * extra interrupt bit relative to bb_a0/bb_b0.
+ */
+#ifdef ATTN_DESC
+static const char *tdif_int_attn_desc[9] = {
+       "tdif_address_error",
+       "tdif_fatal_dix_err",
+       "tdif_fatal_config_err",
+       "tdif_cmd_fifo_err",
+       "tdif_order_fifo_err",
+       "tdif_rdata_fifo_err",
+       "tdif_dif_stop_err",
+       "tdif_partial_dif_w_eob",
+       "tdif_l1_dirty_bit",
+};
+#else
+#define tdif_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tdif_int0_bb_a0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tdif_int0_bb_a0 = {
+       0, 8, tdif_int0_bb_a0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_bb_a0_regs[1] = {
+       &tdif_int0_bb_a0,
+};
+
+static const u16 tdif_int0_bb_b0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg tdif_int0_bb_b0 = {
+       0, 8, tdif_int0_bb_b0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = {
+       &tdif_int0_bb_b0,
+};
+
+static const u16 tdif_int0_k2_attn_idx[9] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8,
+};
+
+static struct attn_hw_reg tdif_int0_k2 = {
+       0, 9, tdif_int0_k2_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184
+};
+
+static struct attn_hw_reg *tdif_int_k2_regs[1] = {
+       &tdif_int0_k2,
+};
+
+/* TDIF parity names; index 0 is an unused placeholder, maps start at 1. */
+#ifdef ATTN_DESC
+static const char *tdif_prty_attn_desc[13] = {
+       "tdif_unused_0",
+       "tdif_datapath_registers",
+       "tdif_mem005_i_ecc_rf_int",
+       "tdif_mem009_i_ecc_rf_int",
+       "tdif_mem010_i_ecc_rf_int",
+       "tdif_mem011_i_ecc_rf_int",
+       "tdif_mem001_i_mem_prty",
+       "tdif_mem003_i_mem_prty",
+       "tdif_mem002_i_mem_prty",
+       "tdif_mem006_i_mem_prty",
+       "tdif_mem007_i_mem_prty",
+       "tdif_mem008_i_mem_prty",
+       "tdif_mem004_i_mem_prty",
+};
+#else
+#define tdif_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tdif_prty1_bb_a0_attn_idx[11] = {
+       2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_bb_a0 = {
+       0, 11, tdif_prty1_bb_a0_attn_idx, 0x310200, 0x31020c, 0x310208,
+       0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_bb_a0_regs[1] = {
+       &tdif_prty1_bb_a0,
+};
+
+static const u16 tdif_prty0_bb_b0_attn_idx[1] = {
+       1,
+};
+
+static struct attn_hw_reg tdif_prty0_bb_b0 = {
+       0, 1, tdif_prty0_bb_b0_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194
+};
+
+static const u16 tdif_prty1_bb_b0_attn_idx[11] = {
+       2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_bb_b0 = {
+       1, 11, tdif_prty1_bb_b0_attn_idx, 0x310200, 0x31020c, 0x310208,
+       0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = {
+       &tdif_prty0_bb_b0, &tdif_prty1_bb_b0,
+};
+
+static const u16 tdif_prty0_k2_attn_idx[1] = {
+       1,
+};
+
+static struct attn_hw_reg tdif_prty0_k2 = {
+       0, 1, tdif_prty0_k2_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194
+};
+
+static const u16 tdif_prty1_k2_attn_idx[11] = {
+       2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg tdif_prty1_k2 = {
+       1, 11, tdif_prty1_k2_attn_idx, 0x310200, 0x31020c, 0x310208, 0x310204
+};
+
+static struct attn_hw_reg *tdif_prty_k2_regs[2] = {
+       &tdif_prty0_k2, &tdif_prty1_k2,
+};
+
+/* CDU (context data unit) attention tables; identical 8-bit interrupt
+ * and 5-bit parity layout across all three ASIC variants.
+ */
+#ifdef ATTN_DESC
+static const char *cdu_int_attn_desc[8] = {
+       "cdu_address_error",
+       "cdu_ccfc_ld_l1_num_error",
+       "cdu_tcfc_ld_l1_num_error",
+       "cdu_ccfc_wb_l1_num_error",
+       "cdu_tcfc_wb_l1_num_error",
+       "cdu_ccfc_cvld_error",
+       "cdu_tcfc_cvld_error",
+       "cdu_bvalid_error",
+};
+#else
+#define cdu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cdu_int0_bb_a0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_bb_a0 = {
+       0, 8, cdu_int0_bb_a0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_bb_a0_regs[1] = {
+       &cdu_int0_bb_a0,
+};
+
+static const u16 cdu_int0_bb_b0_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_bb_b0 = {
+       0, 8, cdu_int0_bb_b0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = {
+       &cdu_int0_bb_b0,
+};
+
+static const u16 cdu_int0_k2_attn_idx[8] = {
+       0, 1, 2, 3, 4, 5, 6, 7,
+};
+
+static struct attn_hw_reg cdu_int0_k2 = {
+       0, 8, cdu_int0_k2_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc
+};
+
+static struct attn_hw_reg *cdu_int_k2_regs[1] = {
+       &cdu_int0_k2,
+};
+
+/* CDU parity attention bit names. */
+#ifdef ATTN_DESC
+static const char *cdu_prty_attn_desc[5] = {
+       "cdu_mem001_i_mem_prty",
+       "cdu_mem004_i_mem_prty",
+       "cdu_mem002_i_mem_prty",
+       "cdu_mem005_i_mem_prty",
+       "cdu_mem003_i_mem_prty",
+};
+#else
+#define cdu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cdu_prty1_bb_a0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_bb_a0 = {
+       0, 5, cdu_prty1_bb_a0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_bb_a0_regs[1] = {
+       &cdu_prty1_bb_a0,
+};
+
+static const u16 cdu_prty1_bb_b0_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_bb_b0 = {
+       0, 5, cdu_prty1_bb_b0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = {
+       &cdu_prty1_bb_b0,
+};
+
+static const u16 cdu_prty1_k2_attn_idx[5] = {
+       0, 1, 2, 3, 4,
+};
+
+static struct attn_hw_reg cdu_prty1_k2 = {
+       0, 5, cdu_prty1_k2_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204
+};
+
+static struct attn_hw_reg *cdu_prty_k2_regs[1] = {
+       &cdu_prty1_k2,
+};
+
+/* CCFC (connection CFC) attention tables.  In the parity *_regs arrays
+ * the prty1 descriptor is deliberately listed before prty0 -- the order
+ * is generator-defined, so preserve it.
+ */
+#ifdef ATTN_DESC
+static const char *ccfc_int_attn_desc[2] = {
+       "ccfc_address_error",
+       "ccfc_exe_error",
+};
+#else
+#define ccfc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ccfc_int0_bb_a0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_bb_a0 = {
+       0, 2, ccfc_int0_bb_a0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_bb_a0_regs[1] = {
+       &ccfc_int0_bb_a0,
+};
+
+static const u16 ccfc_int0_bb_b0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_bb_b0 = {
+       0, 2, ccfc_int0_bb_b0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = {
+       &ccfc_int0_bb_b0,
+};
+
+static const u16 ccfc_int0_k2_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg ccfc_int0_k2 = {
+       0, 2, ccfc_int0_k2_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184
+};
+
+static struct attn_hw_reg *ccfc_int_k2_regs[1] = {
+       &ccfc_int0_k2,
+};
+
+/* CCFC parity names; bb_a0 maps a 4-bit prty1 register, bb_b0/k2 map
+ * only 2 bits there but 6 bits in the prty0 register instead.
+ */
+#ifdef ATTN_DESC
+static const char *ccfc_prty_attn_desc[10] = {
+       "ccfc_mem001_i_ecc_rf_int",
+       "ccfc_mem003_i_mem_prty",
+       "ccfc_mem007_i_mem_prty",
+       "ccfc_mem006_i_mem_prty",
+       "ccfc_ccam_par_err",
+       "ccfc_scam_par_err",
+       "ccfc_lc_que_ram_porta_lsb_par_err",
+       "ccfc_lc_que_ram_porta_msb_par_err",
+       "ccfc_lc_que_ram_portb_lsb_par_err",
+       "ccfc_lc_que_ram_portb_msb_par_err",
+};
+#else
+#define ccfc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ccfc_prty1_bb_a0_attn_idx[4] = {
+       0, 1, 2, 3,
+};
+
+static struct attn_hw_reg ccfc_prty1_bb_a0 = {
+       0, 4, ccfc_prty1_bb_a0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_bb_a0_attn_idx[2] = {
+       4, 5,
+};
+
+static struct attn_hw_reg ccfc_prty0_bb_a0 = {
+       1, 2, ccfc_prty0_bb_a0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_bb_a0_regs[2] = {
+       &ccfc_prty1_bb_a0, &ccfc_prty0_bb_a0,
+};
+
+static const u16 ccfc_prty1_bb_b0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg ccfc_prty1_bb_b0 = {
+       0, 2, ccfc_prty1_bb_b0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_bb_b0_attn_idx[6] = {
+       4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ccfc_prty0_bb_b0 = {
+       1, 6, ccfc_prty0_bb_b0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = {
+       &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0,
+};
+
+static const u16 ccfc_prty1_k2_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg ccfc_prty1_k2 = {
+       0, 2, ccfc_prty1_k2_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204
+};
+
+static const u16 ccfc_prty0_k2_attn_idx[6] = {
+       4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg ccfc_prty0_k2 = {
+       1, 6, ccfc_prty0_k2_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8
+};
+
+static struct attn_hw_reg *ccfc_prty_k2_regs[2] = {
+       &ccfc_prty1_k2, &ccfc_prty0_k2,
+};
+
+/* TCFC (task CFC) attention tables.  Structurally a mirror of the CCFC
+ * tables at base address 0x2d0000 instead of 0x2e0000, including the
+ * prty1-before-prty0 ordering in the *_regs arrays.
+ */
+#ifdef ATTN_DESC
+static const char *tcfc_int_attn_desc[2] = {
+       "tcfc_address_error",
+       "tcfc_exe_error",
+};
+#else
+#define tcfc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcfc_int0_bb_a0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_bb_a0 = {
+       0, 2, tcfc_int0_bb_a0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_bb_a0_regs[1] = {
+       &tcfc_int0_bb_a0,
+};
+
+static const u16 tcfc_int0_bb_b0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_bb_b0 = {
+       0, 2, tcfc_int0_bb_b0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = {
+       &tcfc_int0_bb_b0,
+};
+
+static const u16 tcfc_int0_k2_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg tcfc_int0_k2 = {
+       0, 2, tcfc_int0_k2_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184
+};
+
+static struct attn_hw_reg *tcfc_int_k2_regs[1] = {
+       &tcfc_int0_k2,
+};
+
+/* TCFC parity attention bit names. */
+#ifdef ATTN_DESC
+static const char *tcfc_prty_attn_desc[10] = {
+       "tcfc_mem002_i_mem_prty",
+       "tcfc_mem001_i_mem_prty",
+       "tcfc_mem006_i_mem_prty",
+       "tcfc_mem005_i_mem_prty",
+       "tcfc_ccam_par_err",
+       "tcfc_scam_par_err",
+       "tcfc_lc_que_ram_porta_lsb_par_err",
+       "tcfc_lc_que_ram_porta_msb_par_err",
+       "tcfc_lc_que_ram_portb_lsb_par_err",
+       "tcfc_lc_que_ram_portb_msb_par_err",
+};
+#else
+#define tcfc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 tcfc_prty1_bb_a0_attn_idx[4] = {
+       0, 1, 2, 3,
+};
+
+static struct attn_hw_reg tcfc_prty1_bb_a0 = {
+       0, 4, tcfc_prty1_bb_a0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_bb_a0_attn_idx[2] = {
+       4, 5,
+};
+
+static struct attn_hw_reg tcfc_prty0_bb_a0 = {
+       1, 2, tcfc_prty0_bb_a0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_bb_a0_regs[2] = {
+       &tcfc_prty1_bb_a0, &tcfc_prty0_bb_a0,
+};
+
+static const u16 tcfc_prty1_bb_b0_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg tcfc_prty1_bb_b0 = {
+       0, 2, tcfc_prty1_bb_b0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_bb_b0_attn_idx[6] = {
+       4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tcfc_prty0_bb_b0 = {
+       1, 6, tcfc_prty0_bb_b0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = {
+       &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0,
+};
+
+static const u16 tcfc_prty1_k2_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg tcfc_prty1_k2 = {
+       0, 2, tcfc_prty1_k2_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204
+};
+
+static const u16 tcfc_prty0_k2_attn_idx[6] = {
+       4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg tcfc_prty0_k2 = {
+       1, 6, tcfc_prty0_k2_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8
+};
+
+static struct attn_hw_reg *tcfc_prty_k2_regs[2] = {
+       &tcfc_prty1_k2, &tcfc_prty0_k2,
+};
+
+/* IGU (interrupt generation unit) attention tables.  The bb parity maps
+ * are sparse (non-contiguous indices into igu_prty_attn_desc), while k2
+ * uses a dense 1..28 map and has no prty2 register.
+ */
+#ifdef ATTN_DESC
+static const char *igu_int_attn_desc[11] = {
+       "igu_address_error",
+       "igu_ctrl_fifo_error_err",
+       "igu_pxp_req_length_too_big",
+       "igu_host_tries2access_prod_upd",
+       "igu_vf_tries2acc_attn_cmd",
+       "igu_mme_bigger_then_5",
+       "igu_sb_index_is_not_valid",
+       "igu_durin_int_read_with_simd_dis",
+       "igu_cmd_fid_not_match",
+       "igu_segment_access_invalid",
+       "igu_attn_prod_acc",
+};
+#else
+#define igu_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 igu_int0_bb_a0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_bb_a0 = {
+       0, 11, igu_int0_bb_a0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_bb_a0_regs[1] = {
+       &igu_int0_bb_a0,
+};
+
+static const u16 igu_int0_bb_b0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_bb_b0 = {
+       0, 11, igu_int0_bb_b0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_bb_b0_regs[1] = {
+       &igu_int0_bb_b0,
+};
+
+static const u16 igu_int0_k2_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg igu_int0_k2 = {
+       0, 11, igu_int0_k2_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184
+};
+
+static struct attn_hw_reg *igu_int_k2_regs[1] = {
+       &igu_int0_k2,
+};
+
+/* IGU parity attention bit names (index 0 is the CAM parity). */
+#ifdef ATTN_DESC
+static const char *igu_prty_attn_desc[42] = {
+       "igu_cam_parity",
+       "igu_mem009_i_ecc_rf_int",
+       "igu_mem015_i_mem_prty",
+       "igu_mem016_i_mem_prty",
+       "igu_mem017_i_mem_prty",
+       "igu_mem018_i_mem_prty",
+       "igu_mem019_i_mem_prty",
+       "igu_mem001_i_mem_prty",
+       "igu_mem002_i_mem_prty_0",
+       "igu_mem002_i_mem_prty_1",
+       "igu_mem004_i_mem_prty_0",
+       "igu_mem004_i_mem_prty_1",
+       "igu_mem004_i_mem_prty_2",
+       "igu_mem003_i_mem_prty",
+       "igu_mem005_i_mem_prty",
+       "igu_mem006_i_mem_prty_0",
+       "igu_mem006_i_mem_prty_1",
+       "igu_mem008_i_mem_prty_0",
+       "igu_mem008_i_mem_prty_1",
+       "igu_mem008_i_mem_prty_2",
+       "igu_mem007_i_mem_prty",
+       "igu_mem010_i_mem_prty_0",
+       "igu_mem010_i_mem_prty_1",
+       "igu_mem012_i_mem_prty_0",
+       "igu_mem012_i_mem_prty_1",
+       "igu_mem012_i_mem_prty_2",
+       "igu_mem011_i_mem_prty",
+       "igu_mem013_i_mem_prty",
+       "igu_mem014_i_mem_prty",
+       "igu_mem020_i_mem_prty",
+       "igu_mem003_i_mem_prty_0",
+       "igu_mem003_i_mem_prty_1",
+       "igu_mem003_i_mem_prty_2",
+       "igu_mem002_i_mem_prty",
+       "igu_mem007_i_mem_prty_0",
+       "igu_mem007_i_mem_prty_1",
+       "igu_mem007_i_mem_prty_2",
+       "igu_mem006_i_mem_prty",
+       "igu_mem010_i_mem_prty_2",
+       "igu_mem010_i_mem_prty_3",
+       "igu_mem013_i_mem_prty_0",
+       "igu_mem013_i_mem_prty_1",
+};
+#else
+#define igu_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 igu_prty0_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg igu_prty0_bb_a0 = {
+       0, 1, igu_prty0_bb_a0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_bb_a0_attn_idx[31] = {
+       1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29,
+       30,
+       31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg igu_prty1_bb_a0 = {
+       1, 31, igu_prty1_bb_a0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static const u16 igu_prty2_bb_a0_attn_idx[1] = {
+       2,
+};
+
+static struct attn_hw_reg igu_prty2_bb_a0 = {
+       2, 1, igu_prty2_bb_a0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214
+};
+
+static struct attn_hw_reg *igu_prty_bb_a0_regs[3] = {
+       &igu_prty0_bb_a0, &igu_prty1_bb_a0, &igu_prty2_bb_a0,
+};
+
+static const u16 igu_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg igu_prty0_bb_b0 = {
+       0, 1, igu_prty0_bb_b0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_bb_b0_attn_idx[31] = {
+       1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29,
+       30,
+       31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+};
+
+static struct attn_hw_reg igu_prty1_bb_b0 = {
+       1, 31, igu_prty1_bb_b0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static const u16 igu_prty2_bb_b0_attn_idx[1] = {
+       2,
+};
+
+static struct attn_hw_reg igu_prty2_bb_b0 = {
+       2, 1, igu_prty2_bb_b0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214
+};
+
+static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = {
+       &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0,
+};
+
+static const u16 igu_prty0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg igu_prty0_k2 = {
+       0, 1, igu_prty0_k2_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194
+};
+
+static const u16 igu_prty1_k2_attn_idx[28] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28,
+};
+
+static struct attn_hw_reg igu_prty1_k2 = {
+       1, 28, igu_prty1_k2_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204
+};
+
+static struct attn_hw_reg *igu_prty_k2_regs[2] = {
+       &igu_prty0_k2, &igu_prty1_k2,
+};
+
+/* CAU (coalescing aggregation unit) attention tables.  Note the CAU
+ * interrupt address sequence (0x1c00d4..0x1c00e0, ascending) differs
+ * from the 0x...0180/018c/0188/0184 pattern used by most other blocks;
+ * this ordering comes from the generator and must not be "normalized".
+ */
+#ifdef ATTN_DESC
+static const char *cau_int_attn_desc[11] = {
+       "cau_address_error",
+       "cau_unauthorized_pxp_rd_cmd",
+       "cau_unauthorized_pxp_length_cmd",
+       "cau_pxp_sb_address_error",
+       "cau_pxp_pi_number_error",
+       "cau_cleanup_reg_sb_idx_error",
+       "cau_fsm_invalid_line",
+       "cau_cqe_fifo_err",
+       "cau_igu_wdata_fifo_err",
+       "cau_igu_req_fifo_err",
+       "cau_igu_cmd_fifo_err",
+};
+#else
+#define cau_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 cau_int0_bb_a0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_bb_a0 = {
+       0, 11, cau_int0_bb_a0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_bb_a0_regs[1] = {
+       &cau_int0_bb_a0,
+};
+
+static const u16 cau_int0_bb_b0_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_bb_b0 = {
+       0, 11, cau_int0_bb_b0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_bb_b0_regs[1] = {
+       &cau_int0_bb_b0,
+};
+
+static const u16 cau_int0_k2_attn_idx[11] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
+};
+
+static struct attn_hw_reg cau_int0_k2 = {
+       0, 11, cau_int0_k2_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0
+};
+
+static struct attn_hw_reg *cau_int_k2_regs[1] = {
+       &cau_int0_k2,
+};
+
+/* CAU parity names; bb variants skip indices 3-4, k2 maps 0-12 densely. */
+#ifdef ATTN_DESC
+static const char *cau_prty_attn_desc[15] = {
+       "cau_mem006_i_ecc_rf_int",
+       "cau_mem001_i_ecc_0_rf_int",
+       "cau_mem001_i_ecc_1_rf_int",
+       "cau_mem002_i_ecc_rf_int",
+       "cau_mem004_i_ecc_rf_int",
+       "cau_mem005_i_mem_prty",
+       "cau_mem007_i_mem_prty",
+       "cau_mem008_i_mem_prty",
+       "cau_mem009_i_mem_prty",
+       "cau_mem010_i_mem_prty",
+       "cau_mem011_i_mem_prty",
+       "cau_mem003_i_mem_prty_0",
+       "cau_mem003_i_mem_prty_1",
+       "cau_mem002_i_mem_prty",
+       "cau_mem004_i_mem_prty",
+};
+#else
+#define cau_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 cau_prty1_bb_a0_attn_idx[13] = {
+       0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg cau_prty1_bb_a0 = {
+       0, 13, cau_prty1_bb_a0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_bb_a0_regs[1] = {
+       &cau_prty1_bb_a0,
+};
+
+static const u16 cau_prty1_bb_b0_attn_idx[13] = {
+       0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+};
+
+static struct attn_hw_reg cau_prty1_bb_b0 = {
+       0, 13, cau_prty1_bb_b0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = {
+       &cau_prty1_bb_b0,
+};
+
+static const u16 cau_prty1_k2_attn_idx[13] = {
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+};
+
+static struct attn_hw_reg cau_prty1_k2 = {
+       0, 13, cau_prty1_k2_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204
+};
+
+static struct attn_hw_reg *cau_prty_k2_regs[1] = {
+       &cau_prty1_k2,
+};
+
+/* UMAC interrupt attention tables -- defined for the k2 variant only
+ * in this section (no bb_a0/bb_b0 counterparts appear here).
+ */
+#ifdef ATTN_DESC
+static const char *umac_int_attn_desc[2] = {
+       "umac_address_error",
+       "umac_tx_overflow",
+};
+#else
+#define umac_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 umac_int0_k2_attn_idx[2] = {
+       0, 1,
+};
+
+static struct attn_hw_reg umac_int0_k2 = {
+       0, 2, umac_int0_k2_attn_idx, 0x51180, 0x5118c, 0x51188, 0x51184
+};
+
+static struct attn_hw_reg *umac_int_k2_regs[1] = {
+       &umac_int0_k2,
+};
+
+/* DBG (debug block) attention tables: a single interrupt bit
+ * (address error) and a single parity bit, identical across variants.
+ */
+#ifdef ATTN_DESC
+static const char *dbg_int_attn_desc[1] = {
+       "dbg_address_error",
+};
+#else
+#define dbg_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 dbg_int0_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg dbg_int0_bb_a0 = {
+       0, 1, dbg_int0_bb_a0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_bb_a0_regs[1] = {
+       &dbg_int0_bb_a0,
+};
+
+static const u16 dbg_int0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg dbg_int0_bb_b0 = {
+       0, 1, dbg_int0_bb_b0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = {
+       &dbg_int0_bb_b0,
+};
+
+static const u16 dbg_int0_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg dbg_int0_k2 = {
+       0, 1, dbg_int0_k2_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184
+};
+
+static struct attn_hw_reg *dbg_int_k2_regs[1] = {
+       &dbg_int0_k2,
+};
+
+/* DBG parity attention bit name. */
+#ifdef ATTN_DESC
+static const char *dbg_prty_attn_desc[1] = {
+       "dbg_mem001_i_mem_prty",
+};
+#else
+#define dbg_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 dbg_prty1_bb_a0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg dbg_prty1_bb_a0 = {
+       0, 1, dbg_prty1_bb_a0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_bb_a0_regs[1] = {
+       &dbg_prty1_bb_a0,
+};
+
+static const u16 dbg_prty1_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg dbg_prty1_bb_b0 = {
+       0, 1, dbg_prty1_bb_b0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = {
+       &dbg_prty1_bb_b0,
+};
+
+static const u16 dbg_prty1_k2_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg dbg_prty1_k2 = {
+       0, 1, dbg_prty1_k2_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204
+};
+
+static struct attn_hw_reg *dbg_prty_k2_regs[1] = {
+       &dbg_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nig_int_attn_desc[196] = {
+       "nig_address_error",
+       "nig_debug_fifo_error",
+       "nig_dorq_fifo_error",
+       "nig_dbg_syncfifo_error_wr",
+       "nig_dorq_syncfifo_error_wr",
+       "nig_storm_syncfifo_error_wr",
+       "nig_dbgmux_syncfifo_error_wr",
+       "nig_msdm_syncfifo_error_wr",
+       "nig_tsdm_syncfifo_error_wr",
+       "nig_usdm_syncfifo_error_wr",
+       "nig_xsdm_syncfifo_error_wr",
+       "nig_ysdm_syncfifo_error_wr",
+       "nig_tx_sopq0_error",
+       "nig_tx_sopq1_error",
+       "nig_tx_sopq2_error",
+       "nig_tx_sopq3_error",
+       "nig_tx_sopq4_error",
+       "nig_tx_sopq5_error",
+       "nig_tx_sopq6_error",
+       "nig_tx_sopq7_error",
+       "nig_tx_sopq8_error",
+       "nig_tx_sopq9_error",
+       "nig_tx_sopq10_error",
+       "nig_tx_sopq11_error",
+       "nig_tx_sopq12_error",
+       "nig_tx_sopq13_error",
+       "nig_tx_sopq14_error",
+       "nig_tx_sopq15_error",
+       "nig_lb_sopq0_error",
+       "nig_lb_sopq1_error",
+       "nig_lb_sopq2_error",
+       "nig_lb_sopq3_error",
+       "nig_lb_sopq4_error",
+       "nig_lb_sopq5_error",
+       "nig_lb_sopq6_error",
+       "nig_lb_sopq7_error",
+       "nig_lb_sopq8_error",
+       "nig_lb_sopq9_error",
+       "nig_lb_sopq10_error",
+       "nig_lb_sopq11_error",
+       "nig_lb_sopq12_error",
+       "nig_lb_sopq13_error",
+       "nig_lb_sopq14_error",
+       "nig_lb_sopq15_error",
+       "nig_p0_purelb_sopq_error",
+       "nig_p0_rx_macfifo_error",
+       "nig_p0_tx_macfifo_error",
+       "nig_p0_tx_bmb_fifo_error",
+       "nig_p0_lb_bmb_fifo_error",
+       "nig_p0_tx_btb_fifo_error",
+       "nig_p0_lb_btb_fifo_error",
+       "nig_p0_rx_llh_dfifo_error",
+       "nig_p0_tx_llh_dfifo_error",
+       "nig_p0_lb_llh_dfifo_error",
+       "nig_p0_rx_llh_hfifo_error",
+       "nig_p0_tx_llh_hfifo_error",
+       "nig_p0_lb_llh_hfifo_error",
+       "nig_p0_rx_llh_rfifo_error",
+       "nig_p0_tx_llh_rfifo_error",
+       "nig_p0_lb_llh_rfifo_error",
+       "nig_p0_storm_fifo_error",
+       "nig_p0_storm_dscr_fifo_error",
+       "nig_p0_tx_gnt_fifo_error",
+       "nig_p0_lb_gnt_fifo_error",
+       "nig_p0_tx_pause_too_long_int",
+       "nig_p0_tc0_pause_too_long_int",
+       "nig_p0_tc1_pause_too_long_int",
+       "nig_p0_tc2_pause_too_long_int",
+       "nig_p0_tc3_pause_too_long_int",
+       "nig_p0_tc4_pause_too_long_int",
+       "nig_p0_tc5_pause_too_long_int",
+       "nig_p0_tc6_pause_too_long_int",
+       "nig_p0_tc7_pause_too_long_int",
+       "nig_p0_lb_tc0_pause_too_long_int",
+       "nig_p0_lb_tc1_pause_too_long_int",
+       "nig_p0_lb_tc2_pause_too_long_int",
+       "nig_p0_lb_tc3_pause_too_long_int",
+       "nig_p0_lb_tc4_pause_too_long_int",
+       "nig_p0_lb_tc5_pause_too_long_int",
+       "nig_p0_lb_tc6_pause_too_long_int",
+       "nig_p0_lb_tc7_pause_too_long_int",
+       "nig_p0_lb_tc8_pause_too_long_int",
+       "nig_p1_purelb_sopq_error",
+       "nig_p1_rx_macfifo_error",
+       "nig_p1_tx_macfifo_error",
+       "nig_p1_tx_bmb_fifo_error",
+       "nig_p1_lb_bmb_fifo_error",
+       "nig_p1_tx_btb_fifo_error",
+       "nig_p1_lb_btb_fifo_error",
+       "nig_p1_rx_llh_dfifo_error",
+       "nig_p1_tx_llh_dfifo_error",
+       "nig_p1_lb_llh_dfifo_error",
+       "nig_p1_rx_llh_hfifo_error",
+       "nig_p1_tx_llh_hfifo_error",
+       "nig_p1_lb_llh_hfifo_error",
+       "nig_p1_rx_llh_rfifo_error",
+       "nig_p1_tx_llh_rfifo_error",
+       "nig_p1_lb_llh_rfifo_error",
+       "nig_p1_storm_fifo_error",
+       "nig_p1_storm_dscr_fifo_error",
+       "nig_p1_tx_gnt_fifo_error",
+       "nig_p1_lb_gnt_fifo_error",
+       "nig_p1_tx_pause_too_long_int",
+       "nig_p1_tc0_pause_too_long_int",
+       "nig_p1_tc1_pause_too_long_int",
+       "nig_p1_tc2_pause_too_long_int",
+       "nig_p1_tc3_pause_too_long_int",
+       "nig_p1_tc4_pause_too_long_int",
+       "nig_p1_tc5_pause_too_long_int",
+       "nig_p1_tc6_pause_too_long_int",
+       "nig_p1_tc7_pause_too_long_int",
+       "nig_p1_lb_tc0_pause_too_long_int",
+       "nig_p1_lb_tc1_pause_too_long_int",
+       "nig_p1_lb_tc2_pause_too_long_int",
+       "nig_p1_lb_tc3_pause_too_long_int",
+       "nig_p1_lb_tc4_pause_too_long_int",
+       "nig_p1_lb_tc5_pause_too_long_int",
+       "nig_p1_lb_tc6_pause_too_long_int",
+       "nig_p1_lb_tc7_pause_too_long_int",
+       "nig_p1_lb_tc8_pause_too_long_int",
+       "nig_p2_purelb_sopq_error",
+       "nig_p2_rx_macfifo_error",
+       "nig_p2_tx_macfifo_error",
+       "nig_p2_tx_bmb_fifo_error",
+       "nig_p2_lb_bmb_fifo_error",
+       "nig_p2_tx_btb_fifo_error",
+       "nig_p2_lb_btb_fifo_error",
+       "nig_p2_rx_llh_dfifo_error",
+       "nig_p2_tx_llh_dfifo_error",
+       "nig_p2_lb_llh_dfifo_error",
+       "nig_p2_rx_llh_hfifo_error",
+       "nig_p2_tx_llh_hfifo_error",
+       "nig_p2_lb_llh_hfifo_error",
+       "nig_p2_rx_llh_rfifo_error",
+       "nig_p2_tx_llh_rfifo_error",
+       "nig_p2_lb_llh_rfifo_error",
+       "nig_p2_storm_fifo_error",
+       "nig_p2_storm_dscr_fifo_error",
+       "nig_p2_tx_gnt_fifo_error",
+       "nig_p2_lb_gnt_fifo_error",
+       "nig_p2_tx_pause_too_long_int",
+       "nig_p2_tc0_pause_too_long_int",
+       "nig_p2_tc1_pause_too_long_int",
+       "nig_p2_tc2_pause_too_long_int",
+       "nig_p2_tc3_pause_too_long_int",
+       "nig_p2_tc4_pause_too_long_int",
+       "nig_p2_tc5_pause_too_long_int",
+       "nig_p2_tc6_pause_too_long_int",
+       "nig_p2_tc7_pause_too_long_int",
+       "nig_p2_lb_tc0_pause_too_long_int",
+       "nig_p2_lb_tc1_pause_too_long_int",
+       "nig_p2_lb_tc2_pause_too_long_int",
+       "nig_p2_lb_tc3_pause_too_long_int",
+       "nig_p2_lb_tc4_pause_too_long_int",
+       "nig_p2_lb_tc5_pause_too_long_int",
+       "nig_p2_lb_tc6_pause_too_long_int",
+       "nig_p2_lb_tc7_pause_too_long_int",
+       "nig_p2_lb_tc8_pause_too_long_int",
+       "nig_p3_purelb_sopq_error",
+       "nig_p3_rx_macfifo_error",
+       "nig_p3_tx_macfifo_error",
+       "nig_p3_tx_bmb_fifo_error",
+       "nig_p3_lb_bmb_fifo_error",
+       "nig_p3_tx_btb_fifo_error",
+       "nig_p3_lb_btb_fifo_error",
+       "nig_p3_rx_llh_dfifo_error",
+       "nig_p3_tx_llh_dfifo_error",
+       "nig_p3_lb_llh_dfifo_error",
+       "nig_p3_rx_llh_hfifo_error",
+       "nig_p3_tx_llh_hfifo_error",
+       "nig_p3_lb_llh_hfifo_error",
+       "nig_p3_rx_llh_rfifo_error",
+       "nig_p3_tx_llh_rfifo_error",
+       "nig_p3_lb_llh_rfifo_error",
+       "nig_p3_storm_fifo_error",
+       "nig_p3_storm_dscr_fifo_error",
+       "nig_p3_tx_gnt_fifo_error",
+       "nig_p3_lb_gnt_fifo_error",
+       "nig_p3_tx_pause_too_long_int",
+       "nig_p3_tc0_pause_too_long_int",
+       "nig_p3_tc1_pause_too_long_int",
+       "nig_p3_tc2_pause_too_long_int",
+       "nig_p3_tc3_pause_too_long_int",
+       "nig_p3_tc4_pause_too_long_int",
+       "nig_p3_tc5_pause_too_long_int",
+       "nig_p3_tc6_pause_too_long_int",
+       "nig_p3_tc7_pause_too_long_int",
+       "nig_p3_lb_tc0_pause_too_long_int",
+       "nig_p3_lb_tc1_pause_too_long_int",
+       "nig_p3_lb_tc2_pause_too_long_int",
+       "nig_p3_lb_tc3_pause_too_long_int",
+       "nig_p3_lb_tc4_pause_too_long_int",
+       "nig_p3_lb_tc5_pause_too_long_int",
+       "nig_p3_lb_tc6_pause_too_long_int",
+       "nig_p3_lb_tc7_pause_too_long_int",
+       "nig_p3_lb_tc8_pause_too_long_int",
+};
+#else
+#define nig_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nig_int0_bb_a0_attn_idx[12] = { /* reg bit -> nig_int_attn_desc[] index */
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_bb_a0 = { /* presumably {reg id, bit count, idx map, sts/clr/wr/mask addrs} - confirm vs attn_hw_reg decl */
+       0, 12, nig_int0_bb_a0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_bb_a0_attn_idx[32] = {
+       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+       30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_bb_a0 = {
+       1, 32, nig_int1_bb_a0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_bb_a0_attn_idx[20] = {
+       44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+       62, 63,
+};
+
+static struct attn_hw_reg nig_int2_bb_a0 = {
+       2, 20, nig_int2_bb_a0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_bb_a0_attn_idx[18] = {
+       64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_bb_a0 = {
+       3, 18, nig_int3_bb_a0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_bb_a0_attn_idx[20] = {
+       82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+       100, 101,
+};
+
+static struct attn_hw_reg nig_int4_bb_a0 = {
+       4, 20, nig_int4_bb_a0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_bb_a0_attn_idx[18] = {
+       102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+       116,
+       117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_bb_a0 = {
+       5, 18, nig_int5_bb_a0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static struct attn_hw_reg *nig_int_bb_a0_regs[6] = { /* NIG interrupt attn registers, BB chip rev A0 */
+       &nig_int0_bb_a0, &nig_int1_bb_a0, &nig_int2_bb_a0, &nig_int3_bb_a0,
+       &nig_int4_bb_a0, &nig_int5_bb_a0,
+};
+
+static const u16 nig_int0_bb_b0_attn_idx[12] = { /* reg bit -> nig_int_attn_desc[] index */
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_bb_b0 = {
+       0, 12, nig_int0_bb_b0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_bb_b0_attn_idx[32] = {
+       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+       30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_bb_b0 = {
+       1, 32, nig_int1_bb_b0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_bb_b0_attn_idx[20] = {
+       44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+       62, 63,
+};
+
+static struct attn_hw_reg nig_int2_bb_b0 = {
+       2, 20, nig_int2_bb_b0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_bb_b0_attn_idx[18] = {
+       64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_bb_b0 = {
+       3, 18, nig_int3_bb_b0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_bb_b0_attn_idx[20] = {
+       82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+       100, 101,
+};
+
+static struct attn_hw_reg nig_int4_bb_b0 = {
+       4, 20, nig_int4_bb_b0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_bb_b0_attn_idx[18] = {
+       102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+       116,
+       117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_bb_b0 = {
+       5, 18, nig_int5_bb_b0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static struct attn_hw_reg *nig_int_bb_b0_regs[6] = { /* NIG interrupt attn registers, BB chip rev B0 (same layout as A0) */
+       &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0,
+       &nig_int4_bb_b0, &nig_int5_bb_b0,
+};
+
+static const u16 nig_int0_k2_attn_idx[12] = { /* reg bit -> nig_int_attn_desc[] index */
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+};
+
+static struct attn_hw_reg nig_int0_k2 = {
+       0, 12, nig_int0_k2_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044
+};
+
+static const u16 nig_int1_k2_attn_idx[32] = {
+       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+       30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+};
+
+static struct attn_hw_reg nig_int1_k2 = {
+       1, 32, nig_int1_k2_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054
+};
+
+static const u16 nig_int2_k2_attn_idx[20] = {
+       44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+       62, 63,
+};
+
+static struct attn_hw_reg nig_int2_k2 = {
+       2, 20, nig_int2_k2_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064
+};
+
+static const u16 nig_int3_k2_attn_idx[18] = {
+       64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
+};
+
+static struct attn_hw_reg nig_int3_k2 = {
+       3, 18, nig_int3_k2_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074
+};
+
+static const u16 nig_int4_k2_attn_idx[20] = {
+       82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+       100, 101,
+};
+
+static struct attn_hw_reg nig_int4_k2 = {
+       4, 20, nig_int4_k2_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084
+};
+
+static const u16 nig_int5_k2_attn_idx[18] = {
+       102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
+       116,
+       117, 118, 119,
+};
+
+static struct attn_hw_reg nig_int5_k2 = {
+       5, 18, nig_int5_k2_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094
+};
+
+static const u16 nig_int6_k2_attn_idx[20] = { /* K2 has four extra NIG int regs (6-9) vs BB */
+       120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133,
+       134,
+       135, 136, 137, 138, 139,
+};
+
+static struct attn_hw_reg nig_int6_k2 = {
+       6, 20, nig_int6_k2_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4
+};
+
+static const u16 nig_int7_k2_attn_idx[18] = {
+       140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+       154,
+       155, 156, 157,
+};
+
+static struct attn_hw_reg nig_int7_k2 = {
+       7, 18, nig_int7_k2_attn_idx, 0x5000b0, 0x5000bc, 0x5000b8, 0x5000b4
+};
+
+static const u16 nig_int8_k2_attn_idx[20] = {
+       158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
+       172,
+       173, 174, 175, 176, 177,
+};
+
+static struct attn_hw_reg nig_int8_k2 = {
+       8, 20, nig_int8_k2_attn_idx, 0x5000c0, 0x5000cc, 0x5000c8, 0x5000c4
+};
+
+static const u16 nig_int9_k2_attn_idx[18] = {
+       178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
+       192,
+       193, 194, 195,
+};
+
+static struct attn_hw_reg nig_int9_k2 = {
+       9, 18, nig_int9_k2_attn_idx, 0x5000d0, 0x5000dc, 0x5000d8, 0x5000d4
+};
+
+static struct attn_hw_reg *nig_int_k2_regs[10] = { /* NIG interrupt attn registers, K2 chip */
+       &nig_int0_k2, &nig_int1_k2, &nig_int2_k2, &nig_int3_k2, &nig_int4_k2,
+       &nig_int5_k2, &nig_int6_k2, &nig_int7_k2, &nig_int8_k2, &nig_int9_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nig_prty_attn_desc[113] = { /* human-readable names for NIG parity attention bits */
+       "nig_datapath_parity_error",
+       "nig_mem107_i_mem_prty",
+       "nig_mem103_i_mem_prty",
+       "nig_mem104_i_mem_prty",
+       "nig_mem105_i_mem_prty",
+       "nig_mem106_i_mem_prty",
+       "nig_mem072_i_mem_prty",
+       "nig_mem071_i_mem_prty",
+       "nig_mem074_i_mem_prty",
+       "nig_mem073_i_mem_prty",
+       "nig_mem076_i_mem_prty",
+       "nig_mem075_i_mem_prty",
+       "nig_mem078_i_mem_prty",
+       "nig_mem077_i_mem_prty",
+       "nig_mem055_i_mem_prty",
+       "nig_mem062_i_mem_prty",
+       "nig_mem063_i_mem_prty",
+       "nig_mem064_i_mem_prty",
+       "nig_mem065_i_mem_prty",
+       "nig_mem066_i_mem_prty",
+       "nig_mem067_i_mem_prty",
+       "nig_mem068_i_mem_prty",
+       "nig_mem069_i_mem_prty",
+       "nig_mem070_i_mem_prty",
+       "nig_mem056_i_mem_prty",
+       "nig_mem057_i_mem_prty",
+       "nig_mem058_i_mem_prty",
+       "nig_mem059_i_mem_prty",
+       "nig_mem060_i_mem_prty",
+       "nig_mem061_i_mem_prty",
+       "nig_mem035_i_mem_prty",
+       "nig_mem046_i_mem_prty",
+       "nig_mem051_i_mem_prty",
+       "nig_mem052_i_mem_prty",
+       "nig_mem090_i_mem_prty",
+       "nig_mem089_i_mem_prty",
+       "nig_mem092_i_mem_prty",
+       "nig_mem091_i_mem_prty",
+       "nig_mem109_i_mem_prty",
+       "nig_mem110_i_mem_prty",
+       "nig_mem001_i_mem_prty",
+       "nig_mem008_i_mem_prty",
+       "nig_mem009_i_mem_prty",
+       "nig_mem010_i_mem_prty",
+       "nig_mem011_i_mem_prty",
+       "nig_mem012_i_mem_prty",
+       "nig_mem013_i_mem_prty",
+       "nig_mem014_i_mem_prty",
+       "nig_mem015_i_mem_prty",
+       "nig_mem016_i_mem_prty",
+       "nig_mem002_i_mem_prty",
+       "nig_mem003_i_mem_prty",
+       "nig_mem004_i_mem_prty",
+       "nig_mem005_i_mem_prty",
+       "nig_mem006_i_mem_prty",
+       "nig_mem007_i_mem_prty",
+       "nig_mem080_i_mem_prty",
+       "nig_mem081_i_mem_prty",
+       "nig_mem082_i_mem_prty",
+       "nig_mem083_i_mem_prty",
+       "nig_mem048_i_mem_prty",
+       "nig_mem049_i_mem_prty",
+       "nig_mem102_i_mem_prty",
+       "nig_mem087_i_mem_prty",
+       "nig_mem086_i_mem_prty",
+       "nig_mem088_i_mem_prty",
+       "nig_mem079_i_mem_prty",
+       "nig_mem047_i_mem_prty",
+       "nig_mem050_i_mem_prty",
+       "nig_mem053_i_mem_prty",
+       "nig_mem054_i_mem_prty",
+       "nig_mem036_i_mem_prty",
+       "nig_mem037_i_mem_prty",
+       "nig_mem038_i_mem_prty",
+       "nig_mem039_i_mem_prty",
+       "nig_mem040_i_mem_prty",
+       "nig_mem041_i_mem_prty",
+       "nig_mem042_i_mem_prty",
+       "nig_mem043_i_mem_prty",
+       "nig_mem044_i_mem_prty",
+       "nig_mem045_i_mem_prty",
+       "nig_mem093_i_mem_prty",
+       "nig_mem094_i_mem_prty",
+       "nig_mem027_i_mem_prty",
+       "nig_mem028_i_mem_prty",
+       "nig_mem029_i_mem_prty",
+       "nig_mem030_i_mem_prty",
+       "nig_mem017_i_mem_prty",
+       "nig_mem018_i_mem_prty",
+       "nig_mem095_i_mem_prty",
+       "nig_mem084_i_mem_prty",
+       "nig_mem085_i_mem_prty",
+       "nig_mem099_i_mem_prty",
+       "nig_mem100_i_mem_prty",
+       "nig_mem096_i_mem_prty",
+       "nig_mem097_i_mem_prty",
+       "nig_mem098_i_mem_prty",
+       "nig_mem031_i_mem_prty",
+       "nig_mem032_i_mem_prty",
+       "nig_mem033_i_mem_prty",
+       "nig_mem034_i_mem_prty",
+       "nig_mem019_i_mem_prty",
+       "nig_mem020_i_mem_prty",
+       "nig_mem021_i_mem_prty",
+       "nig_mem022_i_mem_prty",
+       "nig_mem101_i_mem_prty",
+       "nig_mem023_i_mem_prty",
+       "nig_mem024_i_mem_prty",
+       "nig_mem025_i_mem_prty",
+       "nig_mem026_i_mem_prty",
+       "nig_mem108_i_mem_prty",
+       "nig_mem031_ext_i_mem_prty",
+       "nig_mem034_ext_i_mem_prty",
+};
+#else
+#define nig_prty_attn_desc OSAL_NULL /* description strings compiled out when !ATTN_DESC */
+#endif
+
+static const u16 nig_prty1_bb_a0_attn_idx[31] = { /* reg bit -> nig_prty_attn_desc[] index */
+       1, 2, 5, 12, 13, 23, 35, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+       52, 53, 54, 55, 56, 60, 61, 62, 63, 64, 65, 66,
+};
+
+static struct attn_hw_reg nig_prty1_bb_a0 = {
+       0, 31, nig_prty1_bb_a0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_bb_a0_attn_idx[31] = {
+       33, 69, 70, 90, 91, 8, 11, 10, 14, 17, 18, 19, 20, 21, 22, 7, 6, 24, 25,
+       26, 27, 28, 29, 15, 16, 57, 58, 59, 9, 94, 95,
+};
+
+static struct attn_hw_reg nig_prty2_bb_a0 = {
+       1, 31, nig_prty2_bb_a0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_bb_a0_attn_idx[31] = {
+       96, 97, 98, 103, 104, 92, 93, 105, 106, 107, 108, 109, 80, 31, 67, 83,
+       84,
+       3, 68, 85, 86, 89, 77, 78, 79, 4, 32, 36, 81, 82, 87,
+};
+
+static struct attn_hw_reg nig_prty3_bb_a0 = {
+       2, 31, nig_prty3_bb_a0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_bb_a0_attn_idx[14] = {
+       88, 101, 102, 75, 71, 74, 76, 73, 72, 34, 37, 99, 30, 100,
+};
+
+static struct attn_hw_reg nig_prty4_bb_a0 = {
+       3, 14, nig_prty4_bb_a0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_bb_a0_regs[4] = { /* NIG parity attn registers, BB rev A0 (no prty0 reg) */
+       &nig_prty1_bb_a0, &nig_prty2_bb_a0, &nig_prty3_bb_a0, &nig_prty4_bb_a0,
+};
+
+static const u16 nig_prty0_bb_b0_attn_idx[1] = { /* reg bit -> nig_prty_attn_desc[] index */
+       0,
+};
+
+static struct attn_hw_reg nig_prty0_bb_b0 = {
+       0, 1, nig_prty0_bb_b0_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4
+};
+
+static const u16 nig_prty1_bb_b0_attn_idx[31] = {
+       4, 5, 9, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+       48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+};
+
+static struct attn_hw_reg nig_prty1_bb_b0 = {
+       1, 31, nig_prty1_bb_b0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_bb_b0_attn_idx[31] = {
+       90, 91, 64, 63, 65, 8, 11, 10, 13, 12, 66, 14, 17, 18, 19, 20, 21, 22,
+       23,
+       7, 6, 24, 25, 26, 27, 28, 29, 15, 16, 92, 93,
+};
+
+static struct attn_hw_reg nig_prty2_bb_b0 = {
+       2, 31, nig_prty2_bb_b0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_bb_b0_attn_idx[31] = {
+       94, 95, 96, 97, 99, 100, 103, 104, 105, 62, 108, 109, 80, 31, 1, 67, 60,
+       69, 83, 84, 2, 3, 110, 61, 68, 70, 85, 86, 111, 112, 89,
+};
+
+static struct attn_hw_reg nig_prty3_bb_b0 = {
+       3, 31, nig_prty3_bb_b0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_bb_b0_attn_idx[17] = {
+       106, 107, 87, 88, 81, 82, 101, 102, 75, 71, 74, 76, 77, 78, 79, 73, 72,
+};
+
+static struct attn_hw_reg nig_prty4_bb_b0 = {
+       4, 17, nig_prty4_bb_b0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = { /* NIG parity attn registers, BB rev B0 */
+       &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0, &nig_prty3_bb_b0,
+       &nig_prty4_bb_b0,
+};
+
+static const u16 nig_prty0_k2_attn_idx[1] = { /* reg bit -> nig_prty_attn_desc[] index */
+       0,
+};
+
+static struct attn_hw_reg nig_prty0_k2 = {
+       0, 1, nig_prty0_k2_attn_idx, 0x5000e0, 0x5000ec, 0x5000e8, 0x5000e4
+};
+
+static const u16 nig_prty1_k2_attn_idx[31] = {
+       1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+       21,
+       22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+};
+
+static struct attn_hw_reg nig_prty1_k2 = {
+       1, 31, nig_prty1_k2_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204
+};
+
+static const u16 nig_prty2_k2_attn_idx[31] = {
+       67, 60, 61, 68, 32, 33, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+       37, 36, 81, 82, 83, 84, 85, 86, 48, 49, 87, 88, 89,
+};
+
+static struct attn_hw_reg nig_prty2_k2 = {
+       2, 31, nig_prty2_k2_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214
+};
+
+static const u16 nig_prty3_k2_attn_idx[31] = {
+       94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 92, 93, 105, 62, 106,
+       107, 108, 109, 59, 90, 91, 64, 55, 41, 42, 43, 63, 65, 35, 34,
+};
+
+static struct attn_hw_reg nig_prty3_k2 = {
+       3, 31, nig_prty3_k2_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224
+};
+
+static const u16 nig_prty4_k2_attn_idx[14] = {
+       44, 45, 46, 47, 40, 50, 66, 56, 57, 58, 51, 52, 53, 54,
+};
+
+static struct attn_hw_reg nig_prty4_k2 = {
+       4, 14, nig_prty4_k2_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234
+};
+
+static struct attn_hw_reg *nig_prty_k2_regs[5] = { /* NIG parity attn registers, K2 chip */
+       &nig_prty0_k2, &nig_prty1_k2, &nig_prty2_k2, &nig_prty3_k2,
+       &nig_prty4_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *wol_int_attn_desc[1] = { /* WOL (wake-on-LAN block) interrupt attention names */
+       "wol_address_error",
+};
+#else
+#define wol_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 wol_int0_k2_attn_idx[1] = { /* reg bit -> wol_int_attn_desc[] index */
+       0,
+};
+
+static struct attn_hw_reg wol_int0_k2 = {
+       0, 1, wol_int0_k2_attn_idx, 0x600040, 0x60004c, 0x600048, 0x600044
+};
+
+static struct attn_hw_reg *wol_int_k2_regs[1] = { /* K2 only - WOL block not present on BB */
+       &wol_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *wol_prty_attn_desc[24] = { /* WOL block parity attention bit names */
+       "wol_mem017_i_mem_prty",
+       "wol_mem018_i_mem_prty",
+       "wol_mem019_i_mem_prty",
+       "wol_mem020_i_mem_prty",
+       "wol_mem021_i_mem_prty",
+       "wol_mem022_i_mem_prty",
+       "wol_mem023_i_mem_prty",
+       "wol_mem024_i_mem_prty",
+       "wol_mem001_i_mem_prty",
+       "wol_mem008_i_mem_prty",
+       "wol_mem009_i_mem_prty",
+       "wol_mem010_i_mem_prty",
+       "wol_mem011_i_mem_prty",
+       "wol_mem012_i_mem_prty",
+       "wol_mem013_i_mem_prty",
+       "wol_mem014_i_mem_prty",
+       "wol_mem015_i_mem_prty",
+       "wol_mem016_i_mem_prty",
+       "wol_mem002_i_mem_prty",
+       "wol_mem003_i_mem_prty",
+       "wol_mem004_i_mem_prty",
+       "wol_mem005_i_mem_prty",
+       "wol_mem006_i_mem_prty",
+       "wol_mem007_i_mem_prty",
+};
+#else
+#define wol_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 wol_prty1_k2_attn_idx[24] = { /* identity map: reg bit N -> desc[N] */
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23,
+};
+
+static struct attn_hw_reg wol_prty1_k2 = {
+       0, 24, wol_prty1_k2_attn_idx, 0x600200, 0x60020c, 0x600208, 0x600204
+};
+
+static struct attn_hw_reg *wol_prty_k2_regs[1] = {
+       &wol_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *bmbn_int_attn_desc[1] = { /* BMBN block interrupt attention names */
+       "bmbn_address_error",
+};
+#else
+#define bmbn_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 bmbn_int0_k2_attn_idx[1] = { /* reg bit -> bmbn_int_attn_desc[] index */
+       0,
+};
+
+static struct attn_hw_reg bmbn_int0_k2 = {
+       0, 1, bmbn_int0_k2_attn_idx, 0x610040, 0x61004c, 0x610048, 0x610044
+};
+
+static struct attn_hw_reg *bmbn_int_k2_regs[1] = { /* K2 only */
+       &bmbn_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ipc_int_attn_desc[14] = { /* IPC block interrupt attention names (POR/PERST events, OTP ECC) */
+       "ipc_address_error",
+       "ipc_unused_0",
+       "ipc_vmain_por_assert",
+       "ipc_vmain_por_deassert",
+       "ipc_perst_assert",
+       "ipc_perst_deassert",
+       "ipc_otp_ecc_ded_0",
+       "ipc_otp_ecc_ded_1",
+       "ipc_otp_ecc_ded_2",
+       "ipc_otp_ecc_ded_3",
+       "ipc_otp_ecc_ded_4",
+       "ipc_otp_ecc_ded_5",
+       "ipc_otp_ecc_ded_6",
+       "ipc_otp_ecc_ded_7",
+};
+#else
+#define ipc_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ipc_int0_bb_a0_attn_idx[5] = { /* desc index 1 (ipc_unused_0) deliberately skipped */
+       0, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg ipc_int0_bb_a0 = {
+       0, 5, ipc_int0_bb_a0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510
+};
+
+static struct attn_hw_reg *ipc_int_bb_a0_regs[1] = {
+       &ipc_int0_bb_a0,
+};
+
+static const u16 ipc_int0_bb_b0_attn_idx[13] = { /* B0 additionally reports the 8 OTP ECC DED bits */
+       0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+};
+
+static struct attn_hw_reg ipc_int0_bb_b0 = {
+       0, 13, ipc_int0_bb_b0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510
+};
+
+static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = {
+       &ipc_int0_bb_b0,
+};
+
+static const u16 ipc_int0_k2_attn_idx[5] = {
+       0, 2, 3, 4, 5,
+};
+
+static struct attn_hw_reg ipc_int0_k2 = {
+       0, 5, ipc_int0_k2_attn_idx, 0x202dc, 0x202e8, 0x202e4, 0x202e0
+};
+
+static struct attn_hw_reg *ipc_int_k2_regs[1] = {
+       &ipc_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ipc_prty_attn_desc[1] = { /* IPC block parity attention names */
+       "ipc_fake_par_err",
+};
+#else
+#define ipc_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 ipc_prty0_bb_a0_attn_idx[1] = { /* reg bit -> ipc_prty_attn_desc[] index */
+       0,
+};
+
+static struct attn_hw_reg ipc_prty0_bb_a0 = {
+       0, 1, ipc_prty0_bb_a0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520
+};
+
+static struct attn_hw_reg *ipc_prty_bb_a0_regs[1] = {
+       &ipc_prty0_bb_a0,
+};
+
+static const u16 ipc_prty0_bb_b0_attn_idx[1] = {
+       0,
+};
+
+static struct attn_hw_reg ipc_prty0_bb_b0 = {
+       0, 1, ipc_prty0_bb_b0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520
+};
+
+static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = { /* no K2 variant defined for ipc parity */
+       &ipc_prty0_bb_b0,
+};
+
+#ifdef ATTN_DESC
+static const char *nwm_int_attn_desc[18] = { /* NWM block interrupt attention names (per-lane TX over/underflow, speed) */
+       "nwm_address_error",
+       "nwm_tx_overflow_0",
+       "nwm_tx_underflow_0",
+       "nwm_tx_overflow_1",
+       "nwm_tx_underflow_1",
+       "nwm_tx_overflow_2",
+       "nwm_tx_underflow_2",
+       "nwm_tx_overflow_3",
+       "nwm_tx_underflow_3",
+       "nwm_unused_0",
+       "nwm_ln0_at_10M",
+       "nwm_ln0_at_100M",
+       "nwm_ln1_at_10M",
+       "nwm_ln1_at_100M",
+       "nwm_ln2_at_10M",
+       "nwm_ln2_at_100M",
+       "nwm_ln3_at_10M",
+       "nwm_ln3_at_100M",
+};
+#else
+#define nwm_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nwm_int0_k2_attn_idx[17] = { /* desc index 9 (nwm_unused_0) deliberately skipped */
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17,
+};
+
+static struct attn_hw_reg nwm_int0_k2 = {
+       0, 17, nwm_int0_k2_attn_idx, 0x800004, 0x800010, 0x80000c, 0x800008
+};
+
+static struct attn_hw_reg *nwm_int_k2_regs[1] = { /* K2 only */
+       &nwm_int0_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nwm_prty_attn_desc[72] = { /* NWM block parity attention bit names */
+       "nwm_mem020_i_mem_prty",
+       "nwm_mem028_i_mem_prty",
+       "nwm_mem036_i_mem_prty",
+       "nwm_mem044_i_mem_prty",
+       "nwm_mem023_i_mem_prty",
+       "nwm_mem031_i_mem_prty",
+       "nwm_mem039_i_mem_prty",
+       "nwm_mem047_i_mem_prty",
+       "nwm_mem024_i_mem_prty",
+       "nwm_mem032_i_mem_prty",
+       "nwm_mem040_i_mem_prty",
+       "nwm_mem048_i_mem_prty",
+       "nwm_mem018_i_mem_prty",
+       "nwm_mem026_i_mem_prty",
+       "nwm_mem034_i_mem_prty",
+       "nwm_mem042_i_mem_prty",
+       "nwm_mem017_i_mem_prty",
+       "nwm_mem025_i_mem_prty",
+       "nwm_mem033_i_mem_prty",
+       "nwm_mem041_i_mem_prty",
+       "nwm_mem021_i_mem_prty",
+       "nwm_mem029_i_mem_prty",
+       "nwm_mem037_i_mem_prty",
+       "nwm_mem045_i_mem_prty",
+       "nwm_mem019_i_mem_prty",
+       "nwm_mem027_i_mem_prty",
+       "nwm_mem035_i_mem_prty",
+       "nwm_mem043_i_mem_prty",
+       "nwm_mem022_i_mem_prty",
+       "nwm_mem030_i_mem_prty",
+       "nwm_mem038_i_mem_prty",
+       "nwm_mem046_i_mem_prty",
+       "nwm_mem057_i_mem_prty",
+       "nwm_mem059_i_mem_prty",
+       "nwm_mem061_i_mem_prty",
+       "nwm_mem063_i_mem_prty",
+       "nwm_mem058_i_mem_prty",
+       "nwm_mem060_i_mem_prty",
+       "nwm_mem062_i_mem_prty",
+       "nwm_mem064_i_mem_prty",
+       "nwm_mem009_i_mem_prty",
+       "nwm_mem010_i_mem_prty",
+       "nwm_mem011_i_mem_prty",
+       "nwm_mem012_i_mem_prty",
+       "nwm_mem013_i_mem_prty",
+       "nwm_mem014_i_mem_prty",
+       "nwm_mem015_i_mem_prty",
+       "nwm_mem016_i_mem_prty",
+       "nwm_mem001_i_mem_prty",
+       "nwm_mem002_i_mem_prty",
+       "nwm_mem003_i_mem_prty",
+       "nwm_mem004_i_mem_prty",
+       "nwm_mem005_i_mem_prty",
+       "nwm_mem006_i_mem_prty",
+       "nwm_mem007_i_mem_prty",
+       "nwm_mem008_i_mem_prty",
+       "nwm_mem049_i_mem_prty",
+       "nwm_mem053_i_mem_prty",
+       "nwm_mem050_i_mem_prty",
+       "nwm_mem054_i_mem_prty",
+       "nwm_mem051_i_mem_prty",
+       "nwm_mem055_i_mem_prty",
+       "nwm_mem052_i_mem_prty",
+       "nwm_mem056_i_mem_prty",
+       "nwm_mem066_i_mem_prty",
+       "nwm_mem068_i_mem_prty",
+       "nwm_mem070_i_mem_prty",
+       "nwm_mem072_i_mem_prty",
+       "nwm_mem065_i_mem_prty",
+       "nwm_mem067_i_mem_prty",
+       "nwm_mem069_i_mem_prty",
+       "nwm_mem071_i_mem_prty",
+};
+#else
+#define nwm_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nwm_prty1_k2_attn_idx[31] = { /* identity map: reg bit N -> desc[N] */
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+       20,
+       21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+};
+
+static struct attn_hw_reg nwm_prty1_k2 = {
+       0, 31, nwm_prty1_k2_attn_idx, 0x800200, 0x80020c, 0x800208, 0x800204
+};
+
+static const u16 nwm_prty2_k2_attn_idx[31] = {
+       31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+       49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+};
+
+static struct attn_hw_reg nwm_prty2_k2 = {
+       1, 31, nwm_prty2_k2_attn_idx, 0x800210, 0x80021c, 0x800218, 0x800214
+};
+
+static const u16 nwm_prty3_k2_attn_idx[10] = {
+       62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
+};
+
+static struct attn_hw_reg nwm_prty3_k2 = {
+       2, 10, nwm_prty3_k2_attn_idx, 0x800220, 0x80022c, 0x800228, 0x800224
+};
+
+static struct attn_hw_reg *nwm_prty_k2_regs[3] = { /* K2 only */
+       &nwm_prty1_k2, &nwm_prty2_k2, &nwm_prty3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nws_int_attn_desc[38] = { /* NWS block interrupt names: per-lane autoneg resolution results */
+       "nws_address_error",
+       "nws_ln0_an_resolve_50g_cr2",
+       "nws_ln0_an_resolve_50g_kr2",
+       "nws_ln0_an_resolve_40g_cr4",
+       "nws_ln0_an_resolve_40g_kr4",
+       "nws_ln0_an_resolve_25g_gr",
+       "nws_ln0_an_resolve_25g_cr",
+       "nws_ln0_an_resolve_25g_kr",
+       "nws_ln0_an_resolve_10g_kr",
+       "nws_ln0_an_resolve_1g_kx",
+       "nws_unused_0",
+       "nws_ln1_an_resolve_50g_cr2",
+       "nws_ln1_an_resolve_50g_kr2",
+       "nws_ln1_an_resolve_40g_cr4",
+       "nws_ln1_an_resolve_40g_kr4",
+       "nws_ln1_an_resolve_25g_gr",
+       "nws_ln1_an_resolve_25g_cr",
+       "nws_ln1_an_resolve_25g_kr",
+       "nws_ln1_an_resolve_10g_kr",
+       "nws_ln1_an_resolve_1g_kx",
+       "nws_ln2_an_resolve_50g_cr2",
+       "nws_ln2_an_resolve_50g_kr2",
+       "nws_ln2_an_resolve_40g_cr4",
+       "nws_ln2_an_resolve_40g_kr4",
+       "nws_ln2_an_resolve_25g_gr",
+       "nws_ln2_an_resolve_25g_cr",
+       "nws_ln2_an_resolve_25g_kr",
+       "nws_ln2_an_resolve_10g_kr",
+       "nws_ln2_an_resolve_1g_kx",
+       "nws_ln3_an_resolve_50g_cr2",
+       "nws_ln3_an_resolve_50g_kr2",
+       "nws_ln3_an_resolve_40g_cr4",
+       "nws_ln3_an_resolve_40g_kr4",
+       "nws_ln3_an_resolve_25g_gr",
+       "nws_ln3_an_resolve_25g_cr",
+       "nws_ln3_an_resolve_25g_kr",
+       "nws_ln3_an_resolve_10g_kr",
+       "nws_ln3_an_resolve_1g_kx",
+};
+#else
+#define nws_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 nws_int0_k2_attn_idx[10] = { /* one attn reg per lane; index 10 (nws_unused_0) skipped below */
+       0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+};
+
+static struct attn_hw_reg nws_int0_k2 = {
+       0, 10, nws_int0_k2_attn_idx, 0x700180, 0x70018c, 0x700188, 0x700184
+};
+
+static const u16 nws_int1_k2_attn_idx[9] = {
+       11, 12, 13, 14, 15, 16, 17, 18, 19,
+};
+
+static struct attn_hw_reg nws_int1_k2 = {
+       1, 9, nws_int1_k2_attn_idx, 0x700190, 0x70019c, 0x700198, 0x700194
+};
+
+static const u16 nws_int2_k2_attn_idx[9] = {
+       20, 21, 22, 23, 24, 25, 26, 27, 28,
+};
+
+static struct attn_hw_reg nws_int2_k2 = {
+       2, 9, nws_int2_k2_attn_idx, 0x7001a0, 0x7001ac, 0x7001a8, 0x7001a4
+};
+
+static const u16 nws_int3_k2_attn_idx[9] = {
+       29, 30, 31, 32, 33, 34, 35, 36, 37,
+};
+
+static struct attn_hw_reg nws_int3_k2 = {
+       3, 9, nws_int3_k2_attn_idx, 0x7001b0, 0x7001bc, 0x7001b8, 0x7001b4
+};
+
+static struct attn_hw_reg *nws_int_k2_regs[4] = { /* K2 only */
+       &nws_int0_k2, &nws_int1_k2, &nws_int2_k2, &nws_int3_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *nws_prty_attn_desc[4] = { /* NWS block parity attention bit names */
+       "nws_mem003_i_mem_prty",
+       "nws_mem001_i_mem_prty",
+       "nws_mem004_i_mem_prty",
+       "nws_mem002_i_mem_prty",
+};
+#else
+#define nws_prty_attn_desc OSAL_NULL
+#endif
+
+static const u16 nws_prty1_k2_attn_idx[4] = { /* identity map: reg bit N -> desc[N] */
+       0, 1, 2, 3,
+};
+
+static struct attn_hw_reg nws_prty1_k2 = {
+       0, 4, nws_prty1_k2_attn_idx, 0x700200, 0x70020c, 0x700208, 0x700204
+};
+
+static struct attn_hw_reg *nws_prty_k2_regs[1] = {
+       &nws_prty1_k2,
+};
+
+#ifdef ATTN_DESC
+static const char *ms_int_attn_desc[1] = { /* MS block interrupt attention names */
+       "ms_address_error",
+};
+#else
+#define ms_int_attn_desc OSAL_NULL
+#endif
+
+static const u16 ms_int0_k2_attn_idx[1] = { /* reg bit -> ms_int_attn_desc[] index */
+       0,
+};
+
+static struct attn_hw_reg ms_int0_k2 = {
+       0, 1, ms_int0_k2_attn_idx, 0x6a0180, 0x6a018c, 0x6a0188, 0x6a0184
+};
+
+static struct attn_hw_reg *ms_int_k2_regs[1] = { /* K2 only */
+       &ms_int0_k2,
+};
+
+static struct attn_hw_block attn_blocks[] = {
+       {"grc", grc_int_attn_desc, grc_prty_attn_desc, {
+                                                       {1, 1,
+                                                        grc_int_bb_a0_regs,
+                                                        grc_prty_bb_a0_regs},
+                                                       {1, 1,
+                                                        grc_int_bb_b0_regs,
+                                                        grc_prty_bb_b0_regs},
+                                                       {1, 1, grc_int_k2_regs,
+                                                        grc_prty_k2_regs} } },
+       {"miscs", miscs_int_attn_desc, miscs_prty_attn_desc, {
+                                                             {2, 0,
+
+                                                       miscs_int_bb_a0_regs,
+                                                              OSAL_NULL},
+                                                             {2, 1,
+
+                                                       miscs_int_bb_b0_regs,
+
+                                                       miscs_prty_bb_b0_regs},
+                                                             {1, 1,
+
+                                                       miscs_int_k2_regs,
+
+                                               miscs_prty_k2_regs } } },
+       {"misc", misc_int_attn_desc, OSAL_NULL, {
+                                                {1, 0, misc_int_bb_a0_regs,
+                                                 OSAL_NULL},
+                                                {1, 0, misc_int_bb_b0_regs,
+                                                 OSAL_NULL},
+                                                {1, 0, misc_int_k2_regs,
+                                                 OSAL_NULL } } },
+       {"dbu", OSAL_NULL, OSAL_NULL, {
+                                      {0, 0, OSAL_NULL, OSAL_NULL},
+                                      {0, 0, OSAL_NULL, OSAL_NULL},
+                                      {0, 0, OSAL_NULL, OSAL_NULL } } },
+       {"pglue_b", pglue_b_int_attn_desc, pglue_b_prty_attn_desc, {
+                                                                   {1, 1,
+
+                                               pglue_b_int_bb_a0_regs,
+
+                                               pglue_b_prty_bb_a0_regs},
+                                                                   {1, 2,
+
+                                               pglue_b_int_bb_b0_regs,
+
+                                               pglue_b_prty_bb_b0_regs},
+                                                                   {1, 3,
+
+                                            pglue_b_int_k2_regs,
+
+                                            pglue_b_prty_k2_regs } } },
+       {"cnig", cnig_int_attn_desc, cnig_prty_attn_desc, {
+                                                          {1, 0,
+                                                   cnig_int_bb_a0_regs,
+                                                           OSAL_NULL},
+                                                          {1, 1,
+                                                   cnig_int_bb_b0_regs,
+
+                                                   cnig_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                           cnig_int_k2_regs,
+
+                                                   cnig_prty_k2_regs } } },
+       {"cpmu", cpmu_int_attn_desc, OSAL_NULL, {
+                                                {1, 0, cpmu_int_bb_a0_regs,
+                                                 OSAL_NULL},
+                                                {1, 0, cpmu_int_bb_b0_regs,
+                                                 OSAL_NULL},
+                                                {1, 0, cpmu_int_k2_regs,
+                                                 OSAL_NULL } } },
+       {"ncsi", ncsi_int_attn_desc, ncsi_prty_attn_desc, {
+                                                          {1, 1,
+                                                   ncsi_int_bb_a0_regs,
+
+                                                   ncsi_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   ncsi_int_bb_b0_regs,
+
+                                                   ncsi_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                           ncsi_int_k2_regs,
+
+                                                   ncsi_prty_k2_regs } } },
+       {"opte", OSAL_NULL, opte_prty_attn_desc, {
+                                                 {0, 1, OSAL_NULL,
+                                                  opte_prty_bb_a0_regs},
+                                                 {0, 2, OSAL_NULL,
+                                                  opte_prty_bb_b0_regs},
+                                                 {0, 2, OSAL_NULL,
+                                                  opte_prty_k2_regs } } },
+       {"bmb", bmb_int_attn_desc, bmb_prty_attn_desc, {
+                                                       {12, 2,
+                                                        bmb_int_bb_a0_regs,
+                                                        bmb_prty_bb_a0_regs},
+                                                       {12, 3,
+                                                        bmb_int_bb_b0_regs,
+                                                        bmb_prty_bb_b0_regs},
+                                               {12, 3, bmb_int_k2_regs,
+                                                        bmb_prty_k2_regs } } },
+       {"pcie", pcie_int_attn_desc, pcie_prty_attn_desc, {
+                                                          {0, 1, OSAL_NULL,
+
+                                                   pcie_prty_bb_a0_regs},
+                                                          {0, 1, OSAL_NULL,
+
+                                                   pcie_prty_bb_b0_regs},
+                                                          {1, 2,
+                                                           pcie_int_k2_regs,
+
+                                                   pcie_prty_k2_regs } } },
+       {"mcp", OSAL_NULL, OSAL_NULL, {
+                                      {0, 0, OSAL_NULL, OSAL_NULL},
+                                      {0, 0, OSAL_NULL, OSAL_NULL},
+                                      {0, 0, OSAL_NULL, OSAL_NULL } } },
+       {"mcp2", OSAL_NULL, mcp2_prty_attn_desc, {
+                                                 {0, 2, OSAL_NULL,
+                                                  mcp2_prty_bb_a0_regs},
+                                                 {0, 2, OSAL_NULL,
+                                                  mcp2_prty_bb_b0_regs},
+                                                 {0, 2, OSAL_NULL,
+                                                  mcp2_prty_k2_regs } } },
+       {"pswhst", pswhst_int_attn_desc, pswhst_prty_attn_desc, {
+                                                                {1, 1,
+
+                                                 pswhst_int_bb_a0_regs,
+
+                                                 pswhst_prty_bb_a0_regs},
+                                                                {1, 2,
+
+                                                 pswhst_int_bb_b0_regs,
+
+                                                 pswhst_prty_bb_b0_regs},
+                                                                {1, 2,
+
+                                                 pswhst_int_k2_regs,
+
+                                                 pswhst_prty_k2_regs } } },
+       {"pswhst2", pswhst2_int_attn_desc, pswhst2_prty_attn_desc, {
+                                                                   {1, 0,
+
+                                                    pswhst2_int_bb_a0_regs,
+                                                            OSAL_NULL},
+                                                                   {1, 1,
+
+                                                    pswhst2_int_bb_b0_regs,
+
+                                               pswhst2_prty_bb_b0_regs},
+                                                                   {1, 1,
+
+                                            pswhst2_int_k2_regs,
+
+                                            pswhst2_prty_k2_regs } } },
+       {"pswrd", pswrd_int_attn_desc, pswrd_prty_attn_desc, {
+                                                             {1, 0,
+
+                                             pswrd_int_bb_a0_regs,
+                                                              OSAL_NULL},
+                                                             {1, 1,
+
+                                                      pswrd_int_bb_b0_regs,
+
+                                                      pswrd_prty_bb_b0_regs},
+                                                             {1, 1,
+
+                                                      pswrd_int_k2_regs,
+
+                                                      pswrd_prty_k2_regs } } },
+       {"pswrd2", pswrd2_int_attn_desc, pswrd2_prty_attn_desc, {
+                                                                {1, 2,
+
+                                                 pswrd2_int_bb_a0_regs,
+
+                                                 pswrd2_prty_bb_a0_regs},
+                                                                {1, 3,
+
+                                                 pswrd2_int_bb_b0_regs,
+
+                                                 pswrd2_prty_bb_b0_regs},
+                                                                {1, 3,
+
+                                                 pswrd2_int_k2_regs,
+
+                                                 pswrd2_prty_k2_regs } } },
+       {"pswwr", pswwr_int_attn_desc, pswwr_prty_attn_desc, {
+                                                             {1, 0,
+
+                                              pswwr_int_bb_a0_regs,
+                                                              OSAL_NULL},
+                                                             {1, 1,
+
+                                              pswwr_int_bb_b0_regs,
+
+                                              pswwr_prty_bb_b0_regs},
+                                                             {1, 1,
+
+                                              pswwr_int_k2_regs,
+
+                                              pswwr_prty_k2_regs } } },
+       {"pswwr2", pswwr2_int_attn_desc, pswwr2_prty_attn_desc, {
+                                                                {1, 4,
+
+                                                 pswwr2_int_bb_a0_regs,
+
+                                                 pswwr2_prty_bb_a0_regs},
+                                                                {1, 5,
+
+                                                 pswwr2_int_bb_b0_regs,
+
+                                                 pswwr2_prty_bb_b0_regs},
+                                                                {1, 5,
+
+                                                 pswwr2_int_k2_regs,
+
+                                                 pswwr2_prty_k2_regs } } },
+       {"pswrq", pswrq_int_attn_desc, pswrq_prty_attn_desc, {
+                                                             {1, 0,
+
+                                              pswrq_int_bb_a0_regs,
+                                                              OSAL_NULL},
+                                                             {1, 1,
+
+                                              pswrq_int_bb_b0_regs,
+
+                                              pswrq_prty_bb_b0_regs},
+                                                             {1, 1,
+
+                                              pswrq_int_k2_regs,
+
+                                              pswrq_prty_k2_regs } } },
+       {"pswrq2", pswrq2_int_attn_desc, pswrq2_prty_attn_desc, {
+                                                                {1, 1,
+
+                                                 pswrq2_int_bb_a0_regs,
+
+                                                 pswrq2_prty_bb_a0_regs},
+                                                                {1, 1,
+
+                                                 pswrq2_int_bb_b0_regs,
+
+                                                 pswrq2_prty_bb_b0_regs},
+                                                                {1, 1,
+
+                                                 pswrq2_int_k2_regs,
+
+                                                 pswrq2_prty_k2_regs } } },
+       {"pglcs", pglcs_int_attn_desc, OSAL_NULL, {
+                                                  {1, 0, pglcs_int_bb_a0_regs,
+                                                   OSAL_NULL},
+                                                  {1, 0, pglcs_int_bb_b0_regs,
+                                                   OSAL_NULL},
+                                                  {1, 0, pglcs_int_k2_regs,
+                                                   OSAL_NULL } } },
+       {"dmae", dmae_int_attn_desc, dmae_prty_attn_desc, {
+                                                          {1, 1,
+                                                   dmae_int_bb_a0_regs,
+
+                                                   dmae_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   dmae_int_bb_b0_regs,
+
+                                                   dmae_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                           dmae_int_k2_regs,
+
+                                           dmae_prty_k2_regs } } },
+       {"ptu", ptu_int_attn_desc, ptu_prty_attn_desc, {
+                                                       {1, 1,
+                                                        ptu_int_bb_a0_regs,
+                                                        ptu_prty_bb_a0_regs},
+                                                       {1, 1,
+                                                        ptu_int_bb_b0_regs,
+                                                        ptu_prty_bb_b0_regs},
+                                                       {1, 1, ptu_int_k2_regs,
+                                                        ptu_prty_k2_regs } } },
+       {"tcm", tcm_int_attn_desc, tcm_prty_attn_desc, {
+                                                       {3, 2,
+                                                        tcm_int_bb_a0_regs,
+                                                        tcm_prty_bb_a0_regs},
+                                                       {3, 2,
+                                                        tcm_int_bb_b0_regs,
+                                                        tcm_prty_bb_b0_regs},
+                                                       {3, 2, tcm_int_k2_regs,
+                                                        tcm_prty_k2_regs } } },
+       {"mcm", mcm_int_attn_desc, mcm_prty_attn_desc, {
+                                                       {3, 2,
+                                                        mcm_int_bb_a0_regs,
+                                                        mcm_prty_bb_a0_regs},
+                                                       {3, 2,
+                                                        mcm_int_bb_b0_regs,
+                                                        mcm_prty_bb_b0_regs},
+                                                       {3, 2, mcm_int_k2_regs,
+                                                        mcm_prty_k2_regs } } },
+       {"ucm", ucm_int_attn_desc, ucm_prty_attn_desc, {
+                                                       {3, 2,
+                                                        ucm_int_bb_a0_regs,
+                                                        ucm_prty_bb_a0_regs},
+                                                       {3, 2,
+                                                        ucm_int_bb_b0_regs,
+                                                        ucm_prty_bb_b0_regs},
+                                                       {3, 2, ucm_int_k2_regs,
+                                                        ucm_prty_k2_regs } } },
+       {"xcm", xcm_int_attn_desc, xcm_prty_attn_desc, {
+                                                       {3, 2,
+                                                        xcm_int_bb_a0_regs,
+                                                        xcm_prty_bb_a0_regs},
+                                                       {3, 2,
+                                                        xcm_int_bb_b0_regs,
+                                                        xcm_prty_bb_b0_regs},
+                                                       {3, 2, xcm_int_k2_regs,
+                                                        xcm_prty_k2_regs } } },
+       {"ycm", ycm_int_attn_desc, ycm_prty_attn_desc, {
+                                                       {3, 2,
+                                                        ycm_int_bb_a0_regs,
+                                                        ycm_prty_bb_a0_regs},
+                                                       {3, 2,
+                                                        ycm_int_bb_b0_regs,
+                                                        ycm_prty_bb_b0_regs},
+                                                       {3, 2, ycm_int_k2_regs,
+                                                        ycm_prty_k2_regs } } },
+       {"pcm", pcm_int_attn_desc, pcm_prty_attn_desc, {
+                                                       {3, 1,
+                                                        pcm_int_bb_a0_regs,
+                                                        pcm_prty_bb_a0_regs},
+                                                       {3, 1,
+                                                        pcm_int_bb_b0_regs,
+                                                        pcm_prty_bb_b0_regs},
+                                                       {3, 1, pcm_int_k2_regs,
+                                                        pcm_prty_k2_regs } } },
+       {"qm", qm_int_attn_desc, qm_prty_attn_desc, {
+                                                    {1, 4, qm_int_bb_a0_regs,
+                                                     qm_prty_bb_a0_regs},
+                                                    {1, 4, qm_int_bb_b0_regs,
+                                                     qm_prty_bb_b0_regs},
+                                                    {1, 4, qm_int_k2_regs,
+                                                     qm_prty_k2_regs } } },
+       {"tm", tm_int_attn_desc, tm_prty_attn_desc, {
+                                                    {2, 1, tm_int_bb_a0_regs,
+                                                     tm_prty_bb_a0_regs},
+                                                    {2, 1, tm_int_bb_b0_regs,
+                                                     tm_prty_bb_b0_regs},
+                                                    {2, 1, tm_int_k2_regs,
+                                                     tm_prty_k2_regs } } },
+       {"dorq", dorq_int_attn_desc, dorq_prty_attn_desc, {
+                                                          {1, 1,
+                                                   dorq_int_bb_a0_regs,
+
+                                                   dorq_prty_bb_a0_regs},
+                                                          {1, 2,
+                                                   dorq_int_bb_b0_regs,
+
+                                                   dorq_prty_bb_b0_regs},
+                                                          {1, 2,
+                                                           dorq_int_k2_regs,
+
+                                                   dorq_prty_k2_regs } } },
+       {"brb", brb_int_attn_desc, brb_prty_attn_desc, {
+                                                       {12, 2,
+                                                        brb_int_bb_a0_regs,
+                                                        brb_prty_bb_a0_regs},
+                                                       {12, 3,
+                                                        brb_int_bb_b0_regs,
+                                                        brb_prty_bb_b0_regs},
+                                               {12, 3, brb_int_k2_regs,
+                                                        brb_prty_k2_regs } } },
+       {"src", src_int_attn_desc, OSAL_NULL, {
+                                              {1, 0, src_int_bb_a0_regs,
+                                               OSAL_NULL},
+                                              {1, 0, src_int_bb_b0_regs,
+                                               OSAL_NULL},
+                                              {1, 0, src_int_k2_regs,
+                                               OSAL_NULL } } },
+       {"prs", prs_int_attn_desc, prs_prty_attn_desc, {
+                                                       {1, 3,
+                                                        prs_int_bb_a0_regs,
+                                                        prs_prty_bb_a0_regs},
+                                                       {1, 3,
+                                                        prs_int_bb_b0_regs,
+                                                        prs_prty_bb_b0_regs},
+                                                       {1, 3, prs_int_k2_regs,
+                                                        prs_prty_k2_regs } } },
+       {"tsdm", tsdm_int_attn_desc, tsdm_prty_attn_desc, {
+                                                          {1, 1,
+                                                   tsdm_int_bb_a0_regs,
+
+                                                   tsdm_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   tsdm_int_bb_b0_regs,
+
+                                                   tsdm_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                   tsdm_int_k2_regs,
+
+                                                   tsdm_prty_k2_regs } } },
+       {"msdm", msdm_int_attn_desc, msdm_prty_attn_desc, {
+                                                          {1, 1,
+                                                   msdm_int_bb_a0_regs,
+
+                                                   msdm_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   msdm_int_bb_b0_regs,
+
+                                                   msdm_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                           msdm_int_k2_regs,
+
+                                                   msdm_prty_k2_regs } } },
+       {"usdm", usdm_int_attn_desc, usdm_prty_attn_desc, {
+                                                          {1, 1,
+                                                   usdm_int_bb_a0_regs,
+
+                                                   usdm_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   usdm_int_bb_b0_regs,
+
+                                                   usdm_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                           usdm_int_k2_regs,
+
+                                                   usdm_prty_k2_regs } } },
+       {"xsdm", xsdm_int_attn_desc, xsdm_prty_attn_desc, {
+                                                          {1, 1,
+                                                   xsdm_int_bb_a0_regs,
+
+                                                   xsdm_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   xsdm_int_bb_b0_regs,
+
+                                                   xsdm_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                   xsdm_int_k2_regs,
+
+                                                   xsdm_prty_k2_regs } } },
+       {"ysdm", ysdm_int_attn_desc, ysdm_prty_attn_desc, {
+                                                          {1, 1,
+                                                   ysdm_int_bb_a0_regs,
+
+                                                   ysdm_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   ysdm_int_bb_b0_regs,
+
+                                                   ysdm_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                   ysdm_int_k2_regs,
+
+                                                   ysdm_prty_k2_regs } } },
+       {"psdm", psdm_int_attn_desc, psdm_prty_attn_desc, {
+                                                          {1, 1,
+                                                   psdm_int_bb_a0_regs,
+
+                                                   psdm_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   psdm_int_bb_b0_regs,
+
+                                                   psdm_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                   psdm_int_k2_regs,
+
+                                                   psdm_prty_k2_regs } } },
+       {"tsem", tsem_int_attn_desc, tsem_prty_attn_desc, {
+                                                          {3, 3,
+                                                   tsem_int_bb_a0_regs,
+
+                                                   tsem_prty_bb_a0_regs},
+                                                          {3, 3,
+                                                   tsem_int_bb_b0_regs,
+
+                                                   tsem_prty_bb_b0_regs},
+                                                          {3, 4,
+                                                   tsem_int_k2_regs,
+
+                                                   tsem_prty_k2_regs } } },
+       {"msem", msem_int_attn_desc, msem_prty_attn_desc, {
+                                                          {3, 2,
+                                                   msem_int_bb_a0_regs,
+
+                                                   msem_prty_bb_a0_regs},
+                                                          {3, 2,
+                                                   msem_int_bb_b0_regs,
+
+                                                   msem_prty_bb_b0_regs},
+                                                          {3, 3,
+                                                   msem_int_k2_regs,
+
+                                                   msem_prty_k2_regs } } },
+       {"usem", usem_int_attn_desc, usem_prty_attn_desc, {
+                                                          {3, 2,
+                                                   usem_int_bb_a0_regs,
+
+                                                   usem_prty_bb_a0_regs},
+                                                          {3, 2,
+                                                   usem_int_bb_b0_regs,
+
+                                                   usem_prty_bb_b0_regs},
+                                                          {3, 3,
+                                                   usem_int_k2_regs,
+
+                                                   usem_prty_k2_regs } } },
+       {"xsem", xsem_int_attn_desc, xsem_prty_attn_desc, {
+                                                          {3, 2,
+                                                   xsem_int_bb_a0_regs,
+
+                                                   xsem_prty_bb_a0_regs},
+                                                          {3, 2,
+                                                   xsem_int_bb_b0_regs,
+
+                                                   xsem_prty_bb_b0_regs},
+                                                          {3, 3,
+                                                   xsem_int_k2_regs,
+
+                                                   xsem_prty_k2_regs } } },
+       {"ysem", ysem_int_attn_desc, ysem_prty_attn_desc, {
+                                                          {3, 2,
+                                                   ysem_int_bb_a0_regs,
+
+                                                   ysem_prty_bb_a0_regs},
+                                                          {3, 2,
+                                                   ysem_int_bb_b0_regs,
+
+                                                   ysem_prty_bb_b0_regs},
+                                                          {3, 3,
+                                                   ysem_int_k2_regs,
+
+                                                   ysem_prty_k2_regs } } },
+       {"psem", psem_int_attn_desc, psem_prty_attn_desc, {
+                                                          {3, 3,
+                                                   psem_int_bb_a0_regs,
+
+                                                   psem_prty_bb_a0_regs},
+                                                          {3, 3,
+                                                   psem_int_bb_b0_regs,
+
+                                                   psem_prty_bb_b0_regs},
+                                                          {3, 4,
+                                                   psem_int_k2_regs,
+
+                                                   psem_prty_k2_regs } } },
+       {"rss", rss_int_attn_desc, rss_prty_attn_desc, {
+                                                       {1, 1,
+                                                        rss_int_bb_a0_regs,
+                                                        rss_prty_bb_a0_regs},
+                                                       {1, 1,
+                                                        rss_int_bb_b0_regs,
+                                                        rss_prty_bb_b0_regs},
+                                                       {1, 1, rss_int_k2_regs,
+                                                        rss_prty_k2_regs } } },
+       {"tmld", tmld_int_attn_desc, tmld_prty_attn_desc, {
+                                                          {1, 1,
+                                                   tmld_int_bb_a0_regs,
+
+                                                   tmld_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   tmld_int_bb_b0_regs,
+
+                                                   tmld_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                           tmld_int_k2_regs,
+
+                                                   tmld_prty_k2_regs } } },
+       {"muld", muld_int_attn_desc, muld_prty_attn_desc, {
+                                                          {1, 1,
+                                                   muld_int_bb_a0_regs,
+
+                                                   muld_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   muld_int_bb_b0_regs,
+
+                                                   muld_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                   muld_int_k2_regs,
+
+                                                   muld_prty_k2_regs } } },
+       {"yuld", yuld_int_attn_desc, yuld_prty_attn_desc, {
+                                                          {1, 1,
+                                                   yuld_int_bb_a0_regs,
+
+                                                   yuld_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   yuld_int_bb_b0_regs,
+
+                                                   yuld_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                   yuld_int_k2_regs,
+
+                                                   yuld_prty_k2_regs } } },
+       {"xyld", xyld_int_attn_desc, xyld_prty_attn_desc, {
+                                                          {1, 1,
+                                                   xyld_int_bb_a0_regs,
+
+                                                   xyld_prty_bb_a0_regs},
+                                                          {1, 1,
+                                                   xyld_int_bb_b0_regs,
+
+                                                   xyld_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                   xyld_int_k2_regs,
+
+                                                   xyld_prty_k2_regs } } },
+       {"prm", prm_int_attn_desc, prm_prty_attn_desc, {
+                                                       {1, 1,
+                                                        prm_int_bb_a0_regs,
+                                                        prm_prty_bb_a0_regs},
+                                                       {1, 2,
+                                                        prm_int_bb_b0_regs,
+                                                        prm_prty_bb_b0_regs},
+                                                       {1, 2, prm_int_k2_regs,
+                                                        prm_prty_k2_regs } } },
+       {"pbf_pb1", pbf_pb1_int_attn_desc, pbf_pb1_prty_attn_desc, {
+                                                                   {1, 0,
+
+                                                    pbf_pb1_int_bb_a0_regs,
+                                                    OSAL_NULL},
+                                                                   {1, 1,
+
+                                                    pbf_pb1_int_bb_b0_regs,
+
+                                                    pbf_pb1_prty_bb_b0_regs},
+                                                                   {1, 1,
+
+                                                    pbf_pb1_int_k2_regs,
+
+                                                    pbf_pb1_prty_k2_regs } } },
+       {"pbf_pb2", pbf_pb2_int_attn_desc, pbf_pb2_prty_attn_desc, {
+                                                                   {1, 0,
+
+                                                    pbf_pb2_int_bb_a0_regs,
+                                                    OSAL_NULL},
+                                                                   {1, 1,
+
+                                                    pbf_pb2_int_bb_b0_regs,
+
+                                                    pbf_pb2_prty_bb_b0_regs},
+                                                                   {1, 1,
+
+                                                    pbf_pb2_int_k2_regs,
+
+                                                    pbf_pb2_prty_k2_regs } } },
+       {"rpb", rpb_int_attn_desc, rpb_prty_attn_desc, {
+                                                       {1, 0,
+                                                        rpb_int_bb_a0_regs,
+                                                        OSAL_NULL},
+                                                       {1, 1,
+                                                        rpb_int_bb_b0_regs,
+                                                        rpb_prty_bb_b0_regs},
+                                                       {1, 1, rpb_int_k2_regs,
+                                                        rpb_prty_k2_regs } } },
+       {"btb", btb_int_attn_desc, btb_prty_attn_desc, {
+                                                       {11, 1,
+                                                        btb_int_bb_a0_regs,
+                                                        btb_prty_bb_a0_regs},
+                                                       {11, 2,
+                                                        btb_int_bb_b0_regs,
+                                                        btb_prty_bb_b0_regs},
+                                               {11, 2, btb_int_k2_regs,
+                                                        btb_prty_k2_regs } } },
+       {"pbf", pbf_int_attn_desc, pbf_prty_attn_desc, {
+                                                       {1, 2,
+                                                        pbf_int_bb_a0_regs,
+                                                        pbf_prty_bb_a0_regs},
+                                                       {1, 3,
+                                                        pbf_int_bb_b0_regs,
+                                                        pbf_prty_bb_b0_regs},
+                                                       {1, 3, pbf_int_k2_regs,
+                                                        pbf_prty_k2_regs } } },
+       {"rdif", rdif_int_attn_desc, rdif_prty_attn_desc, {
+                                                          {1, 0,
+                                           rdif_int_bb_a0_regs,
+                                                           OSAL_NULL},
+                                                          {1, 1,
+                                           rdif_int_bb_b0_regs,
+
+                                           rdif_prty_bb_b0_regs},
+                                                          {1, 1,
+                                                           rdif_int_k2_regs,
+
+                                           rdif_prty_k2_regs } } },
+       {"tdif", tdif_int_attn_desc, tdif_prty_attn_desc, {
+                                                          {1, 1,
+                                           tdif_int_bb_a0_regs,
+
+                                           tdif_prty_bb_a0_regs},
+                                                          {1, 2,
+                                           tdif_int_bb_b0_regs,
+
+                                           tdif_prty_bb_b0_regs},
+                                                          {1, 2,
+                                           tdif_int_k2_regs,
+
+                                           tdif_prty_k2_regs } } },
+       {"cdu", cdu_int_attn_desc, cdu_prty_attn_desc, {
+                                                       {1, 1,
+                                                        cdu_int_bb_a0_regs,
+                                                        cdu_prty_bb_a0_regs},
+                                                       {1, 1,
+                                                        cdu_int_bb_b0_regs,
+                                                        cdu_prty_bb_b0_regs},
+                                       {1, 1, cdu_int_k2_regs,
+                                                        cdu_prty_k2_regs } } },
+       {"ccfc", ccfc_int_attn_desc, ccfc_prty_attn_desc, {
+                                                          {1, 2,
+                                           ccfc_int_bb_a0_regs,
+
+                                           ccfc_prty_bb_a0_regs},
+                                                          {1, 2,
+                                           ccfc_int_bb_b0_regs,
+
+                                           ccfc_prty_bb_b0_regs},
+                                                          {1, 2,
+                                           ccfc_int_k2_regs,
+
+                                           ccfc_prty_k2_regs } } },
+       {"tcfc", tcfc_int_attn_desc, tcfc_prty_attn_desc, {
+                                                          {1, 2,
+                                           tcfc_int_bb_a0_regs,
+
+                                           tcfc_prty_bb_a0_regs},
+                                                          {1, 2,
+                                           tcfc_int_bb_b0_regs,
+
+                                           tcfc_prty_bb_b0_regs},
+                                                          {1, 2,
+                                           tcfc_int_k2_regs,
+
+                                           tcfc_prty_k2_regs } } },
+       {"igu", igu_int_attn_desc, igu_prty_attn_desc, {
+                                                       {1, 3,
+                                                        igu_int_bb_a0_regs,
+                                                        igu_prty_bb_a0_regs},
+                                                       {1, 3,
+                                                        igu_int_bb_b0_regs,
+                                                        igu_prty_bb_b0_regs},
+                                                       {1, 2, igu_int_k2_regs,
+                                                        igu_prty_k2_regs } } },
+       {"cau", cau_int_attn_desc, cau_prty_attn_desc, {
+                                                       {1, 1,
+                                                        cau_int_bb_a0_regs,
+                                                        cau_prty_bb_a0_regs},
+                                                       {1, 1,
+                                                        cau_int_bb_b0_regs,
+                                                        cau_prty_bb_b0_regs},
+                                                       {1, 1, cau_int_k2_regs,
+                                                        cau_prty_k2_regs } } },
+       {"umac", umac_int_attn_desc, OSAL_NULL, {
+                                                {0, 0, OSAL_NULL, OSAL_NULL},
+                                                {0, 0, OSAL_NULL, OSAL_NULL},
+                                                {1, 0, umac_int_k2_regs,
+                                                 OSAL_NULL } } },
+       {"xmac", OSAL_NULL, OSAL_NULL, {
+                                       {0, 0, OSAL_NULL, OSAL_NULL},
+                                       {0, 0, OSAL_NULL, OSAL_NULL},
+                                       {0, 0, OSAL_NULL, OSAL_NULL } } },
+       {"dbg", dbg_int_attn_desc, dbg_prty_attn_desc, {
+                                                       {1, 1,
+                                                        dbg_int_bb_a0_regs,
+                                                        dbg_prty_bb_a0_regs},
+                                                       {1, 1,
+                                                        dbg_int_bb_b0_regs,
+                                                        dbg_prty_bb_b0_regs},
+                                                       {1, 1, dbg_int_k2_regs,
+                                                        dbg_prty_k2_regs } } },
+       {"nig", nig_int_attn_desc, nig_prty_attn_desc, {
+                                                       {6, 4,
+                                                        nig_int_bb_a0_regs,
+                                                        nig_prty_bb_a0_regs},
+                                                       {6, 5,
+                                                        nig_int_bb_b0_regs,
+                                                        nig_prty_bb_b0_regs},
+                                       {10, 5, nig_int_k2_regs,
+                                                        nig_prty_k2_regs } } },
+       {"wol", wol_int_attn_desc, wol_prty_attn_desc, {
+                                                       {0, 0, OSAL_NULL,
+                                                        OSAL_NULL},
+                                                       {0, 0, OSAL_NULL,
+                                                        OSAL_NULL},
+                                                       {1, 1, wol_int_k2_regs,
+                                                        wol_prty_k2_regs } } },
+       {"bmbn", bmbn_int_attn_desc, OSAL_NULL, {
+                                                {0, 0, OSAL_NULL, OSAL_NULL},
+                                                {0, 0, OSAL_NULL, OSAL_NULL},
+                                                {1, 0, bmbn_int_k2_regs,
+                                                 OSAL_NULL } } },
+       {"ipc", ipc_int_attn_desc, ipc_prty_attn_desc, {
+                                                       {1, 1,
+                                                        ipc_int_bb_a0_regs,
+                                                        ipc_prty_bb_a0_regs},
+                                                       {1, 1,
+                                                        ipc_int_bb_b0_regs,
+                                                        ipc_prty_bb_b0_regs},
+                                                       {1, 0, ipc_int_k2_regs,
+                                                        OSAL_NULL } } },
+       {"nwm", nwm_int_attn_desc, nwm_prty_attn_desc, {
+                                                       {0, 0, OSAL_NULL,
+                                                        OSAL_NULL},
+                                                       {0, 0, OSAL_NULL,
+                                                        OSAL_NULL},
+                                                       {1, 3, nwm_int_k2_regs,
+                                                        nwm_prty_k2_regs } } },
+       {"nws", nws_int_attn_desc, nws_prty_attn_desc, {
+                                                       {0, 0, OSAL_NULL,
+                                                        OSAL_NULL},
+                                                       {0, 0, OSAL_NULL,
+                                                        OSAL_NULL},
+                                                       {4, 1, nws_int_k2_regs,
+                                                        nws_prty_k2_regs } } },
+       {"ms", ms_int_attn_desc, OSAL_NULL, {
+                                            {0, 0, OSAL_NULL, OSAL_NULL},
+                                            {0, 0, OSAL_NULL, OSAL_NULL},
+                                            {1, 0, ms_int_k2_regs,
+                                             OSAL_NULL } } },
+       {"phy_pcie", OSAL_NULL, OSAL_NULL, {
+                                           {0, 0, OSAL_NULL, OSAL_NULL},
+                                           {0, 0, OSAL_NULL, OSAL_NULL},
+                                           {0, 0, OSAL_NULL, OSAL_NULL } } },
+       {"misc_aeu", OSAL_NULL, OSAL_NULL, {
+                                           {0, 0, OSAL_NULL, OSAL_NULL},
+                                           {0, 0, OSAL_NULL, OSAL_NULL},
+                                           {0, 0, OSAL_NULL, OSAL_NULL } } },
+       {"bar0_map", OSAL_NULL, OSAL_NULL, {
+                                           {0, 0, OSAL_NULL, OSAL_NULL},
+                                           {0, 0, OSAL_NULL, OSAL_NULL},
+                                           {0, 0, OSAL_NULL, OSAL_NULL } } },
+};
+
+#define NUM_INT_REGS 423
+#define NUM_PRTY_REGS 378
+
+#endif /* __PREVENT_INT_ATTN__ */
+
+#endif /* __ATTN_VALUES_H__ */
diff --git a/drivers/net/qede/ecore/ecore_chain.h b/drivers/net/qede/ecore/ecore_chain.h
new file mode 100644
index 0000000..8c8e8b4
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_chain.h
@@ -0,0 +1,724 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_CHAIN_H__
+#define __ECORE_CHAIN_H__
+
+#include <assert.h>            /* @DPDK */
+
+#include "common_hsi.h"
+#include "ecore_utils.h"
+
+enum ecore_chain_mode {
+       /* Each Page contains a next pointer at its end */
+       ECORE_CHAIN_MODE_NEXT_PTR,
+
+       /* Chain is a single page; a next pointer is not required */
+       ECORE_CHAIN_MODE_SINGLE,
+
+       /* Page pointers are located in a side list */
+       ECORE_CHAIN_MODE_PBL,
+};
+
+enum ecore_chain_use_mode {
+       ECORE_CHAIN_USE_TO_PRODUCE,     /* Chain starts empty */
+       ECORE_CHAIN_USE_TO_CONSUME,     /* Chain starts full */
+       ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,     /* Chain starts empty */
+};
+
+enum ecore_chain_cnt_type {
+       /* The chain's size/prod/cons are kept in 16-bit variables */
+       ECORE_CHAIN_CNT_TYPE_U16,
+
+       /* The chain's size/prod/cons are kept in 32-bit variables  */
+       ECORE_CHAIN_CNT_TYPE_U32,
+};
+
+struct ecore_chain_next {
+       struct regpair next_phys;
+       void *next_virt;
+};
+
+struct ecore_chain_pbl_u16 {
+       u16 prod_page_idx;
+       u16 cons_page_idx;
+};
+
+struct ecore_chain_pbl_u32 {
+       u32 prod_page_idx;
+       u32 cons_page_idx;
+};
+
+struct ecore_chain_pbl {
+       /* Base address of a pre-allocated buffer for pbl */
+       dma_addr_t p_phys_table;
+       void *p_virt_table;
+
+       /* Table for keeping the virtual addresses of the chain pages,
+        * respectively to the physical addresses in the pbl table.
+        */
+       void **pp_virt_addr_tbl;
+
+       /* Index to current used page by producer/consumer */
+       union {
+               struct ecore_chain_pbl_u16 pbl16;
+               struct ecore_chain_pbl_u32 pbl32;
+       } u;
+};
+
+struct ecore_chain_u16 {
+       /* Cyclic index of next element to produce/consume */
+       u16 prod_idx;
+       u16 cons_idx;
+};
+
+struct ecore_chain_u32 {
+       /* Cyclic index of next element to produce/consume */
+       u32 prod_idx;
+       u32 cons_idx;
+};
+
+struct ecore_chain {
+       /* Address of first page of the chain */
+       void *p_virt_addr;
+       dma_addr_t p_phys_addr;
+
+       /* Point to next element to produce/consume */
+       void *p_prod_elem;
+       void *p_cons_elem;
+
+       enum ecore_chain_mode mode;
+       enum ecore_chain_use_mode intended_use;
+
+       enum ecore_chain_cnt_type cnt_type;
+       union {
+               struct ecore_chain_u16 chain16;
+               struct ecore_chain_u32 chain32;
+       } u;
+
+       u32 page_cnt;
+
+       /* Number of elements - capacity is for usable elements only,
+        * while size will contain total number of elements [for entire chain].
+        */
+       u32 capacity;
+       u32 size;
+
+       /* Elements information for fast calculations */
+       u16 elem_per_page;
+       u16 elem_per_page_mask;
+       u16 elem_unusable;
+       u16 usable_per_page;
+       u16 elem_size;
+       u16 next_page_mask;
+
+       struct ecore_chain_pbl pbl;
+};
+
+#define ECORE_CHAIN_PBL_ENTRY_SIZE     (8)
+#define ECORE_CHAIN_PAGE_SIZE          (0x1000)
+#define ELEMS_PER_PAGE(elem_size)      (ECORE_CHAIN_PAGE_SIZE/(elem_size))
+
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)               \
+         ((mode == ECORE_CHAIN_MODE_NEXT_PTR) ?                \
+          (1 + ((sizeof(struct ecore_chain_next)-1) /          \
+          (elem_size))) : 0)
+
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode)                 \
+       ((u32) (ELEMS_PER_PAGE(elem_size) -                     \
+       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+
+#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode)                \
+       DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+
+#define is_chain_u16(p)        ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p)        ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32)
+
+/* Accessors */
+static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain)
+{
+       OSAL_ASSERT(is_chain_u16(p_chain));     /* valid only for 16-bit chains */
+       return p_chain->u.chain16.prod_idx;
+}
+
+/* 32-bit counterpart of ecore_chain_get_prod_idx(). */
+static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain)
+{
+       OSAL_ASSERT(is_chain_u32(p_chain));     /* valid only for 32-bit chains */
+       return p_chain->u.chain32.prod_idx;
+}
+
+static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain)
+{
+       OSAL_ASSERT(is_chain_u16(p_chain));     /* valid only for 16-bit chains */
+       return p_chain->u.chain16.cons_idx;
+}
+
+/* 32-bit counterpart of ecore_chain_get_cons_idx(). */
+static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
+{
+       OSAL_ASSERT(is_chain_u32(p_chain));     /* valid only for 32-bit chains */
+       return p_chain->u.chain32.cons_idx;
+}
+
+/* FIXME:
+ * Should create OSALs for the below definitions.
+ * For Linux, replace them with the existing U16_MAX and U32_MAX, and handle
+ * kernel versions that lack them.
+ */
+#define ECORE_U16_MAX  ((u16)~0U)
+#define ECORE_U32_MAX  ((u32)~0U)
+
+static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
+{
+       u16 used;
+
+       OSAL_ASSERT(is_chain_u16(p_chain));
+
+       used = (u16) (((u32) ECORE_U16_MAX + 1 +
+                      (u32) (p_chain->u.chain16.prod_idx)) -
+                     (u32) p_chain->u.chain16.cons_idx);    /* prod - cons, mod 2^16 */
+       if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+               used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
+                   p_chain->u.chain16.cons_idx / p_chain->elem_per_page;    /* don't count next-ptr elems */
+
+       return (u16) (p_chain->capacity - used);
+}
+
+static OSAL_INLINE u32
+ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
+{
+       u32 used;
+
+       OSAL_ASSERT(is_chain_u32(p_chain));
+
+       used = (u32) (((u64) ECORE_U32_MAX + 1 +
+                      (u64) (p_chain->u.chain32.prod_idx)) -
+                     (u64) p_chain->u.chain32.cons_idx);    /* prod - cons, mod 2^32 */
+       if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+               used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
+                   p_chain->u.chain32.cons_idx / p_chain->elem_per_page;    /* don't count next-ptr elems */
+
+       return p_chain->capacity - used;
+}
+
+static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain)
+{
+       if (is_chain_u16(p_chain))
+               return (ecore_chain_get_elem_left(p_chain) ==
+                       p_chain->capacity);
+       else
+               return (ecore_chain_get_elem_left_u32(p_chain) ==
+                       p_chain->capacity);
+}
+
+static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain)
+{
+       if (is_chain_u16(p_chain))
+               return (ecore_chain_get_elem_left(p_chain) == 0);
+       else
+               return (ecore_chain_get_elem_left_u32(p_chain) == 0);
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain)
+{
+       return p_chain->elem_per_page;
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
+{
+       return p_chain->usable_per_page;
+}
+
+static OSAL_INLINE
+u16 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
+{
+       return p_chain->elem_unusable;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain)
+{
+       return p_chain->size;
+}
+
+static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
+{
+       return p_chain->page_cnt;
+}
+
+static OSAL_INLINE
+dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
+{
+       return p_chain->pbl.p_phys_table;
+}
+
+/**
+ * @brief ecore_chain_advance_page -
+ *
+ * Advance the next element across pages for a linked chain
+ *
+ * @param p_chain
+ * @param p_next_elem
+ * @param idx_to_inc
+ * @param page_to_inc
+ */
+static OSAL_INLINE void
+ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
+                        void *idx_to_inc, void *page_to_inc)
+{
+       struct ecore_chain_next *p_next = OSAL_NULL;
+       u32 page_index = 0;
+
+       switch (p_chain->mode) {
+       case ECORE_CHAIN_MODE_NEXT_PTR:
+               p_next = (struct ecore_chain_next *)(*p_next_elem);
+               *p_next_elem = p_next->next_virt;       /* follow embedded next ptr */
+               if (is_chain_u16(p_chain))
+                       *(u16 *) idx_to_inc += p_chain->elem_unusable;  /* skip next-ptr elems */
+               else
+                       *(u32 *) idx_to_inc += p_chain->elem_unusable;
+               break;
+       case ECORE_CHAIN_MODE_SINGLE:
+               *p_next_elem = p_chain->p_virt_addr;    /* wrap to start of single page */
+               break;
+       case ECORE_CHAIN_MODE_PBL:
+               if (is_chain_u16(p_chain)) {
+                       if (++(*(u16 *) page_to_inc) == p_chain->page_cnt)      /* cyclic page idx */
+                               *(u16 *) page_to_inc = 0;
+                       page_index = *(u16 *) page_to_inc;
+               } else {
+                       if (++(*(u32 *) page_to_inc) == p_chain->page_cnt)
+                               *(u32 *) page_to_inc = 0;
+                       page_index = *(u32 *) page_to_inc;
+               }
+               *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];       /* virt addr of next page */
+       }
+}
+
+#define is_unusable_idx(p, idx)                        \
+       (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_idx_u32(p, idx)            \
+       (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
+
+#define is_unusable_next_idx(p, idx)           \
+       ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
+       (p)->usable_per_page)
+
+#define is_unusable_next_idx_u32(p, idx)       \
+       ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) \
+       == (p)->usable_per_page)
+
+#define test_and_skip(p, idx)                                          \
+       do {                                                            \
+               if (is_chain_u16(p)) {                                  \
+                       if (is_unusable_idx(p, idx))                    \
+                               (p)->u.chain16.idx += (p)->elem_unusable; \
+               } else {                                                \
+                       if (is_unusable_idx_u32(p, idx))                \
+                               (p)->u.chain32.idx += (p)->elem_unusable; \
+               }                                                       \
+       } while (0)
+
+/**
+ * @brief ecore_chain_return_multi_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previous produced elements are now consumed.
+ *
+ * @param p_chain
+ * @param num
+ */
+static OSAL_INLINE
+void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num)
+{
+       if (is_chain_u16(p_chain))
+               p_chain->u.chain16.cons_idx += (u16) num;
+       else
+               p_chain->u.chain32.cons_idx += num;
+       test_and_skip(p_chain, cons_idx);       /* hop over unusable elems at page end */
+}
+
+/**
+ * @brief ecore_chain_return_produced -
+ *
+ * A chain in which the driver "Produces" elements should use this API
+ * to indicate previous produced elements are now consumed.
+ *
+ * @param p_chain
+ */
+/* Single-element variant of ecore_chain_return_multi_produced(). */
+static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain)
+{
+       if (is_chain_u16(p_chain))
+               p_chain->u.chain16.cons_idx++;
+       else
+               p_chain->u.chain32.cons_idx++;
+       test_and_skip(p_chain, cons_idx);       /* hop over unusable elems at page end */
+}
+
+/**
+ * @brief ecore_chain_produce -
+ *
+ * A chain in which the driver "Produces" elements should use this to get
+ * a pointer to the next element which can be "Produced". It's driver
+ * responsibility to validate that the chain has room for new element.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to next element
+ */
+static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
+{
+       void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx;
+
+       if (is_chain_u16(p_chain)) {
+               if ((p_chain->u.chain16.prod_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {  /* last usable elem on page */
+                       p_prod_idx = &p_chain->u.chain16.prod_idx;
+                       p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+                       ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+                                                p_prod_idx, p_prod_page_idx);
+               }
+               p_chain->u.chain16.prod_idx++;
+       } else {
+               if ((p_chain->u.chain32.prod_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                       p_prod_idx = &p_chain->u.chain32.prod_idx;
+                       p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+                       ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
+                                                p_prod_idx, p_prod_page_idx);
+               }
+               p_chain->u.chain32.prod_idx++;
+       }
+
+       p_ret = p_chain->p_prod_elem;   /* hand out current slot, then advance */
+       p_chain->p_prod_elem = (void *)(((u8 *) p_chain->p_prod_elem) +
+                                       p_chain->elem_size);
+
+       return p_ret;
+}
+
+/**
+ * @brief ecore_chain_get_capacity -
+ *
+ * Get the maximum number of BDs in chain
+ *
+ * @param p_chain
+ *
+ *
+ * @return maximum number of usable BDs (the chain capacity)
+ */
+static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain)
+{
+       return p_chain->capacity;
+}
+
+/**
+ * @brief ecore_chain_recycle_consumed -
+ *
+ * Returns an element which was previously consumed;
+ * Increments producers so they could be written to FW.
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE
+void ecore_chain_recycle_consumed(struct ecore_chain *p_chain)
+{
+       test_and_skip(p_chain, prod_idx);       /* skip unusable elems before producing */
+       if (is_chain_u16(p_chain))
+               p_chain->u.chain16.prod_idx++;
+       else
+               p_chain->u.chain32.prod_idx++;
+}
+
+/**
+ * @brief ecore_chain_consume -
+ *
+ * A Chain in which the driver utilizes data written by a different source
+ * (i.e., FW) should use this to access passed buffers.
+ *
+ * @param p_chain
+ *
+ * @return void*, a pointer to the next buffer written
+ */
+static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
+{
+       void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx;
+
+       if (is_chain_u16(p_chain)) {
+               if ((p_chain->u.chain16.cons_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {  /* last usable elem on page */
+                       p_cons_idx = &p_chain->u.chain16.cons_idx;
+                       p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+                       ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+                                                p_cons_idx, p_cons_page_idx);
+               }
+               p_chain->u.chain16.cons_idx++;
+       } else {
+               if ((p_chain->u.chain32.cons_idx &
+                    p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
+                       p_cons_idx = &p_chain->u.chain32.cons_idx;
+                       p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
+                       ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
+                                                p_cons_idx, p_cons_page_idx);
+               }
+               p_chain->u.chain32.cons_idx++;
+       }
+
+       p_ret = p_chain->p_cons_elem;   /* hand out current slot, then advance */
+       p_chain->p_cons_elem = (void *)(((u8 *) p_chain->p_cons_elem) +
+                                       p_chain->elem_size);
+
+       return p_ret;
+}
+
+/**
+ * @brief ecore_chain_reset -
+ *
+ * Resets the chain to its start state
+ *
+ * @param p_chain pointer to a previously allocated chain
+ */
+static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
+{
+       u32 i;
+
+       if (is_chain_u16(p_chain)) {
+               p_chain->u.chain16.prod_idx = 0;
+               p_chain->u.chain16.cons_idx = 0;
+       } else {
+               p_chain->u.chain32.prod_idx = 0;
+               p_chain->u.chain32.cons_idx = 0;
+       }
+       p_chain->p_cons_elem = p_chain->p_virt_addr;
+       p_chain->p_prod_elem = p_chain->p_virt_addr;
+
+       if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+               /* Use (page_cnt - 1) as a reset value for the prod/cons page's
+                * indices, to avoid unnecessary page advancing on the first
+                * call to ecore_chain_produce/consume. Instead, the indices
+                * will be advanced to page_cnt and then will be wrapped to 0.
+                */
+               u32 reset_val = p_chain->page_cnt - 1;
+
+               if (is_chain_u16(p_chain)) {
+                       p_chain->pbl.u.pbl16.prod_page_idx = (u16) reset_val;
+                       p_chain->pbl.u.pbl16.cons_page_idx = (u16) reset_val;
+               } else {
+                       p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
+                       p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+               }
+       }
+
+       switch (p_chain->intended_use) {
+       case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
+       case ECORE_CHAIN_USE_TO_PRODUCE:
+               /* Do nothing */
+               break;
+
+       case ECORE_CHAIN_USE_TO_CONSUME:
+               /* produce empty elements */
+               for (i = 0; i < p_chain->capacity; i++)
+                       ecore_chain_recycle_consumed(p_chain);
+               break;
+       }
+}
+
+/**
+ * @brief ecore_chain_init_params -
+ *
+ * Initializes a basic chain struct
+ *
+ * @param p_chain
+ * @param page_cnt     number of pages in the allocated buffer
+ * @param elem_size    size of each element in the chain
+ * @param intended_use
+ * @param mode
+ * @param cnt_type
+ */
+static OSAL_INLINE void
+ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
+                       enum ecore_chain_use_mode intended_use,
+                       enum ecore_chain_mode mode,
+                       enum ecore_chain_cnt_type cnt_type)
+{
+       /* chain fixed parameters */
+       p_chain->p_virt_addr = OSAL_NULL;
+       p_chain->p_phys_addr = 0;
+       p_chain->elem_size = elem_size;
+       p_chain->intended_use = intended_use;
+       p_chain->mode = mode;
+       p_chain->cnt_type = cnt_type;
+
+       /* NOTE(review): the mask math below assumes elem_per_page is a power
+        * of two - TODO confirm ELEMS_PER_PAGE() guarantees this.
+        */
+       p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
+       p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
+       p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
+       p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
+       p_chain->next_page_mask = (p_chain->usable_per_page &
+                                  p_chain->elem_per_page_mask);
+
+       /* capacity counts usable elements only; size counts all of them */
+       p_chain->page_cnt = page_cnt;
+       p_chain->capacity = p_chain->usable_per_page * page_cnt;
+       p_chain->size = p_chain->elem_per_page * page_cnt;
+
+       /* PBL tables are attached later via ecore_chain_init_pbl_mem() */
+       p_chain->pbl.p_phys_table = 0;
+       p_chain->pbl.p_virt_table = OSAL_NULL;
+       p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
+}
+
+/**
+ * @brief ecore_chain_init_mem -
+ *
+ * Initializes a basic chain struct with its chain buffers
+ *
+ * @param p_chain      chain previously set up via ecore_chain_init_params
+ * @param p_virt_addr  virtual address of allocated buffer's beginning
+ * @param p_phys_addr  physical address of allocated buffer's beginning
+ *
+ */
+static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain,
+                                            void *p_virt_addr,
+                                            dma_addr_t p_phys_addr)
+{
+       p_chain->p_virt_addr = p_virt_addr;
+       p_chain->p_phys_addr = p_phys_addr;
+}
+
+/**
+ * @brief ecore_chain_init_pbl_mem -
+ *
+ * Initializes a basic chain struct with its pbl buffers
+ *
+ * @param p_chain
+ * @param p_virt_pbl   pointer to a pre allocated side table which will hold
+ *                      virtual page addresses.
+ * @param p_phys_pbl   pointer to a pre-allocated side table which will hold
+ *                      physical page addresses.
+ * @param pp_virt_addr_tbl
+ *                      pointer to a pre-allocated side table which will hold
+ *                      the virtual addresses of the chain pages.
+ *
+ */
+static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
+                                                void *p_virt_pbl,
+                                                dma_addr_t p_phys_pbl,
+                                                void **pp_virt_addr_tbl)
+{
+       /* Only stores the table pointers; the caller owns the allocations */
+       p_chain->pbl.p_phys_table = p_phys_pbl;
+       p_chain->pbl.p_virt_table = p_virt_pbl;
+       p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
+}
+
+/**
+ * @brief ecore_chain_init_next_ptr_elem -
+ *
+ * Initializes a next pointer element
+ *
+ * @param p_chain
+ * @param p_virt_curr  virtual address of a chain page of which the next
+ *                      pointer element is initialized
+ * @param p_virt_next  virtual address of the next chain page
+ * @param p_phys_next  physical address of the next chain page
+ *
+ */
+static OSAL_INLINE void
+ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr,
+                              void *p_virt_next, dma_addr_t p_phys_next)
+{
+       struct ecore_chain_next *p_next;
+       u32 size;
+
+       /* The next-pointer element sits right after the usable elements */
+       size = p_chain->elem_size * p_chain->usable_per_page;
+       p_next = (struct ecore_chain_next *)((u8 *) p_virt_curr + size);
+
+       DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
+
+       p_next->next_virt = p_virt_next;
+}
+
+/**
+ * @brief ecore_chain_get_last_elem -
+ *
+ * Returns a pointer to the last element of the chain
+ *
+ * @param p_chain
+ *
+ * @return void* last usable element, or OSAL_NULL if the chain has no memory
+ */
+static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
+{
+       struct ecore_chain_next *p_next = OSAL_NULL;
+       void *p_virt_addr = OSAL_NULL;
+       u32 size, last_page_idx;
+
+       if (!p_chain->p_virt_addr)
+               goto out;
+
+       /* NOTE(review): switch has no default case - assumes mode is always
+        * one of the three enumerated values.
+        */
+       switch (p_chain->mode) {
+       case ECORE_CHAIN_MODE_NEXT_PTR:
+               /* Walk the next-pointers until they wrap back to the head */
+               size = p_chain->elem_size * p_chain->usable_per_page;
+               p_virt_addr = p_chain->p_virt_addr;
+               p_next = (struct ecore_chain_next *)((u8 *) p_virt_addr + size);
+               while (p_next->next_virt != p_chain->p_virt_addr) {
+                       p_virt_addr = p_next->next_virt;
+                       p_next =
+                           (struct ecore_chain_next *)((u8 *) p_virt_addr +
+                                                       size);
+               }
+               break;
+       case ECORE_CHAIN_MODE_SINGLE:
+               p_virt_addr = p_chain->p_virt_addr;
+               break;
+       case ECORE_CHAIN_MODE_PBL:
+               last_page_idx = p_chain->page_cnt - 1;
+               p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+               break;
+       }
+       /* p_virt_addr points at this stage to the last page of the chain */
+       size = p_chain->elem_size * (p_chain->usable_per_page - 1);
+       p_virt_addr = ((u8 *) p_virt_addr + size);
+out:
+       return p_virt_addr;
+}
+
+/**
+ * @brief ecore_chain_set_prod - sets the prod to the given value
+ *
+ * @param p_chain
+ * @param prod_idx     new producer index (truncated to u16 for u16 chains)
+ * @param p_prod_elem  element pointer matching prod_idx
+ */
+static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
+                                            u32 prod_idx, void *p_prod_elem)
+{
+       if (is_chain_u16(p_chain))
+               p_chain->u.chain16.prod_idx = (u16) prod_idx;
+       else
+               p_chain->u.chain32.prod_idx = prod_idx;
+       p_chain->p_prod_elem = p_prod_elem;
+}
+
+/**
+ * @brief ecore_chain_pbl_zero_mem - set chain memory to 0
+ *
+ * No-op for non-PBL chains.
+ *
+ * @param p_chain
+ */
+static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
+{
+       u32 i, page_cnt;
+
+       if (p_chain->mode != ECORE_CHAIN_MODE_PBL)
+               return;
+
+       page_cnt = ecore_chain_get_page_cnt(p_chain);
+
+       /* Zero every chain page through its virtual-address side table */
+       for (i = 0; i < page_cnt; i++)
+               OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i],
+                             ECORE_CHAIN_PAGE_SIZE);
+}
+
+#endif /* __ECORE_CHAIN_H__ */
diff --git a/drivers/net/qede/ecore/ecore_cxt.c b/drivers/net/qede/ecore/ecore_cxt.c
new file mode 100644
index 0000000..261be09
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_cxt.c
@@ -0,0 +1,2164 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_rt_defs.h"
+#include "ecore_status.h"
+#include "ecore.h"
+#include "ecore_init_ops.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_hw.h"
+#include "ecore_dev_api.h"
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES         PROTOCOLID_COMMON
+#define NUM_TASK_TYPES         2
+#define NUM_TASK_PF_SEGMENTS   4
+#define NUM_TASK_VF_SEGMENTS   1
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT 4
+#define DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT)
+
+/* Searcher constants */
+#define SRC_MIN_NUM_ELEMS 256
+
+/* Timers constants */
+#define TM_SHIFT       7
+#define TM_ALIGN       (1 << TM_SHIFT)
+#define TM_ELEM_SIZE   4
+
+/* ILT constants */
+/* If for some reason, HW P size is modified to be less than 32K,
+ * special handling needs to be made for CDU initialization
+ */
+#ifdef CONFIG_ECORE_ROCE
+/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. Can
+ * be optimized with resource management scheme
+ */
+#define ILT_DEFAULT_HW_P_SIZE  4
+#else
+#define ILT_DEFAULT_HW_P_SIZE  3
+#endif
+
+/* hw_p_size is an exponent over a 4K base page */
+#define ILT_PAGE_IN_BYTES(hw_p_size)   (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg)          PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK                0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_SHIFT       0
+#define ILT_ENTRY_VALID_MASK           0x1ULL
+#define ILT_ENTRY_VALID_SHIFT          52
+#define ILT_ENTRY_IN_REGS              2
+#define ILT_REG_SIZE_IN_BYTES          4
+
+/* connection context union - sized as the largest protocol context */
+union conn_context {
+       struct core_conn_context core_ctx;
+       struct eth_conn_context eth_ctx;
+#ifdef CONFIG_ECORE_ROCE
+       struct roce_conn_context roce_ctx;
+#endif
+};
+
+/* TYPE-0 task context - iSCSI, FCOE */
+union type0_task_context {
+};
+
+/* TYPE-1 task context - ROCE */
+union type1_task_context {
+#ifdef CONFIG_ECORE_ROCE
+       struct roce_task_context roce_ctx;
+#endif
+};
+
+/* Searcher table entry: opaque payload plus a 64-bit next-entry link */
+struct src_ent {
+       u8 opaque[56];
+       u64 next;
+};
+
+#define CDUT_SEG_ALIGNMET 3    /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+
+#define CONN_CXT_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
+
+#define TYPE1_TASK_CXT_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(union type1_task_context, p_hwfn)
+
+/* PF per protocol configuration object */
+#define TASK_SEGMENTS   (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+/* Per-segment TID accounting for one protocol */
+struct ecore_tid_seg {
+       u32 count;              /* number of TIDs in this segment */
+       u8 type;                /* task type index (0/1) */
+       bool has_fl_mem;        /* segment has forced-load (init) memory */
+};
+
+struct ecore_conn_type_cfg {
+       u32 cid_count;          /* PF CIDs for this protocol */
+       u32 cid_start;
+       u32 cids_per_vf;        /* CIDs allotted to each VF */
+       struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT Client configuration,
+ * Per connection type (protocol) resources (cids, tis, vf cids etc.)
+ * 1 - for connection context (CDUC) and for each task context we need two
+ * values, for regular task context and for force load memory
+ */
+#define ILT_CLI_PF_BLOCKS      (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS      (1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK               (0)
+#define CDUT_SEG_BLK(n)                (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X)  (1 + (n) + NUM_TASK_##X##_SEGMENTS)
+
+/* HW blocks that are clients of the ILT */
+enum ilt_clients {
+       ILT_CLI_CDUC,
+       ILT_CLI_CDUT,
+       ILT_CLI_QM,
+       ILT_CLI_TM,
+       ILT_CLI_SRC,
+       ILT_CLI_MAX
+};
+
+/* runtime register offset and the value to program into it */
+struct ilt_cfg_pair {
+       u32 reg;
+       u32 val;
+};
+
+struct ecore_ilt_cli_blk {
+       u32 total_size;         /* 0 means not active */
+       u32 real_size_in_page;
+       u32 start_line;
+       u32 dynamic_line_cnt;
+};
+
+struct ecore_ilt_client_cfg {
+       bool active;
+
+       /* ILT boundaries */
+       struct ilt_cfg_pair first;
+       struct ilt_cfg_pair last;
+       struct ilt_cfg_pair p_size;
+
+       /* ILT client blocks for PF */
+       struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+       u32 pf_total_lines;
+
+       /* ILT client blocks for VFs */
+       struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+       u32 vf_total_lines;
+};
+
+/* Per Path -
+ *      ILT shadow table
+ *      Protocol acquired CID lists
+ *      PF start line in ILT
+ */
+struct ecore_dma_mem {
+       dma_addr_t p_phys;
+       void *p_virt;
+       osal_size_t size;
+};
+
+#define MAP_WORD_SIZE          sizeof(unsigned long)
+#define BITS_PER_MAP_WORD      (MAP_WORD_SIZE * 8)
+
+/* bitmap of acquired CIDs for one protocol */
+struct ecore_cid_acquired_map {
+       u32 start_cid;
+       u32 max_count;
+       unsigned long *cid_map;
+};
+
+struct ecore_cxt_mngr {
+       /* Per protocol configuration */
+       struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+       /* computed ILT structure */
+       struct ecore_ilt_client_cfg clients[ILT_CLI_MAX];
+
+       /* Task type sizes */
+       u32 task_type_size[NUM_TASK_TYPES];
+
+       /* total number of VFs for this hwfn -
+        * ALL VFs are symmetric in terms of HW resources
+        */
+       u32 vf_count;
+
+       /* Acquired CIDs */
+       struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+       /* ILT  shadow table */
+       struct ecore_dma_mem *ilt_shadow;
+       u32 pf_start_line;
+
+       /* SRC T2 */
+       struct ecore_dma_mem *t2;
+       u32 t2_num_pages;
+       u64 first_free;
+       u64 last_free;
+};
+
+/* check if resources/configuration is required according to protocol type */
+static OSAL_INLINE bool src_proto(enum protocol_type type)
+{
+       /* Protocols that consume Searcher (SRC) resources */
+       return type == PROTOCOLID_ISCSI ||
+           type == PROTOCOLID_TOE || type == PROTOCOLID_ROCE;
+}
+
+static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
+{
+       /* Protocols whose CIDs need Timers (TM) resources - currently the
+        * same set as src_proto(), kept separate as the sets may diverge
+        */
+       return type == PROTOCOLID_ISCSI ||
+           type == PROTOCOLID_TOE || type == PROTOCOLID_ROCE;
+}
+
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct ecore_cdu_iids {
+       u32 pf_cids;
+       u32 per_vf_cids;
+};
+
+/* Sum PF and per-VF CID counts over all connection types.
+ * Note: accumulates into *iids - caller must zero it first.
+ */
+static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
+                              struct ecore_cdu_iids *iids)
+{
+       u32 type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+               iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+       }
+}
+
+/* counts the iids for the Searcher block configuration */
+struct ecore_src_iids {
+       u32 pf_cids;
+       u32 per_vf_cids;
+};
+
+/* Like ecore_cxt_cdu_iids() but restricted to searcher protocols.
+ * Also accumulates - caller must zero *iids first.
+ */
+static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
+                                          struct ecore_src_iids *iids)
+{
+       u32 i;
+
+       for (i = 0; i < MAX_CONN_TYPES; i++) {
+               if (!src_proto(i))
+                       continue;
+
+               iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
+               iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
+       }
+}
+
+/* counts the iids for the Timers block configuration */
+struct ecore_tm_iids {
+       u32 pf_cids;
+       u32 pf_tids[NUM_TASK_PF_SEGMENTS];      /* per segment */
+       u32 pf_tids_total;
+       u32 per_vf_cids;
+       u32 per_vf_tids;
+};
+
+/* Count and TM_ALIGN-round the CID/TID totals needed by the Timers block.
+ * NOTE(review): pf_tids[] and per_vf_tids are only rounded here, never
+ * incremented - presumably no task-owning protocol is counted in this
+ * version; confirm if TID-bearing protocols are added.
+ */
+static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+                                         struct ecore_tm_iids *iids)
+{
+       u32 i, j;
+
+       for (i = 0; i < MAX_CONN_TYPES; i++) {
+               struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+
+               if (tm_cid_proto(i)) {
+                       iids->pf_cids += p_cfg->cid_count;
+                       iids->per_vf_cids += p_cfg->cids_per_vf;
+               }
+
+       }
+
+       iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
+       iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
+       iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);
+
+       for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
+               iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN);
+               iids->pf_tids_total += iids->pf_tids[j];
+       }
+}
+
+/* Accumulate into *iids the CID/TID totals the QM needs: PF CIDs/TIDs per
+ * connection type plus the symmetric per-VF counts scaled by vf_count.
+ */
+void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_tid_seg *segs;
+       u32 vf_cids = 0, type, j;
+       u32 vf_tids = 0;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               iids->cids += p_mngr->conn_cfg[type].cid_count;
+               vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+
+               segs = p_mngr->conn_cfg[type].tid_seg;
+               /* for each segment there is at most one
+                * protocol for which count is not 0.
+                */
+               for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+                       iids->tids += segs[j].count;
+
+               /* The last array element is for the VFs. As for PF
+                * segments there can be only one protocol for
+                * which this value is not 0.
+                */
+               vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+       }
+
+       iids->vf_cids += vf_cids * p_mngr->vf_count;
+       iids->tids += vf_tids * p_mngr->vf_count;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                  "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
+                  iids->cids, iids->vf_cids, iids->tids, vf_tids);
+}
+
+/* Return the single TID segment config with a non-zero count for @seg,
+ * or OSAL_NULL if no protocol uses this segment.
+ */
+static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
+                                                   u32 seg)
+{
+       struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
+       u32 i;
+
+       /* Find the protocol with tid count > 0 for this segment.
+          Note: there can only be one and this is already validated.
+        */
+       for (i = 0; i < MAX_CONN_TYPES; i++) {
+               if (p_cfg->conn_cfg[i].tid_seg[seg].count)
+                       return &p_cfg->conn_cfg[i].tid_seg[seg];
+       }
+       return OSAL_NULL;
+}
+
+/* set the iids (cid/tid) count per protocol */
+void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
+                                  enum protocol_type type,
+                                  u32 cid_count, u32 vf_cid_cnt)
+{
+       struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+       struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+       /* Round CID counts up to the doorbell-queue range granularity */
+       p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN);
+       p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);
+
+       if (type == PROTOCOLID_ROCE) {
+               u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
+               u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
+               u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+
+               /* RoCE CIDs additionally fill whole CDUC ILT pages */
+               p_conn->cid_count = ROUNDUP(p_conn->cid_count, elems_per_page);
+       }
+}
+
+/* Return the PF CID count for @type; optionally report per-VF CIDs */
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+                                 enum protocol_type type, u32 *vf_cid)
+{
+       if (vf_cid)
+               *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+       return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+}
+
+/* Return the first CID assigned to @type */
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+                                 enum protocol_type type)
+{
+       return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+}
+
+/* Sum the TID counts of all (PF + VF) segments of @type */
+static u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
+                                        enum protocol_type type)
+{
+       u32 cnt = 0;
+       int i;
+
+       for (i = 0; i < TASK_SEGMENTS; i++)
+               cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+
+       return cnt;
+}
+
+/* Record the TID segment configuration for protocol @proto, segment @seg */
+static OSAL_INLINE void
+ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
+                             enum protocol_type proto,
+                             u8 seg, u8 seg_type, u32 count, bool has_fl)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+       p_seg->count = count;
+       p_seg->has_fl_mem = has_fl;
+       p_seg->type = seg_type;
+}
+
+/* the *p_line parameter must be either 0 for the first invocation or the
+   value returned in the previous invocation.
+ */
+static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
+                                  struct ecore_ilt_cli_blk *p_blk,
+                                  u32 start_line,
+                                  u32 total_size, u32 elem_size)
+{
+       u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+       /* verify called once for each block */
+       if (p_blk->total_size)
+               return;
+
+       p_blk->total_size = total_size;
+       p_blk->real_size_in_page = 0;
+       if (elem_size)
+               /* usable bytes per ILT page = whole elements that fit */
+               p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+       p_blk->start_line = start_line;
+}
+
+/* Advance *p_line past @p_blk, activating @p_cli and updating its
+ * first/last line boundaries. Inactive (zero-size) blocks are skipped.
+ */
+static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ilt_client_cfg *p_cli,
+                                  struct ecore_ilt_cli_blk *p_blk,
+                                  u32 *p_line, enum ilt_clients client_id)
+{
+       if (!p_blk->total_size)
+               return;
+
+       /* First active block of this client fixes its first ILT line */
+       if (!p_cli->active)
+               p_cli->first.val = *p_line;
+
+       p_cli->active = true;
+       *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+       p_cli->last.val = *p_line - 1;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                  "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+                  client_id, p_cli->first.val, p_cli->last.val,
+                  p_blk->total_size, p_blk->real_size_in_page,
+                  p_blk->start_line);
+}
+
+/* Number of CDUC ILT lines that may be populated dynamically (for RoCE
+ * CIDs); returns 0 for all other clients.
+ */
+static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
+                                         enum ilt_clients ilt_client)
+{
+       u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
+       struct ecore_ilt_client_cfg *p_cli;
+       u32 lines_to_skip = 0;
+       u32 cxts_per_p;
+
+       /* TBD MK: ILT code should be simplified once PROTO enum is changed */
+
+       if (ilt_client == ILT_CLI_CDUC) {
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+
+               cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
+                   (u32) CONN_CXT_SIZE(p_hwfn);
+
+               lines_to_skip = cid_count / cxts_per_p;
+       }
+
+       return lines_to_skip;
+}
+
+/* Compute the ILT line layout for this PF and its VFs.
+ *
+ * Walks the CDUC, CDUT, QM, SRC and TM ILT clients in order, assigning
+ * each active block a contiguous range of ILT lines starting at the PF's
+ * first line (RESC_START). Returns ECORE_INVAL if the resulting layout
+ * exceeds the PF's ILT line allocation, ECORE_SUCCESS otherwise.
+ */
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 curr_line, total, i, task_size, line;
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_ilt_cli_blk *p_blk;
+       struct ecore_cdu_iids cdu_iids;
+       struct ecore_src_iids src_iids;
+       struct ecore_qm_iids qm_iids;
+       struct ecore_tm_iids tm_iids;
+       struct ecore_tid_seg *p_seg;
+
+       /* The iids helpers accumulate, so the counters must start at zero */
+       OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
+       OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
+       OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+       OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
+
+       p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                  "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+                  p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+       /* CDUC */
+       p_cli = &p_mngr->clients[ILT_CLI_CDUC];
+
+       curr_line = p_mngr->pf_start_line;
+
+       /* CDUC PF */
+       p_cli->pf_total_lines = 0;
+
+       /* get the counters for the CDUC and QM clients */
+       ecore_cxt_cdu_iids(p_mngr, &cdu_iids);
+
+       p_blk = &p_cli->pf_blks[CDUC_BLK];
+
+       total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+       ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                              total, CONN_CXT_SIZE(p_hwfn));
+
+       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+       p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+       p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
+                                                                ILT_CLI_CDUC);
+
+       /* CDUC VF */
+       p_blk = &p_cli->vf_blks[CDUC_BLK];
+       total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+       ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                              total, CONN_CXT_SIZE(p_hwfn));
+
+       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+       p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+       /* All VFs share one block layout - advance once per remaining VF */
+       for (i = 1; i < p_mngr->vf_count; i++)
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_CDUC);
+
+       /* CDUT PF */
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       p_cli->first.val = curr_line;
+
+       /* first the 'working' task memory */
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg || p_seg->count == 0)
+                       continue;
+
+               p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+               total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+               ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
+                                      p_mngr->task_type_size[p_seg->type]);
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_CDUT);
+       }
+
+       /* next the 'init' task memory (forced load memory) */
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg || p_seg->count == 0)
+                       continue;
+
+               p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+
+               if (!p_seg->has_fl_mem) {
+                       /* The segment is active (total size pf 'working'
+                        * memory is > 0) but has no FL (forced-load, Init)
+                        * memory. Thus:
+                        *
+                        * 1.   The total-size in the corresponding FL block of
+                        *      the ILT client is set to 0 - No ILT line are
+                        *      provisioned and no ILT memory allocated.
+                        *
+                        * 2.   The start-line of said block is set to the
+                        *      start line of the matching working memory
+                        *      block in the ILT client. This is later used to
+                        *      configure the CDU segment offset registers and
+                        *      results in an FL command for TIDs of this
+                        *      segment behaves as regular load commands
+                        *      (loading TIDs from the working memory).
+                        */
+                       line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
+
+                       ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+                       continue;
+               }
+               total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+               ecore_ilt_cli_blk_fill(p_cli, p_blk,
+                                      curr_line, total,
+                                      p_mngr->task_type_size[p_seg->type]);
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_CDUT);
+       }
+       p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+
+       /* CDUT VF */
+       p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+       if (p_seg && p_seg->count) {
+               /* Strictly speaking we need to iterate over all VF
+                * task segment types, but a VF has only 1 segment
+                */
+
+               /* 'working' memory */
+               total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+               p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+               ecore_ilt_cli_blk_fill(p_cli, p_blk,
+                                      curr_line, total,
+                                      p_mngr->task_type_size[p_seg->type]);
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_CDUT);
+
+               /* 'init' memory */
+               p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+               if (!p_seg->has_fl_mem) {
+                       /* see comment above */
+                       line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
+                       ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+               } else {
+                       task_size = p_mngr->task_type_size[p_seg->type];
+                       ecore_ilt_cli_blk_fill(p_cli, p_blk,
+                                              curr_line, total, task_size);
+                       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                              ILT_CLI_CDUT);
+               }
+               p_cli->vf_total_lines = curr_line -
+                   p_cli->vf_blks[0].start_line;
+
+               /* Now for the rest of the VFs */
+               for (i = 1; i < p_mngr->vf_count; i++) {
+                       p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+                       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                              ILT_CLI_CDUT);
+
+                       p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+                       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                              ILT_CLI_CDUT);
+               }
+       }
+
+       /* QM */
+       p_cli = &p_mngr->clients[ILT_CLI_QM];
+       p_blk = &p_cli->pf_blks[0];
+
+       ecore_cxt_qm_iids(p_hwfn, &qm_iids);
+       total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+                                    qm_iids.vf_cids, qm_iids.tids,
+                                    p_hwfn->qm_info.num_pqs,
+                                    p_hwfn->qm_info.num_vf_pqs);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                  "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d,"
+                  " num_vf_pqs=%d, memory_size=%d)\n",
+                  qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
+                  p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
+
+       /* ecore_qm_pf_mem_size() returns the size in 4K pages */
+       ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
+                              QM_PQ_ELEMENT_SIZE);
+
+       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+       p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+       /* SRC */
+       p_cli = &p_mngr->clients[ILT_CLI_SRC];
+       ecore_cxt_src_iids(p_mngr, &src_iids);
+
+       /* Both the PF and VFs searcher connections are stored in the per PF
+        * database. Thus sum the PF searcher cids and all the VFs searcher
+        * cids.
+        */
+       total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+       if (total) {
+               /* Searcher table size must be a power of two */
+               u32 local_max = OSAL_MAX_T(u32, total,
+                                          SRC_MIN_NUM_ELEMS);
+
+               total = OSAL_ROUNDUP_POW_OF_TWO(local_max);
+
+               p_blk = &p_cli->pf_blks[0];
+               ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                                      total * sizeof(struct src_ent),
+                                      sizeof(struct src_ent));
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_SRC);
+               p_cli->pf_total_lines = curr_line - p_blk->start_line;
+       }
+
+       /* TM PF */
+       p_cli = &p_mngr->clients[ILT_CLI_TM];
+       ecore_cxt_tm_iids(p_mngr, &tm_iids);
+       total = tm_iids.pf_cids + tm_iids.pf_tids_total;
+       if (total) {
+               p_blk = &p_cli->pf_blks[0];
+               ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                                      total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_TM);
+               p_cli->pf_total_lines = curr_line - p_blk->start_line;
+       }
+
+       /* TM VF */
+       total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
+       if (total) {
+               p_blk = &p_cli->vf_blks[0];
+               ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+                                      total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+               ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                      ILT_CLI_TM);
+               p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+               for (i = 1; i < p_mngr->vf_count; i++) {
+                       ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+                                              ILT_CLI_TM);
+               }
+       }
+
+       /* Verify the layout fits within the PF's ILT allocation */
+       if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+           RESC_NUM(p_hwfn, ECORE_ILT)) {
+               DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
+                      curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Release the T2 (searcher) table: free every allocated DMA page, then
+ * the page-descriptor array itself, and clear the pointer.
+ */
+static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 idx;
+
+       if (p_mngr->t2 == OSAL_NULL)
+               return;
+
+       for (idx = 0; idx < p_mngr->t2_num_pages; idx++) {
+               struct ecore_dma_mem *p_pg = &p_mngr->t2[idx];
+
+               if (p_pg->p_virt == OSAL_NULL)
+                       continue;
+
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_pg->p_virt,
+                                      p_pg->p_phys, p_pg->size);
+       }
+
+       OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
+       p_mngr->t2 = OSAL_NULL;
+}
+
+/* Allocate and chain the T2 (searcher) table.
+ *
+ * One 'struct src_ent' is needed per searcher connection (PF + all VFs).
+ * The entries are spread over DMA pages sized like the SRC ILT client's
+ * page, and each entry's 'next' field is written (big-endian, via
+ * OSAL_CPU_TO_BE64) with the physical address of the following entry;
+ * the last entry of each page points at the base of the next page.
+ * first_free/last_free record the physical bounds of the resulting list.
+ *
+ * Returns ECORE_SUCCESS, or ECORE_NOMEM after releasing any partial
+ * allocation through ecore_cxt_src_t2_free().
+ */
+static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 conn_num, total_size, ent_per_page, psz, i;
+       struct ecore_ilt_client_cfg *p_src;
+       struct ecore_src_iids src_iids;
+       struct ecore_dma_mem *p_t2;
+       enum _ecore_status_t rc;
+
+       OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+
+       /* if the SRC ILT client is inactive - there are no connections
+        * requiring the searcher, leave.
+        */
+       p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
+       if (!p_src->active)
+               return ECORE_SUCCESS;
+
+       /* PF and VF searcher connections share the per-PF database */
+       ecore_cxt_src_iids(p_mngr, &src_iids);
+       conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+       total_size = conn_num * sizeof(struct src_ent);
+
+       /* use the same page size as the SRC ILT client */
+       psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
+       p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+
+       /* allocate t2 */
+       p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                p_mngr->t2_num_pages *
+                                sizeof(struct ecore_dma_mem));
+       if (!p_mngr->t2) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
+               rc = ECORE_NOMEM;
+               goto t2_fail;
+       }
+
+       /* allocate t2 pages; the last page may be partially used */
+       for (i = 0; i < p_mngr->t2_num_pages; i++) {
+               u32 size = OSAL_MIN_T(u32, total_size, psz);
+               void **p_virt = &p_mngr->t2[i].p_virt;
+
+               *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+                                                 &p_mngr->t2[i].p_phys, size);
+               if (!p_mngr->t2[i].p_virt) {
+                       rc = ECORE_NOMEM;
+                       goto t2_fail;
+               }
+               OSAL_MEM_ZERO(*p_virt, size);
+               p_mngr->t2[i].size = size;
+               total_size -= size;
+       }
+
+       /* Set the t2 pointers */
+
+       /* entries per page - must be a power of two */
+       ent_per_page = psz / sizeof(struct src_ent);
+
+       p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
+
+       /* last_free points at the final entry's physical address */
+       p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
+       p_mngr->last_free = (u64) p_t2->p_phys +
+           ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+
+       for (i = 0; i < p_mngr->t2_num_pages; i++) {
+               /* NOTE(review): assumes at least one entry remains per page
+                * (ent_num >= 1), otherwise 'ent_num - 1' underflows — holds
+                * as long as t2_num_pages = ceil(conn_num / ent_per_page).
+                */
+               u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
+               struct src_ent *entries = p_mngr->t2[i].p_virt;
+               u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
+               u32 j;
+
+               for (j = 0; j < ent_num - 1; j++) {
+                       val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+                       entries[j].next = OSAL_CPU_TO_BE64(val);
+               }
+
+               /* final entry of the page links to the next page (or 0) */
+               if (i < p_mngr->t2_num_pages - 1)
+                       val = (u64) p_mngr->t2[i + 1].p_phys;
+               else
+                       val = 0;
+               entries[j].next = OSAL_CPU_TO_BE64(val);
+
+               conn_num -= ent_per_page;
+       }
+
+       return ECORE_SUCCESS;
+
+t2_fail:
+       ecore_cxt_src_t2_free(p_hwfn);
+       return rc;
+}
+
+/* Total number of ILT lines used by this PF */
+static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
+{
+       u32 lines = 0;
+       u32 cli;
+
+       /* Sum the [first, last] line span of every active ILT client */
+       for (cli = 0; cli < ILT_CLI_MAX; cli++) {
+               if (ilt_clients[cli].active)
+                       lines += ilt_clients[cli].last.val -
+                                ilt_clients[cli].first.val + 1;
+       }
+
+       return lines;
+}
+
+/* Free all DMA pages recorded in the ILT shadow, then the shadow array.
+ * Safe to call repeatedly: both the pointers of freed pages and the
+ * shadow pointer itself are cleared.
+ */
+static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 ilt_size, i;
+
+       ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
+
+       for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+               struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
+
+               if (p_dma->p_virt)
+                       OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                              p_dma->p_virt,
+                                              p_dma->p_phys, p_dma->size);
+               p_dma->p_virt = OSAL_NULL;
+       }
+       OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
+       /* Clear the pointer: this function is reached both from the
+        * ilt_shadow_fail path and again from ecore_cxt_mngr_free() (via
+        * the tables_alloc failure path); without this the second call
+        * walks freed memory and double-frees the shadow array.
+        */
+       p_mngr->ilt_shadow = OSAL_NULL;
+}
+
+/* Allocate the DMA memory backing one ILT client block, page by page,
+ * recording each page in the ILT shadow at its (PF-relative) line.
+ *
+ * @param p_blk             block descriptor (total size, page size, start line)
+ * @param ilt_client        owning ILT client
+ * @param start_line_offset additional line offset (used for per-VF blocks)
+ *
+ * Returns ECORE_SUCCESS or ECORE_NOMEM.  RoCE CDUT blocks are skipped
+ * here since they support dynamic allocation; 'dynamic_line_cnt' lines
+ * at the start of the block are likewise left unallocated.
+ */
+static enum _ecore_status_t
+ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
+                   struct ecore_ilt_cli_blk *p_blk,
+                   enum ilt_clients ilt_client, u32 start_line_offset)
+{
+       struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+       u32 lines, line, sz_left, lines_to_skip = 0;
+
+       /* Special handling for RoCE that supports dynamic allocation */
+       if ((p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) &&
+           (ilt_client == ILT_CLI_CDUT))
+               return ECORE_SUCCESS;
+
+       lines_to_skip = p_blk->dynamic_line_cnt;
+
+       if (!p_blk->total_size)
+               return ECORE_SUCCESS;
+
+       sz_left = p_blk->total_size;
+       lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
+       line = p_blk->start_line + start_line_offset -
+           p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
+
+       for (; lines; lines--) {
+               dma_addr_t p_phys;
+               void *p_virt;
+               u32 size;
+
+               size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
+
+/* @DPDK */
+#define ILT_BLOCK_ALIGN_SIZE 0x1000
+               p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev,
+                                                        &p_phys, size,
+                                                        ILT_BLOCK_ALIGN_SIZE);
+               if (!p_virt)
+                       return ECORE_NOMEM;
+               OSAL_MEM_ZERO(p_virt, size);
+
+               ilt_shadow[line].p_phys = p_phys;
+               ilt_shadow[line].p_virt = p_virt;
+               ilt_shadow[line].size = size;
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                          "ILT shadow: Line [%d] Physical 0x%lx"
+                          " Virtual %p Size %d\n",
+                          line, (u64) p_phys, p_virt, size);
+
+               sz_left -= size;
+               line++;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate the ILT shadow descriptor array and the DMA memory for every
+ * PF and VF block of every active ILT client.  On failure, everything
+ * allocated so far is released via ecore_ilt_shadow_free().
+ */
+static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_ilt_client_cfg *clients = p_mngr->clients;
+       struct ecore_ilt_cli_blk *p_blk;
+       enum _ecore_status_t rc;
+       u32 size, i, j, k;
+
+       size = ecore_cxt_ilt_shadow_size(clients);
+       p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                        size * sizeof(struct ecore_dma_mem));
+       if (!p_mngr->ilt_shadow) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate ilt shadow table");
+               rc = ECORE_NOMEM;
+               goto ilt_shadow_fail;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                  "Allocated 0x%x bytes for ilt shadow\n",
+                  (u32) (size * sizeof(struct ecore_dma_mem)));
+
+       for (i = 0; i < ILT_CLI_MAX; i++) {
+               if (!clients[i].active)
+                       continue;
+
+               /* PF blocks of this client */
+               for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+                       p_blk = &clients[i].pf_blks[j];
+                       rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+                       if (rc != ECORE_SUCCESS)
+                               goto ilt_shadow_fail;
+               }
+
+               /* VF blocks, one set per VF, offset by the per-VF line count */
+               for (k = 0; k < p_mngr->vf_count; k++) {
+                       for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+                               u32 lines = clients[i].vf_total_lines * k;
+
+                               p_blk = &clients[i].vf_blks[j];
+                               rc = ecore_ilt_blk_alloc(p_hwfn, p_blk,
+                                                        i, lines);
+                               if (rc != ECORE_SUCCESS)
+                                       goto ilt_shadow_fail;
+                       }
+               }
+       }
+
+       return ECORE_SUCCESS;
+
+ilt_shadow_fail:
+       ecore_ilt_shadow_free(p_hwfn);
+       return rc;
+}
+
+/* Release the per-connection-type acquired-cid bitmaps and reset their
+ * bookkeeping.  Safe to call repeatedly.
+ */
+static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
+               /* Clear the pointer: this function is reached both from the
+                * cid_map_fail path and again from ecore_cxt_mngr_free(),
+                * which would otherwise double-free the maps.
+                */
+               p_mngr->acquired[type].cid_map = OSAL_NULL;
+               p_mngr->acquired[type].max_count = 0;
+               p_mngr->acquired[type].start_cid = 0;
+       }
+}
+
+/* Allocate one acquired-cid bitmap per connection type and lay the cid
+ * ranges out back-to-back in the global cid space.  On failure all maps
+ * allocated so far are released.
+ */
+static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 start_cid = 0;
+       u32 type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               u32 cid_cnt = p_mngr->conn_cfg[type].cid_count;
+               u32 map_size;
+
+               if (!cid_cnt)
+                       continue;
+
+               map_size = MAP_WORD_SIZE *
+                          DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD);
+               p_mngr->acquired[type].cid_map =
+                   OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, map_size);
+               if (p_mngr->acquired[type].cid_map == OSAL_NULL)
+                       goto cid_map_fail;
+
+               p_mngr->acquired[type].max_count = cid_cnt;
+               p_mngr->acquired[type].start_cid = start_cid;
+               p_mngr->conn_cfg[type].cid_start = start_cid;
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+                          "Type %08x start: %08x count %08x\n",
+                          type, p_mngr->acquired[type].start_cid,
+                          p_mngr->acquired[type].max_count);
+
+               start_cid += cid_cnt;
+       }
+
+       return ECORE_SUCCESS;
+
+cid_map_fail:
+       ecore_cid_map_free(p_hwfn);
+       return ECORE_NOMEM;
+}
+
+/* Allocate the context manager and fill in its static configuration:
+ * ILT register addresses per client, the default ILT page size, the task
+ * context sizes and the VF count.  Stores the manager in
+ * p_hwfn->p_cxt_mngr before returning so later allocation stages can use
+ * it.
+ *
+ * Returns ECORE_SUCCESS or ECORE_NOMEM.
+ */
+enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr;
+       u32 i;
+
+       p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
+       if (!p_mngr) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_cxt_mngr'\n");
+               return ECORE_NOMEM;
+       }
+
+       /* Initialize ILT client registers */
+       p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+       p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+       p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+       p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+       p_mngr->clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+       p_mngr->clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+       p_mngr->clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+       p_mngr->clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+       p_mngr->clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+       p_mngr->clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+       p_mngr->clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+       /* default ILT page size for all clients is 32K */
+       for (i = 0; i < ILT_CLI_MAX; i++)
+               p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+       /* Initialize task sizes.
+        * Due to removal of the ISCSI/FCoE files union type0_task_context is
+        * unavailable, so TYPE0/TYPE1_TASK_CXT_SIZE() would evaluate to 0;
+        * the sizes are hardcoded for now.
+        */
+       p_mngr->task_type_size[0] = 512;        /* @DPDK */
+       p_mngr->task_type_size[1] = 128;        /* @DPDK */
+
+       p_mngr->vf_count = p_hwfn->p_dev->sriov_info.total_vfs;
+
+       /* Set the cxt mangr pointer prior to further allocations */
+       p_hwfn->p_cxt_mngr = p_mngr;
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate the context-manager runtime tables: the ILT shadow, the T2
+ * searcher table and the acquired-cid bitmaps.  On any failure the whole
+ * context manager is torn down via ecore_cxt_mngr_free().
+ *
+ * Returns ECORE_SUCCESS or the failing stage's error code.
+ */
+enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
+{
+       enum _ecore_status_t rc;
+
+       /* Allocate the ILT shadow table */
+       rc = ecore_ilt_shadow_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
+               goto tables_alloc_fail;
+       }
+
+       /* Allocate the T2  table */
+       rc = ecore_cxt_src_t2_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
+               goto tables_alloc_fail;
+       }
+
+       /* Allocate and initialize the acquired cids bitmaps */
+       rc = ecore_cid_map_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
+               goto tables_alloc_fail;
+       }
+
+       return ECORE_SUCCESS;
+
+tables_alloc_fail:
+       ecore_cxt_mngr_free(p_hwfn);
+       return rc;
+}
+
+/* Tear down the context manager: the cid maps, the T2 table, the ILT
+ * shadow, and finally the manager struct itself.  A no-op when the
+ * manager was never allocated.
+ */
+void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
+{
+       if (!p_hwfn->p_cxt_mngr)
+               return;
+
+       ecore_cid_map_free(p_hwfn);
+       ecore_cxt_src_t2_free(p_hwfn);
+       ecore_ilt_shadow_free(p_hwfn);
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
+
+       p_hwfn->p_cxt_mngr = OSAL_NULL;
+}
+
+/* Reset the acquired-cid bitmaps so every cid reads as free again,
+ * without reallocating any memory.
+ */
+void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       int type;
+
+       for (type = 0; type < MAX_CONN_TYPES; type++) {
+               u32 cid_cnt = p_mngr->conn_cfg[type].cid_count;
+               u32 words, w;
+
+               if (!cid_cnt)
+                       continue;
+
+               /* Zero every map word covering this type's cid range */
+               words = DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD);
+               for (w = 0; w < words; w++)
+                       p_mngr->acquired[type].cid_map[w] = 0;
+       }
+}
+
+/* HW initialization helper (per Block, per phase) */
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT                                            \
+       CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK                                             \
+       (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT                                         \
+       CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK                                          \
+       (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT                                                        \
+       CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK                                                 \
+       (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+#define CDUT_TYPE0_CXT_SIZE_SHIFT                                      \
+       CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK                                       \
+       (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >>                         \
+       CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT                                   \
+       CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK                                    \
+       (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >>                  \
+       CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT                                          \
+       CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK                                           \
+       (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >>                \
+       CDUT_TYPE0_NCIB_SHIFT)
+
+#define CDUT_TYPE1_CXT_SIZE_SHIFT                                      \
+       CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK                                       \
+       (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >>                         \
+       CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT                                   \
+       CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK                                    \
+       (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >>                  \
+       CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT                                          \
+       CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK                                           \
+       (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >>                \
+       CDUT_TYPE1_NCIB_SHIFT)
+
+/* Program the common CDU runtime parameters: for the CDUC connection
+ * context and for both CDUT task segment types, compute the context
+ * size, the number of elements fitting in an ILT page and the resulting
+ * block waste, and store them into the corresponding runtime registers.
+ */
+static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
+{
+       u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
+       /* CDUC - connection configuration */
+       page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+       cxt_size = CONN_CXT_SIZE(p_hwfn);
+       elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+       block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+       SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+       SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+       SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+       STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+       /* CDUT - type-0 tasks configuration */
+       page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+       cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+       elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+       block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+       /* cxt size and block-waste are multiples of 8 */
+       cdu_params = 0;
+       SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+       STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+       /* CDUT - type-1 tasks configuration (same CDUT page size as above) */
+       cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+       elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+       block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+       /* cxt size and block-waste are multiples of 8 */
+       cdu_params = 0;
+       SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+       SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+       STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+#define CDU_SEG_REG_TYPE_SHIFT         CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK          0x1
+#define CDU_SEG_REG_OFFSET_SHIFT       0
+#define CDU_SEG_REG_OFFSET_MASK                CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+/* Program the per-PF CDUT segment runtime registers: for every
+ * configured task segment, write its type and its offset (derived from
+ * the segment block's start line within the CDUT client) to both the
+ * regular and the FL segment register arrays.
+ */
+static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_tid_seg *p_seg;
+       u32 cdu_seg_params, offset;
+       int i;
+
+       static const u32 rt_type_offset_arr[] = {
+               CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+       };
+
+       static const u32 rt_type_offset_fl_arr[] = {
+               CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+               CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+       };
+
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+
+       /* There are initializations only for CDUT during pf Phase */
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               /* Segment 0 */
+               p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
+               if (!p_seg)
+                       continue;
+
+               /* Note: start_line is already adjusted for the CDU
+                * segment register granularity, so we just need to
+                * divide. Adjustment is implicit as we assume ILT
+                * Page size is larger than 32K!
+                */
+               offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+                         (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+                          p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+               cdu_seg_params = 0;
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+               STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+
+               /* Same computation for the segment's FL block */
+               offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+                         (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+                          p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+               cdu_seg_params = 0;
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+               SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+               STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+
+       }
+}
+
+/* QM PF runtime init: derive the QM iid counts for this PF and program
+ * the per-PF QM runtime state through ecore_qm_pf_rt_init(), passing
+ * the PQ/vport layout and rate-limit settings from qm_info.
+ */
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+       struct ecore_qm_iids iids;
+
+       OSAL_MEM_ZERO(&iids, sizeof(iids));
+       ecore_cxt_qm_iids(p_hwfn, &iids);
+
+       ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id,
+                           p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
+                           p_hwfn->first_on_engine,
+                           iids.cids, iids.vf_cids, iids.tids,
+                           qm_info->start_pq,
+                           qm_info->num_pqs - qm_info->num_vf_pqs,
+                           qm_info->num_vf_pqs,
+                           qm_info->start_vport,
+                           qm_info->num_vports, qm_info->pf_wfq,
+                           qm_info->pf_rl, p_hwfn->qm_info.qm_pq_params,
+                           p_hwfn->qm_info.qm_vport_params);
+}
+
+/* CM PF */
+static enum _ecore_status_t ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       union ecore_qm_pq_params pq_params;
+       u16 pure_lb_pq;
+
+       /* Resolve the physical queue backing the XCM pure-LB connection
+        * and store it in the runtime array.
+        */
+       OSAL_MEM_ZERO(&pq_params, sizeof(pq_params));
+       pq_params.core.tc = LB_TC;
+       pure_lb_pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+       STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pure_lb_pq);
+
+       return ECORE_SUCCESS;
+}
+
+/* DQ PF */
+static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       static const u32 pf_max_icid_rt[] = {
+               DORQ_REG_PF_MAX_ICID_0_RT_OFFSET,
+               DORQ_REG_PF_MAX_ICID_1_RT_OFFSET,
+               DORQ_REG_PF_MAX_ICID_2_RT_OFFSET,
+               DORQ_REG_PF_MAX_ICID_3_RT_OFFSET,
+               DORQ_REG_PF_MAX_ICID_4_RT_OFFSET,
+               DORQ_REG_PF_MAX_ICID_5_RT_OFFSET
+       };
+       static const u32 vf_max_icid_rt[] = {
+               DORQ_REG_VF_MAX_ICID_0_RT_OFFSET,
+               DORQ_REG_VF_MAX_ICID_1_RT_OFFSET,
+               DORQ_REG_VF_MAX_ICID_2_RT_OFFSET,
+               DORQ_REG_VF_MAX_ICID_3_RT_OFFSET,
+               DORQ_REG_VF_MAX_ICID_4_RT_OFFSET,
+               DORQ_REG_VF_MAX_ICID_5_RT_OFFSET
+       };
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
+       u32 type;
+
+       /* Program, per connection type, the running upper cid bound for
+        * both the PF and the per-VF ranges.
+        */
+       for (type = 0; type < 6; type++) {
+               dq_pf_max_cid +=
+                   (p_mngr->conn_cfg[type].cid_count >> DQ_RANGE_SHIFT);
+               STORE_RT_REG(p_hwfn, pf_max_icid_rt[type], dq_pf_max_cid);
+
+               dq_vf_max_cid +=
+                   (p_mngr->conn_cfg[type].cids_per_vf >> DQ_RANGE_SHIFT);
+               STORE_RT_REG(p_hwfn, vf_max_icid_rt[type], dq_vf_max_cid);
+       }
+
+       /* Connection types 6 & 7 are not in use, yet they must be configured
+        * as the highest possible connection. Not configuring them means the
+        * defaults will be  used, and with a large number of cids a bug may
+        * occur, if the defaults will be smaller than dq_pf_max_cid /
+        * dq_vf_max_cid.
+        */
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+       STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+       STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
+}
+
+/* Store first/last ILT line and page size for every active ILT client
+ * into the runtime registers.
+ */
+static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+       int i;
+
+       for (i = 0; i < ILT_CLI_MAX; i++) {
+               if (!p_cli[i].active)
+                       continue;
+
+               STORE_RT_REG(p_hwfn, p_cli[i].first.reg, p_cli[i].first.val);
+               STORE_RT_REG(p_hwfn, p_cli[i].last.reg, p_cli[i].last.val);
+               STORE_RT_REG(p_hwfn, p_cli[i].p_size.reg, p_cli[i].p_size.val);
+       }
+}
+
+/* Program the VF-related ILT runtime registers: the VF base/last ILT
+ * lines, and - for each of the CDUC, CDUT and TM clients that is active -
+ * the block factor (log2 of the ILT page size in KB) plus the number of
+ * PF and per-VF blocks.
+ */
+static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ilt_client_cfg *p_cli;
+       u32 blk_factor;
+
+       /* For simplicity we set the 'block' to be an ILT page */
+       STORE_RT_REG(p_hwfn,
+                    PSWRQ2_REG_VF_BASE_RT_OFFSET,
+                    p_hwfn->hw_info.first_vf_in_pf);
+       STORE_RT_REG(p_hwfn,
+                    PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+                    p_hwfn->hw_info.first_vf_in_pf +
+                    p_hwfn->p_dev->sriov_info.total_vfs);
+
+       /* CDUC client */
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+       blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+       if (p_cli->active) {
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+                            blk_factor);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+                            p_cli->pf_total_lines);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+                            p_cli->vf_total_lines);
+       }
+
+       /* CDUT client */
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+       blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+       if (p_cli->active) {
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
+                            blk_factor);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+                            p_cli->pf_total_lines);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
+                            p_cli->vf_total_lines);
+       }
+
+       /* TM client */
+       p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
+       blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+       if (p_cli->active) {
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+                            p_cli->pf_total_lines);
+               STORE_RT_REG(p_hwfn,
+                            PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
+                            p_cli->vf_total_lines);
+       }
+}
+
+/* ILT (PSWRQ2) PF */
+
+/* Program the ILT runtime registers for this PF: client bounds, VF
+ * bounds, and then one ILT entry (valid bit + physical address) per
+ * shadow line of every active client.  Entries whose shadow page was
+ * not allocated (dynamic allocation) are written as 0/invalid.
+ */
+static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ilt_client_cfg *clients;
+       struct ecore_cxt_mngr *p_mngr;
+       struct ecore_dma_mem *p_shdw;
+       u32 line, rt_offst, i;
+
+       ecore_ilt_bounds_init(p_hwfn);
+       ecore_ilt_vf_bounds_init(p_hwfn);
+
+       p_mngr = p_hwfn->p_cxt_mngr;
+       p_shdw = p_mngr->ilt_shadow;
+       clients = p_hwfn->p_cxt_mngr->clients;
+
+       for (i = 0; i < ILT_CLI_MAX; i++)
+               if (!clients[i].active) {
+                       continue;
+               } else {
+               /* Client's 1st val and RT array are absolute, ILT shadows'
+                * lines are relative.
+                */
+               line = clients[i].first.val - p_mngr->pf_start_line;
+               rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+                   clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+               for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+                    line++, rt_offst += ILT_ENTRY_IN_REGS) {
+                       u64 ilt_hw_entry = 0;
+
+                       /** p_virt could be OSAL_NULL in case of dynamic
+                        *  allocation
+                        */
+                       if (p_shdw[line].p_virt != OSAL_NULL) {
+                               SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+                               SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+                                         (p_shdw[line].p_phys >> 12));
+
+                               DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+                                       "Setting RT[0x%08x] from"
+                                       " ILT[0x%08x] [Client is %d] to"
+                                       " Physical addr: 0x%lx\n",
+                                       rt_offst, line, i,
+                                       (u64) (p_shdw[line].p_phys >> 12));
+                       }
+
+                       STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+               }
+       }
+}
+
+/* SRC (Searcher) PF */
+static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_src_iids src_iids;
+       u32 total_conns, clamped, pow2_conns;
+
+       OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
+       ecore_cxt_src_iids(p_mngr, &src_iids);
+
+       /* Connections owned by the PF plus those of all its VFs */
+       total_conns = src_iids.pf_cids +
+           src_iids.per_vf_cids * p_mngr->vf_count;
+       if (total_conns == 0)
+               return;
+
+       /* Hash sizing: clamp to the minimum, then round up to a power of 2 */
+       clamped = OSAL_MAX_T(u32, total_conns, SRC_MIN_NUM_ELEMS);
+       pow2_conns = OSAL_ROUNDUP_POW_OF_TWO(clamped);
+
+       STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, total_conns);
+       STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+                    OSAL_LOG2(pow2_conns));
+
+       STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+                        p_mngr->first_free);
+       STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+                        p_mngr->last_free);
+}
+
+/* Timers PF */
+#define TM_CFG_NUM_IDS_SHIFT           0
+#define TM_CFG_NUM_IDS_MASK            0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT   16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK    0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT         25
+#define TM_CFG_PARENT_PF_MASK          0x7ULL
+
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK  0x1FFULL
+
+#define TM_CFG_TID_OFFSET_SHIFT                30
+#define TM_CFG_TID_OFFSET_MASK         0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK  0x1FFULL
+
+/* Timers (TM) PF init - programs per-connection and per-task timer
+ * configuration for this PF and each of its VFs into the runtime init array.
+ */
+static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 active_seg_mask = 0, tm_offset, rt_reg;
+       struct ecore_tm_iids tm_iids;
+       u64 cfg_word;
+       u8 i;
+
+       OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
+       ecore_cxt_tm_iids(p_mngr, &tm_iids);
+
+       /* @@@TBD No pre-scan for now */
+
+       /* Note: We assume consecutive VFs for a PF */
+       for (i = 0; i < p_mngr->vf_count; i++) {
+               cfg_word = 0;
+               SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+               SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+               SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+               SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+
+               /* Each entry is sizeof(cfg_word)/sizeof(u32) RT registers
+                * wide; VF entries precede the PF entry in CONN memory.
+                */
+               rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+                   (sizeof(cfg_word) / sizeof(u32)) *
+                   (p_hwfn->hw_info.first_vf_in_pf + i);
+               STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+       }
+
+       cfg_word = 0;
+       SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+       SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+       SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);       /* n/a for PF */
+       SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+
+       /* The PF's own connection entry follows all VF entries */
+       rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+           (sizeof(cfg_word) / sizeof(u32)) *
+           (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
+       STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+       /* enable scan */
+       STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+                    tm_iids.pf_cids ? 0x1 : 0x0);
+
+       /* @@@TBD how to enable the scan for the VFs */
+
+       tm_offset = tm_iids.per_vf_cids;
+
+       /* Note: We assume consecutive VFs for a PF */
+       for (i = 0; i < p_mngr->vf_count; i++) {
+               cfg_word = 0;
+               SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+               SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+               SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+               SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+               SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+               rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+                   (sizeof(cfg_word) / sizeof(u32)) *
+                   (p_hwfn->hw_info.first_vf_in_pf + i);
+
+               STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+       }
+
+       /* PF task segments: each active segment occupies its own TID range
+        * starting after the PF connection IDs.
+        */
+       tm_offset = tm_iids.pf_cids;
+       for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+               cfg_word = 0;
+               SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+               SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+               SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+               SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+               SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+               rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+                   (sizeof(cfg_word) / sizeof(u32)) *
+                   (NUM_OF_VFS(p_hwfn->p_dev) +
+                    p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+               STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+               active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+
+               tm_offset += tm_iids.pf_tids[i];
+       }
+
+       /* NOTE(review): task scan is force-disabled for the RoCE
+        * personality - presumably enabled elsewhere; confirm.
+        */
+       if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE)
+               active_seg_mask = 0;
+
+       STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+       /* @@@TBD how to enable the scan for the VFs */
+}
+
+/* PRS (Parser) common-phase init - intentionally empty placeholder */
+static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn)
+{
+}
+
+/* Init the context-manager-owned HW blocks - common (per-path) phase */
+void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
+{
+       /* CDU configuration */
+       ecore_cdu_init_common(p_hwfn);
+       ecore_prs_init_common(p_hwfn);
+}
+
+/* Init the context-manager-owned HW blocks - PF phase.
+ * NOTE(review): the call order below (QM/CM/DQ/CDU before ILT, then
+ * SRC/TM) looks deliberate - confirm HW requirements before reordering.
+ */
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
+{
+       ecore_qm_init_pf(p_hwfn);
+       ecore_cm_init_pf(p_hwfn);
+       ecore_dq_init_pf(p_hwfn);
+       ecore_cdu_init_pf(p_hwfn);
+       ecore_ilt_init_pf(p_hwfn);
+       ecore_src_init_pf(p_hwfn);
+       ecore_tm_init_pf(p_hwfn);
+}
+
+/* Hand out the lowest free CID of @type from the per-protocol bitmap */
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+                                          enum protocol_type type, u32 *p_cid)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_cid_acquired_map *p_map;
+       u32 free_bit;
+
+       if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+               DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+               return ECORE_INVAL;
+       }
+
+       p_map = &p_mngr->acquired[type];
+
+       /* Scan for the first CID not yet handed out */
+       free_bit = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map, p_map->max_count);
+       if (free_bit >= p_map->max_count) {
+               DP_NOTICE(p_hwfn, false, "no CID available for protocol %d",
+                         type);
+               return ECORE_NORESOURCES;
+       }
+
+       OSAL_SET_BIT(free_bit, p_map->cid_map);
+
+       /* Translate the relative bit index into an absolute CID */
+       *p_cid = free_bit + p_map->start_cid;
+
+       return ECORE_SUCCESS;
+}
+
+/* Locate the protocol whose CID range contains @cid and check that the CID
+ * was actually acquired. *p_type is always set - to the matching protocol,
+ * or to MAX_CONN_TYPES when no range contains @cid.
+ */
+static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
+                                       u32 cid, enum protocol_type *p_type)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_cid_acquired_map *p_map;
+       enum protocol_type p;
+       u32 rel_cid;
+
+       /* Iterate over protocols and find matching cid range */
+       for (p = 0; p < MAX_CONN_TYPES; p++) {
+               p_map = &p_mngr->acquired[p];
+
+               /* Protocols without a bitmap were never configured */
+               if (!p_map->cid_map)
+                       continue;
+               if (cid >= p_map->start_cid &&
+                   cid < p_map->start_cid + p_map->max_count) {
+                       break;
+               }
+       }
+       *p_type = p;
+
+       if (p == MAX_CONN_TYPES) {
+               DP_NOTICE(p_hwfn, true, "Invalid CID %d", cid);
+               return false;
+       }
+       rel_cid = cid - p_map->start_cid;
+       if (!OSAL_TEST_BIT(rel_cid, p_map->cid_map)) {
+               DP_NOTICE(p_hwfn, true, "CID %d not acquired", cid);
+               return false;
+       }
+       return true;
+}
+
+/* Return a previously acquired CID to its protocol's bitmap */
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       enum protocol_type type;
+
+       /* Only CIDs that were actually handed out may be returned;
+        * ecore_cxt_test_cid_acquired() also identifies the owning protocol.
+        */
+       if (!ecore_cxt_test_cid_acquired(p_hwfn, cid, &type))
+               return;
+
+       OSAL_CLEAR_BIT(cid - p_mngr->acquired[type].start_cid,
+                      p_mngr->acquired[type].cid_map);
+}
+
+/* Fill p_info->type and p_info->p_cxt for connection p_info->iid. Fails
+ * with ECORE_INVAL when the CID was never acquired or its ILT page has not
+ * been (dynamically) allocated yet.
+ */
+enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_cxt_info *p_info)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+       enum protocol_type type;
+       bool b_acquired;
+
+       /* Test acquired and find matching per-protocol map */
+       b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+
+       if (!b_acquired)
+               return ECORE_INVAL;
+
+       /* set the protocol type */
+       p_info->type = type;
+
+       /* compute context virtual pointer */
+       hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+       conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+       cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+       line = p_info->iid / cxts_per_p;
+
+       /* Make sure context is allocated (dynamic allocation) */
+       if (!p_mngr->ilt_shadow[line].p_virt)
+               return ECORE_INVAL;
+
+       p_info->p_cxt = (u8 *) p_mngr->ilt_shadow[line].p_virt +
+           p_info->iid % cxts_per_p * conn_cxt_size;
+
+       DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
+               "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+               (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
+
+       return ECORE_SUCCESS;
+}
+
+/* Set the per-protocol connection/task counts this PF requires, derived
+ * from its personality and the caller-supplied pf_params. Returns
+ * ECORE_INVAL for an unknown personality.
+ */
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
+{
+       /* Set the number of required CORE connections */
+       u32 core_cids = 1;      /* SPQ */
+
+       if (p_hwfn->using_ll2)
+               core_cids += 4; /* @@@TBD Use the proper #define */
+
+       ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
+
+       switch (p_hwfn->hw_info.personality) {
+       case ECORE_PCI_ETH_ROCE:
+               {
+                       struct ecore_roce_pf_params *p_params =
+                           &p_hwfn->pf_params.roce_pf_params;
+
+                       if (p_params->num_cons && p_params->num_tasks) {
+                               ecore_cxt_set_proto_cid_count(p_hwfn,
+                                                             PROTOCOLID_ROCE,
+                                                             p_params->
+                                                             num_cons, 0);
+
+                               ecore_cxt_set_proto_tid_count(p_hwfn,
+                                       PROTOCOLID_ROCE,
+                                       ECORE_CXT_ROCE_TID_SEG, 1,
+                                       /* roce segment type */
+                                       p_params->num_tasks, false);
+                       } else {
+                               DP_INFO(p_hwfn->p_dev,
+                                       "Roce personality used without"
+                                       " setting params!\n");
+                       }
+                       /* no need for break since roce coexist with ethernet */
+               }
+               /* fallthrough */
+       case ECORE_PCI_ETH:
+               {
+                       struct ecore_eth_pf_params *p_params =
+                           &p_hwfn->pf_params.eth_pf_params;
+
+                       ecore_cxt_set_proto_cid_count(p_hwfn,
+                               PROTOCOLID_ETH,
+                               p_params->num_cons, 1); /* FIXME VF count... */
+
+                       break;
+               }
+       case ECORE_PCI_ISCSI:
+               {
+                       struct ecore_iscsi_pf_params *p_params;
+
+                       p_params = &p_hwfn->pf_params.iscsi_pf_params;
+
+                       if (p_params->num_cons && p_params->num_tasks) {
+                               ecore_cxt_set_proto_cid_count(p_hwfn,
+                                                             PROTOCOLID_ISCSI,
+                                                             p_params->
+                                                             num_cons, 0);
+
+                               ecore_cxt_set_proto_tid_count(p_hwfn,
+                                       PROTOCOLID_ISCSI,
+                                       ECORE_CXT_ISCSI_TID_SEG, 0,
+                                       /* segment type */
+                                       p_params->num_tasks, true);
+                       } else {
+                               DP_INFO(p_hwfn->p_dev,
+                                       "Iscsi personality used without"
+                                       " setting params!\n");
+                       }
+                       break;
+               }
+       default:
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Fill @p_info with the layout of the force-load (FL) TID memory of this
+ * PF's task segment: per-block virtual addresses, per-TID size, TIDs per
+ * block and the per-page waste.
+ */
+enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_tid_mem *p_info)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       u32 proto, seg, total_lines, i, shadow_line;
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_ilt_cli_blk *p_fl_seg;
+       struct ecore_tid_seg *p_seg_info;
+
+       /* Verify the personality */
+       switch (p_hwfn->hw_info.personality) {
+       case ECORE_PCI_ISCSI:
+               proto = PROTOCOLID_ISCSI;
+               seg = ECORE_CXT_ISCSI_TID_SEG;
+               break;
+       default:
+               return ECORE_INVAL;
+       }
+
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       if (!p_cli->active)
+               return ECORE_INVAL;
+
+       p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+       if (!p_seg_info->has_fl_mem)
+               return ECORE_INVAL;
+
+       p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+       total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
+                                  p_fl_seg->real_size_in_page);
+
+       /* p_info->blocks[] holds at most MAX_TID_BLOCKS entries; refuse to
+        * overflow the caller's array (previously unchecked).
+        */
+       if (total_lines > MAX_TID_BLOCKS)
+               return ECORE_INVAL;
+
+       for (i = 0; i < total_lines; i++) {
+               shadow_line = i + p_fl_seg->start_line -
+                   p_hwfn->p_cxt_mngr->pf_start_line;
+               p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+       }
+       p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
+           p_fl_seg->real_size_in_page;
+       p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
+       p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
+           p_info->tid_size;
+
+       return ECORE_SUCCESS;
+}
+
+/* This function is very RoCE oriented, if another protocol in the future
+ * will want this feature we'll need to modify the function to be more generic
+ *
+ * Ensures the ILT page backing @iid of @elem_type exists: if its shadow
+ * line is empty, allocates a zeroed DMA page, records it in the shadow and
+ * programs the corresponding PSWRQ2 ILT entry through a temporary PTT.
+ */
+enum _ecore_status_t
+ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
+                           enum ecore_cxt_elem_type elem_type, u32 iid)
+{
+       u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_ilt_cli_blk *p_blk;
+       struct ecore_ptt *p_ptt;
+       dma_addr_t p_phys;
+       u64 ilt_hw_entry;
+       void *p_virt;
+
+       /* Select the CDUC (connection) or CDUT (task) client/block */
+       if (elem_type == ECORE_ELEM_CXT) {
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+               elem_size = CONN_CXT_SIZE(p_hwfn);
+               p_blk = &p_cli->pf_blks[CDUC_BLK];
+       } else {
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+               elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+               p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
+       }
+
+       /* Calculate line in ilt */
+       hw_p_size = p_cli->p_size.val;
+       elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+       line = p_blk->start_line + (iid / elems_per_p);
+       shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+       /* If line is already allocated, do nothing, otherwise allocate it and
+        * write it to the PSWRQ2 registers
+        */
+       if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+               return ECORE_SUCCESS;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               DP_NOTICE(p_hwfn, false,
+                         "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+               return ECORE_TIMEOUT;
+       }
+
+       p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+                                        &p_phys, p_blk->real_size_in_page);
+       if (!p_virt) {
+               ecore_ptt_release(p_hwfn, p_ptt);
+               return ECORE_NOMEM;
+       }
+       OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);
+
+       p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
+       p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+       p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
+           p_blk->real_size_in_page;
+
+       /* compute absolute offset */
+       reg_offset = PSWRQ2_REG_ILT_MEMORY +
+           (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
+
+       ilt_hw_entry = 0;
+       SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+       SET_FIELD(ilt_hw_entry,
+                 ILT_ENTRY_PHY_ADDR,
+                 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+
+       /* writing - 32 lsb bits to low rt offset, 32 msb to high rt offset */
+       ecore_wr(p_hwfn, p_ptt, reg_offset, U64_LO(ilt_hw_entry));
+       ecore_wr(p_hwfn, p_ptt, reg_offset + ILT_REG_SIZE_IN_BYTES,
+                U64_HI(ilt_hw_entry));
+
+       if (elem_type == ECORE_ELEM_CXT) {
+               u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
+                   elems_per_p;
+
+               /* Update the relevant register in the parser */
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
+                        last_cid_allocated - 1);
+
+               if (!p_hwfn->b_roce_enabled_in_prs) {
+                       /* Enable RoCE search */
+                       ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 1);
+                       p_hwfn->b_roce_enabled_in_prs = true;
+               }
+       }
+
+       ecore_ptt_release(p_hwfn, p_ptt);
+       return ECORE_SUCCESS;
+}
+
+/* This function is very RoCE oriented, if another protocol in the future
+ * will want this feature we'll need to modify the function to be more generic
+ *
+ * Frees the dynamically allocated ILT pages backing iids
+ * [start_iid, start_iid + count) of @elem_type and clears the matching
+ * PSWRQ2 ILT entries.
+ */
+static enum _ecore_status_t
+ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
+                        enum ecore_cxt_elem_type elem_type,
+                        u32 start_iid, u32 count)
+{
+       u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+       u32 start_line, end_line, shadow_start_line, shadow_end_line;
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_ilt_cli_blk *p_blk;
+       u32 end_iid = start_iid + count;
+       struct ecore_ptt *p_ptt;
+       u64 ilt_hw_entry = 0;
+       u32 i;
+
+       /* Select the CDUC (connection) or CDUT (task) client/block */
+       if (elem_type == ECORE_ELEM_CXT) {
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+               elem_size = CONN_CXT_SIZE(p_hwfn);
+               p_blk = &p_cli->pf_blks[CDUC_BLK];
+       } else {
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+               elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+               p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
+       }
+
+       /* Calculate line in ilt */
+       hw_p_size = p_cli->p_size.val;
+       elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+       start_line = p_blk->start_line + (start_iid / elems_per_p);
+       end_line = p_blk->start_line + (end_iid / elems_per_p);
+       if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
+               end_line--;
+
+       shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
+       shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               DP_NOTICE(p_hwfn, false,
+                         "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+               return ECORE_TIMEOUT;
+       }
+
+       /* NOTE(review): the loop deliberately stops before shadow_end_line;
+        * presumably a partially-covered last line may be shared with iids
+        * outside the range - confirm before "fixing" to <=.
+        */
+       for (i = shadow_start_line; i < shadow_end_line; i++) {
+               if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+                       continue;
+
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
+                                      p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
+                                      p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
+
+               p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
+               p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+               p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+
+               /* compute absolute offset */
+               reg_offset = PSWRQ2_REG_ILT_MEMORY +
+                   ((start_line++) * ILT_REG_SIZE_IN_BYTES *
+                    ILT_ENTRY_IN_REGS);
+
+               /* ilt_hw_entry is still zero - invalidate the HW entry */
+               ecore_wr(p_hwfn, p_ptt, reg_offset, U64_LO(ilt_hw_entry));
+               ecore_wr(p_hwfn, p_ptt, reg_offset + ILT_REG_SIZE_IN_BYTES,
+                        U64_HI(ilt_hw_entry));
+       }
+
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return ECORE_SUCCESS;
+}
+
+/* Free all dynamically allocated ILT pages (connection contexts, then task
+ * contexts) owned by @proto.
+ */
+enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
+                                             enum protocol_type proto)
+{
+       enum _ecore_status_t rc;
+       u32 cid;
+
+       /* Free Connection CXT */
+       rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_CXT,
+                                     ecore_cxt_get_proto_cid_start(p_hwfn,
+                                                                   proto),
+                                     ecore_cxt_get_proto_cid_count(p_hwfn,
+                                                                   proto,
+                                                                   &cid));
+
+       if (rc)
+               return rc;
+
+       /* Free Task CXT */
+       rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
+                                     ecore_cxt_get_proto_tid_count(p_hwfn,
+                                                                   proto));
+
+       return rc;
+}
+
+/* Resolve the virtual address of task context @tid inside the CDUT working
+ * (ECORE_CTX_WORKING_MEM) or force-load (ECORE_CTX_FL_MEM) region of the
+ * PF's task segment. Returns ECORE_INVAL when the personality has no task
+ * segment, the client is inactive, @ctx_type is unknown, or @tid lies
+ * beyond the segment.
+ */
+enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
+                                           u32 tid,
+                                           u8 ctx_type, void **pp_task_ctx)
+{
+       struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+       struct ecore_ilt_client_cfg *p_cli;
+       struct ecore_ilt_cli_blk *p_seg;
+       struct ecore_tid_seg *p_seg_info;
+       u32 proto, seg;
+       u32 total_lines;
+       u32 tid_size, ilt_idx;
+       u32 num_tids_per_block;
+
+       /* Verify the personality */
+       switch (p_hwfn->hw_info.personality) {
+       case ECORE_PCI_ISCSI:
+               proto = PROTOCOLID_ISCSI;
+               seg = ECORE_CXT_ISCSI_TID_SEG;
+               break;
+       default:
+               return ECORE_INVAL;
+       }
+
+       p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+       if (!p_cli->active)
+               return ECORE_INVAL;
+
+       p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+       if (ctx_type == ECORE_CTX_WORKING_MEM) {
+               p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
+       } else if (ctx_type == ECORE_CTX_FL_MEM) {
+               if (!p_seg_info->has_fl_mem)
+                       return ECORE_INVAL;
+               p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+       } else {
+               return ECORE_INVAL;
+       }
+       total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
+       tid_size = p_mngr->task_type_size[p_seg_info->type];
+       num_tids_per_block = p_seg->real_size_in_page / tid_size;
+
+       /* Valid lines are [0, total_lines - 1]; the previous '<' test let
+        * tid / num_tids_per_block == total_lines slip one line past the
+        * end of the segment (off-by-one).
+        */
+       if (tid / num_tids_per_block >= total_lines)
+               return ECORE_INVAL;
+
+       ilt_idx = tid / num_tids_per_block + p_seg->start_line -
+           p_mngr->pf_start_line;
+       *pp_task_ctx = (u8 *) p_mngr->ilt_shadow[ilt_idx].p_virt +
+           (tid % num_tids_per_block) * tid_size;
+
+       return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/ecore/ecore_cxt.h b/drivers/net/qede/ecore/ecore_cxt.h
new file mode 100644
index 0000000..5b0af18
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_cxt.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _ECORE_CID_
+#define _ECORE_CID_
+
+#include "ecore_hsi_common.h"
+#include "ecore_proto_if.h"
+#include "ecore_cxt_api.h"
+
+/* Tasks segments definitions  */
+#define ECORE_CXT_ISCSI_TID_SEG                        PROTOCOLID_ISCSI /* 0 */
+#define ECORE_CXT_ROCE_TID_SEG                 PROTOCOLID_ROCE /* 2 */
+
+enum ecore_cxt_elem_type {
+       ECORE_ELEM_CXT,
+       ECORE_ELEM_TASK
+};
+
+u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
+                                 enum protocol_type type, u32 *vf_cid);
+
+u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
+                                 enum protocol_type type);
+
+/**
+ * @brief ecore_cxt_qm_iids - fills the cid/tid counts for the QM configuration
+ *
+ * @param p_hwfn
+ * @param iids [out], a structure holding all the counters
+ */
+void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids);
+
+/**
+ * @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_set_proto_cid_count - Set the max cids per protocol for cxt init
+ *
+ * @param p_hwfn
+ * @param type
+ * @param cid_cnt - number of pf cids
+ * @param vf_cid_cnt - number of vf cids
+ */
+void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
+                                  enum protocol_type type,
+                                  u32 cid_cnt, u32 vf_cid_cnt);
+/**
+ * @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_free
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_mngr_setup - Reset the acquired CIDs
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
+ *
+ * @param p_hwfn
+ */
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
+ *
+ * @param p_hwfn
+ */
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
+
+ /**
+ * @brief Reconfigures QM pf on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt);
+
+/**
+* @brief ecore_cxt_release - Release a cid
+*
+* @param p_hwfn
+* @param cid
+*/
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief ecore_cxt_dynamic_ilt_alloc - function checks if the
+ *        page containing the iid in the ilt is already
+ *        allocated, if it is not it allocates the page.
+ *
+ * @param p_hwfn
+ * @param elem_type
+ * @param iid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
+                           enum ecore_cxt_elem_type elem_type, u32 iid);
+
+/**
+ * @brief ecore_cxt_free_proto_ilt - function frees ilt pages
+ *        associated with the protocol passed.
+ *
+ * @param p_hwfn
+ * @param proto
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
+                                             enum protocol_type proto);
+
+#define ECORE_CTX_WORKING_MEM 0
+#define ECORE_CTX_FL_MEM 1
+enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
+                                           u32 tid,
+                                           u8 ctx_type, void **task_ctx);
+
+#endif /* _ECORE_CID_ */
diff --git a/drivers/net/qede/ecore/ecore_cxt_api.h b/drivers/net/qede/ecore/ecore_cxt_api.h
new file mode 100644
index 0000000..be75cc1
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_cxt_api.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_CXT_API_H__
+#define __ECORE_CXT_API_H__
+
+struct ecore_hwfn;
+
+/* Lookup result for a single cid: the context memory and its identity */
+struct ecore_cxt_info {
+	void *p_cxt;			/* context memory for this iid */
+	u32 iid;
+	enum protocol_type type;
+};
+
+#define MAX_TID_BLOCKS                 512
+/* Task-context storage: an array of fixed-size blocks, each holding
+ * num_tids_per_block contexts of tid_size bytes (see get_task_mem()).
+ */
+struct ecore_tid_mem {
+	u32 tid_size;			/* bytes per task context */
+	u32 num_tids_per_block;		/* contexts held in one block */
+	u32 waste;			/* per-block pad; currently unused */
+	u8 *blocks[MAX_TID_BLOCKS];	/* 4K */
+};
+
+/* Return a pointer to the task context for @tid.
+ *
+ * The block index is tid / num_tids_per_block and the byte offset
+ * within that block is (tid %% num_tids_per_block) * tid_size.
+ * (The previously attached "#if 0" no-modulo alternative was dead
+ * code and has been dropped.)
+ */
+static OSAL_INLINE void *get_task_mem(struct ecore_tid_mem *info, u32 tid)
+{
+	/* note: waste is superfluous */
+	return (void *)(info->blocks[tid / info->num_tids_per_block] +
+			(tid % info->num_tids_per_block) * info->tid_size);
+}
+
+/**
+* @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+*
+* @param p_hwfn
+* @param type
+* @param p_cid
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+                                          enum protocol_type type,
+                                          u32 *p_cid);
+
+/**
+* @brief ecore_cxt_get_cid_info - Returns the context info for a specific cid
+*
+*
+* @param p_hwfn
+* @param p_info in/out
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_cxt_info *p_info);
+
+/**
+* @brief ecore_cxt_get_tid_mem_info
+*
+* @param p_hwfn
+* @param p_info
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_tid_mem *p_info);
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_dcbx.c 
b/drivers/net/qede/ecore/ecore_dcbx.c
new file mode 100644
index 0000000..8cda10f
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_dcbx.c
@@ -0,0 +1,950 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_sp_commands.h"
+#include "ecore_dcbx.h"
+#include "ecore_cxt.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#ifdef CONFIG_ECORE_ROCE
+#include "ecore_roce.h"
+#endif
+
+#define ECORE_DCBX_MAX_MIB_READ_TRY    (100)
+#define ECORE_MAX_PFC_PRIORITIES       8
+#define ECORE_ETH_TYPE_DEFAULT         (0)
+#define ECORE_ETH_TYPE_ROCE            (0x8915)
+#define ECORE_TCP_PORT_ISCSI           (0xCBC)
+
+#define ECORE_DCBX_INVALID_PRIORITY    0xFF
+
+/* Get Traffic Class from priority traffic class table, 4 bits represent
+ * the traffic class corresponding to the priority.
+ *
+ * Fix: the macro body expanded "pri_tc_tbl" while the parameter was
+ * named "prio_tc_tbl", so it only compiled because callers happened to
+ * pass a variable of that exact name; the parameter is renamed to match
+ * and "prio" is parenthesized against operator-precedence surprises.
+ */
+#define ECORE_DCBX_PRIO2TC(pri_tc_tbl, prio) \
+		((u32)((pri_tc_tbl) >> ((7 - (prio)) * 4)) & 0x7)
+
+/* APP TLV selector field marks this entry as keyed by ethertype */
+static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+	return ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+	       DCBX_APP_SF_ETHTYPE;
+}
+
+/* APP TLV selector field marks this entry as keyed by TCP/UDP port */
+static bool ecore_dcbx_app_port(u32 app_info_bitmap)
+{
+	return ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+	       DCBX_APP_SF_PORT;
+}
+
+/* Ethtype-keyed entry carrying the default (0) protocol id */
+static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+	return ecore_dcbx_app_ethtype(app_info_bitmap) &&
+	       proto_id == ECORE_ETH_TYPE_DEFAULT;
+}
+
+/* Port-keyed entry carrying the iSCSI TCP port */
+static bool ecore_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+	return ecore_dcbx_app_port(app_info_bitmap) &&
+	       proto_id == ECORE_TCP_PORT_ISCSI;
+}
+
+/* Ethtype-keyed entry carrying the RoCE ethertype */
+static bool ecore_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+	return ecore_dcbx_app_ethtype(app_info_bitmap) &&
+	       proto_id == ECORE_ETH_TYPE_ROCE;
+}
+
+/* DCBX is considered enabled for any negotiated version but DISABLED */
+static bool ecore_dcbx_enabled(u32 dcbx_cfg_bitmap)
+{
+	return ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) !=
+	       DCBX_CONFIG_VERSION_DISABLED;
+}
+
+/* Negotiated DCBX flavour is CEE */
+static bool ecore_dcbx_cee(u32 dcbx_cfg_bitmap)
+{
+	return ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
+	       DCBX_CONFIG_VERSION_CEE;
+}
+
+/* Negotiated DCBX flavour is IEEE 802.1Qaz */
+static bool ecore_dcbx_ieee(u32 dcbx_cfg_bitmap)
+{
+	return ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
+	       DCBX_CONFIG_VERSION_IEEE;
+}
+
+/* @@@TBD A0 Eagle workaround */
+/* Switch the storm flow-control mode between PFC and plain pause for the
+ * A0 Eagle engine-1 errata; a no-op unless the workaround is compiled in
+ * and applicable to this hwfn.
+ */
+void ecore_dcbx_eagle_workaround(struct ecore_hwfn *p_hwfn,
+				 struct ecore_ptt *p_ptt, bool set_to_pfc)
+{
+	if (!ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
+		return;
+
+	/* Select PFC or pause mode in YSTORM fast-memory RAM */
+	ecore_wr(p_hwfn, p_ptt,
+		 YSEM_REG_FAST_MEMORY + 0x20000 /* RAM in FASTMEM */  +
+		 YSTORM_FLOW_CONTROL_MODE_OFFSET,
+		 set_to_pfc ? flow_ctrl_pfc : flow_ctrl_pause);
+	/* Force the NIG flow-control mode required by the workaround */
+	ecore_wr(p_hwfn, p_ptt, NIG_REG_FLOWCTRL_MODE,
+		 EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE);
+}
+
+/* Log the negotiated DCBX state for every protocol tracked in
+ * ecore_dcbx_app_update.
+ *
+ * Fix: the mail transfer wrapped the second DP_INFO format string across
+ * two source lines, splitting the string literal; the line is rejoined.
+ */
+static void
+ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
+		       struct ecore_dcbx_results *p_data)
+{
+	struct ecore_hw_info *p_info = &p_hwfn->hw_info;
+	enum dcbx_protocol_type id;
+	bool enable, update;
+	u8 prio, tc, size;
+	const char *name;	/* @DPDK */
+	int i;
+
+	size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update);
+
+	DP_INFO(p_hwfn, "DCBX negotiated: %d\n", p_data->dcbx_enabled);
+
+	for (i = 0; i < size; i++) {
+		id = ecore_dcbx_app_update[i].id;
+		name = ecore_dcbx_app_update[i].name;
+
+		enable = p_data->arr[id].enable;
+		update = p_data->arr[id].update;
+		tc = p_data->arr[id].tc;
+		prio = p_data->arr[id].priority;
+
+		DP_INFO(p_hwfn,
+			"%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
+			name, update, enable, prio, tc, p_info->num_tc);
+	}
+}
+
+/* Record the negotiated TC in hw_info when the protocol's personality
+ * matches this PF's personality (QM reconfiguration data).
+ */
+static void
+ecore_dcbx_set_pf_tcs(struct ecore_hw_info *p_info,
+		      u8 tc, enum ecore_pci_personality personality)
+{
+	/* QM reconf data */
+	if (p_info->personality != personality)
+		return;
+
+	if (personality == ECORE_PCI_ETH) {
+		p_info->non_offload_tc = tc;
+		return;
+	}
+
+	p_info->offload_tc = tc;
+	if (personality == ECORE_PCI_ISCSI)
+		p_info->ooo_tc = DCBX_ISCSI_OOO_TC;
+}
+
+/* Fill the PF-update ramrod data for @type and propagate the negotiated
+ * TC into hw_info via ecore_dcbx_set_pf_tcs().
+ */
+void
+ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
+		      struct ecore_hw_info *p_info,
+		      bool enable, bool update, u8 prio, u8 tc,
+		      enum dcbx_protocol_type type,
+		      enum ecore_pci_personality personality)
+{
+	/* PF update ramrod data */
+	p_data->arr[type].update = update;
+	p_data->arr[type].enable = enable;
+	p_data->arr[type].priority = prio;
+	p_data->arr[type].tc = tc;
+
+	ecore_dcbx_set_pf_tcs(p_info, tc, personality);
+}
+
+/* Update app protocol data and hw_info fields with the TLV info.
+ *
+ * Walks ecore_dcbx_app_update, and for the entry matching @type applies
+ * the negotiated (enable, update, prio, tc) via ecore_dcbx_set_params().
+ *
+ * Fix: the "name" local was assigned but never used, triggering a
+ * set-but-unused compiler warning; it is removed.
+ */
+static void
+ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
+			   struct ecore_hwfn *p_hwfn,
+			   bool enable, bool update, u8 prio, u8 tc,
+			   enum dcbx_protocol_type type)
+{
+	struct ecore_hw_info *p_info = &p_hwfn->hw_info;
+	enum ecore_pci_personality personality;
+	enum dcbx_protocol_type id;
+	u8 size;
+	int i;
+
+	size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update);
+
+	for (i = 0; i < size; i++) {
+		id = ecore_dcbx_app_update[i].id;
+
+		if (type != id)
+			continue;
+
+		personality = ecore_dcbx_app_update[i].personality;
+
+		ecore_dcbx_set_params(p_data, p_info, enable, update,
+				      prio, tc, type, personality);
+	}
+}
+
+/* Resolve a priority bitmap to a single priority.
+ *
+ * Bitmap 0x1 maps to priority 0; otherwise the highest set bit among
+ * bits 7..1 wins.  Returns ECORE_INVAL when no such bit is set.
+ */
+static enum _ecore_status_t
+ecore_dcbx_get_app_priority(u8 pri_bitmap, u8 *priority)
+{
+	u32 pri = ECORE_MAX_PFC_PRIORITIES;
+	u32 index;
+
+	/* Bitmap 1 corresponds to priority 0, return priority 0 */
+	if (pri_bitmap == 1) {
+		*priority = 0;
+		return ECORE_SUCCESS;
+	}
+
+	/* Choose the highest priority: scan from bit 7 down to bit 1 */
+	for (index = ECORE_MAX_PFC_PRIORITIES - 1; index > 0; index--) {
+		if (pri_bitmap & (1 << index)) {
+			pri = index;
+			break;
+		}
+	}
+
+	if (pri >= ECORE_MAX_PFC_PRIORITIES)
+		return ECORE_INVAL;
+
+	*priority = (u8)pri;
+	return ECORE_SUCCESS;
+}
+
+/* Classify an APP TLV entry into a dcbx_protocol_type.
+ *
+ * On a match *type is set and true is returned; otherwise the protocol
+ * is reported as unsupported and false is returned (*type untouched).
+ */
+static bool
+ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
+				 u32 app_prio_bitmap, u16 id, int *type)
+{
+	if (ecore_dcbx_default_tlv(app_prio_bitmap, id)) {
+		*type = DCBX_PROTOCOL_ETH;
+		return true;
+	}
+
+	if (ecore_dcbx_roce_tlv(app_prio_bitmap, id)) {
+		*type = DCBX_PROTOCOL_ROCE;
+		return true;
+	}
+
+	if (ecore_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
+		*type = DCBX_PROTOCOL_ISCSI;
+		return true;
+	}
+
+	DP_ERR(p_hwfn, "Unsupported protocol %d\n", id);
+	return false;
+}
+
+/*  Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static enum _ecore_status_t
+ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
+                      struct ecore_dcbx_results *p_data,
+                      struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
+                      int count, bool dcbx_enabled)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u8 tc, priority, priority_map;
+       int i, type = -1;
+       u16 protocol_id;
+       bool enable;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Num APP entries = %d\n", count);
+
+       /* Parse APP TLV */
+       for (i = 0; i < count; i++) {
+               protocol_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+                                                 DCBX_APP_PROTOCOL_ID);
+               priority_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+                                                  DCBX_APP_PRI_MAP);
+               rc = ecore_dcbx_get_app_priority(priority_map, &priority);
+               if (rc == ECORE_INVAL) {
+                       DP_ERR(p_hwfn, "Invalid priority\n");
+                       return rc;
+               }
+
+               /* Translate the resolved priority to a TC via the ETS table */
+               tc = ECORE_DCBX_PRIO2TC(pri_tc_tbl, priority);
+               if (ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+                                                    protocol_id, &type)) {
+                       /* ETH always have the enable bit reset, as it gets
+                        * vlan information per packet. For other protocols,
+                        * should be set according to the dcbx_enabled
+                        * indication, but we only got here if there was an
+                        * app tlv for the protocol, so dcbx must be enabled.
+                        */
+                       enable = (type == DCBX_PROTOCOL_ETH ? false : true);
+
+                       ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+                                                  priority, tc, type);
+               }
+       }
+       /* Update ramrod protocol data and hw_info fields
+        * with default info when corresponding APP TLV's are not detected.
+        * The enabled field has a different logic for ethernet, as only for
+        * ethernet should dcb be disabled by default, as the information
+        * arrives from the OS (unless an explicit app tlv was present).
+        * Defaults are taken from the ETH entry filled above.
+        */
+       tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+       priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+       for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+               if (p_data->arr[type].update)
+                       continue;
+
+               enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled;
+               ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+                                          priority, tc, type);
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Parse app TLV's to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static enum _ecore_status_t
+ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
+{
+       struct dcbx_app_priority_feature *p_app;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_dcbx_results data = { 0 };
+       struct dcbx_app_priority_entry *p_tbl;
+       struct dcbx_ets_feature *p_ets;
+       struct ecore_hw_info *p_info;
+       u32 pri_tc_tbl, flags;
+       bool dcbx_enabled;
+       int num_entries;
+
+       /* If DCBx version is non zero, then negotiation was
+        * successfully performed
+        */
+       flags = p_hwfn->p_dcbx_info->operational.flags;
+       dcbx_enabled = ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) != 0;
+
+       p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+       p_tbl = p_app->app_pri_tbl;
+
+       p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+       pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+       p_info = &p_hwfn->hw_info;
+       num_entries = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+       rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+                                   num_entries, dcbx_enabled);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_info->num_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+       data.pf_id = p_hwfn->rel_pf_id;
+       data.dcbx_enabled = dcbx_enabled;
+
+       /* Trace the per-protocol outcome, then cache it for later queries */
+       ecore_dcbx_dp_protocol(p_hwfn, &data);
+
+       OSAL_MEMCPY(&p_hwfn->p_dcbx_info->results, &data,
+                   sizeof(struct ecore_dcbx_results));
+
+       return ECORE_SUCCESS;
+}
+
+/* Snapshot a MIB from shmem, re-reading until the prefix and suffix
+ * sequence numbers agree (i.e. the MFW was not mid-update), bounded by
+ * ECORE_DCBX_MAX_MIB_READ_TRY attempts.
+ *
+ * Fix: the mail transfer wrapped the DP_VERBOSE and DP_ERR format
+ * strings across two source lines, splitting the string literals; both
+ * lines are rejoined.
+ *
+ * @return ECORE_SUCCESS on a consistent read, ECORE_IO on retry timeout.
+ */
+static enum _ecore_status_t
+ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
+		    struct ecore_ptt *p_ptt,
+		    struct ecore_dcbx_mib_meta_data *p_data,
+		    enum ecore_mib_read_type type)
+{
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	u32 prefix_seq_num, suffix_seq_num;
+	int read_count = 0;
+
+	do {
+		if (type == ECORE_DCBX_REMOTE_LLDP_MIB) {
+			ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+					  p_data->addr, p_data->size);
+			prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+			suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+		} else {
+			ecore_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+					  p_data->addr, p_data->size);
+			prefix_seq_num = p_data->mib->prefix_seq_num;
+			suffix_seq_num = p_data->mib->suffix_seq_num;
+		}
+		read_count++;
+
+		DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+			   "mib type = %d, try count = %d prefix seq num  = %d suffix seq num = %d\n",
+			   type, read_count, prefix_seq_num, suffix_seq_num);
+	} while ((prefix_seq_num != suffix_seq_num) &&
+		 (read_count < ECORE_DCBX_MAX_MIB_READ_TRY));
+
+	if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) {
+		DP_ERR(p_hwfn,
+		       "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+		       type, read_count, prefix_seq_num, suffix_seq_num);
+		rc = ECORE_IO;
+	}
+
+	return rc;
+}
+
+/* Mark the offload priorities unset before they are (re)filled from the
+ * negotiation results in ecore_dcbx_get_priority_info().
+ */
+static void ecore_dcbx_invalidate_params(struct ecore_dcbx_app_prio *p_prio)
+{
+       p_prio->roce = ECORE_DCBX_INVALID_PRIORITY;
+       p_prio->iscsi = ECORE_DCBX_INVALID_PRIORITY;
+}
+
+/* Populate per-protocol priorities (roce/iscsi/eth) from the cached
+ * negotiation results; entries not both updated and enabled are left at
+ * ECORE_DCBX_INVALID_PRIORITY (eth keeps whatever it held).
+ */
+static enum _ecore_status_t
+ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn,
+                            struct ecore_dcbx_app_prio *p_prio,
+                            struct ecore_dcbx_results *p_results)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       ecore_dcbx_invalidate_params(p_prio);
+
+       if (p_results->arr[DCBX_PROTOCOL_ROCE].update &&
+           p_results->arr[DCBX_PROTOCOL_ROCE].enable) {
+               p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority;
+               DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+                          "Priority: roce %d\n", p_prio->roce);
+       }
+
+       if (p_results->arr[DCBX_PROTOCOL_ISCSI].update &&
+           p_results->arr[DCBX_PROTOCOL_ISCSI].enable) {
+               p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority;
+               DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+                          "Priority: iscsi %d\n", p_prio->iscsi);
+       }
+
+       if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
+           p_results->arr[DCBX_PROTOCOL_ETH].enable) {
+               p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
+               DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+                          "Priority: eth %d\n", p_prio->eth);
+       }
+
+       return rc;
+}
+
+/* Extract APP feature parameters (willingness, validity, entry count and
+ * the raw per-entry bitmaps) from the shmem APP feature block.
+ */
+static void
+ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
+			struct dcbx_app_priority_feature *p_app,
+			struct dcbx_app_priority_entry *p_tbl,
+			struct ecore_dcbx_params *p_params)
+{
+	int i;
+
+	p_params->app_willing = ECORE_MFW_GET_FIELD(p_app->flags,
+						    DCBX_APP_WILLING);
+	p_params->app_valid = ECORE_MFW_GET_FIELD(p_app->flags,
+						  DCBX_APP_ENABLED);
+	/* Fix: the entry count must come from DCBX_APP_NUM_ENTRIES, not
+	 * DCBX_APP_ENABLED (copy-paste of the line above); cf. the same
+	 * extraction in ecore_dcbx_process_mib_info().
+	 */
+	p_params->num_app_entries = ECORE_MFW_GET_FIELD(p_app->flags,
+							DCBX_APP_NUM_ENTRIES);
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+		p_params->app_bitmap[i] = p_tbl[i].entry;
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+		   "APP params: willing %d, valid %d\n",
+		   p_params->app_willing, p_params->app_valid);
+}
+
+/* Decode the raw PFC word into the get-parameters structure; the raw
+ * word itself is kept in pfc_bitmap.
+ */
+static void
+ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
+                       u32 pfc, struct ecore_dcbx_params *p_params)
+{
+       p_params->pfc_willing = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
+       p_params->max_pfc_tc = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
+       p_params->pfc_enabled = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
+       p_params->pfc_bitmap = pfc;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+                  "PFC params: willing %d, pfc_bitmap %d\n",
+                  p_params->pfc_willing, p_params->pfc_bitmap);
+}
+
+/* Decode the ETS feature block (willingness, enable, max TCs, the
+ * priority-to-TC table and the per-TC TSA/bandwidth tables).
+ */
+static void
+ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
+                       struct dcbx_ets_feature *p_ets,
+                       struct ecore_dcbx_params *p_params)
+{
+       int i;
+
+       p_params->ets_willing = ECORE_MFW_GET_FIELD(p_ets->flags,
+                                                   DCBX_ETS_WILLING);
+       p_params->ets_enabled = ECORE_MFW_GET_FIELD(p_ets->flags,
+                                                   DCBX_ETS_ENABLED);
+       p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
+                                                  DCBX_ETS_MAX_TCS);
+       p_params->ets_pri_tc_tbl[0] = p_ets->pri_tc_tbl[0];
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+                  "ETS params: willing %d, pri_tc_tbl_0 %x max_ets_tc %d\n",
+                  p_params->ets_willing, p_params->ets_pri_tc_tbl[0],
+                  p_params->max_ets_tc);
+
+       /* 8 bit tsa and bw data corresponding to each of the 8 TC's are
+        * encoded in a type u32 array of size 2.
+        */
+       for (i = 0; i < 2; i++) {
+               p_params->ets_tc_tsa_tbl[i] = p_ets->tc_tsa_tbl[i];
+               p_params->ets_tc_bw_tbl[i] = p_ets->tc_bw_tbl[i];
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+                          "elem %d  bw_tbl %x tsa_tbl %x\n",
+                          i, p_params->ets_tc_bw_tbl[i],
+                          p_params->ets_tc_tsa_tbl[i]);
+       }
+}
+
+/* Decode the APP, ETS and PFC feature blocks shared by the local,
+ * remote and operational MIB views into @p_params.
+ */
+static enum _ecore_status_t
+ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
+                            struct dcbx_app_priority_feature *p_app,
+                            struct dcbx_app_priority_entry *p_tbl,
+                            struct dcbx_ets_feature *p_ets,
+                            u32 pfc, struct ecore_dcbx_params *p_params)
+{
+       ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
+       ecore_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+       ecore_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+
+       return ECORE_SUCCESS;
+}
+
+/* Fill params->local from the cached local-admin MIB and mark it valid. */
+static enum _ecore_status_t
+ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           struct ecore_dcbx_get *params)
+{
+       struct ecore_dcbx_admin_params *p_local;
+       struct dcbx_app_priority_feature *p_app;
+       struct dcbx_app_priority_entry *p_tbl;
+       struct ecore_dcbx_params *p_data;
+       struct dcbx_ets_feature *p_ets;
+       u32 pfc;
+
+       p_local = &params->local;
+       p_data = &p_local->params;
+       p_app = &p_hwfn->p_dcbx_info->local_admin.features.app;
+       p_tbl = p_app->app_pri_tbl;
+       p_ets = &p_hwfn->p_dcbx_info->local_admin.features.ets;
+       pfc = p_hwfn->p_dcbx_info->local_admin.features.pfc;
+
+       ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data);
+       p_local->valid = true;
+
+       return ECORE_SUCCESS;
+}
+
+/* Fill params->remote from the cached remote MIB and mark it valid. */
+static enum _ecore_status_t
+ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt,
+                            struct ecore_dcbx_get *params)
+{
+       struct ecore_dcbx_remote_params *p_remote;
+       struct dcbx_app_priority_feature *p_app;
+       struct dcbx_app_priority_entry *p_tbl;
+       struct ecore_dcbx_params *p_data;
+       struct dcbx_ets_feature *p_ets;
+       u32 pfc;
+
+       p_remote = &params->remote;
+       p_data = &p_remote->params;
+       p_app = &p_hwfn->p_dcbx_info->remote.features.app;
+       p_tbl = p_app->app_pri_tbl;
+       p_ets = &p_hwfn->p_dcbx_info->remote.features.ets;
+       pfc = p_hwfn->p_dcbx_info->remote.features.pfc;
+
+       ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data);
+       p_remote->valid = true;
+
+       return ECORE_SUCCESS;
+}
+
+/* Fill params->operational from the cached operational MIB.
+ *
+ * Returns ECORE_INVAL (with valid=false) when DCBX negotiation is
+ * disabled; otherwise decodes the common feature blocks, the per-protocol
+ * priorities and the version/error flags, and marks the view valid.
+ */
+static enum _ecore_status_t
+ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_ptt *p_ptt,
+                                 struct ecore_dcbx_get *params)
+{
+       struct ecore_dcbx_operational_params *p_operational;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct dcbx_app_priority_feature *p_app;
+       struct dcbx_app_priority_entry *p_tbl;
+       struct ecore_dcbx_results *p_results;
+       struct ecore_dcbx_params *p_data;
+       struct dcbx_ets_feature *p_ets;
+       bool enabled, err;
+       u32 pfc, flags;
+
+       flags = p_hwfn->p_dcbx_info->operational.flags;
+
+       /* If DCBx version is non zero, then negotiation
+        * was successfully performed
+        */
+       p_operational = &params->operational;
+       enabled = ecore_dcbx_enabled(flags);
+       if (!enabled) {
+               p_operational->enabled = enabled;
+               p_operational->valid = false;
+               return ECORE_INVAL;
+       }
+
+       p_data = &p_operational->params;
+       p_results = &p_hwfn->p_dcbx_info->results;
+       p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+       p_tbl = p_app->app_pri_tbl;
+       p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+       pfc = p_hwfn->p_dcbx_info->operational.features.pfc;
+
+       p_operational->ieee = ecore_dcbx_ieee(flags);
+       p_operational->cee = ecore_dcbx_cee(flags);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+                  "Version support: ieee %d, cee %d\n",
+                  p_operational->ieee, p_operational->cee);
+
+       ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data);
+       ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
+                                    p_results);
+       err = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+       p_operational->err = err;
+       p_operational->enabled = enabled;
+       p_operational->valid = true;
+
+       return rc;
+}
+
+/* Copy the local LLDP chassis/port ids into the cached "get" view.
+ *
+ * NOTE(review): OSAL_ARRAY_SIZE yields an element count while
+ * OSAL_MEMCPY takes a byte count, so for u32 id arrays only a quarter
+ * of the data is copied — confirm against the struct definitions.
+ * Also the copy writes INTO p_dcbx_info->get FROM the caller-supplied
+ * params (which the only visible caller passes as &p_dcbx_info->get,
+ * a self-copy); verify the intended source/destination direction.
+ */
+static enum _ecore_status_t
+ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt,
+                                struct ecore_dcbx_get *params)
+{
+       struct ecore_dcbx_lldp_local *p_local;
+       osal_size_t size;
+       u32 *dest;
+
+       p_local = &params->lldp_local;
+
+       size = OSAL_ARRAY_SIZE(p_local->local_chassis_id);
+       dest = p_hwfn->p_dcbx_info->get.lldp_local.local_chassis_id;
+       OSAL_MEMCPY(dest, p_local->local_chassis_id, size);
+
+       size = OSAL_ARRAY_SIZE(p_local->local_port_id);
+       dest = p_hwfn->p_dcbx_info->get.lldp_local.local_port_id;
+       OSAL_MEMCPY(dest, p_local->local_port_id, size);
+
+       return ECORE_SUCCESS;
+}
+
+/* Copy the peer LLDP chassis/port ids into the cached "get" view.
+ *
+ * NOTE(review): as with the local variant, OSAL_ARRAY_SIZE (element
+ * count) is passed where OSAL_MEMCPY expects bytes, and the copy
+ * direction (params -> p_dcbx_info->get) should be verified against
+ * the intended data flow.
+ */
+static enum _ecore_status_t
+ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_ptt *p_ptt,
+                                 struct ecore_dcbx_get *params)
+{
+       struct ecore_dcbx_lldp_remote *p_remote;
+       osal_size_t size;
+       u32 *dest;
+
+       p_remote = &params->lldp_remote;
+
+       size = OSAL_ARRAY_SIZE(p_remote->peer_chassis_id);
+       dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_chassis_id;
+       OSAL_MEMCPY(dest, p_remote->peer_chassis_id, size);
+
+       size = OSAL_ARRAY_SIZE(p_remote->peer_port_id);
+       dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_port_id;
+       OSAL_MEMCPY(dest, p_remote->peer_port_id, size);
+
+       return ECORE_SUCCESS;
+}
+
+/* Dispatch a MIB type to the matching "get" decoder.
+ *
+ * NOTE(review): the return values of the remote/local/operational
+ * decoders are discarded (only the LLDP getters propagate rc), so e.g.
+ * ECORE_INVAL from the operational decoder is lost — confirm intended.
+ */
+static enum _ecore_status_t
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
+                     struct ecore_ptt *p_ptt, enum ecore_mib_read_type type)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_dcbx_get *p_params;
+
+       p_params = &p_hwfn->p_dcbx_info->get;
+
+       switch (type) {
+       case ECORE_DCBX_REMOTE_MIB:
+               ecore_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+               break;
+       case ECORE_DCBX_LOCAL_MIB:
+               ecore_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+               break;
+       case ECORE_DCBX_OPERATIONAL_MIB:
+               ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+               break;
+       case ECORE_DCBX_REMOTE_LLDP_MIB:
+               rc = ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+               break;
+       case ECORE_DCBX_LOCAL_LLDP_MIB:
+               rc = ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+               break;
+       default:
+               DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+               return ECORE_INVAL;
+       }
+
+       return rc;
+}
+
+/* Read the local LLDP config block from the port's shmem area.
+ * Unlike the other MIB reads, this copies directly without the
+ * sequence-number retry loop of ecore_dcbx_copy_mib().
+ */
+static enum _ecore_status_t
+ecore_dcbx_read_local_lldp_mib(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_dcbx_mib_meta_data data;
+
+       data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+                                                          lldp_config_params);
+       data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+       data.size = sizeof(struct lldp_config_params_s);
+       ecore_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+       return rc;
+}
+
+/* Read the remote LLDP status block from the port's shmem area via the
+ * sequence-number-checked copy helper.
+ */
+static enum _ecore_status_t
+ecore_dcbx_read_remote_lldp_mib(struct ecore_hwfn *p_hwfn,
+				struct ecore_ptt *p_ptt,
+				enum ecore_mib_read_type type)
+{
+	struct ecore_dcbx_mib_meta_data data;
+
+	data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+							   lldp_status_params);
+	data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+	data.size = sizeof(struct lldp_status_params_s);
+
+	return ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+}
+
+/* Read the operational DCBX MIB from the port's shmem area via the
+ * sequence-number-checked copy helper.
+ */
+static enum _ecore_status_t
+ecore_dcbx_read_operational_mib(struct ecore_hwfn *p_hwfn,
+				struct ecore_ptt *p_ptt,
+				enum ecore_mib_read_type type)
+{
+	struct ecore_dcbx_mib_meta_data data;
+
+	data.addr = p_hwfn->mcp_info->port_addr +
+	    offsetof(struct public_port, operational_dcbx_mib);
+	data.mib = &p_hwfn->p_dcbx_info->operational;
+	data.size = sizeof(struct dcbx_mib);
+
+	return ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+}
+
+/* Read the remote DCBX MIB from the port's shmem area via the
+ * sequence-number-checked copy helper.
+ */
+static enum _ecore_status_t
+ecore_dcbx_read_remote_mib(struct ecore_hwfn *p_hwfn,
+			   struct ecore_ptt *p_ptt,
+			   enum ecore_mib_read_type type)
+{
+	struct ecore_dcbx_mib_meta_data data;
+
+	data.addr = p_hwfn->mcp_info->port_addr +
+	    offsetof(struct public_port, remote_dcbx_mib);
+	data.mib = &p_hwfn->p_dcbx_info->remote;
+	data.size = sizeof(struct dcbx_mib);
+
+	return ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+}
+
+/* Read the local-admin DCBX parameters from the port's shmem area.
+ * This block has no prefix/suffix sequence numbers, so it is copied
+ * directly rather than through ecore_dcbx_copy_mib().
+ */
+static enum _ecore_status_t
+ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+	struct ecore_dcbx_mib_meta_data data;
+
+	data.addr = p_hwfn->mcp_info->port_addr +
+	    offsetof(struct public_port, local_admin_dcbx_mib);
+	data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+	data.size = sizeof(struct dcbx_local_params);
+	ecore_memcpy_from(p_hwfn, p_ptt, data.local_admin,
+			  data.addr, data.size);
+
+	return ECORE_SUCCESS;
+}
+
+/* Dispatch a MIB type to the matching shmem reader; unknown types are
+ * reported and rejected with ECORE_INVAL.
+ */
+static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_ptt *p_ptt,
+                                               enum ecore_mib_read_type type)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       switch (type) {
+       case ECORE_DCBX_OPERATIONAL_MIB:
+               rc = ecore_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+               break;
+       case ECORE_DCBX_REMOTE_MIB:
+               rc = ecore_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+               break;
+       case ECORE_DCBX_LOCAL_MIB:
+               rc = ecore_dcbx_read_local_mib(p_hwfn, p_ptt);
+               break;
+       case ECORE_DCBX_REMOTE_LLDP_MIB:
+               rc = ecore_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+               break;
+       case ECORE_DCBX_LOCAL_LLDP_MIB:
+               rc = ecore_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+               break;
+       default:
+               DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+               return ECORE_INVAL;
+       }
+
+       return rc;
+}
+
+/*
+ * Read updated MIB.
+ * Reconfigure QM and invoke PF update ramrod command if operational MIB
+ * change is detected.
+ */
+enum _ecore_status_t
+ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                           enum ecore_mib_read_type type)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
+       if (rc)
+               return rc;
+
+       if (type == ECORE_DCBX_OPERATIONAL_MIB) {
+               rc = ecore_dcbx_process_mib_info(p_hwfn);
+               if (!rc) {
+                       bool enabled;
+
+                       /* reconfigure tcs of QM queues according
+                        * to negotiation results
+                        */
+                       ecore_qm_reconf(p_hwfn, p_ptt);
+
+                       /* update storm FW with negotiation results */
+                       ecore_sp_pf_update(p_hwfn);
+
+                       /* set eagle enigne 1 flow control workaround
+                        * according to negotiation results
+                        */
+                       enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
+                       ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, enabled);
+
+#ifdef CONFIG_ECORE_ROCE
+                       /* for roce PFs, we may want to enable/disable DPM
+                        * when DCBx change occurs
+                        */
+                       if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE)
+                               ecore_roce_dpm_dcbx(p_hwfn, p_ptt);
+#endif
+               }
+       }
+       ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+       OSAL_DCBX_AEN(p_hwfn, type);
+
+       return rc;
+}
+
+/* Allocate the per-hwfn DCBX bookkeeping structure. */
+enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
+{
+       p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                         sizeof(struct ecore_dcbx_info));
+       if (!p_hwfn->p_dcbx_info) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_dcbx_info'");
+               return ECORE_NOMEM;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Release the DCBX structure allocated by ecore_dcbx_info_alloc().
+ * NOTE(review): the p_dcbx_info parameter is unused; the hwfn's own
+ * pointer is freed instead - confirm all callers pass
+ * p_hwfn->p_dcbx_info (as ecore_resc_free() does).
+ */
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn,
+                         struct ecore_dcbx_info *p_dcbx_info)
+{
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info);
+}
+
+/* Copy one protocol's DCB settings (enable flag, priority, traffic class)
+ * from the DCBX results array into a ramrod protocol-data entry.
+ */
+static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+                                           struct ecore_dcbx_results *p_src,
+                                           enum dcbx_protocol_type type)
+{
+       p_data->dcb_enable_flag = p_src->arr[type].enable;
+       p_data->dcb_priority = p_src->arr[type].priority;
+       p_data->dcb_tc = p_src->arr[type].tc;
+}
+
+/* Fill a PF-update ramrod with the per-protocol DCBX results:
+ * update indications plus the DCB data for RoCE, iSCSI and ETH.
+ */
+void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
+                                    struct pf_update_ramrod_data *p_dest)
+{
+       p_dest->pf_id = p_src->pf_id;
+
+       /* Per-protocol "needs update" indications */
+       p_dest->update_roce_dcb_data_flag =
+           p_src->arr[DCBX_PROTOCOL_ROCE].update;
+       p_dest->update_iscsi_dcb_data_flag =
+           p_src->arr[DCBX_PROTOCOL_ISCSI].update;
+       p_dest->update_eth_dcb_data_flag =
+           p_src->arr[DCBX_PROTOCOL_ETH].update;
+
+       /* Per-protocol DCB data (enable/priority/tc) */
+       ecore_dcbx_update_protocol_data(&p_dest->roce_dcb_data, p_src,
+                                       DCBX_PROTOCOL_ROCE);
+       ecore_dcbx_update_protocol_data(&p_dest->iscsi_dcb_data, p_src,
+                                       DCBX_PROTOCOL_ISCSI);
+       ecore_dcbx_update_protocol_data(&p_dest->eth_dcb_data, p_src,
+                                       DCBX_PROTOCOL_ETH);
+}
+
+/* Read the requested MIB type and refresh the cached DCBX parameters.
+ * Acquires a PTT window for the duration of the shmem access.
+ */
+static
+enum _ecore_status_t ecore_dcbx_query(struct ecore_hwfn *p_hwfn,
+                                     enum ecore_mib_read_type type)
+{
+       struct ecore_ptt *p_ptt;
+       enum _ecore_status_t rc;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt) {
+               /* No free PTT entry available - reported as a timeout */
+               rc = ECORE_TIMEOUT;
+               DP_ERR(p_hwfn, "rc = %d\n", rc);
+               return rc;
+       }
+
+       rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
+       if (rc != ECORE_SUCCESS)
+               goto out;
+
+       rc = ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+
+out:
+       ecore_ptt_release(p_hwfn, p_ptt);
+       return rc;
+}
+
+/* Refresh the cached DCBX parameters for 'type' and optionally copy them
+ * out to the caller-provided buffer.
+ */
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_dcbx_get *p_get,
+                                            enum ecore_mib_read_type type)
+{
+       enum _ecore_status_t rc;
+
+       rc = ecore_dcbx_query(p_hwfn, type);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* p_get may be OSAL_NULL when only the cached copy is wanted */
+       if (p_get != OSAL_NULL)
+               OSAL_MEMCPY(p_get, &p_hwfn->p_dcbx_info->get,
+                           sizeof(*p_get));
+
+       return rc;
+}
diff --git a/drivers/net/qede/ecore/ecore_dcbx.h 
b/drivers/net/qede/ecore/ecore_dcbx.h
new file mode 100644
index 0000000..d577f4e
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_dcbx.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_DCBX_H__
+#define __ECORE_DCBX_H__
+
+#include "ecore.h"
+#include "ecore_mcp.h"
+#include "mcp_public.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_hsi_common.h"
+#include "ecore_dcbx_api.h"
+
+#define ECORE_MFW_GET_FIELD(name, field) \
+       (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+/* Per-hwfn DCBX state: shadow copies of the MFW shmem MIBs plus the
+ * processed results and the user-facing get/set caches.
+ */
+struct ecore_dcbx_info {
+       struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+       struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+       /* filled by ecore_dcbx_read_local_mib() */
+       struct dcbx_local_params local_admin;
+       /* per-protocol results consumed by ecore_dcbx_set_pf_update_params() */
+       struct ecore_dcbx_results results;
+       struct dcbx_mib operational;
+       /* filled by ecore_dcbx_read_remote_mib() */
+       struct dcbx_mib remote;
+       struct ecore_dcbx_set set;
+       /* copied out by ecore_dcbx_query_params() */
+       struct ecore_dcbx_get get;
+       u8 dcbx_cap;
+};
+
+/* Upper layer driver interface routines */
+enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
+                                             struct ecore_ptt *,
+                                             struct ecore_dcbx_set *);
+
+/* ECORE local interface routines */
+enum _ecore_status_t
+ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
+                           enum ecore_mib_read_type);
+
+enum _ecore_status_t ecore_dcbx_read_lldp_params(struct ecore_hwfn *,
+                                                struct ecore_ptt *);
+enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
+void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
+void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
+                                    struct pf_update_ramrod_data *p_dest);
+/* @@@TBD eagle phy workaround */
+void ecore_dcbx_eagle_workaround(struct ecore_hwfn *, struct ecore_ptt *,
+                                bool set_to_pfc);
+
+#endif /* __ECORE_DCBX_H__ */
diff --git a/drivers/net/qede/ecore/ecore_dcbx_api.h 
b/drivers/net/qede/ecore/ecore_dcbx_api.h
new file mode 100644
index 0000000..b3da69a
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_dcbx_api.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_DCBX_API_H__
+#define __ECORE_DCBX_API_H__
+
+#include "ecore.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL   4
+
+enum ecore_mib_read_type {
+       ECORE_DCBX_OPERATIONAL_MIB,
+       ECORE_DCBX_REMOTE_MIB,
+       ECORE_DCBX_LOCAL_MIB,
+       ECORE_DCBX_REMOTE_LLDP_MIB,
+       ECORE_DCBX_LOCAL_LLDP_MIB
+};
+
+struct ecore_dcbx_app_data {
+       bool enable;            /* DCB enabled */
+       bool update;            /* Update indication */
+       u8 priority;            /* Priority */
+       u8 tc;                  /* Traffic Class */
+};
+
+#ifndef __EXTRACT__LINUX__
+enum dcbx_protocol_type {
+       DCBX_PROTOCOL_ISCSI,
+       DCBX_PROTOCOL_ROCE,
+       DCBX_PROTOCOL_ETH,
+       DCBX_MAX_PROTOCOL_TYPE
+};
+
+#ifdef LINUX_REMOVE
+/* We can't assume the HSI values are available to clients, so we need
+ * to redefine those here.
+ */
+#ifndef LLDP_CHASSIS_ID_STAT_LEN
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#endif
+#ifndef LLDP_PORT_ID_STAT_LEN
+#define LLDP_PORT_ID_STAT_LEN 4
+#endif
+#ifndef DCBX_MAX_APP_PROTOCOL
+#define DCBX_MAX_APP_PROTOCOL 32
+#endif
+
+#endif
+
+struct ecore_dcbx_lldp_remote {
+       u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+       u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+       bool enable_rx;
+       bool enable_tx;
+       u32 tx_interval;
+       u32 max_credit;
+};
+
+struct ecore_dcbx_lldp_local {
+       u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+       u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct ecore_dcbx_app_prio {
+       u8 roce;
+       u8 iscsi;
+       u8 eth;
+};
+
+struct ecore_dcbx_params {
+       u32 app_bitmap[DCBX_MAX_APP_PROTOCOL];
+       u16 num_app_entries;
+       bool app_willing;
+       bool app_valid;
+       bool ets_willing;
+       bool ets_enabled;
+       bool valid;             /* Indicate validity of params */
+       u32 ets_pri_tc_tbl[1];
+       u32 ets_tc_bw_tbl[2];
+       u32 ets_tc_tsa_tbl[2];
+       bool pfc_willing;
+       bool pfc_enabled;
+       u32 pfc_bitmap;
+       u8 max_pfc_tc;
+       u8 max_ets_tc;
+};
+
+struct ecore_dcbx_admin_params {
+       struct ecore_dcbx_params params;
+       bool valid;             /* Indicate validity of params */
+};
+
+struct ecore_dcbx_remote_params {
+       struct ecore_dcbx_params params;
+       bool valid;             /* Indicate validity of params */
+};
+
+struct ecore_dcbx_operational_params {
+       struct ecore_dcbx_app_prio app_prio;
+       struct ecore_dcbx_params params;
+       bool valid;             /* Indicate validity of params */
+       bool enabled;
+       bool ieee;
+       bool cee;
+       u32 err;
+};
+
+struct ecore_dcbx_get {
+       struct ecore_dcbx_operational_params operational;
+       struct ecore_dcbx_lldp_remote lldp_remote;
+       struct ecore_dcbx_lldp_local lldp_local;
+       struct ecore_dcbx_remote_params remote;
+       struct ecore_dcbx_admin_params local;
+};
+#endif
+
+struct ecore_dcbx_set {
+       struct ecore_dcbx_admin_params config;
+       bool enabled;
+       u32 ver_num;
+};
+
+struct ecore_dcbx_results {
+       bool dcbx_enabled;
+       u8 pf_id;
+       struct ecore_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct ecore_dcbx_app_metadata {
+       enum dcbx_protocol_type id;
+       const char *name;       /* @DPDK */
+       enum ecore_pci_personality personality;
+};
+
+struct ecore_dcbx_mib_meta_data {
+       struct lldp_config_params_s *lldp_local;
+       struct lldp_status_params_s *lldp_remote;
+       struct dcbx_local_params *local_admin;
+       struct dcbx_mib *mib;
+       osal_size_t size;
+       u32 addr;
+};
+
+void
+ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
+                     struct ecore_hw_info *p_info,
+                     bool enable, bool update, u8 prio, u8 tc,
+                     enum dcbx_protocol_type type,
+                     enum ecore_pci_personality personality);
+
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
+                                            struct ecore_dcbx_get *,
+                                            enum ecore_mib_read_type);
+
+static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
+       {DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
+       {DCBX_PROTOCOL_ROCE, "ROCE", ECORE_PCI_ETH_ROCE},
+       {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH}
+};
+
+#endif /* __ECORE_DCBX_API_H__ */
diff --git a/drivers/net/qede/ecore/ecore_dev.c 
b/drivers/net/qede/ecore/ecore_dev.c
new file mode 100644
index 0000000..94502b0
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_dev.c
@@ -0,0 +1,3907 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore.h"
+#include "ecore_chain.h"
+#include "ecore_status.h"
+#include "ecore_hw.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_ops.h"
+#include "ecore_int.h"
+#include "ecore_cxt.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sp_commands.h"
+#include "ecore_dev_api.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+#include "ecore_mcp.h"
+#include "ecore_hw_defs.h"
+#include "mcp_public.h"
+#include "ecore_iro.h"
+#include "nvm_cfg.h"
+#include "ecore_dev_api.h"
+#include "ecore_attn_values.h"
+#include "ecore_dcbx.h"
+
+/* Configurable */
+#define ECORE_MIN_DPIS         (4)     /* The minimal number of DPIs required
+                                        * to load the driver. The number was
+                                        * arbitrarily set.
+                                        */
+
+/* Derived */
+#define ECORE_MIN_PWM_REGION   ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
+
+enum BAR_ID {
+       BAR_ID_0,               /* used for GRC */
+       BAR_ID_1                /* Used for doorbells */
+};
+
+/* Return the size in bytes of the given PF BAR.
+ * Reads the MFW-updated BAR size register; a value of 0 means the MFW
+ * (pre-8.7.7.0) did not configure it, so a default size is assumed.
+ */
+static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
+{
+       u32 bar_reg = (bar_id == BAR_ID_0 ?
+                      PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+       u32 val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+
+       /* The above registers were updated in the past only in CMT mode. Since
+        * they were found to be useful MFW started updating them from 8.7.7.0.
+        * In older MFW versions they are set to 0 which means disabled.
+        */
+       if (!val) {
+               if (p_hwfn->p_dev->num_hwfns > 1) {
+                       DP_NOTICE(p_hwfn, false,
+                                 "BAR size not configured. Assuming BAR"
+                                 " size of 256kB for GRC and 512kB for DB\n");
+                       /* Fix: the original tested the enum constant BAR_ID_0
+                        * (value 0) instead of the bar_id argument, so the
+                        * 256kB GRC default was never returned.
+                        */
+                       return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
+               } else {
+                       DP_NOTICE(p_hwfn, false,
+                                 "BAR size not configured. Assuming BAR"
+                                 " size of 512kB for GRC and 512kB for DB\n");
+                       return 512 * 1024;
+               }
+       }
+
+       /* Register encodes log2(size) - 15 */
+       return 1 << (val + 15);
+}
+
+/* Record the debug-print settings on the device and mirror them onto
+ * every hw-function.
+ */
+void ecore_init_dp(struct ecore_dev *p_dev,
+                  u32 dp_module, u8 dp_level, void *dp_ctx)
+{
+       struct ecore_hwfn *p_hwfn;
+       u32 i;
+
+       p_dev->dp_level = dp_level;
+       p_dev->dp_module = dp_module;
+       p_dev->dp_ctx = dp_ctx;
+
+       for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+               p_hwfn = &p_dev->hwfns[i];
+               p_hwfn->dp_level = dp_level;
+               p_hwfn->dp_module = dp_module;
+               p_hwfn->dp_ctx = dp_ctx;
+       }
+}
+
+/* Basic per-hwfn initialization: back-pointer, id, active flag and the
+ * DMAE mutex. hwfn 0 is always marked active.
+ */
+void ecore_init_struct(struct ecore_dev *p_dev)
+{
+       struct ecore_hwfn *p_hwfn;
+       u8 i;
+
+       for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+               p_hwfn = &p_dev->hwfns[i];
+
+               p_hwfn->p_dev = p_dev;
+               p_hwfn->my_id = i;
+               p_hwfn->b_active = false;
+
+               OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
+               OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
+       }
+
+       /* hwfn 0 is always active */
+       p_dev->hwfns[0].b_active = true;
+
+       /* set the default cache alignment to 128 (may be overridden later) */
+       p_dev->cache_shift = 7;
+}
+
+/* Free the QM parameter arrays and NULL the pointers so a subsequent
+ * ecore_init_qm_info() (e.g. from ecore_qm_reconf()) can re-allocate them.
+ */
+static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+       OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
+       qm_info->qm_pq_params = OSAL_NULL;
+       OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
+       qm_info->qm_vport_params = OSAL_NULL;
+       OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
+       qm_info->qm_port_params = OSAL_NULL;
+       OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
+       qm_info->wfq_data = OSAL_NULL;
+}
+
+/* Free all PF resources allocated by ecore_resc_alloc().
+ * No-op for VFs, whose resources are owned by the PF.
+ */
+void ecore_resc_free(struct ecore_dev *p_dev)
+{
+       int i;
+
+       if (IS_VF(p_dev))
+               return;
+
+       OSAL_FREE(p_dev, p_dev->fw_data);
+       p_dev->fw_data = OSAL_NULL;
+
+       /* NOTE(review): reset_stats is freed but not NULLed, unlike fw_data
+        * above - confirm no later code tests this pointer.
+        */
+       OSAL_FREE(p_dev, p_dev->reset_stats);
+
+       /* Queue->CID maps */
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
+               p_hwfn->p_tx_cids = OSAL_NULL;
+               OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
+               p_hwfn->p_rx_cids = OSAL_NULL;
+       }
+
+       /* Per-hwfn subsystems */
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               ecore_cxt_mngr_free(p_hwfn);
+               ecore_qm_info_free(p_hwfn);
+               ecore_spq_free(p_hwfn);
+               ecore_eq_free(p_hwfn, p_hwfn->p_eq);
+               ecore_consq_free(p_hwfn, p_hwfn->p_consq);
+               ecore_int_free(p_hwfn);
+#ifdef CONFIG_ECORE_LL2
+               ecore_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+#endif
+#ifdef CONFIG_ECORE_ISCSI
+               if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
+                       ecore_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
+                       ecore_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
+               }
+#endif
+               ecore_iov_free(p_hwfn);
+               ecore_dmae_info_free(p_hwfn);
+               ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+               /* @@@TBD Flush work-queue ? */
+       }
+}
+
+/* Build the per-PF QM configuration (p_hwfn->qm_info).
+ * PQ array layout: per-TC PQs, then the pure-LB PQ, then special PQs
+ * (RoCE offload, or iSCSI pure-ACK + OOO), then per-VF PQs.
+ * Also fills the vport, port and WFQ parameter arrays.
+ *
+ * @param b_sleepable true when allocation may sleep (GFP_KERNEL); false for
+ *                    atomic context (GFP_ATOMIC), e.g. ecore_qm_reconf().
+ * @return ECORE_SUCCESS; ECORE_INVAL when more PQs are needed than are
+ *         available; ECORE_NOMEM on allocation failure.
+ */
+static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
+                                              bool b_sleepable)
+{
+       u8 num_vports, vf_offset = 0, i, vport_id, num_ports;
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+       struct init_qm_port_params *p_qm_port;
+       u16 num_pqs, multi_cos_tcs = 1;
+#ifdef CONFIG_ECORE_SRIOV
+       u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
+#else
+       u16 num_vfs = 0;
+#endif
+
+       OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));
+
+#ifndef ASIC_ONLY
+       /* @TMP - Don't allocate QM queues for VFs on emulation */
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false,
+                         "Emulation - skip configuring QM queues for VFs\n");
+               num_vfs = 0;
+       }
+#endif
+
+       num_pqs = multi_cos_tcs + num_vfs + 1;  /* The '1' is for pure-LB */
+       num_vports = (u8) RESC_NUM(p_hwfn, ECORE_VPORT);
+
+       if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE)
+               num_pqs++;      /* for RoCE queue */
+       if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI)
+               num_pqs += 2;   /* for iSCSI pure-ACK / OOO queue */
+
+       /* Sanity checking that setup requires legal number of resources */
+       if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) {
+               DP_ERR(p_hwfn,
+                      "Need too many Physical queues - 0x%04x when"
+                       " only 0x%04x are available\n",
+                      num_pqs, RESC_NUM(p_hwfn, ECORE_PQ));
+               return ECORE_INVAL;
+       }
+
+       /* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue,
+        * then special queues (iSCSI pure-ACK / RoCE), then per-VF PQ.
+        */
+       qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev,
+                                           b_sleepable ? GFP_KERNEL :
+                                           GFP_ATOMIC,
+                                           sizeof(struct init_qm_pq_params) *
+                                           num_pqs);
+       if (!qm_info->qm_pq_params)
+               goto alloc_err;
+
+       qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev,
+                                              b_sleepable ? GFP_KERNEL :
+                                              GFP_ATOMIC,
+                                              sizeof(struct
+                                                     init_qm_vport_params) *
+                                              num_vports);
+       if (!qm_info->qm_vport_params)
+               goto alloc_err;
+
+       qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev,
+                                             b_sleepable ? GFP_KERNEL :
+                                             GFP_ATOMIC,
+                                             sizeof(struct init_qm_port_params)
+                                             * MAX_NUM_PORTS);
+       if (!qm_info->qm_port_params)
+               goto alloc_err;
+
+       qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev,
+                                       b_sleepable ? GFP_KERNEL :
+                                       GFP_ATOMIC,
+                                       sizeof(struct ecore_wfq_data) *
+                                       num_vports);
+
+       if (!qm_info->wfq_data)
+               goto alloc_err;
+
+       vport_id = (u8) RESC_START(p_hwfn, ECORE_VPORT);
+
+       /* First init per-TC PQs; L2 personalities use the non-offload TC,
+        * storage/offload personalities the offload TC.
+        */
+       for (i = 0; i < multi_cos_tcs; i++) {
+               struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+
+               params->vport_id = vport_id;
+               params->wrr_group = 1;  /* @@@TBD ECORE_WRR_MEDIUM */
+               if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE ||
+                   p_hwfn->hw_info.personality == ECORE_PCI_ETH)
+                       params->tc_id = p_hwfn->hw_info.non_offload_tc;
+               else
+                       params->tc_id = p_hwfn->hw_info.offload_tc;
+       }
+
+       /* Then init pure-LB PQ */
+       qm_info->pure_lb_pq = i;
+       qm_info->qm_pq_params[i].vport_id =
+           (u8) RESC_START(p_hwfn, ECORE_VPORT);
+       qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
+       qm_info->qm_pq_params[i].wrr_group = 1;
+       i++;
+
+       /* Special PQs: RoCE offload, or iSCSI pure-ACK + OOO */
+       qm_info->offload_pq = 0;
+       if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
+               qm_info->offload_pq = i;
+               qm_info->qm_pq_params[i].vport_id = vport_id;
+               qm_info->qm_pq_params[i].tc_id = p_hwfn->hw_info.offload_tc;
+               qm_info->qm_pq_params[i].wrr_group = 1;
+               i++;
+       } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
+               qm_info->pure_ack_pq = i;
+               qm_info->qm_pq_params[i].vport_id = vport_id;
+               qm_info->qm_pq_params[i].tc_id = p_hwfn->hw_info.offload_tc;
+               qm_info->qm_pq_params[i].wrr_group = 1;
+               i++;
+               qm_info->ooo_pq = i;
+               qm_info->qm_pq_params[i].vport_id = vport_id;
+               qm_info->qm_pq_params[i].tc_id = DCBX_ISCSI_OOO_TC;
+               qm_info->qm_pq_params[i].wrr_group = 1;
+               i++;
+       }
+
+       /* Then init per-VF PQs */
+       vf_offset = i;
+       for (i = 0; i < num_vfs; i++) {
+               /* First vport is used by the PF */
+               qm_info->qm_pq_params[vf_offset + i].vport_id = vport_id +
+                   i + 1;
+               qm_info->qm_pq_params[vf_offset + i].tc_id =
+                   p_hwfn->hw_info.non_offload_tc;
+               qm_info->qm_pq_params[vf_offset + i].wrr_group = 1;
+       }
+
+       qm_info->vf_queues_offset = vf_offset;
+       qm_info->num_pqs = num_pqs;
+       qm_info->num_vports = num_vports;
+
+       /* Initialize qm port parameters */
+       num_ports = p_hwfn->p_dev->num_ports_in_engines;
+       for (i = 0; i < num_ports; i++) {
+               p_qm_port = &qm_info->qm_port_params[i];
+               p_qm_port->active = 1;
+               /* @@@TMP - was NUM_OF_PHYS_TCS; Changed until dcbx will
+                * be in place
+                */
+               if (num_ports == 4)
+                       p_qm_port->num_active_phys_tcs = 2;
+               else
+                       p_qm_port->num_active_phys_tcs = 5;
+               p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+               p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
+       }
+
+       if (ECORE_IS_AH(p_hwfn->p_dev) && (num_ports == 4))
+               qm_info->max_phys_tcs_per_port = NUM_PHYS_TCS_4PORT_K2;
+       else
+               qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+
+       qm_info->start_pq = (u16) RESC_START(p_hwfn, ECORE_PQ);
+
+       qm_info->num_vf_pqs = num_vfs;
+       qm_info->start_vport = (u8) RESC_START(p_hwfn, ECORE_VPORT);
+
+       for (i = 0; i < qm_info->num_vports; i++)
+               qm_info->qm_vport_params[i].vport_wfq = 1;
+
+       qm_info->pf_wfq = 0;
+       qm_info->pf_rl = 0;
+       qm_info->vport_rl_en = 1;
+       qm_info->vport_wfq_en = 1;
+
+       return ECORE_SUCCESS;
+
+alloc_err:
+       DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
+       ecore_qm_info_free(p_hwfn);
+       return ECORE_NOMEM;
+}
+
+/*
+static void ecore_dp_qm_pf_info(struct ecore_hwfn *p_hwfn)
+{
+       int i;
+       struct qm_pq_params *pq;
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+       for (i = 0; i < qm_info->num_pqs; i++) {
+               pq = &(qm_info->qm_pq_params[qm_info->start_pq + i]);
+               DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "pq num %d, vport_id %d,"
+                          " tc %d, wrr_grp %d\n", qm_info->start_pq + i,
+                          pq->vport_id, pq->tc, pq->wrr_group);
+       }
+}
+*/
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate init tool in QM_PF stage
+ * 5. send an sdm_qm_cmd through rbc interface to release the QM
+ */
+enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt)
+{
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+       enum _ecore_status_t rc;
+       bool b_rc;
+
+       /* qm_info is allocated in ecore_init_qm_info() which is already called
+        * from ecore_resc_alloc() or previous call of ecore_qm_reconf().
+        * The allocated size may change each init, so we free it before next
+        * allocation.
+        */
+       ecore_qm_info_free(p_hwfn);
+
+       /* initialize ecore's qm data structure */
+       rc = ecore_init_qm_info(p_hwfn, false);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* stop PF's qm queues */
+       b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+                                     qm_info->start_pq, qm_info->num_pqs);
+       if (!b_rc)
+               return ECORE_INVAL;
+
+       /* clear the QM_PF runtime phase leftovers from previous init */
+       ecore_init_clear_rt_data(p_hwfn);
+
+       /* prepare QM portion of runtime array */
+       ecore_qm_init_pf(p_hwfn);
+
+       /* activate init tool on runtime array */
+       rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+                           p_hwfn->hw_info.hw_mode);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* start PF's qm queues */
+       b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+                                     qm_info->start_pq, qm_info->num_pqs);
+       /* Fix: test b_rc (the start command's result). The original tested
+        * 'rc', which is always ECORE_SUCCESS at this point, so the function
+        * unconditionally returned ECORE_INVAL and the real command result
+        * was ignored.
+        */
+       if (!b_rc)
+               return ECORE_INVAL;
+
+       return ECORE_SUCCESS;
+}
+
+#ifdef CONFIG_ECORE_ROCE
+/* Derive this PF's RoCE sizing from the requested minimum QP count,
+ * clamped to [ECORE_ROCE_MIN_QPS, ECORE_ROCE_MAX_QPS], and fill the
+ * corresponding roce_pf_params (two CQs/connections per QP).
+ */
+enum _ecore_status_t ecore_set_roce(struct ecore_hwfn *p_hwfn)
+{
+       u32 n_qps;
+
+       /* clamp requested QPs to the supported range */
+       n_qps = p_hwfn->pf_params.roce_pf_params.min_qps;
+       n_qps = OSAL_MIN_T(u32, ECORE_ROCE_MAX_QPS, n_qps);
+       n_qps = OSAL_MAX_T(u32, ECORE_ROCE_MIN_QPS, n_qps);
+
+       p_hwfn->n_roce_qps = n_qps;
+       p_hwfn->n_roce_cqs = n_qps * 2;
+       p_hwfn->pf_params.roce_pf_params.num_cons = n_qps * 2;
+       p_hwfn->pf_params.roce_pf_params.num_tasks = ECORE_ROCE_MAX_MR;
+       p_hwfn->pf_params.roce_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX;
+
+       return ECORE_SUCCESS;
+}
+#endif
+
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_consq *p_consq;
+       struct ecore_eq *p_eq;
+#ifdef CONFIG_ECORE_LL2
+       struct ecore_ll2_info *p_ll2_info;
+#endif
+#ifdef CONFIG_ECORE_ISCSI
+       struct ecore_iscsi_info *p_iscsi_info;
+       struct ecore_ooo_info *p_ooo_info;
+#endif
+#ifdef CONFIG_ECORE_FCOE
+       struct ecore_fcoe_info *p_fcoe_info;
+#endif
+       int i;
+
+       if (IS_VF(p_dev))
+               return rc;
+
+       p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+                                    sizeof(struct ecore_fw_data));
+       if (!p_dev->fw_data)
+               return ECORE_NOMEM;
+
+       /* Allocate Memory for the Queue->CID mapping */
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               /* @@@TMP - resc management, change to actual required size */
+               int tx_size = sizeof(struct ecore_hw_cid_data) *
+                   RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+               int rx_size = sizeof(struct ecore_hw_cid_data) *
+                   RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+
+               p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                               tx_size);
+               if (!p_hwfn->p_tx_cids) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to allocate memory for Tx Cids\n");
+                       goto alloc_no_mem;
+               }
+
+               p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                               rx_size);
+               if (!p_hwfn->p_rx_cids) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to allocate memory for Rx Cids\n");
+                       goto alloc_no_mem;
+               }
+       }
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               /* First allocate the context manager structure */
+               rc = ecore_cxt_mngr_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+#ifdef CONFIG_ECORE_ROCE
+               rc = ecore_set_roce(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+#endif
+
+               /* Set the HW cid/tid numbers (in the contest manager)
+                * Must be done prior to any further computations.
+                */
+               rc = ecore_cxt_set_pf_params(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* Prepare and process QM requirements */
+               rc = ecore_init_qm_info(p_hwfn, true);
+               if (rc)
+                       goto alloc_err;
+
+               /* Compute the ILT client partition */
+               rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* CID map / ILT shadow table / T2
+                * The table sizes are determined by the computations above
+                */
+               rc = ecore_cxt_tables_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* SPQ, must follow ILT because it initializes SPQ context */
+               rc = ecore_spq_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* SP status block allocation */
+               p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
+                                                          RESERVED_PTT_DPC);
+
+               rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+               if (rc)
+                       goto alloc_err;
+
+               rc = ecore_iov_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
+               /* EQ */
+#ifdef CONFIG_ECORE_ROCE
+               if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
+                       u32 n_eqes;
+
+                       /* Calculate the EQ size
+                        * ---------------------
+                        * Each ICID may generate up to one event at a time i.e.
+                        * the event must be handled/cleared before a new one
+                        * can be generated. We calculate the sum of events per
+                        * protocol and create an EQ deep enough to handle the
+                        * worst case:
+                        * - Core - according to SPQ.
+                        * - RoCE - per QP there are a couple of ICIDs, one
+                        *          responder and one requester, each can
+                        *          generate an EQE => n_eqes_qp = 2 * n_qp.
+                        *          Each CQ can generate an EQE. There are 2 CQs
+                        *          per QP => n_eqes_cq = 2 * n_qp.
+                        *          Hence the RoCE total is 4 * n_qp.
+                        * - ENet - There can be up to two events per VF. One
+                        *          for VF-PF channel and another for VF FLR
+                        *          initial cleanup. The number of VFs is
+                        *          bounded by MAX_NUM_VFS_BB, and is much
+                        *          smaller than RoCE's so we avoid exact
+                        *          calculation.
+                        */
+                       n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain)
+                           + 4 * p_hwfn->n_roce_qps + 2 * MAX_NUM_VFS_BB;
+                       if (n_eqes > 0xFFFF) {
+                               DP_ERR(p_hwfn,
+                                      "Cannot allocate 0x%0x EQ elements."
+                                       "The maximum of a u16 chain is 0x%0x\n",
+                                      n_eqes, 0xFFFF);
+                               goto alloc_err;
+                       }
+                       p_eq = ecore_eq_alloc(p_hwfn, (u16) n_eqes);
+               } else {
+#endif
+                       p_eq = ecore_eq_alloc(p_hwfn, 256);
+#ifdef CONFIG_ECORE_ROCE
+               }
+#endif
+
+               if (!p_eq)
+                       goto alloc_no_mem;
+               p_hwfn->p_eq = p_eq;
+
+               p_consq = ecore_consq_alloc(p_hwfn);
+               if (!p_consq)
+                       goto alloc_no_mem;
+               p_hwfn->p_consq = p_consq;
+
+#ifdef CONFIG_ECORE_LL2
+               if (p_hwfn->using_ll2) {
+                       p_ll2_info = ecore_ll2_alloc(p_hwfn);
+                       if (!p_ll2_info)
+                               goto alloc_no_mem;
+                       p_hwfn->p_ll2_info = p_ll2_info;
+               }
+#endif
+
+               /* DMA info initialization */
+               rc = ecore_dmae_info_alloc(p_hwfn);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to allocate memory for"
+                                 " dmae_info structure\n");
+                       goto alloc_err;
+               }
+
+               /* DCBX initialization */
+               rc = ecore_dcbx_info_alloc(p_hwfn);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to allocate memory for dcbxstruct\n");
+                       goto alloc_err;
+               }
+       }
+
+       p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+                                        sizeof(struct ecore_eth_stats));
+       if (!p_dev->reset_stats) {
+               DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
+               goto alloc_no_mem;
+       }
+
+       return ECORE_SUCCESS;
+
+alloc_no_mem:
+       rc = ECORE_NOMEM;
+alloc_err:
+       ecore_resc_free(p_dev);
+       return rc;
+}
+
+/* Second-stage setup of the resources allocated by the resc-alloc path:
+ * context manager, SPQ, EQ/ConsQ, interrupts and IOV state of every
+ * hw-function. PF-only - VFs own no PF-level resources to set up.
+ * The call order below is significant (e.g. SPQ before EQ).
+ */
+void ecore_resc_setup(struct ecore_dev *p_dev)
+{
+       int i;
+
+       if (IS_VF(p_dev))
+               return;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               ecore_cxt_mngr_setup(p_hwfn);
+               ecore_spq_setup(p_hwfn);
+               ecore_eq_setup(p_hwfn, p_hwfn->p_eq);
+               ecore_consq_setup(p_hwfn, p_hwfn->p_consq);
+
+               /* Read shadow of current MFW mailbox */
+               ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+               OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
+                           p_hwfn->mcp_info->mfw_mb_cur,
+                           p_hwfn->mcp_info->mfw_mb_length);
+
+               ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+               ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+#ifdef CONFIG_ECORE_LL2
+               /* Light-L2 state is only set up for hwfns that use LL2 */
+               if (p_hwfn->using_ll2)
+                       ecore_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+#endif
+       }
+}
+
+#define FINAL_CLEANUP_POLL_CNT (100)
+#define FINAL_CLEANUP_POLL_TIME        (10)
+/* Request a FW "final cleanup" for the given function and poll for the
+ * completion ack the FW posts in USDM RAM.
+ *
+ * @param p_hwfn
+ * @param p_ptt  - PTT window used for the command register write
+ * @param id     - PF id, or relative VF id when @is_vf is set
+ * @param is_vf  - true iff @id designates a VF (VF ids are offset by 0x10)
+ *
+ * @return ECORE_SUCCESS once the ack is seen, ECORE_TIMEOUT otherwise.
+ */
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        u16 id, bool is_vf)
+{
+       u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+       enum _ecore_status_t rc = ECORE_TIMEOUT;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
+           CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
+               return ECORE_SUCCESS;
+       }
+#endif
+
+       /* Ack location in USDM RAM for this PF */
+       addr = GTT_BAR0_MAP_REG_USDM_RAM +
+           USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+
+       if (is_vf)
+               id += 0x10;
+
+       /* Build the SDM aggregated-interrupt completion command */
+       command |= X_FINAL_CLEANUP_AGG_INT <<
+           SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+       command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+       command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+       command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+       /* Make sure notification is not set before initiating final cleanup */
+       if (REG_RD(p_hwfn, addr)) {
+               DP_NOTICE(p_hwfn, false,
+                         "Unexpected; Found final cleanup notification "
+                         "before initiating final cleanup\n");
+               REG_WR(p_hwfn, addr, 0);
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "Sending final cleanup for PFVF[%d] [Command %08x]\n",
+                  id, OSAL_CPU_TO_LE32(command));
+
+       ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN,
+                OSAL_CPU_TO_LE32(command));
+
+       /* Poll until completion */
+       while (!REG_RD(p_hwfn, addr) && count--)
+               OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);
+
+       if (REG_RD(p_hwfn, addr))
+               rc = ECORE_SUCCESS;
+       else
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to receive FW final cleanup notification\n");
+
+       /* Cleanup afterwards */
+       REG_WR(p_hwfn, addr, 0);
+
+       return rc;
+}
+
+/* Compute the per-function hw_mode bit mask consumed by the init tool:
+ * chip revision, ports-per-engine, MF mode, ASIC/FPGA/emulation and 100G.
+ * On an unrecognized chip type or port count the mode is left unset and a
+ * notice is printed.
+ */
+static void ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
+{
+       int hw_mode = 0;
+
+       switch (ECORE_GET_TYPE(p_hwfn->p_dev)) {
+       case CHIP_BB_A0:
+               hw_mode |= 1 << MODE_BB_A0;
+               break;
+       case CHIP_BB_B0:
+               hw_mode |= 1 << MODE_BB_B0;
+               break;
+       case CHIP_K2:
+               hw_mode |= 1 << MODE_K2;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Can't initialize chip ID %d\n",
+                         ECORE_GET_TYPE(p_hwfn->p_dev));
+               return;
+       }
+
+       /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
+       switch (p_hwfn->p_dev->num_ports_in_engines) {
+       case 1:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+               break;
+       case 2:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+               break;
+       case 4:
+               hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true,
+                         "num_ports_in_engine = %d not supported\n",
+                         p_hwfn->p_dev->num_ports_in_engines);
+               return;
+       }
+
+       /* Multi-function mode: DEFAULT/NPAR map to switch-independent (SI),
+        * OVLAN maps to switch-dependent (SD); anything else falls back to SI.
+        */
+       switch (p_hwfn->p_dev->mf_mode) {
+       case ECORE_MF_DEFAULT:
+       case ECORE_MF_NPAR:
+               hw_mode |= 1 << MODE_MF_SI;
+               break;
+       case ECORE_MF_OVLAN:
+               hw_mode |= 1 << MODE_MF_SD;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true,
+                         "Unsupported MF mode, init as DEFAULT\n");
+               hw_mode |= 1 << MODE_MF_SI;
+       }
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+                       hw_mode |= 1 << MODE_FPGA;
+               } else {
+                       if (p_hwfn->p_dev->b_is_emul_full)
+                               hw_mode |= 1 << MODE_EMUL_FULL;
+                       else
+                               hw_mode |= 1 << MODE_EMUL_REDUCED;
+               }
+       } else
+#endif
+               hw_mode |= 1 << MODE_ASIC;
+
+       if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
+               hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND;
+
+       /* More than one hwfn on the device is treated as 100G (CMT) mode */
+       if (p_hwfn->p_dev->num_hwfns > 1)
+               hw_mode |= 1 << MODE_100G;
+
+       p_hwfn->hw_info.hw_mode = hw_mode;
+
+       DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
+                  "Configuring function for hw_mode: 0x%08x\n",
+                  p_hwfn->hw_info.hw_mode);
+}
+
+#ifndef ASIC_ONLY
+/* MFW-replacement initializations for non-ASIC (emulation/FPGA) platforms:
+ * releases resets, masks block interrupts, sets port mode and waits for
+ * the PSWRQ2 RBC-done indication. The register-write order here matters.
+ */
+static void ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt)
+{
+       u32 pl_hv = 1;
+       int i;
+
+       /* NOTE(review): extra reset bits (0x600) for AH emulation - exact
+        * bit semantics not documented here; confirm against register spec.
+        */
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
+               pl_hv |= 0x600;
+
+       ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
+
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
+               ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2, 0x3ffffff);
+
+       /* initialize interrupt masks */
+       for (i = 0;
+            i <
+            attn_blocks[BLOCK_MISCS].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
+            num_of_int_regs; i++)
+               ecore_wr(p_hwfn, p_ptt,
+                        attn_blocks[BLOCK_MISCS].
+                        chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[i]->
+                        mask_addr, 0);
+
+       /* CNIG mask is skipped only for AH emulation */
+       if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
+               ecore_wr(p_hwfn, p_ptt,
+                        attn_blocks[BLOCK_CNIG].
+                        chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
+                        mask_addr, 0);
+       ecore_wr(p_hwfn, p_ptt,
+                attn_blocks[BLOCK_PGLCS].
+                chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
+                mask_addr, 0);
+       ecore_wr(p_hwfn, p_ptt,
+                attn_blocks[BLOCK_CPMU].
+                chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
+                mask_addr, 0);
+       /* Currently A0 and B0 interrupt bits are the same in pglue_b;
+        * If this changes, need to set this according to chip type. <14/09/23>
+        */
+       ecore_wr(p_hwfn, p_ptt,
+                attn_blocks[BLOCK_PGLUE_B].
+                chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
+                mask_addr, 0x80000);
+
+       /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
+       /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
+       if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
+               ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0, 4);
+
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
+               /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
+               ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
+                        (p_hwfn->p_dev->num_ports_in_engines >> 1));
+
+               ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
+                        p_hwfn->p_dev->num_ports_in_engines == 4 ? 0 : 3);
+       }
+
+       /* Poll on RBC: up to 100 * 50us = 5ms for PSWRQ2 config done */
+       ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
+       for (i = 0; i < 100; i++) {
+               OSAL_UDELAY(50);
+               if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
+                       break;
+       }
+       if (i == 100)
+               DP_NOTICE(p_hwfn, true,
+                         "RBC done failed to complete in PSWRQ2\n");
+}
+#endif
+
+/* Fill the CAU runtime array with a status-block entry for every IGU block
+ * owned by a PF on this engine.
+ * TBD - for VFs - Once we have parent PF info for each VF in
+ * shmem available as CAU requires knowledge of parent PF for each VF.
+ */
+static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
+{
+       u32 rt_offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+       int hwfn_idx, igu_sb_id;
+
+       for_each_hwfn(p_dev, hwfn_idx) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[hwfn_idx];
+               struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+               struct cau_sb_entry entry;
+
+               for (igu_sb_id = 0;
+                    igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
+                    igu_sb_id++) {
+                       struct ecore_igu_block *p_blk =
+                           &p_info->igu_map.igu_blocks[igu_sb_id];
+
+                       /* Only PF-owned status blocks get a CAU entry */
+                       if (!p_blk->is_pf)
+                               continue;
+
+                       ecore_init_cau_sb_entry(p_hwfn, &entry,
+                                               p_blk->function_id, 0, 0);
+                       STORE_RT_REG_AGG(p_hwfn, rt_offset + igu_sb_id * 2,
+                                        entry);
+               }
+       }
+}
+
+/* Engine-phase HW init, common to all PFs on the engine: CAU runtime data,
+ * GTT windows, QM common runtime, NIG gating, the init-tool ENGINE phase
+ * and two workarounds (clear RoCE search on BB PFs, CCFC strong-enable
+ * per VF). Returns the status of the init-tool run.
+ */
+static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt,
+                                                int hw_mode)
+{
+       struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       u8 vf_id, max_num_vfs;
+       u16 num_pfs, pf_id;
+       u32 concrete_fid;
+
+       ecore_init_cau_rt_data(p_dev);
+
+       /* Program GTT windows */
+       ecore_gtt_init(p_hwfn);
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
+#endif
+
+       /* Enable PF rate-limit/WFQ only when MFW reports bandwidth limits */
+       if (p_hwfn->mcp_info) {
+               if (p_hwfn->mcp_info->func_info.bandwidth_max)
+                       qm_info->pf_rl_en = 1;
+               if (p_hwfn->mcp_info->func_info.bandwidth_min)
+                       qm_info->pf_wfq_en = 1;
+       }
+
+       ecore_qm_common_rt_init(p_hwfn,
+                               p_hwfn->p_dev->num_ports_in_engines,
+                               qm_info->max_phys_tcs_per_port,
+                               qm_info->pf_rl_en, qm_info->pf_wfq_en,
+                               qm_info->vport_rl_en, qm_info->vport_wfq_en,
+                               qm_info->qm_port_params);
+
+       ecore_cxt_hw_init_common(p_hwfn);
+
+       /* Close gate from NIG to BRB/Storm; By default they are open, but
+        * we close them to prevent NIG from passing data to reset blocks.
+        * Should have been done in the ENGINE phase, but init-tool lacks
+        * proper port-pretend capabilities.
+        */
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+       ecore_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
+       ecore_port_unpretend(p_hwfn, p_ptt);
+
+       rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* @@TBD MichalK - should add VALIDATE_VFID to init tool...
+        * need to decide with which value, maybe runtime
+        */
+       ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+       ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+       if (ECORE_IS_BB(p_hwfn->p_dev)) {
+               num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+               if (num_pfs == 1)
+                       return rc;
+               /* Workaround clears ROCE search for all functions to prevent
+                * involving non-initialized function in processing ROCE packet.
+                */
+               for (pf_id = 0; pf_id < num_pfs; pf_id++) {
+                       ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
+                       ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+               }
+               /* pretend to original PF */
+               ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+       }
+
+       /* Workaround for avoiding CCFC execution error when getting packets
+        * with CRC errors, and allowing instead the invoking of the FW error
+        * handler.
+        * This is not done inside the init tool since it currently can't
+        * perform a pretending to VFs.
+        */
+       max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2
+           : MAX_NUM_VFS_BB;
+       for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
+               concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
+               ecore_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+               ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+       }
+       /* pretend to original PF */
+       ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
+       return rc;
+}
+
+#ifndef ASIC_ONLY
+#define MISC_REG_RESET_REG_2_XMAC_BIT (1<<4)
+#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1<<5)
+
+#define PMEG_IF_BYTE_COUNT     8
+
<br>

+/* Indirect write of a 64-bit value to a network-port register through the
+ * CNIG PMEG interface (BB emulation path): program the byte count into the
+ * CMD register, then the target address, then the data low/high dwords.
+ * NOTE(review): the DP_VERBOSE CMD value omits the 0xffff00fe mask that the
+ * actual CMD write applies, so logged and written commands can differ.
+ */
+static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt,
+                            u32 addr, u64 data, u8 reg_type, u8 port)
+{
+       DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                  "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
+                  ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) |
+                  (8 << PMEG_IF_BYTE_COUNT),
+                  (reg_type << 25) | (addr << 8) | port,
+                  (u32) ((data >> 32) & 0xffffffff),
+                  (u32) (data & 0xffffffff));
+
+       ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0,
+                (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) &
+                 0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
+       ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB_B0,
+                (reg_type << 25) | (addr << 8) | port);
+       ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
+                data & 0xffffffff);
+       ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
+                (data >> 32) & 0xffffffff);
+}
+
+#define XLPORT_MODE_REG        (0x20a)
+#define XLPORT_MAC_CONTROL (0x210)
+#define XLPORT_FLOW_CONTROL_CONFIG (0x207)
+#define XLPORT_ENABLE_REG (0x20b)
+
+#define XLMAC_CTRL (0x600)
+#define XLMAC_MODE (0x601)
+#define XLMAC_RX_MAX_SIZE (0x608)
+#define XLMAC_TX_CTRL (0x604)
+#define XLMAC_PAUSE_CTRL (0x60d)
+#define XLMAC_PFC_CTRL (0x60e)
+
+/* Emulation-only link/MAC bring-up for AH (K2): map the NIG port to its NWM
+ * MAC, set XGMII mode, max frame length (9018), Tx IPG, Rx/Tx FIFO section
+ * thresholds, and finally write COMMAND_CONFIG to enable the MAC.
+ * NOTE(review): 0xa853 presumably enables Tx/Rx among other control bits -
+ * confirm against the ETH MAC register spec.
+ */
+static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt)
+{
+       u8 port = p_hwfn->port_id;
+       u32 mac_base = NWM_REG_MAC0 + (port << 2) * NWM_REG_MAC0_SIZE;
+
+       ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2),
+                (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT) |
+                (port << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT)
+                | (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT));
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE,
+                1 << ETH_MAC_REG_XIF_MODE_XGMII_SHIFT);
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH,
+                9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT);
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH,
+                0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT);
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS,
+                8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT);
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS,
+                (0xA << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT) |
+                (8 << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT));
+
+       ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG, 0xa853);
+}
+
+/* Emulation-only link configuration. For AH defers to the NWM-based
+ * ecore_emul_link_init_ah(); for BB programs the XLPORT/XLMAC registers
+ * through the PMEG indirect interface (ecore_wr_nw_port).
+ */
+static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt)
+{
+       u8 loopback = 0, port = p_hwfn->port_id * 2;
+
+       DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);
+
+       if (ECORE_IS_AH(p_hwfn->p_dev)) {
+               ecore_emul_link_init_ah(p_hwfn, p_ptt);
+               return;
+       }
+
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
+                               port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
+                        0x01000000800ULL | (0xa << 12) | ((u64) 1 << 38),
+                        0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
+                        0x30ffffc000ULL, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0,
+                       port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
+                       0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);
+       ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
+}
+
+/* Non-ASIC XMAC bring-up for a BB port: hard reset then soft reset of the
+ * XMAC block, port-mode setup, max Rx size 0x2710 (10000), Tx CRC append,
+ * Tx/Rx enable and variable-preamble processing. Register order matters.
+ */
+static void ecore_link_init(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt, u8 port)
+{
+       int port_offset = port ? 0x800 : 0;
+       u32 xmac_rxctrl = 0;
+
+       /* Reset of XMAC */
+       /* FIXME: move to common start */
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
+               MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
+       OSAL_MSLEEP(1);
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
+               MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
+
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+       /* Set the number of ports on the Warp Core to 10G */
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+
+       /* Soft reset of XMAC */
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
+                MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
+       OSAL_MSLEEP(1);
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
+                MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
+
+       /* FIXME: move to common end */
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+               ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE + port_offset, 0x20);
+
+       /* Set Max packet size: initialize XMAC block register for port 0 */
+       ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE + port_offset, 0x2710);
+
+       /* CRC append for Tx packets: init XMAC block register for port 1 */
+       ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO + port_offset, 0xC800);
+
+       /* Enable TX and RX: initialize XMAC block register for port 1 */
+       ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL + port_offset,
+                XMAC_REG_CTRL_TX_EN | XMAC_REG_CTRL_RX_EN);
+       xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset);
+       xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE;
+       ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset, xmac_rxctrl);
+}
+#endif
+
+/* Port-phase HW init: run the init-tool PORT phase for this hwfn's port.
+ * On non-ASIC platforms additionally configure link: FPGA BB via
+ * ecore_link_init(), emulation via ecore_emul_link_init() (with OPTE/CMT
+ * setup first when two hwfns are present).
+ */
+static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
+                                              struct ecore_ptt *p_ptt,
+                                              int hw_mode)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       /* Init sequence */
+       rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+                           hw_mode);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+               return ECORE_SUCCESS;
+
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               /* AH FPGA needs no explicit link init */
+               if (ECORE_IS_AH(p_hwfn->p_dev))
+                       return ECORE_SUCCESS;
+               ecore_link_init(p_hwfn, p_ptt, p_hwfn->port_id);
+       } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               if (p_hwfn->p_dev->num_hwfns > 1) {
+                       /* Activate OPTE in CMT */
+                       u32 val;
+
+                       val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
+                       val |= 0x10;
+                       ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
+                       ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
+                       ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
+                       ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
+                                0x55555555);
+               }
+
+               ecore_emul_link_init(p_hwfn, p_ptt);
+       } else {
+               DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
+       }
+#endif
+
+       return rc;
+}
+
+/* Compute the DPI (doorbell page) size and count that fit the PWM region
+ * for the requested number of CPUs, store them in the hwfn and program the
+ * DORQ bit-shift register. Returns ECORE_NORESOURCES when fewer DPIs fit
+ * than the configured minimum (the caller may retry with fewer CPUs).
+ */
+static enum _ecore_status_t
+ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
+{
+       u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
+       u32 dpi_bit_shift, dpi_count;
+       u32 min_dpis;
+
+       /* Calculate DPI size
+        * ------------------
+        * The PWM region contains Doorbell Pages. The first is reserved for
+        * the kernel for, e.g., L2. The others are free to be used by non-
+        * trusted applications, typically from user space. Each page, called a
+        * doorbell page is sectioned into windows that allow doorbells to be
+        * issued in parallel by the kernel/application. The size of such a
+        * window (a.k.a. WID) is 1kB.
+        * Summary:
+        *    1kB WID x N WIDS = DPI page size
+        *    DPI page size x N DPIs = PWM region size
+        * Notes:
+        * The size of the DPI page size must be in multiples of OSAL_PAGE_SIZE
+        * in order to ensure that two applications won't share the same page.
+        * It also must contain at least one WID per CPU to allow parallelism.
+        * It also must be a power of 2, since it is stored as a bit shift.
+        *
+        * The DPI page size is stored in a register as 'dpi_bit_shift' so that
+        * 0 is 4kB, 1 is 8kB and etc. Hence the minimum size is 4,096
+        * containing 4 WIDs.
+        */
+       dpi_page_size_1 = ECORE_WID_SIZE * n_cpus;
+       dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE);
+       dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2);
+       dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size);
+       dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
+
+       dpi_count = pwm_region_size / dpi_page_size;
+
+       min_dpis = p_hwfn->pf_params.roce_pf_params.min_dpis;
+       min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis);
+
+       /* Update hwfn */
+       p_hwfn->dpi_size = dpi_page_size;
+       p_hwfn->dpi_count = dpi_count;
+
+       /* Update registers. Note this happens even on the failure path below;
+        * the caller is expected to retry with a smaller n_cpus, which
+        * re-programs the register.
+        */
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+       if (dpi_count < min_dpis)
+               return ECORE_NORESOURCES;
+
+       return ECORE_SUCCESS;
+}
+
+/* RoCE EDPM (Enhanced Doorbell Push Mode) operating modes; see the DPI
+ * sizing logic in the doorbell-bar init for how each mode is handled.
+ */
+enum ECORE_ROCE_EDPM_MODE {
+       ECORE_ROCE_EDPM_MODE_ENABLE = 0,   /* try a WID per CPU, may fall back */
+       ECORE_ROCE_EDPM_MODE_FORCE_ON = 1, /* EDPM is mandatory */
+       ECORE_ROCE_EDPM_MODE_DISABLE = 2,  /* never use EDPM (single WID) */
+};
+
+static enum _ecore_status_t
+ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt)
+{
+       u32 pwm_regsize, norm_regsize;
+       u32 non_pwm_conn, min_addr_reg1;
+       u32 db_bar_size, n_cpus;
+       u32 roce_edpm_mode;
+       u32 pf_dems_shift;
+       int rc = ECORE_SUCCESS;
+
+       db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
+       if (p_hwfn->p_dev->num_hwfns > 1)
+               db_bar_size /= 2;
+
+       /* Calculate doorbell regions
+        * -----------------------------------
+        * The doorbell BAR is made of two regions. The first is called normal
+        * region and the second is called PWM region. In the normal region
+        * each ICID has its own set of addresses so that writing to that
+        * specific address identifies the ICID. In the Process Window Mode
+        * region the ICID is given in the data written to the doorbell. The
+        * above per PF register denotes the offset in the doorbell BAR in which
+        * the PWM region begins.
+        * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per
+        * non-PWM connection. The calculation below computes the total non-PWM
+        * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
+        * in units of 4,096 bytes.
+        */
+       non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+           ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+                                         OSAL_NULL) +
+           ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
+       norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
+       min_addr_reg1 = norm_regsize / 4096;
+       pwm_regsize = db_bar_size - norm_regsize;
+
+       /* Check that the normal and PWM sizes are valid */
+       if (db_bar_size < norm_regsize) {
+               DP_ERR(p_hwfn->p_dev,
+                      "Doorbell BAR size 0x%x is too"
+                      " small (normal region is 0x%0x )\n",
+                      db_bar_size, norm_regsize);
+               return ECORE_NORESOURCES;
+       }
+       if (pwm_regsize < ECORE_MIN_PWM_REGION) {
+               DP_ERR(p_hwfn->p_dev,
+                      "PWM region size 0x%0x is too small."
+                      " Should be at least 0x%0x (Doorbell BAR size"
+                      " is 0x%x and normal region size is 0x%0x)\n",
+                      pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
+                      norm_regsize);
+               return ECORE_NORESOURCES;
+       }
+
+       /* Calculate number of DPIs */
+       roce_edpm_mode = p_hwfn->pf_params.roce_pf_params.roce_edpm_mode;
+       if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) ||
+           ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) {
+               /* Either EDPM is mandatory, or we are attempting to allocate a
+                * WID per CPU.
+                */
+               n_cpus = OSAL_NUM_ACTIVE_CPU();
+               rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+       }
+
+       if (((rc) && (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
+           (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE)) {
+               /* Either EDPM is disabled, or it is not mandatory and we failed
+                * allocated a WID per CPU.
+                */
+               n_cpus = 1;
+               rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+       }
+
+       /* Check return codes from above calls */
+       if (rc) {
+               DP_ERR(p_hwfn,
+                      "Failed to allocate enough DPIs. Allocated %d but the"
+                       "current minimum is %d. You can try reducing this"
+                       "down to %d via user configuration n_dpi or by"
+                       " disabling EDPM via user configuration roce_edpm\n",
+                      p_hwfn->dpi_count,
+                      p_hwfn->pf_params.roce_pf_params.min_dpis,
+                      ECORE_MIN_DPIS);
+               return ECORE_NORESOURCES;
+       }
+
+       /* Update hwfn */
+       p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to
+                                                 * calculate the doorbell
+                                                 * address
+                                                 */
+
+       /* Update registers */
+       /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
+       pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+       DP_INFO(p_hwfn,
+               "Doorbell size 0x%x, Normal region 0x%x, PWM region 0x%x\n",
+               db_bar_size, norm_regsize, pwm_regsize);
+       DP_INFO(p_hwfn, "DPI size 0x%x, DPI count 0x%x\n", p_hwfn->dpi_size,
+               p_hwfn->dpi_count);
+
+       return ECORE_SUCCESS;
+}
+
+/* PF init phase of the HW init flow. Programs the context-manager and
+ * IGU runtime data, configures NIG/PRS registers according to the MF
+ * mode, runs the PF and QM_PF init-tool phases, sets up the doorbell
+ * BAR and, when @b_hw_start is set, enables interrupts and sends the
+ * function-start ramrod.
+ */
+static enum _ecore_status_t
+ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
+                struct ecore_ptt *p_ptt,
+                struct ecore_tunn_start_params *p_tunn,
+                int hw_mode,
+                bool b_hw_start,
+                enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u8 rel_pf_id = p_hwfn->rel_pf_id;
+       u32 prs_reg;
+#if 0                          /* @DPDK - only used by the disabled PCI code below */
+       u16 ctrl;
+       int pos;
+#endif
+
+       /* ILT/DQ/CM/QM */
+       if (p_hwfn->mcp_info) {
+               struct ecore_mcp_function_info *p_info;
+
+               p_info = &p_hwfn->mcp_info->func_info;
+               if (p_info->bandwidth_min)
+                       p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+               /* Update rate limit once we'll actually have a link */
+               p_hwfn->qm_info.pf_rl = 100;
+       }
+       ecore_cxt_hw_init_pf(p_hwfn);
+
+       ecore_int_igu_init_rt(p_hwfn);  /* @@@TBD TODO MichalS multi hwfn ?? */
+
+       /* Set VLAN in NIG if needed */
+       if (hw_mode & (1 << MODE_MF_SD)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+                            p_hwfn->hw_info.ovlan);
+       }
+
+       /* Enable classification by MAC if needed */
+       if (hw_mode & (1 << MODE_MF_SI)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                          "Configuring TAGMAC_CLS_TYPE\n");
+               STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET,
+                            1);
+       }
+
+       /* Protocol Configuration  - @@@TBD - should we set 0 otherwise? */
+       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
+                    (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0);
+       STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+       /* perform debug configuration when chip is out of reset */
+       OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
+
+       /* Cleanup chip from previous driver if such remains exist */
+       rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
+       if (rc != ECORE_SUCCESS) {
+               ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
+               return rc;
+       }
+
+       /* PF Init sequence */
+       rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+       if (rc)
+               return rc;
+
+       /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+       rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+       if (rc)
+               return rc;
+
+       /* Pure runtime initializations - directly to the HW  */
+       ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+       /* PCI relaxed ordering causes a decrease in the performance on some
+        * systems. Till a root cause is found, disable this attribute in the
+        * PCI config space.
+        */
+#if 0                          /* @DPDK */
+       pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
+       if (!pos) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to find the PCI Express"
+                         " Capability structure in the PCI config space\n");
+               return ECORE_IO;
+       }
+       OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
+       ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+       OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl);
+#endif /* @DPDK */
+
+#ifndef ASIC_ONLY
+       /*@@TMP - On B0 build 1, need to mask the datapath_registers parity */
+       if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev) &&
+           (p_hwfn->p_dev->chip_metal == 1)) {
+               u32 reg_addr, tmp;
+
+               reg_addr =
+                   attn_blocks[BLOCK_PGLUE_B].
+                   chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].prty_regs[0]->
+                   mask_addr;
+               DP_NOTICE(p_hwfn, false,
+                         "Masking datapath registers parity on"
+                         " B0 emulation [build 1]\n");
+               tmp = ecore_rd(p_hwfn, p_ptt, reg_addr);
+               tmp |= (1 << 0);        /* Was PRTY_MASK_DATAPATH_REGISTERS */
+               ecore_wr(p_hwfn, p_ptt, reg_addr, tmp);
+       }
+#endif
+
+       rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+       if (rc)
+               return rc;
+
+       if (b_hw_start) {
+               /* enable interrupts */
+               ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+               /* send function start command */
+               rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
+                                      allow_npar_tx_switch);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Function start ramrod failed\n");
+               } else {
+                       /* Dump the parser search registers for debugging */
+                       prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH register after start PFn\n");
+                       prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
+                       prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
+                       prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
+                       prs_reg = ecore_rd(p_hwfn, p_ptt,
+                                          PRS_REG_SEARCH_TCP_FIRST_FRAG);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
+                                  prs_reg);
+                       prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+                                  "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+               }
+       }
+       return rc;
+}
+
+/* Enable or disable this PF as a PXP (DMAE) master and wait for the HW
+ * to reflect the new value - polls every 50us, for up to one second.
+ */
+static enum _ecore_status_t
+ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
+                     struct ecore_ptt *p_ptt, u8 enable)
+{
+       u32 set_val = enable ? 1 : 0;
+       u32 attempts;
+
+       /* Change PF in PXP */
+       ecore_wr(p_hwfn, p_ptt,
+                PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+       /* wait until value is set - try for 1 second every 50us */
+       for (attempts = 0; attempts < 20000; attempts++) {
+               u32 val = ecore_rd(p_hwfn, p_ptt,
+                                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+
+               if (val == set_val)
+                       return ECORE_SUCCESS;
+
+               OSAL_UDELAY(50);
+       }
+
+       DP_NOTICE(p_hwfn, true,
+                 "PFID_ENABLE_MASTER wasn't changed after a second\n");
+       return ECORE_UNKNOWN_ERROR;
+}
+
+/* Re-read the current MFW mailbox from HW and snapshot it into the
+ * shadow copy, so subsequent comparisons start from a fresh baseline.
+ */
+static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_ptt *p_main_ptt)
+{
+       ecore_mcp_read_mb(p_hwfn, p_main_ptt);
+
+       OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
+                   p_hwfn->mcp_info->mfw_mb_cur,
+                   p_hwfn->mcp_info->mfw_mb_length);
+}
+
+/* Top-level HW init: for every hw-function, enables DMAE, negotiates a
+ * load code with the MFW and runs the matching init phases (engine ->
+ * port -> function), then ACKs the MFW with LOAD_DONE and triggers
+ * initial DCBX attention handling.
+ */
+enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
+                                  struct ecore_tunn_start_params *p_tunn,
+                                  bool b_hw_start,
+                                  enum ecore_int_mode int_mode,
+                                  bool allow_npar_tx_switch,
+                                  const u8 *bin_fw_data)
+{
+       enum _ecore_status_t rc, mfw_rc;
+       u32 load_code, param;
+       int i, j;
+
+       if (IS_PF(p_dev)) {
+               rc = ecore_init_fw_data(p_dev, bin_fw_data);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+       }
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               if (IS_VF(p_dev)) {
+                       rc = ecore_vf_pf_init(p_hwfn);
+                       if (rc)
+                               return rc;
+                       continue;
+               }
+
+               /* Enable DMAE in PXP - without it nothing below can reach
+                * the chip, so a failure here is fatal.
+                */
+               rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+
+               ecore_calc_hw_mode(p_hwfn);
+               /* @@@TBD need to add here:
+                * Check for fan failure
+                * Prev_unload
+                */
+               rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed sending LOAD_REQ command\n");
+                       return rc;
+               }
+
+               /* CQ75580:
+                * When coming back from hibernate state, the registers from
+                * which shadow is read initially are not initialized. It turns
+                * out that these registers get initialized during the call to
+                * ecore_mcp_load_req request. So we need to reread them here
+                * to get the proper shadow register value.
+                * Note: This is a workaround for the missing MFW
+                * initialization. It may be removed once the implementation
+                * is done.
+                */
+               ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                          "Load request was sent.Resp:0x%x, Load code: 0x%x\n",
+                          rc, load_code);
+
+               /* Only relevant for recovery:
+                * Clear the indication after the LOAD_REQ command is responded
+                * by the MFW.
+                */
+               p_dev->recov_in_prog = false;
+
+               p_hwfn->first_on_engine = (load_code ==
+                                          FW_MSG_CODE_DRV_LOAD_ENGINE);
+
+               /* The load code decides how deep the init goes; each case
+                * deliberately falls into the next (engine->port->function).
+                */
+               switch (load_code) {
+               case FW_MSG_CODE_DRV_LOAD_ENGINE:
+                       rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+                                                 p_hwfn->hw_info.hw_mode);
+                       if (rc)
+                               break;
+                       /* Fall into */
+               case FW_MSG_CODE_DRV_LOAD_PORT:
+                       rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+                                               p_hwfn->hw_info.hw_mode);
+                       if (rc)
+                               break;
+
+                       if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
+                               struct init_nig_pri_tc_map_req tc_map;
+
+                               OSAL_MEM_ZERO(&tc_map, sizeof(tc_map));
+
+                               /* remove this once flow control is
+                                * implemented
+                                */
+                               for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) {
+                                       tc_map.pri[j].tc_id = 0;
+                                       tc_map.pri[j].valid = 1;
+                               }
+                               ecore_init_nig_pri_tc_map(p_hwfn,
+                                                         p_hwfn->p_main_ptt,
+                                                         &tc_map);
+                       }
+                       /* Fall into */
+               case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+                       rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+                                             p_tunn, p_hwfn->hw_info.hw_mode,
+                                             b_hw_start, int_mode,
+                                             allow_npar_tx_switch);
+                       break;
+               default:
+                       rc = ECORE_NOTIMPL;
+                       break;
+               }
+
+               if (rc != ECORE_SUCCESS)
+                       DP_NOTICE(p_hwfn, true,
+                                 "init phase failed loadcode 0x%x (rc %d)\n",
+                                 load_code, rc);
+
+               /* ACK mfw regardless of success or failure of initialization */
+               mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                      DRV_MSG_CODE_LOAD_DONE,
+                                      0, &load_code, &param);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+               if (mfw_rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed sending LOAD_DONE command\n");
+                       return mfw_rc;
+               }
+
+               /* send DCBX attention request command */
+               DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+                          "sending phony dcbx set command to trigger DCBx"
+                          " attention handling\n");
+               mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                      DRV_MSG_CODE_SET_DCBX,
+                                      1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+                                      &load_code, &param);
+               if (mfw_rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to send DCBX attention request\n");
+                       return mfw_rc;
+               }
+
+               p_hwfn->hw_init_done = true;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+#define ECORE_HW_STOP_RETRY_LIMIT      (10)
+/* Disable the PF's connection and task timers, then poll (with 1ms
+ * sleeps, up to ECORE_HW_STOP_RETRY_LIMIT tries) until the HW reports
+ * both linear scans have drained. The wait is skipped entirely while a
+ * recovery is in progress; hitting the limit is logged, not fatal.
+ */
+static OSAL_INLINE void ecore_hw_timers_stop(struct ecore_dev *p_dev,
+                                            struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt)
+{
+       int i;
+
+       /* close timers */
+       ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+       ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+       for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT &&
+                                       !p_dev->recov_in_prog; i++) {
+               /* Done once both scan-active registers read zero */
+               if ((!ecore_rd(p_hwfn, p_ptt,
+                              TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+                   (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
+                       break;
+
+               /* Dependent on number of connection/tasks, possibly
+                * 1ms sleep is required between polls
+                */
+               OSAL_MSLEEP(1);
+       }
+       if (i == ECORE_HW_STOP_RETRY_LIMIT)
+               DP_NOTICE(p_hwfn, true,
+                         "Timers linear scans are not over"
+                         " [Connection %02x Tasks %02x]\n",
+                         (u8) ecore_rd(p_hwfn, p_ptt,
+                                       TM_REG_PF_SCAN_ACTIVE_CONN),
+                         (u8) ecore_rd(p_hwfn, p_ptt,
+                                       TM_REG_PF_SCAN_ACTIVE_TASK));
+}
+
+/* Stop the connection/task timer scans on every hw-function of the
+ * device, using each function's main PTT window.
+ */
+void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
+{
+       int i;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               ecore_hw_timers_stop(p_dev, p_hwfn, p_hwfn->p_main_ptt);
+       }
+}
+
+/* Stop HW and FW on all hw-functions: send the PF-stop ramrod, close
+ * the NIG-to-BRB gate and the parser searches, drain the timers and
+ * disable attention generation. Finally disables the DMAE master -
+ * done only once, on the first hw-function, after all others stopped.
+ * Returns the first error encountered (PF-stop failure is logged but
+ * the HW shutdown continues regardless).
+ */
+enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS, t_rc;
+       int j;
+
+       for_each_hwfn(p_dev, j) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+               struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
+
+               /* VFs only need their interrupt state cleaned up */
+               if (IS_VF(p_dev)) {
+                       ecore_vf_pf_int_cleanup(p_hwfn);
+                       continue;
+               }
+
+               /* mark the hw as uninitialized... */
+               p_hwfn->hw_init_done = false;
+
+               rc = ecore_sp_pf_stop(p_hwfn);
+               if (rc)
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to close PF against FW. Continue to"
+                                 " stop HW to prevent illegal host access"
+                                 " by the device\n");
+
+               /* perform debug action after PF stop was sent */
+               OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id);
+
+               /* close NIG to BRB gate */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+               /* close parser */
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+               /* @@@TBD - clean transmission queues (5.b) */
+               /* @@@TBD - clean BTB (5.c) */
+
+               ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
+
+               /* @@@TBD - verify DMAE requests are done (8) */
+
+               /* Disable Attention Generation */
+               ecore_int_igu_disable_int(p_hwfn, p_ptt);
+               ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+               ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+               ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+               /* Need to wait 1ms to guarantee SBs are cleared */
+               OSAL_MSLEEP(1);
+       }
+
+       if (IS_PF(p_dev)) {
+               /* Disable DMAE in PXP - in CMT, this should only be done for
+                * first hw-function, and only after all transactions have
+                * stopped for all active hw-functions.
+                */
+               t_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0],
+                                            p_dev->hwfns[0].p_main_ptt, false);
+               if (t_rc != ECORE_SUCCESS)
+                       rc = t_rc;
+       }
+
+       return rc;
+}
+
+/* Quiesce the fastpath without a full HW stop: close the NIG-to-BRB
+ * gate and the parser searches and clear the status blocks, leaving the
+ * function otherwise initialized. Reversed by ecore_hw_start_fastpath().
+ */
+void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
+{
+       int j;
+
+       for_each_hwfn(p_dev, j) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
+               struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+               /* VFs only need their interrupt state cleaned up */
+               if (IS_VF(p_dev)) {
+                       ecore_vf_pf_int_cleanup(p_hwfn);
+                       continue;
+               }
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+                          "Shutting down the fastpath\n");
+
+               /* close NIG to BRB gate */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+               /* close parser searches */
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+               /* @@@TBD - clean transmission queues (5.b) */
+               /* @@@TBD - clean BTB (5.c) */
+
+               /* @@@TBD - verify DMAE requests are done (8) */
+
+               ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+               /* Need to wait 1ms to guarantee SBs are cleared */
+               OSAL_MSLEEP(1);
+       }
+}
+
+/* Re-enable the fastpath after ecore_hw_stop_fastpath(): re-enables the
+ * RoCE searcher/connection timers when RoCE is initialized and re-opens
+ * the NIG-to-BRB gate for incoming traffic. No-op for VFs.
+ */
+void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+       if (IS_VF(p_hwfn->p_dev))
+               return;
+
+       /* If roce info is allocated it means roce is initialized and should
+        * be enabled in searcher.
+        */
+       if (p_hwfn->p_roce_info) {
+               if (p_hwfn->b_roce_enabled_in_prs)
+                       ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x1);
+               ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1);
+       }
+
+       /* Re-open incoming traffic (use the same PTT captured above) */
+       ecore_wr(p_hwfn, p_ptt,
+                NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+}
+
+/* Read @reg and verify it holds @expected; logs and returns
+ * ECORE_UNKNOWN_ERROR on mismatch. Used before reset to assert that HW
+ * usage counters have drained to zero.
+ */
+static enum _ecore_status_t ecore_reg_assert(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt, u32 reg,
+                                            bool expected)
+{
+       u32 assert_val = ecore_rd(p_hwfn, p_ptt, reg);
+
+       if (assert_val != expected) {
+               DP_NOTICE(p_hwfn, true, "Value at address 0x%08x != 0x%08x\n",
+                         reg, expected);
+               return ECORE_UNKNOWN_ERROR;
+       }
+
+       /* Use the enum constant rather than a bare 0, consistent with the
+        * rest of the ecore status-code convention.
+        */
+       return ECORE_SUCCESS;
+}
+
+/* Reset HW/FW on all hw-functions: check that the QM usage counters
+ * have drained, disable the PF in the doorbell/QM/CFC blocks, and -
+ * unless a recovery is in progress - run the UNLOAD_REQ/UNLOAD_DONE
+ * handshake with the MCP.
+ */
+enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u32 unload_resp, unload_param;
+       int i;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               if (IS_VF(p_dev)) {
+                       rc = ecore_vf_pf_reset(p_hwfn);
+                       if (rc)
+                               return rc;
+                       continue;
+               }
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Resetting hw/fw\n");
+
+               /* Check for incorrect states */
+               if (!p_dev->recov_in_prog) {
+                       ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+                                        QM_REG_USG_CNT_PF_TX, 0);
+                       ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
+                                        QM_REG_USG_CNT_PF_OTHER, 0);
+                       /* @@@TBD - assert on incorrect xCFC values (10.b) */
+               }
+
+               /* Disable PF in HW blocks */
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                        TCFC_REG_STRONG_ENABLE_PF, 0);
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                        CCFC_REG_STRONG_ENABLE_PF, 0);
+
+               if (p_dev->recov_in_prog) {
+                       /* Note the missing space fix: the two literals used
+                        * to concatenate into "skipsending".
+                        */
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+                                  "Recovery is in progress -> skip "
+                                  "sending unload_req/done\n");
+                       break;
+               }
+
+               /* Send unload command to MCP */
+               rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                  DRV_MSG_CODE_UNLOAD_REQ,
+                                  DRV_MB_PARAM_UNLOAD_WOL_MCP,
+                                  &unload_resp, &unload_param);
+               if (rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "ecore_hw_reset: UNLOAD_REQ failed\n");
+                       /* @@TBD - what to do? for now, assume ENG. */
+                       unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
+               }
+
+               rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                  DRV_MSG_CODE_UNLOAD_DONE,
+                                  0, &unload_resp, &unload_param);
+               if (rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn,
+                                 true, "ecore_hw_reset: UNLOAD_DONE failed\n");
+                       /* @@@TBD - Should it really ASSERT here ? */
+                       return rc;
+               }
+       }
+
+       return rc;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
+{
+       /* Release the PTT window pool and the IGU mapping info */
+       ecore_ptt_pool_free(p_hwfn);
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
+}
+
+/* Setup bar access */
+static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
+{
+       /* clear indirect access */
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+
+       /* Clean Previous errors if such exist - write this PF's bit to the
+        * was-error clear register
+        */
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+
+       /* enable internal target-read */
+       ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+}
+
+/* Derive the function identifiers (opaque/concrete FID, absolute and
+ * relative PF id, port id) from the PXP ME registers.
+ */
+static void get_function_id(struct ecore_hwfn *p_hwfn)
+{
+       u32 concrete;
+
+       /* ME Register */
+       p_hwfn->hw_info.opaque_fid =
+           (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
+
+       concrete = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+       p_hwfn->hw_info.concrete_fid = concrete;
+
+       /* Bits 16-19 from the ME registers are the pf_num */
+       /* @@ @TBD - check, may be wrong after B0 implementation for CMT */
+       p_hwfn->abs_pf_id = (concrete >> 16) & 0xf;
+       p_hwfn->rel_pf_id = GET_FIELD(concrete, PXP_CONCRETE_FID_PFID);
+       p_hwfn->port_id = GET_FIELD(concrete, PXP_CONCRETE_FID_PORT);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+                  "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+                  p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
+}
+
+/* Split the PF's status blocks between the enabled features (L2, and
+ * RoCE CNQs when compiled in) and record the per-feature counts in
+ * hw_info.feat_num.
+ */
+static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
+{
+       u32 *feat_num = p_hwfn->hw_info.feat_num;
+       int num_features = 1;
+
+#ifdef CONFIG_ECORE_ROCE
+       /* Roce CNQ require each: 1 status block. 1 CNQ, we divide the
+        * status blocks equally between L2 / RoCE but with consideration as
+        * to how many l2 queues / cnqs we have
+        */
+       if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
+               num_features++;
+
+               feat_num[ECORE_ROCE_CNQ] =
+                   OSAL_MIN_T(u32,
+                              RESC_NUM(p_hwfn, ECORE_SB) / num_features,
+                              RESC_NUM(p_hwfn, ECORE_ROCE_CNQ_RAM));
+       }
+#endif
+
+       /* L2 Queues require each: 1 status block. 1 L2 queue */
+       feat_num[ECORE_PF_L2_QUE] =
+           OSAL_MIN_T(u32,
+                      RESC_NUM(p_hwfn, ECORE_SB) / num_features,
+                      RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+
+       /* NOTE(review): feat_num[ECORE_ROCE_CNQ] is printed even when
+        * CONFIG_ECORE_ROCE is not defined - presumably feat_num is
+        * zero-initialized during hwfn setup; confirm.
+        */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+                  "#PF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
+                  feat_num[ECORE_PF_L2_QUE],
+                  feat_num[ECORE_ROCE_CNQ],
+                  RESC_NUM(p_hwfn, ECORE_SB), num_features);
+}
+
+/* @@@TBD MK RESC: This info is currently hard-coded and set as if we were
+ * MF; need to read it from shmem...
+ */
+/* Divide the engine-wide HW resources (status blocks, L2 queues, vports,
+ * RSS engines, PQs, rate limiters, MAC/VLAN filters, ILT lines and LL2
+ * queues) evenly between the PFs on the engine, and derive this PF's
+ * start offset for each resource.
+ *
+ * @param p_hwfn - fills p_hwfn->hw_info.resc_num[] and resc_start[].
+ *
+ * @return ECORE_SUCCESS, or ECORE_INVAL if the resulting ILT range would
+ *         exceed the chip's total number of ILT records.
+ */
+static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn)
+{
+       u32 *resc_start = p_hwfn->hw_info.resc_start;
+       u8 num_funcs = p_hwfn->num_funcs_on_engine;
+       u32 *resc_num = p_hwfn->hw_info.resc_num;
+       int i, max_vf_vlan_filters;
+       struct ecore_sb_cnt_info sb_cnt_info;
+       bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+#ifndef ASIC_ONLY
+       /* For AH, an equal share of the ILT lines between the maximal number of
+        * PFs is not enough for RoCE. This would be solved by the future
+        * resource allocation scheme, but isn't currently present for
+        * FPGA/emulation. For now we keep a number that is sufficient for RoCE
+        * to work - the BB number of ILT lines divided by its max PFs number.
+        */
+       u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
+#endif
+
+       OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
+
+#ifdef CONFIG_ECORE_SRIOV
+       max_vf_vlan_filters = ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS;
+#else
+       max_vf_vlan_filters = 0;
+#endif
+
+       /* SBs are bounded both by the per-path HW maximum and by what the
+        * IGU CAM actually provides for this function.
+        */
+       ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+       resc_num[ECORE_SB] = OSAL_MIN_T(u32,
+                                       (MAX_SB_PER_PATH_BB / num_funcs),
+                                       sb_cnt_info.sb_cnt);
+
+       resc_num[ECORE_L2_QUEUE] = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+                                   MAX_NUM_L2_QUEUES_BB) / num_funcs;
+       resc_num[ECORE_VPORT] = (b_ah ? MAX_NUM_VPORTS_K2 :
+                                MAX_NUM_VPORTS_BB) / num_funcs;
+       resc_num[ECORE_RSS_ENG] = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+                                  ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+       resc_num[ECORE_PQ] = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+                             MAX_QM_TX_QUEUES_BB) / num_funcs;
+       resc_num[ECORE_RL] = 8;
+       resc_num[ECORE_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
+       /* VF VLAN filters are carved out of the pool before the per-PF
+        * split; vlan0 is always accounted for.
+        */
+       resc_num[ECORE_VLAN] = (ETH_NUM_VLAN_FILTERS -
+                               max_vf_vlan_filters +
+                               1 /*For vlan0 */) / num_funcs;
+
+       /* TODO - there will be a problem in AH - there are only 11k lines */
+       resc_num[ECORE_ILT] = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+                              PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+       resc_num[ECORE_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+
+#ifdef CONFIG_ECORE_ROCE
+       /* CNQ / CMDQS are actually the same resource... we separate
+        * the name for clarity, but providing the same equation for
+        * calculation assures no overlap between functions.
+        */
+       resc_num[ECORE_ROCE_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs;
+       resc_num[ECORE_CMDQS_CQS] = NUM_OF_CMDQS_CQS / num_funcs;
+#endif
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               /* Reduced build contains less PQs */
+               if (!(p_hwfn->p_dev->b_is_emul_full))
+                       resc_num[ECORE_PQ] = 32;
+
+               /* For AH emulation, since we have a possible maximal number of
+                * 16 enabled PFs, in case there are not enough ILT lines -
+                * allocate only first PF as RoCE and have all the other ETH
+                * only with less ILT lines.
+                */
+               if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
+                       resc_num[ECORE_ILT] = OSAL_MAX_T(u32,
+                                                        resc_num[ECORE_ILT],
+                                                        roce_min_ilt_lines);
+       }
+#endif
+
+       /* Each PF gets a contiguous slice: start = per-PF share * rel_pf_id */
+       for (i = 0; i < ECORE_MAX_RESC; i++)
+               resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+
+#ifndef ASIC_ONLY
+       /* Correct the common ILT calculation if PF0 has more */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
+           p_hwfn->p_dev->b_is_emul_full &&
+           p_hwfn->rel_pf_id && resc_num[ECORE_ILT] < roce_min_ilt_lines)
+               resc_start[ECORE_ILT] += roce_min_ilt_lines -
+                   resc_num[ECORE_ILT];
+#endif
+
+       /* Sanity for ILT */
+       if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+           (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
+               DP_NOTICE(p_hwfn, true,
+                         "Can't assign ILT pages [%08x,...,%08x]\n",
+                         RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn,
+                                                                 ECORE_ILT) -
+                         1);
+               return ECORE_INVAL;
+       }
+
+       ecore_hw_set_feat(p_hwfn);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+                  "The numbers for each resource are:\n"
+                  "SB = %d start = %d\n"
+                  "L2_QUEUE = %d start = %d\n"
+                  "VPORT = %d start = %d\n"
+                  "PQ = %d start = %d\n"
+                  "RL = %d start = %d\n"
+                  "MAC = %d start = %d\n"
+                  "VLAN = %d start = %d\n"
+                  "CQ_CNQ_RAM = %d start = %d\n"
+                  "ILT = %d start = %d\n"
+                  "LL2_QUEUE = %d start = %d\n"
+                  "CMDQS_CQS = %d start = %d\n",
+                  RESC_NUM(p_hwfn, ECORE_SB), RESC_START(p_hwfn, ECORE_SB),
+                  RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
+                  RESC_START(p_hwfn, ECORE_L2_QUEUE),
+                  RESC_NUM(p_hwfn, ECORE_VPORT),
+                  RESC_START(p_hwfn, ECORE_VPORT),
+                  RESC_NUM(p_hwfn, ECORE_PQ), RESC_START(p_hwfn, ECORE_PQ),
+                  RESC_NUM(p_hwfn, ECORE_RL), RESC_START(p_hwfn, ECORE_RL),
+                  RESC_NUM(p_hwfn, ECORE_MAC), RESC_START(p_hwfn, ECORE_MAC),
+                  RESC_NUM(p_hwfn, ECORE_VLAN),
+                  RESC_START(p_hwfn, ECORE_VLAN),
+                  RESC_NUM(p_hwfn, ECORE_ROCE_CNQ_RAM),
+                  RESC_START(p_hwfn, ECORE_ROCE_CNQ_RAM),
+                  RESC_NUM(p_hwfn, ECORE_ILT), RESC_START(p_hwfn, ECORE_ILT),
+                  RESC_NUM(p_hwfn, ECORE_LL2_QUEUE),
+                  RESC_START(p_hwfn, ECORE_LL2_QUEUE),
+                  RESC_NUM(p_hwfn, ECORE_CMDQS_CQS),
+                  RESC_START(p_hwfn, ECORE_CMDQS_CQS));
+
+       return ECORE_SUCCESS;
+}
+
+/* Read the port/function configuration published by the MCP in the nvm_cfg
+ * shared-memory area: network port mode, default link settings
+ * (speed capabilities, forced speed / autoneg, pause), multi-function mode
+ * and device capabilities. Finishes by delegating the per-function data to
+ * ecore_mcp_fill_shmem_func_info().
+ *
+ * @return ECORE_INVAL if the MCP has not initialized shared memory yet,
+ *         otherwise the status of ecore_mcp_fill_shmem_func_info().
+ */
+static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt)
+{
+       u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+       u32 port_cfg_addr, link_temp, device_capabilities;
+       struct ecore_mcp_link_params *link;
+
+       /* Read global nvm_cfg address */
+       u32 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+       /* Verify MCP has initialized it */
+       if (nvm_cfg_addr == 0) {
+               DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
+               return ECORE_INVAL;
+       }
+
+       /* Read nvm_cfg1  (Notice this is just offset, and not offsize (TBD) */
+       nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+       /* Translate the chip-wide port-mode setting */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+           OFFSETOF(struct nvm_cfg1, glob)+OFFSETOF(struct nvm_cfg1_glob,
+                                                    core_cfg);
+
+       core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
+
+       switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+               NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
+               break;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+               p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
+               break;
+       default:
+               /* Unknown mode: warn and leave port_mode untouched */
+               DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
+                         core_cfg);
+               break;
+       }
+
+       /* Read default link configuration */
+       link = &p_hwfn->mcp_info->link_input;
+       port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+           OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+       link_temp = ecore_rd(p_hwfn, p_ptt,
+                            port_cfg_addr +
+                            OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
+       link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+       link->speed.advertised_speeds = link_temp;
+
+       /* Capabilities mirror the advertised speeds read above */
+       link_temp = link->speed.advertised_speeds;
+       p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
+
+       link_temp = ecore_rd(p_hwfn, p_ptt,
+                            port_cfg_addr +
+                            OFFSETOF(struct nvm_cfg1_port, link_settings));
+       switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+               NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+               link->speed.autoneg = true;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+               link->speed.forced_speed = 1000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+               link->speed.forced_speed = 10000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+               link->speed.forced_speed = 25000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+               link->speed.forced_speed = 40000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+               link->speed.forced_speed = 50000;
+               break;
+       case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+               link->speed.forced_speed = 100000;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
+       }
+
+       /* Extract the flow-control defaults from the same link_settings word */
+       link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
+       link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
+       link->pause.autoneg = !!(link_temp &
+                                 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+       link->pause.forced_rx = !!(link_temp &
+                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+       link->pause.forced_tx = !!(link_temp &
+                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+       link->loopback_mode = 0;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                  "Read default link: Speed 0x%08x, Adv. Speed 0x%08x,"
+                  " AN: 0x%02x, PAUSE AN: 0x%02x\n",
+                  link->speed.forced_speed, link->speed.advertised_speeds,
+                  link->speed.autoneg, link->pause.autoneg);
+
+       /* Read Multi-function information from shmem */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+           OFFSETOF(struct nvm_cfg1, glob) +
+           OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+
+       generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
+
+       mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+           NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+       switch (mf_mode) {
+       case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+               p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+               p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
+               break;
+       case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+               p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
+               break;
+       }
+       DP_INFO(p_hwfn, "Multi function mode is %08x\n",
+               p_hwfn->p_dev->mf_mode);
+
+       /* Read device capabilities from shmem */
+       addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+           OFFSETOF(struct nvm_cfg1, glob) +
+           OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
+
+       device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
+       if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+               OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
+                            &p_hwfn->hw_info.device_capabilities);
+       if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
+               OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
+                            &p_hwfn->hw_info.device_capabilities);
+       if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
+               OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
+                            &p_hwfn->hw_info.device_capabilities);
+       if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
+               OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
+                            &p_hwfn->hw_info.device_capabilities);
+
+       return ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
+
+/* Learn how many PFs are enabled on this engine from MISCS_REG_FUNCTION_HIDE
+ * and cache the result in p_hwfn->num_funcs_on_engine. On FPGA the count is
+ * clamped to 4.
+ */
+static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt)
+{
+       u8 num_funcs;
+       u32 tmp, mask;
+
+       /* Default: assume the chip-type maximum */
+       num_funcs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_PFS_K2
+           : MAX_NUM_PFS_BB;
+
+       /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+        * in the other bits are selected.
+        * Bits 1-15 are for functions 1-15, respectively, and their value is
+        * '0' only for enabled functions (function 0 always exists and
+        * enabled).
+        * In case of CMT, only the "even" functions are enabled, and thus the
+        * number of functions for both hwfns is learnt from the same bits.
+        */
+
+       tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+       if (tmp & 0x1) {
+               /* The masks select which FUNCTION_HIDE bits belong to this
+                * path; 0xaaaa/0x5554 presumably pick the odd/even function
+                * bits respectively - confirm against the register spec.
+                */
+               if (ECORE_PATH_ID(p_hwfn) && p_hwfn->p_dev->num_hwfns == 1) {
+                       num_funcs = 0;
+                       mask = 0xaaaa;
+               } else {
+                       num_funcs = 1;
+                       mask = 0x5554;
+               }
+
+               /* Invert so that set bits mark enabled functions, then count */
+               tmp = (tmp ^ 0xffffffff) & mask;
+               while (tmp) {
+                       if (tmp & 0x1)
+                               num_funcs++;
+                       tmp >>= 0x1;
+               }
+       }
+
+       p_hwfn->num_funcs_on_engine = num_funcs;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false,
+                         "FPGA: Limit number of PFs to 4 [would affect"
+                         " resource allocation, needed for IOV]\n");
+               p_hwfn->num_funcs_on_engine = 4;
+       }
+#endif
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "num_funcs_on_engine = %d\n",
+                  p_hwfn->num_funcs_on_engine);
+}
+
+/* Learn the number of ports on the engine for BB devices from the NW
+ * port-mode register (with FPGA/emulation overrides): modes 0-2 mean one
+ * port, 3-5 mean two ports, anything else is unsupported and defaults to
+ * one port.
+ */
+static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt)
+{
+       u32 port_mode;
+
+#ifndef ASIC_ONLY
+       /* Read the port mode */
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+               port_mode = 4;
+       else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
+                (p_hwfn->p_dev->num_hwfns > 1))
+               /* In CMT on emulation, assume 1 port */
+               port_mode = 1;
+       else
+#endif
+               port_mode = ecore_rd(p_hwfn, p_ptt,
+                                    CNIG_REG_NW_PORT_MODE_BB_B0);
+
+       if (port_mode < 3) {
+               p_hwfn->p_dev->num_ports_in_engines = 1;
+       } else if (port_mode <= 5) {
+               p_hwfn->p_dev->num_ports_in_engines = 2;
+       } else {
+               /* Print the offending port_mode value itself; the previous
+                * code printed num_ports_in_engines, which has not been set
+                * yet on this path.
+                */
+               DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
+                         port_mode);
+
+               /* Default num_ports_in_engines to something */
+               p_hwfn->p_dev->num_ports_in_engines = 1;
+       }
+}
+
+/* Count the enabled NIG ports on an AH device by probing each port's
+ * configuration register (bit 0 set means the port is enabled).
+ */
+static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt)
+{
+       u32 port_cfg;
+       int port_id;
+
+       p_hwfn->p_dev->num_ports_in_engines = 0;
+
+       for (port_id = 0; port_id < MAX_NUM_PORTS_K2; port_id++) {
+               port_cfg = ecore_rd(p_hwfn, p_ptt,
+                                   CNIG_REG_NIG_PORT0_CONF_K2 +
+                                   (port_id * 4));
+               if (port_cfg & 0x1)
+                       p_hwfn->p_dev->num_ports_in_engines++;
+       }
+}
+
+/* Dispatch port-count discovery to the chip-specific helper (BB vs. AH). */
+static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt)
+{
+       if (!ECORE_IS_BB(p_hwfn->p_dev))
+               ecore_hw_info_port_num_ah(p_hwfn, p_ptt);
+       else
+               ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
+}
+
+/* Gather the HW/SHMEM-derived information for one hwfn: IOV data, port
+ * count, nvm_cfg (on ASIC), IGU CAM, MAC address, ovlan, personality and
+ * the number of functions on the engine; finally split the HW resources
+ * via ecore_hw_get_resc().
+ *
+ * @param personality - forced PCI personality, or ECORE_PCI_DEFAULT to take
+ *                      the protocol reported by the MFW.
+ *
+ * @return status of the first failing step, else of ecore_hw_get_resc().
+ */
+static enum _ecore_status_t
+ecore_get_hw_info(struct ecore_hwfn *p_hwfn,
+                 struct ecore_ptt *p_ptt,
+                 enum ecore_pci_personality personality)
+{
+       enum _ecore_status_t rc;
+
+       rc = ecore_iov_hw_info(p_hwfn, p_hwfn->p_main_ptt);
+       if (rc)
+               return rc;
+
+       /* TODO In get_hw_info, amongst others:
+        * Get MCP FW revision and determine according to it the supported
+        * features (e.g. DCB)
+        * Get boot mode
+        * ecore_get_pcie_width_speed, WOL capability.
+        * Number of global CQ-s (for storage
+        */
+       ecore_hw_info_port_num(p_hwfn, p_ptt);
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+#endif
+               /* NOTE(review): the return value of ecore_hw_get_nvm_info()
+                * is ignored here - a shmem failure proceeds with defaults.
+                * Consider propagating it.
+                */
+               ecore_hw_get_nvm_info(p_hwfn, p_ptt);
+
+       rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
+       if (rc)
+               return rc;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
+#endif
+               OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
+                           p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
+#ifndef ASIC_ONLY
+       } else {
+               /* No MFW on emulation/FPGA - fabricate a MAC per PF */
+               static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 };
+
+               OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
+               p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
+       }
+#endif
+
+       if (ecore_mcp_is_init(p_hwfn)) {
+               if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
+                       p_hwfn->hw_info.ovlan =
+                           p_hwfn->mcp_info->func_info.ovlan;
+
+               ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+       }
+
+       /* A caller-forced personality wins over the MFW-reported protocol */
+       if (personality != ECORE_PCI_DEFAULT)
+               p_hwfn->hw_info.personality = personality;
+       else if (ecore_mcp_is_init(p_hwfn))
+               p_hwfn->hw_info.personality =
+                   p_hwfn->mcp_info->func_info.protocol;
+
+#ifndef ASIC_ONLY
+       /* To overcome ILT lack for emulation, until at least until we'll have
+        * a definite answer from system about it, allow only PF0 to be RoCE.
+        */
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
+               if (!p_hwfn->rel_pf_id)
+                       p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
+               else
+                       p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+       }
+#endif
+
+       ecore_get_num_funcs(p_hwfn, p_ptt);
+
+       /* Feat num is dependent on personality and on the number of functions
+        * on the engine. Therefore it should come after personality
+        * initialization and after getting the number of functions.
+        */
+       return ecore_hw_get_resc(p_hwfn);
+}
+
+/* PCI device ID identifying an AH (K2) device; compared against the
+ * config-space device_id in ecore_get_dev_info().
+ * @TMP - this should move to a proper .h
+ */
+#define CHIP_NUM_AH                    0x8070
+
+/* Identify the device: read vendor/device IDs from PCI config space and
+ * chip number/revision/bond-id/metal from MISCS registers, determine
+ * BB vs. AH, and learn whether the device runs in CMT (two hwfns).
+ *
+ * @return ECORE_ABORTED for the unsupported BB A0, else ECORE_SUCCESS.
+ */
+static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       u32 tmp;
+
+       /* Read Vendor Id / Device Id */
+       OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
+                                 &p_dev->vendor_id);
+       OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
+                                 &p_dev->device_id);
+
+       p_dev->chip_num = (u16) ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                        MISCS_REG_CHIP_NUM);
+       p_dev->chip_rev = (u16) ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                        MISCS_REG_CHIP_REV);
+
+       MASK_FIELD(CHIP_REV, p_dev->chip_rev);
+
+       /* Determine type - note this keys off the PCI device_id, not the
+        * chip_num read from MISCS above.
+        */
+       if (p_dev->device_id == CHIP_NUM_AH)
+               p_dev->type = ECORE_DEV_TYPE_AH;
+       else
+               p_dev->type = ECORE_DEV_TYPE_BB;
+
+       /* Learn number of HW-functions */
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                      MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
+       if (tmp & (1 << p_hwfn->rel_pf_id)) {
+               DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
+               p_dev->num_hwfns = 2;
+       } else {
+               p_dev->num_hwfns = 1;
+       }
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_dev)) {
+               /* For some reason we have problems with this register
+                * in B0 emulation; Simply assume no CMT
+                */
+               DP_NOTICE(p_dev->hwfns, false,
+                         "device on emul - assume no CMT\n");
+               p_dev->num_hwfns = 1;
+       }
+#endif
+
+       p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                      MISCS_REG_CHIP_TEST_REG) >> 4;
+       MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
+       p_dev->chip_metal = (u16) ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                          MISCS_REG_CHIP_METAL);
+       MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
+       DP_INFO(p_dev->hwfns,
+               "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x"
+               " Metal: %04x\n",
+               ECORE_IS_BB(p_dev) ? "BB" : "AH",
+               CHIP_REV_IS_A0(p_dev) ? 0 : 1,
+               p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
+               p_dev->chip_metal);
+
+       if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
+               DP_NOTICE(p_dev->hwfns, false,
+                         "The chip type/rev (BB A0) is not supported!\n");
+               return ECORE_ABORTED;
+       }
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                        MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
+
+       if (CHIP_REV_IS_EMUL(p_dev)) {
+               /* Bit 29 of ECO_RESERVED distinguishes FULL from REDUCED
+                * emulation builds; this affects later resource sizing.
+                */
+               tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                              MISCS_REG_ECO_RESERVED);
+               if (tmp & (1 << 29)) {
+                       DP_NOTICE(p_hwfn, false,
+                                 "Emulation: Running on a FULL build\n");
+                       p_dev->b_is_emul_full = true;
+               } else {
+                       DP_NOTICE(p_hwfn, false,
+                                 "Emulation: Running on a REDUCED build\n");
+               }
+       }
+#endif
+
+       return ECORE_SUCCESS;
+}
+
+/* Mark every PF hwfn as hw/fw-uninitialized ahead of a hibernate cycle;
+ * VF devices are left untouched.
+ */
+void ecore_prepare_hibernate(struct ecore_dev *p_dev)
+{
+       struct ecore_hwfn *p_hwfn;
+       int hwfn_idx;
+
+       if (IS_VF(p_dev))
+               return;
+
+       for_each_hwfn(p_dev, hwfn_idx) {
+               p_hwfn = &p_dev->hwfns[hwfn_idx];
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
+                          "Mark hw/fw uninitialized\n");
+
+               p_hwfn->hw_init_done = false;
+               p_hwfn->first_on_engine = false;
+       }
+}
+
+/* Prepare a single hwfn for use: attach its BAR views, validate chip
+ * access, allocate the PTT pool and main PTT, learn device-wide info on
+ * the first hwfn, initialize the MCP channel, read HW/SHMEM info and
+ * allocate the init RT array. Uses goto-based cleanup: each error label
+ * unwinds exactly the resources acquired before it.
+ *
+ * @param p_regview  - this hwfn's slice of the register BAR.
+ * @param p_doorbells - this hwfn's slice of the doorbell BAR.
+ *
+ * @return ECORE_SUCCESS, or the status of the first failing step.
+ */
+static enum _ecore_status_t
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
+                       void OSAL_IOMEM *p_regview,
+                       void OSAL_IOMEM *p_doorbells,
+                       enum ecore_pci_personality personality)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       /* Split PCI bars evenly between hwfns */
+       p_hwfn->regview = p_regview;
+       p_hwfn->doorbells = p_doorbells;
+
+       /* Validate that chip access is feasible */
+       if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+               DP_ERR(p_hwfn,
+                      "Reading the ME register returns all Fs;"
+                      " Preventing further chip access\n");
+               return ECORE_INVAL;
+       }
+
+       get_function_id(p_hwfn);
+
+       /* Allocate PTT pool */
+       rc = ecore_ptt_pool_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
+               goto err0;
+       }
+
+       /* Allocate the main PTT */
+       p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+       /* First hwfn learns basic information, e.g., number of hwfns */
+       if (!p_hwfn->my_id) {
+               rc = ecore_get_dev_info(p_hwfn->p_dev);
+               if (rc != ECORE_SUCCESS)
+                       goto err1;
+       }
+
+       ecore_hw_hwfn_prepare(p_hwfn);
+
+       /* Initialize MCP structure */
+       rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
+               goto err1;
+       }
+
+       /* Read the device configuration information from the HW and SHMEM */
+       rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
+               goto err2;
+       }
+
+       /* Allocate the init RT array and initialize the init-ops engine */
+       rc = ecore_init_alloc(p_hwfn);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
+               goto err2;
+       }
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false,
+                         "FPGA: workaround; Prevent DMAE parities\n");
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7);
+
+               DP_NOTICE(p_hwfn, false,
+                         "FPGA: workaround: Set VF bar0 size\n");
+               ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                        PGLUE_B_REG_VF_BAR0_SIZE, 4);
+       }
+#endif
+
+       return rc;
+err2:
+       ecore_mcp_free(p_hwfn);
+err1:
+       ecore_hw_hwfn_free(p_hwfn);
+err0:
+       return rc;
+}
+
+/* Prepare the whole device: VFs take the VF path; PFs prepare the leading
+ * hwfn (which learns num_hwfns) and, in CMT mode, a second hwfn using the
+ * upper half of each BAR. On second-hwfn failure, the already-prepared
+ * first hwfn is unwound.
+ *
+ * @param personality - forced personality for hwfn 0; hwfn 1 inherits
+ *                      whatever hwfn 0 ended up with.
+ */
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       enum _ecore_status_t rc;
+
+       if (IS_VF(p_dev))
+               return ecore_vf_hw_prepare(p_dev);
+
+       /* Store the precompiled init data ptrs */
+       ecore_init_iro_array(p_dev);
+
+       /* Initialize the first hwfn - will learn number of hwfns */
+       rc = ecore_hw_prepare_single(p_hwfn,
+                                    p_dev->regview,
+                                    p_dev->doorbells, personality);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       personality = p_hwfn->hw_info.personality;
+
+       /* initialize 2nd hwfn if necessary */
+       if (p_dev->num_hwfns > 1) {
+               void OSAL_IOMEM *p_regview, *p_doorbell;
+               u8 OSAL_IOMEM *addr;
+
+               /* adjust bar offset for second engine */
+               addr = (u8 OSAL_IOMEM *) p_dev->regview +
+                   ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+               p_regview = (void OSAL_IOMEM *)addr;
+
+               addr = (u8 OSAL_IOMEM *) p_dev->doorbells +
+                   ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+               p_doorbell = (void OSAL_IOMEM *)addr;
+
+               /* prepare second hw function */
+               rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
+                                            p_doorbell, personality);
+
+               /* in case of error, need to free the previously
+                * initialized hwfn 0
+                */
+               if (rc != ECORE_SUCCESS) {
+                       ecore_init_free(p_hwfn);
+                       ecore_mcp_free(p_hwfn);
+                       ecore_hw_hwfn_free(p_hwfn);
+                       return rc;
+               }
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Tear down every hwfn of the device: VFs release their PF channel, PFs
+ * free the init arrays, PTT pool, MCP structures and the DMAE mutex.
+ */
+void ecore_hw_remove(struct ecore_dev *p_dev)
+{
+       int hwfn_idx;
+
+       for_each_hwfn(p_dev, hwfn_idx) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[hwfn_idx];
+
+               if (IS_VF(p_dev)) {
+                       ecore_vf_pf_release(p_hwfn);
+               } else {
+                       ecore_init_free(p_hwfn);
+                       ecore_hw_hwfn_free(p_hwfn);
+                       ecore_mcp_free(p_hwfn);
+
+                       OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
+               }
+       }
+}
+
+/* Free all DMA pages of a NEXT_PTR-mode chain by walking the embedded
+ * next-pointer element at the end of each page. The walk must read the
+ * next pointers *before* freeing the current page, which is why the order
+ * of statements in the loop matters.
+ */
+static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
+                                     struct ecore_chain *p_chain)
+{
+       void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
+       dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
+       struct ecore_chain_next *p_next;
+       u32 size, i;
+
+       if (!p_virt)
+               return;
+
+       /* Offset of the next-ptr element within each page */
+       size = p_chain->elem_size * p_chain->usable_per_page;
+
+       for (i = 0; i < p_chain->page_cnt; i++) {
+               if (!p_virt)
+                       break;
+
+               /* Capture the successor before releasing this page */
+               p_next = (struct ecore_chain_next *)((u8 *) p_virt + size);
+               p_virt_next = p_next->next_virt;
+               p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
+
+               OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
+                                      ECORE_CHAIN_PAGE_SIZE);
+
+               p_virt = p_virt_next;
+               p_phys = p_phys_next;
+       }
+}
+
+/* Release the single DMA page backing a SINGLE-mode chain, if allocated. */
+static void ecore_chain_free_single(struct ecore_dev *p_dev,
+                                   struct ecore_chain *p_chain)
+{
+       if (p_chain->p_virt_addr)
+               OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
+                                      p_chain->p_phys_addr,
+                                      ECORE_CHAIN_PAGE_SIZE);
+}
+
+/* Free a PBL-mode chain: release each data page recorded in the virtual
+ * address table (with its DMA address taken from the PBL entries), then
+ * the PBL table itself, and finally the virtual-address table. Handles
+ * partially-constructed chains (stops at the first NULL page entry).
+ */
+static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
+                                struct ecore_chain *p_chain)
+{
+       void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+       u8 *p_pbl_virt = (u8 *) p_chain->pbl.p_virt_table;
+       u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+
+       if (!pp_virt_addr_tbl)
+               return;
+
+       /* No PBL table means no data pages were linked either */
+       if (!p_chain->pbl.p_virt_table)
+               goto out;
+
+       for (i = 0; i < page_cnt; i++) {
+               if (!pp_virt_addr_tbl[i])
+                       break;
+
+               OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
+                                      *(dma_addr_t *) p_pbl_virt,
+                                      ECORE_CHAIN_PAGE_SIZE);
+
+               p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+       }
+
+       pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
+       OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
+                              p_chain->pbl.p_phys_table, pbl_size);
+out:
+       OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
+}
+
+/* Release a chain's DMA memory according to its addressing mode. */
+void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+       if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
+               ecore_chain_free_next_ptr(p_dev, p_chain);
+       else if (p_chain->mode == ECORE_CHAIN_MODE_SINGLE)
+               ecore_chain_free_single(p_dev, p_chain);
+       else if (p_chain->mode == ECORE_CHAIN_MODE_PBL)
+               ecore_chain_free_pbl(p_dev, p_chain);
+}
+
+/* Validate that the requested chain (page_cnt pages of elem_size elements)
+ * fits within the range of its producer/consumer counter type.
+ *
+ * @return ECORE_INVAL when the element count overflows the counter type,
+ *         ECORE_SUCCESS otherwise.
+ */
+static enum _ecore_status_t
+ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
+                              enum ecore_chain_cnt_type cnt_type,
+                              osal_size_t elem_size, u32 page_cnt)
+{
+       u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+       /* The actual chain size can be larger than the maximal possible value
+        * after rounding up the requested elements number to pages, and after
+        * taking into account the unusable elements (next-ptr elements).
+        * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+        * size/capacity fields are of a u32 type.
+        */
+       if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
+            chain_size > ((u32) ECORE_U16_MAX + 1)) ||
+           (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
+            chain_size > ECORE_U32_MAX)) {
+               /* chain_size is u64; "%lx" is only 32 bits wide on ILP32
+                * targets, so print with "%llx" and an explicit cast.
+                */
+               DP_NOTICE(p_dev, true,
+                         "The actual chain size (0x%llx) is larger than"
+                         " the maximal possible value\n",
+                         (unsigned long long)chain_size);
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate page_cnt DMA pages for a NEXT_PTR-mode chain, linking each
+ * page's trailing next-ptr element to its successor; the last page links
+ * back to the first so the chain forms a ring.
+ *
+ * @return ECORE_NOMEM if any page allocation fails, else ECORE_SUCCESS.
+ */
+static enum _ecore_status_t
+ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev,
+                          struct ecore_chain *p_chain)
+{
+       void *p_page = OSAL_NULL, *p_prev_page = OSAL_NULL;
+       dma_addr_t page_phys = 0;
+       u32 page_idx;
+
+       for (page_idx = 0; page_idx < p_chain->page_cnt; page_idx++) {
+               p_page = OSAL_DMA_ALLOC_COHERENT(p_dev, &page_phys,
+                                                ECORE_CHAIN_PAGE_SIZE);
+               if (!p_page) {
+                       DP_NOTICE(p_dev, true,
+                                 "Failed to allocate chain memory\n");
+                       return ECORE_NOMEM;
+               }
+
+               if (!page_idx) {
+                       /* First page anchors the chain */
+                       ecore_chain_init_mem(p_chain, p_page, page_phys);
+                       ecore_chain_reset(p_chain);
+               } else {
+                       ecore_chain_init_next_ptr_elem(p_chain, p_prev_page,
+                                                      p_page, page_phys);
+               }
+
+               p_prev_page = p_page;
+       }
+
+       /* Close the ring: the last page's next element points back to the
+        * beginning of the chain.
+        */
+       ecore_chain_init_next_ptr_elem(p_chain, p_prev_page,
+                                      p_chain->p_virt_addr,
+                                      p_chain->p_phys_addr);
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate the single DMA page backing a SINGLE-mode chain and bring the
+ * chain to its initial state.
+ */
+static enum _ecore_status_t
+ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
+{
+       dma_addr_t phys_addr = 0;
+       void *virt_addr;
+
+       virt_addr = OSAL_DMA_ALLOC_COHERENT(p_dev, &phys_addr,
+                                           ECORE_CHAIN_PAGE_SIZE);
+       if (virt_addr == OSAL_NULL) {
+               DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
+               return ECORE_NOMEM;
+       }
+
+       /* Attach the page to the chain and reset producer/consumer state */
+       ecore_chain_init_mem(p_chain, virt_addr, phys_addr);
+       ecore_chain_reset(p_chain);
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate a PBL-mode chain: a virtual-address table, a contiguous PBL
+ * of physical page addresses, and the chain pages themselves.
+ * Partial allocations are released by the caller via ecore_chain_free().
+ */
+static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
+                                                 struct ecore_chain *p_chain)
+{
+       void *p_virt = OSAL_NULL;
+       u8 *p_pbl_virt = OSAL_NULL;
+       void **pp_virt_addr_tbl = OSAL_NULL;
+       dma_addr_t p_phys = 0, p_pbl_phys = 0;
+       u32 page_cnt = p_chain->page_cnt, size, i;
+
+       size = page_cnt * sizeof(*pp_virt_addr_tbl);
+       pp_virt_addr_tbl = (void **)OSAL_VALLOC(p_dev, size);
+       if (!pp_virt_addr_tbl) {
+               DP_NOTICE(p_dev, true,
+                         "Failed to allocate memory for the chain"
+                         " virtual addresses table\n");
+               return ECORE_NOMEM;
+       }
+       OSAL_MEM_ZERO(pp_virt_addr_tbl, size);
+
+       /* The allocation of the PBL table is done with its full size, since it
+        * is expected to be successive.
+        */
+       size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
+       p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+
+       /* Link the PBL info into the chain BEFORE checking the allocation,
+        * so that the caller's ecore_chain_free() can release
+        * pp_virt_addr_tbl on the error path; otherwise the table would
+        * leak whenever the PBL allocation fails.
+        */
+       ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+                                pp_virt_addr_tbl);
+       if (!p_pbl_virt) {
+               DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
+               return ECORE_NOMEM;
+       }
+
+       for (i = 0; i < page_cnt; i++) {
+               p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
+                                                ECORE_CHAIN_PAGE_SIZE);
+               if (!p_virt) {
+                       DP_NOTICE(p_dev, true,
+                                 "Failed to allocate chain memory\n");
+                       return ECORE_NOMEM;
+               }
+
+               if (i == 0) {
+                       ecore_chain_init_mem(p_chain, p_virt, p_phys);
+                       ecore_chain_reset(p_chain);
+               }
+
+               /* Fill the PBL table with the physical address of the page */
+               *(dma_addr_t *) p_pbl_virt = p_phys;
+               /* Keep the virtual address of the page */
+               p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+
+               p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate and initialize a chain of num_elems elements of elem_size
+ * bytes, in the requested mode; partially built chains are released on
+ * failure.
+ */
+enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
+                                      enum ecore_chain_use_mode intended_use,
+                                      enum ecore_chain_mode mode,
+                                      enum ecore_chain_cnt_type cnt_type,
+                                      u32 num_elems, osal_size_t elem_size,
+                                      struct ecore_chain *p_chain)
+{
+       enum _ecore_status_t rc;
+       u32 page_cnt;
+
+       /* A SINGLE-mode chain occupies exactly one page */
+       page_cnt = (mode == ECORE_CHAIN_MODE_SINGLE) ? 1 :
+           ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+
+       rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
+                                           page_cnt);
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_dev, true,
+                         "Cannot allocate a chain with the given arguments:\n"
+                         " [use_mode %d, mode %d, cnt_type %d, num_elems %d,"
+                         " elem_size %zu]\n",
+                         intended_use, mode, cnt_type, num_elems, elem_size);
+               return rc;
+       }
+
+       ecore_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
+                               mode, cnt_type);
+
+       /* Allocate the chain pages according to the selected mode */
+       switch (mode) {
+       case ECORE_CHAIN_MODE_NEXT_PTR:
+               rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
+               break;
+       case ECORE_CHAIN_MODE_SINGLE:
+               rc = ecore_chain_alloc_single(p_dev, p_chain);
+               break;
+       case ECORE_CHAIN_MODE_PBL:
+               rc = ecore_chain_alloc_pbl(p_dev, p_chain);
+               break;
+       }
+
+       /* Undo any partial allocation before reporting failure */
+       if (rc != ECORE_SUCCESS)
+               ecore_chain_free(p_dev, p_chain);
+
+       return rc;
+}
+
+/* Translate a function-relative l2-queue index into an absolute one by
+ * offsetting it with the first queue owned by this function.
+ */
+enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
+                                      u16 src_id, u16 *dst_id)
+{
+       u16 first, beyond;
+
+       if (src_id < RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+               *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;
+               return ECORE_SUCCESS;
+       }
+
+       first = (u16) RESC_START(p_hwfn, ECORE_L2_QUEUE);
+       beyond = first + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+       DP_NOTICE(p_hwfn, true,
+                 "l2_queue id [%d] is not valid, available"
+                 " indices [%d - %d]\n",
+                 src_id, first, beyond);
+
+       return ECORE_INVAL;
+}
+
+/* Translate a function-relative vport index into an absolute one by
+ * offsetting it with the first vport owned by this function.
+ */
+enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
+                                   u8 src_id, u8 *dst_id)
+{
+       u8 first, beyond;
+
+       if (src_id < RESC_NUM(p_hwfn, ECORE_VPORT)) {
+               *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;
+               return ECORE_SUCCESS;
+       }
+
+       first = (u8) RESC_START(p_hwfn, ECORE_VPORT);
+       beyond = first + RESC_NUM(p_hwfn, ECORE_VPORT);
+       DP_NOTICE(p_hwfn, true,
+                 "vport id [%d] is not valid, available"
+                 " indices [%d - %d]\n",
+                 src_id, first, beyond);
+
+       return ECORE_INVAL;
+}
+
+/* Translate a function-relative RSS-engine index into an absolute one by
+ * offsetting it with the first engine owned by this function.
+ */
+enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
+                                     u8 src_id, u8 *dst_id)
+{
+       u8 first, beyond;
+
+       if (src_id < RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
+               *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;
+               return ECORE_SUCCESS;
+       }
+
+       first = (u8) RESC_START(p_hwfn, ECORE_RSS_ENG);
+       beyond = first + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
+       DP_NOTICE(p_hwfn, true,
+                 "rss_eng id [%d] is not valid,avail idx [%d - %d]\n",
+                 src_id, first, beyond);
+
+       return ECORE_INVAL;
+}
+
+/* Program a unicast MAC into a free NIG LLH (light L2 handler) filter
+ * entry. Only meaningful in the applicable multi-function modes; a no-op
+ * otherwise. Returns ECORE_INVAL when no free entry is available.
+ * NOTE(review): the per-entry register write order (VALUE/MODE/PROTOCOL
+ * before EN) appears hardware-mandated - preserve it.
+ */
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u8 *p_filter)
+{
+       u32 high, low, en;
+       int i;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return ECORE_SUCCESS;
+
+       /* Split the 6-byte MAC into the high (bytes 0-1) and low
+        * (bytes 2-5) filter-value dwords.
+        */
+       high = p_filter[1] | (p_filter[0] << 8);
+       low = p_filter[5] | (p_filter[4] << 8) |
+           (p_filter[3] << 16) | (p_filter[2] << 24);
+
+       /* Find a free entry and utilize it */
+       for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+               en = ecore_rd(p_hwfn, p_ptt,
+                             NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+               if (en)
+                       continue;
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        2 * i * sizeof(u32), low);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        (2 * i + 1) * sizeof(u32), high);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+                        i * sizeof(u32), 0);
+               /* Enable last, after the entry is fully programmed */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+               break;
+       }
+       if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+               DP_NOTICE(p_hwfn, false,
+                         "Failed to find an empty LLH filter to utilize\n");
+               return ECORE_INVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "MAC: %x:%x:%x:%x:%x:%x is added at %d\n",
+                  p_filter[0], p_filter[1], p_filter[2],
+                  p_filter[3], p_filter[4], p_filter[5], i);
+
+       return ECORE_SUCCESS;
+}
+
+/* Locate the LLH filter entry matching the given MAC (by comparing both
+ * value dwords) and clear it. Logs a notice if no such entry exists.
+ * No-op outside the applicable multi-function modes.
+ */
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt, u8 *p_filter)
+{
+       u32 high, low;
+       int i;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return;
+
+       /* Same dword split as in ecore_llh_add_mac_filter() */
+       high = p_filter[1] | (p_filter[0] << 8);
+       low = p_filter[5] | (p_filter[4] << 8) |
+           (p_filter[3] << 16) | (p_filter[2] << 24);
+
+       /* Find the entry and clean it */
+       for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+               if (ecore_rd(p_hwfn, p_ptt,
+                            NIG_REG_LLH_FUNC_FILTER_VALUE +
+                            2 * i * sizeof(u32)) != low)
+                       continue;
+               if (ecore_rd(p_hwfn, p_ptt,
+                            NIG_REG_LLH_FUNC_FILTER_VALUE +
+                            (2 * i + 1) * sizeof(u32)) != high)
+                       continue;
+
+               /* Disable first, then clear both value dwords */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        2 * i * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        (2 * i + 1) * sizeof(u32), 0);
+               break;
+       }
+       if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+               DP_NOTICE(p_hwfn, false,
+                         "Tried to remove a non-configured filter\n");
+}
+
+/* Program an ethertype into a free NIG LLH filter entry (mode/protocol
+ * set to 1, value-low unused). Only meaningful in the applicable
+ * multi-function modes. Returns ECORE_INVAL when no free entry exists.
+ */
+enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt,
+                                                   u16 filter)
+{
+       u32 high, low, en;
+       int i;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return ECORE_SUCCESS;
+
+       /* Ethertype filters only use the high value dword */
+       high = filter;
+       low = 0;
+
+       /* Find a free entry and utilize it */
+       for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+               en = ecore_rd(p_hwfn, p_ptt,
+                             NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+               if (en)
+                       continue;
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        2 * i * sizeof(u32), low);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        (2 * i + 1) * sizeof(u32), high);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+                        i * sizeof(u32), 1);
+               /* Enable last, after the entry is fully programmed */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+               break;
+       }
+
+       /* No redundant else-after-return; same flow as
+        * ecore_llh_add_mac_filter().
+        */
+       if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+               DP_NOTICE(p_hwfn, false,
+                         "Failed to find an empty LLH filter to utilize\n");
+               return ECORE_INVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "ETH type: %x is added at %d\n", filter, i);
+
+       return ECORE_SUCCESS;
+}
+
+/* Locate the LLH filter entry matching the given ethertype and clear it.
+ * Logs a notice if no such entry exists. No-op outside the applicable
+ * multi-function modes.
+ */
+void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt, u16 filter)
+{
+       u32 high, low;
+       int i;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return;
+
+       /* Ethertype filters only use the high value dword */
+       high = filter;
+       low = 0;
+
+       /* Find the entry and clean it */
+       for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+               if (ecore_rd(p_hwfn, p_ptt,
+                            NIG_REG_LLH_FUNC_FILTER_VALUE +
+                            2 * i * sizeof(u32)) != low)
+                       continue;
+               if (ecore_rd(p_hwfn, p_ptt,
+                            NIG_REG_LLH_FUNC_FILTER_VALUE +
+                            (2 * i + 1) * sizeof(u32)) != high)
+                       continue;
+
+               /* Disable first, then clear both value dwords */
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        2 * i * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        (2 * i + 1) * sizeof(u32), 0);
+               break;
+       }
+       if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+               DP_NOTICE(p_hwfn, false,
+                         "Tried to remove a non-configured filter\n");
+}
+
+/* Disable and clear every NIG LLH filter entry. No-op outside the
+ * applicable multi-function modes.
+ */
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt)
+{
+       int idx;
+
+       if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+               return;
+
+       /* Disable each entry and zero both of its value dwords */
+       for (idx = 0; idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; idx++) {
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_EN + idx * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        2 * idx * sizeof(u32), 0);
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LLH_FUNC_FILTER_VALUE +
+                        (2 * idx + 1) * sizeof(u32), 0);
+       }
+}
+
+/* Sanity-test register access: for each register in reg_tbl, write the
+ * test values 0 and 1, read back, and restore the original value.
+ * Returns ECORE_AGAIN on the first mismatch, ECORE_SUCCESS otherwise.
+ * NOTE(review): the listed registers are presumably chosen as safely
+ * writable per block - confirm against the register spec before editing.
+ */
+enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt)
+{
+       u32 reg_tbl[] = {
+               BRB_REG_HEADER_SIZE,
+               BTB_REG_HEADER_SIZE,
+               CAU_REG_LONG_TIMEOUT_THRESHOLD,
+               CCFC_REG_ACTIVITY_COUNTER,
+               CDU_REG_CID_ADDR_PARAMS,
+               DBG_REG_CLIENT_ENABLE,
+               DMAE_REG_INIT,
+               DORQ_REG_IFEN,
+               GRC_REG_TIMEOUT_EN,
+               IGU_REG_BLOCK_CONFIGURATION,
+               MCM_REG_INIT,
+               MCP2_REG_DBG_DWORD_ENABLE,
+               MISC_REG_PORT_MODE,
+               MISCS_REG_CLK_100G_MODE,
+               MSDM_REG_ENABLE_IN1,
+               MSEM_REG_ENABLE_IN,
+               NIG_REG_CM_HDR,
+               NCSI_REG_CONFIG,
+               PBF_REG_INIT,
+               PTU_REG_ATC_INIT_ARRAY,
+               PCM_REG_INIT,
+               PGLUE_B_REG_ADMIN_PER_PF_REGION,
+               PRM_REG_DISABLE_PRM,
+               PRS_REG_SOFT_RST,
+               PSDM_REG_ENABLE_IN1,
+               PSEM_REG_ENABLE_IN,
+               PSWRQ_REG_DBG_SELECT,
+               PSWRQ2_REG_CDUT_P_SIZE,
+               PSWHST_REG_DISCARD_INTERNAL_WRITES,
+               PSWHST2_REG_DBGSYN_ALMOST_FULL_THR,
+               PSWRD_REG_DBG_SELECT,
+               PSWRD2_REG_CONF11,
+               PSWWR_REG_USDM_FULL_TH,
+               PSWWR2_REG_CDU_FULL_TH2,
+               QM_REG_MAXPQSIZE_0,
+               RSS_REG_RSS_INIT_EN,
+               RDIF_REG_STOP_ON_ERROR,
+               SRC_REG_SOFT_RST,
+               TCFC_REG_ACTIVITY_COUNTER,
+               TCM_REG_INIT,
+               TM_REG_PXP_READ_DATA_FIFO_INIT,
+               TSDM_REG_ENABLE_IN1,
+               TSEM_REG_ENABLE_IN,
+               TDIF_REG_STOP_ON_ERROR,
+               UCM_REG_INIT,
+               UMAC_REG_IPG_HD_BKP_CNTL_BB_B0,
+               USDM_REG_ENABLE_IN1,
+               USEM_REG_ENABLE_IN,
+               XCM_REG_INIT,
+               XSDM_REG_ENABLE_IN1,
+               XSEM_REG_ENABLE_IN,
+               YCM_REG_INIT,
+               YSDM_REG_ENABLE_IN1,
+               YSEM_REG_ENABLE_IN,
+               XYLD_REG_SCBD_STRICT_PRIO,
+               TMLD_REG_SCBD_STRICT_PRIO,
+               MULD_REG_SCBD_STRICT_PRIO,
+               YULD_REG_SCBD_STRICT_PRIO,
+       };
+       u32 test_val[] = { 0x0, 0x1 };
+       u32 val, save_val, i, j;
+
+       for (i = 0; i < OSAL_ARRAY_SIZE(test_val); i++) {
+               for (j = 0; j < OSAL_ARRAY_SIZE(reg_tbl); j++) {
+                       /* Save, write the test pattern, read back, restore */
+                       save_val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
+                       ecore_wr(p_hwfn, p_ptt, reg_tbl[j], test_val[i]);
+                       val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
+                       /* Restore the original register's value */
+                       ecore_wr(p_hwfn, p_ptt, reg_tbl[j], save_val);
+                       if (val != test_val[i]) {
+                               DP_INFO(p_hwfn->p_dev,
+                                       "offset 0x%x: val 0x%x != 0x%x\n",
+                                       reg_tbl[j], val, test_val[i]);
+                               return ECORE_AGAIN;
+                       }
+               }
+       }
+       return ECORE_SUCCESS;
+}
+
+/* Write a coalescing timeset into the queue-zone image at hw_addr.
+ * Rejected for VFs and when coalescing is globally disabled.
+ */
+static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
+                                              struct ecore_ptt *p_ptt,
+                                              u32 hw_addr, void *p_qzone,
+                                              osal_size_t qzone_size,
+                                              u8 timeset)
+{
+       struct coalescing_timeset *p_timeset;
+
+       if (IS_VF(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true, "VF coalescing config not supported\n");
+               return ECORE_INVAL;
+       }
+
+       if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
+               DP_NOTICE(p_hwfn, true,
+                         "Coalescing configuration not enabled\n");
+               return ECORE_INVAL;
+       }
+
+       /* Build the queue-zone image locally, then copy it to the chip */
+       OSAL_MEMSET(p_qzone, 0, qzone_size);
+       p_timeset = p_qzone;
+       p_timeset->timeset = timeset;
+       p_timeset->valid = 1;
+       ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_qzone, qzone_size);
+
+       return ECORE_SUCCESS;
+}
+
+/* Configure Rx interrupt coalescing (in usecs) for a given queue; on
+ * success the value is cached in the device structure.
+ */
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u8 coalesce, u8 qid)
+{
+       struct ustorm_eth_queue_zone qzone;
+       enum _ecore_status_t rc;
+       u16 fw_qid = 0;
+       u32 address;
+       u8 timeset;
+
+       rc = ecore_fw_l2_queue(p_hwfn, (u16) qid, &fw_qid);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+       /* Translate the coalescing time into a timeset, according to:
+        * Timeout[Rx] = TimeSet[Rx] << (TimerRes[Rx] + 1)
+        */
+       timeset = coalesce >> (ECORE_CAU_DEF_RX_TIMER_RES + 1);
+
+       rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
+                               sizeof(struct ustorm_eth_queue_zone), timeset);
+       if (rc == ECORE_SUCCESS)
+               p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
+
+       return rc;
+}
+
+/* Configure Tx interrupt coalescing (in usecs) for a given queue; on
+ * success the value is cached in the device structure.
+ */
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u8 coalesce, u8 qid)
+{
+       struct ystorm_eth_queue_zone qzone;
+       enum _ecore_status_t rc;
+       u16 fw_qid = 0;
+       u32 address;
+       u8 timeset;
+
+       rc = ecore_fw_l2_queue(p_hwfn, (u16) qid, &fw_qid);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       address = BAR0_MAP_REG_YSDM_RAM + YSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+       /* Translate the coalescing time into a timeset, according to:
+        * Timeout[Tx] = TimeSet[Tx] << (TimerRes[Tx] + 1)
+        */
+       timeset = coalesce >> (ECORE_CAU_DEF_TX_TIMER_RES + 1);
+
+       rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
+                               sizeof(struct ystorm_eth_queue_zone), timeset);
+       if (rc == ECORE_SUCCESS)
+               p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
+
+       return rc;
+}
+
+/* Calculate final WFQ values for all vports and configure it.
+ * After this configuration each vport must have
+ * approx min rate =  vport_wfq * min_pf_rate / ECORE_WFQ_UNIT
+ */
+static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
+                                              struct ecore_ptt *p_ptt,
+                                              u32 min_pf_rate)
+{
+       struct init_qm_vport_params *p_vport;
+       int vp_idx, vport_cnt;
+
+       vport_cnt = p_hwfn->qm_info.num_vports;
+
+       for (vp_idx = 0; vp_idx < vport_cnt; vp_idx++) {
+               u32 min_speed = p_hwfn->qm_info.wfq_data[vp_idx].min_speed;
+
+               /* Weight is the vport's share of the PF min rate */
+               p_vport = &p_hwfn->qm_info.qm_vport_params[vp_idx];
+               p_vport->vport_wfq =
+                   (min_speed * ECORE_WFQ_UNIT) / min_pf_rate;
+               ecore_init_vport_wfq(p_hwfn, p_ptt,
+                                    p_vport->first_tx_pq_id,
+                                    p_vport->vport_wfq);
+       }
+}
+
+/* Reset every vport to a neutral WFQ weight and an equal share of the
+ * PF min rate.
+ */
+static void
+ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
+{
+       u32 per_vport_speed;
+       int vp, cnt;
+
+       cnt = p_hwfn->qm_info.num_vports;
+       per_vport_speed = min_pf_rate / cnt;
+
+       for (vp = 0; vp < cnt; vp++) {
+               p_hwfn->qm_info.qm_vport_params[vp].vport_wfq = 1;
+               p_hwfn->qm_info.wfq_data[vp].default_min_speed =
+                   per_vport_speed;
+       }
+}
+
+/* Revert every vport to the default (neutral) WFQ configuration. */
+static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 min_pf_rate)
+{
+       struct init_qm_vport_params *vport_params;
+       int i, num_vports;
+
+       vport_params = p_hwfn->qm_info.qm_vport_params;
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       /* The default-param initialization already covers all vports, so
+        * call it once instead of redundantly on every loop iteration.
+        */
+       ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
+
+       for (i = 0; i < num_vports; i++)
+               ecore_init_vport_wfq(p_hwfn, p_ptt,
+                                    vport_params[i].first_tx_pq_id,
+                                    vport_params[i].vport_wfq);
+}
+
+/* Validate a requested min rate for the given vport against the PF min
+ * rate and the rates already requested by other vports; on success,
+ * record the request and spread the remaining bandwidth over the
+ * unconfigured vports.
+ */
+static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
+                                                u16 vport_id, u32 req_rate,
+                                                u32 min_pf_rate)
+{
+       u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+       int non_requested_count = 0, req_count = 0, i, num_vports;
+
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       /* Accumulate the min rates already requested by the other vports */
+       for (i = 0; i < num_vports; i++) {
+               u32 tmp_speed;
+
+               if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
+                       req_count++;
+                       tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+                       total_req_min_rate += tmp_speed;
+               }
+       }
+
+       /* Include current vport data as well */
+       req_count++;
+       total_req_min_rate += req_rate;
+       non_requested_count = p_hwfn->qm_info.num_vports - req_count;
+
+       /* validate possible error cases */
+       if (req_rate > min_pf_rate) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Vport [%d] - Requested rate[%d Mbps] is greater"
+                          " than configured PF min rate[%d Mbps]\n",
+                          vport_id, req_rate, min_pf_rate);
+               return ECORE_INVAL;
+       }
+
+       if (req_rate * ECORE_WFQ_UNIT / min_pf_rate < 1) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Vport [%d] - Requested rate[%d Mbps] is less than"
+                          " one percent of configured PF min rate[%d Mbps]\n",
+                          vport_id, req_rate, min_pf_rate);
+               return ECORE_INVAL;
+       }
+
+       /* TBD - for number of vports greater than 100 */
+       if (ECORE_WFQ_UNIT / p_hwfn->qm_info.num_vports < 1) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Number of vports are greater than 100\n");
+               return ECORE_INVAL;
+       }
+
+       if (total_req_min_rate > min_pf_rate) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Total requested min rate for all vports[%d Mbps]"
+                          "is greater than configured PF min rate[%d Mbps]\n",
+                          total_req_min_rate, min_pf_rate);
+               return ECORE_INVAL;
+       }
+
+       /* Data left for non requested vports. Guard the division - when
+        * every vport has an explicitly requested rate,
+        * non_requested_count is 0 and the unguarded division would fault.
+        */
+       if (non_requested_count) {
+               total_left_rate = min_pf_rate - total_req_min_rate;
+               left_rate_per_vp = total_left_rate / non_requested_count;
+
+               /* validate if non requested get < 1% of min bw */
+               if (left_rate_per_vp * ECORE_WFQ_UNIT / min_pf_rate < 1)
+                       return ECORE_INVAL;
+       }
+
+       /* now req_rate for given vport passes all scenarios.
+        * assign final wfq rates to all vports.
+        */
+       p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+       p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+       for (i = 0; i < num_vports; i++) {
+               if (p_hwfn->qm_info.wfq_data[i].configured)
+                       continue;
+
+               p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Validate and apply a min-rate request for a single vport on one hwfn.
+ * When no PF min rate is set yet, the request is only recorded so it can
+ * be applied on a later link change.
+ */
+static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      u16 vp_id, u32 rate)
+{
+       struct ecore_mcp_link_state *p_link;
+       int rc;
+
+       /* Link state is tracked by the leading hwfn */
+       p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
+
+       if (!p_link->min_pf_rate) {
+               p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+               p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+               return ECORE_SUCCESS;
+       }
+
+       rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+       if (rc == ECORE_SUCCESS)
+               ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+                                                  p_link->min_pf_rate);
+       else
+               DP_NOTICE(p_hwfn, false,
+                         "Validation failed while configuring min rate\n");
+
+       return rc;
+}
+
+/* Re-validate all previously configured vport min rates against a new
+ * PF min rate (link change). If every configured vport still validates,
+ * WFQ is re-applied; otherwise all vports fall back to defaults.
+ */
+static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
+                                                  struct ecore_ptt *p_ptt,
+                                                  u32 min_pf_rate)
+{
+       int rc = ECORE_SUCCESS;
+       bool use_wfq = false;
+       u16 i, num_vports;
+
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       /* Validate all pre configured vports for wfq */
+       for (i = 0; i < num_vports; i++) {
+               if (p_hwfn->qm_info.wfq_data[i].configured) {
+                       u32 rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+                       use_wfq = true;
+                       rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+                       if (rc == ECORE_INVAL) {
+                               DP_NOTICE(p_hwfn, false,
+                                         "Validation failed while"
+                                         " configuring min rate\n");
+                               break;
+                       }
+               }
+       }
+
+       /* Apply WFQ only when at least one vport was configured and all
+        * of them validated; otherwise revert everything to defaults.
+        */
+       if (rc == ECORE_SUCCESS && use_wfq)
+               ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+       else
+               ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+       return rc;
+}
+
+/* Main API for ecore clients to configure vport min rate.
+ * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
+ * rate - Speed in Mbps needs to be assigned to a given vport.
+ */
+int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
+{
+       int hwfn_idx, rc = ECORE_INVAL;
+
+       /* TBD - for multiple hardware functions - that is 100 gig */
+       if (p_dev->num_hwfns > 1) {
+               DP_NOTICE(p_dev, false,
+                         "WFQ configuration is not supported for this dev\n");
+               return rc;
+       }
+
+       for_each_hwfn(p_dev, hwfn_idx) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[hwfn_idx];
+               struct ecore_ptt *p_ptt;
+
+               p_ptt = ecore_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return ECORE_TIMEOUT;
+
+               rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+               ecore_ptt_release(p_hwfn, p_ptt);
+
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+       }
+
+       return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+                                          u32 min_pf_rate)
+{
+       int hwfn_idx;
+
+       /* TBD - for multiple hardware functions - that is 100 gig */
+       if (p_dev->num_hwfns > 1) {
+               DP_VERBOSE(p_dev, ECORE_MSG_LINK,
+                          "WFQ configuration is not supported for this dev\n");
+               return;
+       }
+
+       /* Re-validate/re-apply WFQ per hwfn using its DPC PTT, since this
+        * is invoked from the link-change flow.
+        */
+       for_each_hwfn(p_dev, hwfn_idx) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[hwfn_idx];
+
+               __ecore_configure_vp_wfq_on_link_change(p_hwfn,
+                                                       p_hwfn->p_dpc_ptt,
+                                                       min_pf_rate);
+       }
+}
+
+/* Apply a max-bandwidth percentage to one hwfn: remember the cap, and if
+ * the link is up, program the PF rate limiter accordingly.
+ */
+int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_mcp_link_state *p_link,
+                                      u8 max_bw)
+{
+       int rc;
+
+       p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+       /* Nothing to program while the link is down */
+       if (!p_link->line_speed)
+               return ECORE_SUCCESS;
+
+       p_link->speed = (p_link->line_speed * max_bw) / 100;
+
+       rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_link->speed);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                  "Configured MAX bandwidth to be %08x Mb/sec\n",
+                  p_link->speed);
+
+       return rc;
+}
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
+{
+       int hwfn_idx, rc = ECORE_INVAL;
+
+       /* Reject out-of-range percentages up front */
+       if (max_bw < 1 || max_bw > 100) {
+               DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
+               return rc;
+       }
+
+       for_each_hwfn(p_dev, hwfn_idx) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[hwfn_idx];
+               struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
+               struct ecore_mcp_link_state *p_link;
+               struct ecore_ptt *p_ptt;
+
+               /* Link state lives on the leading hwfn */
+               p_link = &p_lead->mcp_info->link_output;
+
+               p_ptt = ecore_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return ECORE_TIMEOUT;
+
+               rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+                                                       p_link, max_bw);
+               ecore_ptt_release(p_hwfn, p_ptt);
+
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+       }
+
+       return rc;
+}
+
+/* Set the PF minimum bandwidth to @min_bw percent of the current line
+ * speed and program the PF WFQ weight via ecore_init_pf_wfq().
+ * When the link is down (line_speed == 0) only the MCP bookkeeping is
+ * updated and the hardware is left untouched.
+ */
+int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_mcp_link_state *p_link,
+                                      u8 min_bw)
+{
+       int rc = ECORE_SUCCESS;
+
+       /* Record the new minimum regardless of link state */
+       p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+
+       if (!p_link->line_speed)
+               return rc;
+
+       /* Cache the absolute minimum rate (Mb/sec) for later WFQ checks */
+       p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+       /* Note: the WFQ init takes the percentage, not the absolute rate */
+       rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                  "Configured MIN bandwidth to be %d Mb/sec\n",
+                  p_link->min_pf_rate);
+
+       return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100].
+ *
+ * For every hwfn: acquires a PTT window, applies the new PF minimum via
+ * __ecore_configure_pf_min_bandwidth(), and - if a minimum rate is now
+ * in effect - revalidates the per-vport WFQ configuration against it.
+ * Returns ECORE_INVAL on a bad percentage, ECORE_TIMEOUT when no PTT
+ * window could be acquired, or the status of the last operation.
+ */
+int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
+{
+       int i, rc = ECORE_INVAL;
+
+       if (min_bw < 1 || min_bw > 100) {
+               DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
+               return rc;
+       }
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+               struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
+               struct ecore_mcp_link_state *p_link;
+               struct ecore_ptt *p_ptt;
+
+               /* Link state is always taken from the leading hwfn */
+               p_link = &p_lead->mcp_info->link_output;
+
+               p_ptt = ecore_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return ECORE_TIMEOUT;
+
+               rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+                                                       p_link, min_bw);
+               if (rc != ECORE_SUCCESS) {
+                       ecore_ptt_release(p_hwfn, p_ptt);
+                       return rc;
+               }
+
+               /* min_pf_rate was refreshed above when the link is up;
+                * propagate the change to the vport WFQ configuration.
+                */
+               if (p_link->min_pf_rate) {
+                       u32 min_rate = p_link->min_pf_rate;
+
+                       rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
+                                                                    p_ptt,
+                                                                    min_rate);
+               }
+
+               ecore_ptt_release(p_hwfn, p_ptt);
+       }
+
+       return rc;
+}
+
+/* Reset all WFQ state for @p_hwfn: when a PF minimum rate is in effect,
+ * disable WFQ for every vport in hardware, then zero the per-vport WFQ
+ * bookkeeping (qm_info.wfq_data).
+ */
+void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_link_state *p_link;
+
+       p_link = &p_hwfn->mcp_info->link_output;
+
+       if (p_link->min_pf_rate)
+               ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+                                                p_link->min_pf_rate);
+
+       /* Clear the software bookkeeping for all vports */
+       OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
+                   sizeof(*p_hwfn->qm_info.wfq_data) *
+                   p_hwfn->qm_info.num_vports);
+}
+
+/* Number of HW engines on this device: BB chips expose two engines,
+ * all other chips a single one.
+ */
+int ecore_device_num_engines(struct ecore_dev *p_dev)
+{
+       if (ECORE_IS_BB(p_dev))
+               return 2;
+
+       return 1;
+}
+
+/* Number of ports on this device.  In CMT mode (more than one hwfn)
+ * there is always exactly one port; otherwise it is the number of
+ * engines times the per-engine port count.
+ */
+int ecore_device_num_ports(struct ecore_dev *p_dev)
+{
+       /* in CMT always only one port */
+       if (p_dev->num_hwfns > 1)
+               return 1;
+
+       return ecore_device_num_engines(p_dev) *
+              p_dev->num_ports_in_engines;
+}
diff --git a/drivers/net/qede/ecore/ecore_dev_api.h 
b/drivers/net/qede/ecore/ecore_dev_api.h
new file mode 100644
index 0000000..367991f
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_dev_api.h
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_DEV_API_H__
+#define __ECORE_DEV_API_H__
+
+#include "ecore_status.h"
+#include "ecore_chain.h"
+#include "ecore_int_api.h"
+
+struct ecore_tunn_start_params;
+
+/**
+ * @brief ecore_init_dp - initialize the debug level
+ *
+ * @param p_dev
+ * @param dp_module
+ * @param dp_level
+ * @param dp_ctx
+ */
+void ecore_init_dp(struct ecore_dev *p_dev,
+                  u32 dp_module, u8 dp_level, void *dp_ctx);
+
+/**
+ * @brief ecore_init_struct - initialize the device structure to
+ *        its defaults
+ *
+ * @param p_dev
+ */
+void ecore_init_struct(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_free -
+ *
+ * @param p_dev
+ */
+void ecore_resc_free(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_alloc -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_resc_setup -
+ *
+ * @param p_dev
+ */
+void ecore_resc_setup(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_init -
+ *
+ * @param p_dev
+ * @param p_tunn - tunneling parameters
+ * @param b_hw_start
+ * @param int_mode - interrupt mode [msix, inta, etc.] to use.
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ *       for vports configured for tx-switching.
+ * @param bin_fw_data - binary fw data pointer in binary fw file.
+ *                     Pass NULL if not using binary fw file.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
+                                  struct ecore_tunn_start_params *p_tunn,
+                                  bool b_hw_start,
+                                  enum ecore_int_mode int_mode,
+                                  bool allow_npar_tx_switch,
+                                  const u8 *bin_fw_data);
+
+/**
+ * @brief ecore_hw_timers_stop_all -
+ *
+ * @param p_dev
+ *
+ * @return void
+ */
+void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_stop -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_stop_fastpath - should be called in case
+ *        slowpath is still required for the device, but
+ *        fastpath is not.
+ *
+ * @param p_dev
+ *
+ */
+void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_prepare_hibernate -should be called when
+ *        the system is going into the hibernate state
+ *
+ * @param p_dev
+ *
+ */
+void ecore_prepare_hibernate(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_start_fastpath - restart fastpath traffic,
+ *        only if hw_stop_fastpath was called
+ *
+ * @param p_hwfn
+ *
+ */
+void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_hw_reset -
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_hw_prepare -
+ *
+ * @param p_dev
+ * @param personality - personality to initialize
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality);
+
+/**
+ * @brief ecore_hw_remove -
+ *
+ * @param p_dev
+ */
+void ecore_hw_remove(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_ptt_acquire - Allocate a PTT window
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function)
+ *
+ * @param p_hwfn
+ *
+ * @return struct ecore_ptt
+ */
+struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_release - Release PTT Window
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+#ifndef __EXTRACT__LINUX__
+/* Aggregated Ethernet statistics.  All counters are 64-bit.  The first
+ * group holds per-function L2 counters (discards, ucast/mcast/bcast
+ * byte and packet counts, TPA); the "port" group below holds MAC/port
+ * level counters.
+ */
+struct ecore_eth_stats {
+       u64 no_buff_discards;
+       u64 packet_too_big_discard;
+       u64 ttl0_discard;
+       u64 rx_ucast_bytes;
+       u64 rx_mcast_bytes;
+       u64 rx_bcast_bytes;
+       u64 rx_ucast_pkts;
+       u64 rx_mcast_pkts;
+       u64 rx_bcast_pkts;
+       u64 mftag_filter_discards;
+       u64 mac_filter_discards;
+       u64 tx_ucast_bytes;
+       u64 tx_mcast_bytes;
+       u64 tx_bcast_bytes;
+       u64 tx_ucast_pkts;
+       u64 tx_mcast_pkts;
+       u64 tx_bcast_pkts;
+       u64 tx_err_drop_pkts;
+       /* TPA (transparent packet aggregation, i.e. LRO) counters */
+       u64 tpa_coalesced_pkts;
+       u64 tpa_coalesced_events;
+       u64 tpa_aborts_num;
+       u64 tpa_not_coalesced_pkts;
+       u64 tpa_coalesced_bytes;
+
+       /* port */
+       u64 rx_64_byte_packets;
+       u64 rx_65_to_127_byte_packets;
+       u64 rx_128_to_255_byte_packets;
+       u64 rx_256_to_511_byte_packets;
+       u64 rx_512_to_1023_byte_packets;
+       u64 rx_1024_to_1518_byte_packets;
+       u64 rx_1519_to_1522_byte_packets;
+       u64 rx_1519_to_2047_byte_packets;
+       u64 rx_2048_to_4095_byte_packets;
+       u64 rx_4096_to_9216_byte_packets;
+       u64 rx_9217_to_16383_byte_packets;
+       u64 rx_crc_errors;
+       u64 rx_mac_crtl_frames;
+       u64 rx_pause_frames;
+       u64 rx_pfc_frames;
+       u64 rx_align_errors;
+       u64 rx_carrier_errors;
+       u64 rx_oversize_packets;
+       u64 rx_jabbers;
+       u64 rx_undersize_packets;
+       u64 rx_fragments;
+       u64 tx_64_byte_packets;
+       u64 tx_65_to_127_byte_packets;
+       u64 tx_128_to_255_byte_packets;
+       u64 tx_256_to_511_byte_packets;
+       u64 tx_512_to_1023_byte_packets;
+       u64 tx_1024_to_1518_byte_packets;
+       u64 tx_1519_to_2047_byte_packets;
+       u64 tx_2048_to_4095_byte_packets;
+       u64 tx_4096_to_9216_byte_packets;
+       u64 tx_9217_to_16383_byte_packets;
+       u64 tx_pause_frames;
+       u64 tx_pfc_frames;
+       u64 tx_lpi_entry_count;
+       u64 tx_total_collisions;
+       u64 brb_truncates;
+       u64 brb_discards;
+       /* MAC-level byte/packet counters */
+       u64 rx_mac_bytes;
+       u64 rx_mac_uc_packets;
+       u64 rx_mac_mc_packets;
+       u64 rx_mac_bc_packets;
+       u64 rx_mac_frames_ok;
+       u64 tx_mac_bytes;
+       u64 tx_mac_uc_packets;
+       u64 tx_mac_mc_packets;
+       u64 tx_mac_bc_packets;
+       u64 tx_mac_ctrl_frames;
+};
+#endif
+
+/* Address space a DMAE transaction operand resides in */
+enum ecore_dmae_address_type_t {
+       ECORE_DMAE_ADDRESS_HOST_VIRT, /* host virtual address */
+       ECORE_DMAE_ADDRESS_HOST_PHYS, /* host physical (DMA) address */
+       ECORE_DMAE_ADDRESS_GRC /* device GRC address space */
+};
+
+/* value of flags If ECORE_DMAE_FLAG_RW_REPL_SRC flag is set and the
+ * source is a block of length DMAE_MAX_RW_SIZE and the
+ * destination is larger, the source block will be duplicated as
+ * many times as required to fill the destination block. This is
+ * used mostly to write a zeroed buffer to destination address
+ * using DMA
+ */
+#define ECORE_DMAE_FLAG_RW_REPL_SRC    0x00000001
+#define ECORE_DMAE_FLAG_VF_SRC         0x00000002
+#define ECORE_DMAE_FLAG_VF_DST         0x00000004
+#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
+
+/* Optional parameters for a DMAE transaction */
+struct ecore_dmae_params {
+       u32 flags;              /* consists of ECORE_DMAE_FLAG_* values */
+       u8 src_vfid; /* presumably used only with ECORE_DMAE_FLAG_VF_SRC
+                     * set - confirm against ecore_hw.c
+                     */
+       u8 dst_vfid; /* presumably used only with ECORE_DMAE_FLAG_VF_DST
+                     * set - confirm against ecore_hw.c
+                     */
+};
+
+/**
+* @brief ecore_dmae_host2grc - copy data from source addr to
+* dmae registers using the given ptt
+*
+* @param p_hwfn
+* @param p_ptt
+* @param source_addr
+* @param grc_addr (dmae_data_offset)
+* @param size_in_dwords
+* @param flags (one of the flags defined above)
+*/
+enum _ecore_status_t
+ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+                   struct ecore_ptt *p_ptt,
+                   u64 source_addr,
+                   u32 grc_addr, u32 size_in_dwords, u32 flags);
+
+/**
+* @brief ecore_dmae_grc2host - Read data from dmae data offset
+* to source address using the given ptt
+*
+* @param p_ptt
+* @param grc_addr (dmae_data_offset)
+* @param dest_addr
+* @param size_in_dwords
+* @param flags - one of the flags defined above
+*/
+enum _ecore_status_t
+ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+                   struct ecore_ptt *p_ptt,
+                   u32 grc_addr,
+                   dma_addr_t dest_addr, u32 size_in_dwords, u32 flags);
+
+/**
+* @brief ecore_dmae_host2host - copy data from a source address
+* to a destination address (for SRIOV) using the given ptt
+*
+* @param p_hwfn
+* @param p_ptt
+* @param source_addr
+* @param dest_addr
+* @param size_in_dwords
+* @param params
+*/
+enum _ecore_status_t
+ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt,
+                    dma_addr_t source_addr,
+                    dma_addr_t dest_addr,
+                    u32 size_in_dwords, struct ecore_dmae_params *p_params);
+
+/**
+ * @brief ecore_chain_alloc - Allocate and initialize a chain
+ *
+ * @param p_hwfn
+ * @param intended_use
+ * @param mode
+ * @param num_elems
+ * @param elem_size
+ * @param p_chain
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_chain_alloc(struct ecore_dev *p_dev,
+                 enum ecore_chain_use_mode intended_use,
+                 enum ecore_chain_mode mode,
+                 enum ecore_chain_cnt_type cnt_type,
+                 u32 num_elems,
+                 osal_size_t elem_size, struct ecore_chain *p_chain);
+
+/**
+ * @brief ecore_chain_free - Free chain DMA memory
+ *
+ * @param p_hwfn
+ * @param p_chain
+ */
+void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain);
+
+/**
+ * @@brief ecore_fw_l2_queue - Get absolute L2 queue ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
+                                      u16 src_id, u16 *dst_id);
+
+/**
+ * @@brief ecore_fw_vport - Get absolute vport ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
+                                   u8 src_id, u8 *dst_id);
+
+/**
+ * @@brief ecore_fw_rss_eng - Get absolute RSS engine ID
+ *
+ *  @param p_hwfn
+ *  @param src_id - relative to p_hwfn
+ *  @param dst_id - absolute per engine
+ *
+ *  @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
+                                     u8 src_id, u8 *dst_id);
+
+/**
+ * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u8 *p_filter);
+
+/**
+ * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt, u8 *p_filter);
+
+/**
+ * @brief ecore_llh_add_ethertype_filter - configures a ethertype filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param filter - ethertype to add
+ */
+enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt,
+                                                   u16 filter);
+
+/**
+ * @brief ecore_llh_remove_ethertype_filter - removes a ethertype llh filter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param filter - ethertype to remove
+ */
+void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt, u16 filter);
+
+/**
+ * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Cleanup of previous driver remains prior to load
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        u16 id, bool is_vf);
+
+/**
+ * @brief ecore_test_registers - Perform register tests
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ *  @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u8 coalesce, u8 qid);
+
+/**
+ * @brief ecore_set_txq_coalesce - Configure coalesce parameters for a Tx queue
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u8 coalesce, u8 qid);
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_gtt_reg_addr.h 
b/drivers/net/qede/ecore/ecore_gtt_reg_addr.h
new file mode 100644
index 0000000..cc49fc7
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_gtt_reg_addr.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef GTT_REG_ADDR_H
+#define GTT_REG_ADDR_H
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
+
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
+
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
+
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
+
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
+
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
+
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
+
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM  0x016000UL
+
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
+
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_gtt_values.h 
b/drivers/net/qede/ecore/ecore_gtt_values.h
new file mode 100644
index 0000000..f2efe24
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_gtt_values.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __PREVENT_PXP_GLOBAL_WIN__
+
+/* PXP BAR0 global window table, indexed by window number.  Each nonzero
+ * entry is the window's GRC address in 4KB units (e.g. 0x1c02 maps
+ * address 0x1c02000); zero entries are unused windows.  Windows 2-11
+ * appear to correspond to the GTT_BAR0_MAP_REG_* offsets - confirm
+ * against ecore_gtt_reg_addr.h.
+ */
+static u32 pxp_global_win[] = {
+       0,
+       0,
+       0x1c02,                 /* win 2: addr=0x1c02000, size=4096 bytes */
+       0x1c80,                 /* win 3: addr=0x1c80000, size=4096 bytes */
+       0x1d00,                 /* win 4: addr=0x1d00000, size=4096 bytes */
+       0x1d01,                 /* win 5: addr=0x1d01000, size=4096 bytes */
+       0x1d80,                 /* win 6: addr=0x1d80000, size=4096 bytes */
+       0x1d81,                 /* win 7: addr=0x1d81000, size=4096 bytes */
+       0x1d82,                 /* win 8: addr=0x1d82000, size=4096 bytes */
+       0x1e00,                 /* win 9: addr=0x1e00000, size=4096 bytes */
+       0x1e80,                 /* win 10: addr=0x1e80000, size=4096 bytes */
+       0x1f00,                 /* win 11: addr=0x1f00000, size=4096 bytes */
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+       0,
+};
+
+#endif /* __PREVENT_PXP_GLOBAL_WIN__ */
diff --git a/drivers/net/qede/ecore/ecore_hsi_common.h 
b/drivers/net/qede/ecore/ecore_hsi_common.h
new file mode 100644
index 0000000..4267710
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_hsi_common.h
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_COMMON__
+#define __ECORE_HSI_COMMON__
+/********************************/
+/* Add include to common target */
+/********************************/
+#include "common_hsi.h"
+
+/*
+ * opcodes for the event ring (one per common-path event type;
+ * they parallel the common_ramrod_cmd_id values below)
+ */
+enum common_event_opcode {
+       COMMON_EVENT_PF_START,
+       COMMON_EVENT_PF_STOP,
+       COMMON_EVENT_VF_START,
+       COMMON_EVENT_VF_STOP,
+       COMMON_EVENT_VF_PF_CHANNEL,
+       COMMON_EVENT_VF_FLR,
+       COMMON_EVENT_PF_UPDATE,
+       COMMON_EVENT_MALICIOUS_VF,
+       COMMON_EVENT_EMPTY,
+       MAX_COMMON_EVENT_OPCODE /* number of opcodes; not a real opcode */
+};
+
+/*
+ * Common Ramrod Command IDs (slowpath commands posted on the SPQ)
+ */
+enum common_ramrod_cmd_id {
+       COMMON_RAMROD_UNUSED,
+       COMMON_RAMROD_PF_START /* PF Function Start Ramrod */ ,
+       COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */ ,
+       COMMON_RAMROD_VF_START /* VF Function Start */ ,
+       COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */ ,
+       COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */ ,
+       COMMON_RAMROD_EMPTY /* Empty Ramrod */ ,
+       MAX_COMMON_RAMROD_CMD_ID /* number of IDs; not a real command */
+};
+
+/*
+ * The core storm context for the Ystorm (layout fixed by firmware;
+ * currently all reserved)
+ */
+struct ystorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/*
+ * The core storm context for the Pstorm (layout fixed by firmware;
+ * currently all reserved)
+ */
+struct pstorm_core_conn_st_ctx {
+       __le32 reserved[4];
+};
+
+/*
+ * Core Slowpath Connection storm context of Xstorm.  Holds the SPQ and
+ * consolidation ring base addresses and consumer indices; layout is
+ * fixed by firmware (little-endian fields).
+ */
+struct xstorm_core_conn_st_ctx {
+       __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
+       __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
+       struct regpair consolid_base_addr /* Consolidation Ring Base Address */
+         ;
+       __le16 spq_cons /* SPQ Ring Consumer */;
+       __le16 consolid_cons /* Consolidation Ring Consumer */;
+       __le32 reserved0[55] /* Pad to 15 cycles */;
+};
+
+struct xstorm_core_conn_ag_ctx {
+       u8 reserved0 /* cdu_validation */;
+       u8 core_state /* state */;
+       u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT        0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT           1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT           2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT           4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT           5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT           6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT           7
+       u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT           0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT           1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK            0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT           2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK       0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT      6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT        7
+       u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT                 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT                 6
+       u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT                 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT                 6
+       u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT                 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK                  0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT                 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT                4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT                6
+       u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT                0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT                2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT                4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT                6
+       u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK     0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT    0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK         0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT        6
+       u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK             0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK           0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT          2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK            0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT           4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT               6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT               7
+       u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT               0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT               1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT               2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT               6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT               7
+       u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT              0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT              1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT              2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT              3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT              4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT              5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK  0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT              7
+       u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK      0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT     1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK          0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT         2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT          3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT        4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK               0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT              5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT          6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT          7
+       u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT          0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK           0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT          1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK       0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT      2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK              0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT             3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK              0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT             4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK              0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT             5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT        6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK              0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT             7
+       u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT            1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT        2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT            4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT            5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT            6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT            7
+       u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT            0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK             0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT            1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT        2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT        3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT        4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT        5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT        6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK         0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT        7
+       u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT               0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT               1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT               2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT               3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT               4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK                0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT               5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK                 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT                6
+       u8 byte2 /* byte2 */;
+       __le16 physical_q0 /* physical_q0 */;
+       __le16 consolid_prod /* physical_q1 */;
+       __le16 reserved16 /* physical_q2 */;
+       __le16 tx_bd_cons /* word3 */;
+       __le16 tx_bd_or_spq_prod /* word4 */;
+       __le16 word5 /* word5 */;
+       __le16 conn_dpi /* conn_dpi */;
+       u8 byte3 /* byte3 */;
+       u8 byte4 /* byte4 */;
+       u8 byte5 /* byte5 */;
+       u8 byte6 /* byte6 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+       __le32 reg4 /* reg4 */;
+       __le32 reg5 /* cf_array0 */;
+       __le32 reg6 /* cf_array1 */;
+       __le16 word7 /* word7 */;
+       __le16 word8 /* word8 */;
+       __le16 word9 /* word9 */;
+       __le16 word10 /* word10 */;
+       __le32 reg7 /* reg7 */;
+       __le32 reg8 /* reg8 */;
+       __le32 reg9 /* reg9 */;
+       u8 byte7 /* byte7 */;
+       u8 byte8 /* byte8 */;
+       u8 byte9 /* byte9 */;
+       u8 byte10 /* byte10 */;
+       u8 byte11 /* byte11 */;
+       u8 byte12 /* byte12 */;
+       u8 byte13 /* byte13 */;
+       u8 byte14 /* byte14 */;
+       u8 byte15 /* byte15 */;
+       u8 byte16 /* byte16 */;
+       __le16 word11 /* word11 */;
+       __le32 reg10 /* reg10 */;
+       __le32 reg11 /* reg11 */;
+       __le32 reg12 /* reg12 */;
+       __le32 reg13 /* reg13 */;
+       __le32 reg14 /* reg14 */;
+       __le32 reg15 /* reg15 */;
+       __le32 reg16 /* reg16 */;
+       __le32 reg17 /* reg17 */;
+       __le32 reg18 /* reg18 */;
+       __le32 reg19 /* reg19 */;
+       __le16 word12 /* word12 */;
+       __le16 word13 /* word13 */;
+       __le16 word14 /* word14 */;
+       __le16 word15 /* word15 */;
+};
+
+struct tstorm_core_conn_ag_ctx {
+/*
+ * Tstorm per-connection aggregative context. flags0..flags5 pack single-bit
+ * flags (BIT0..5), 2-bit CF fields (CF0..CF10), their per-CF enable bits and
+ * rule-enable bits via the MASK/SHIFT pairs below; the remainder is raw
+ * register/word storage. Layout is firmware ABI — do not reorder.
+ */
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT    2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT    3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT    4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK     0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT    5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     6
+       u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT     4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT     6
+       u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT     2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT     4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT     6
+       u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK      0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT     0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK     0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT    2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   7
+       u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT   3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT   4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK    0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT   5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK   0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT  6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+       u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+       __le32 reg4 /* reg4 */;
+       __le32 reg5 /* reg5 */;
+       __le32 reg6 /* reg6 */;
+       __le32 reg7 /* reg7 */;
+       __le32 reg8 /* reg8 */;
+       u8 byte2 /* byte2 */;
+       u8 byte3 /* byte3 */;
+       __le16 word0 /* word0 */;
+       u8 byte4 /* byte4 */;
+       u8 byte5 /* byte5 */;
+       __le16 word1 /* word1 */;
+       __le16 word2 /* conn_dpi */;
+       __le16 word3 /* word3 */;
+       __le32 reg9 /* reg9 */;
+       __le32 reg10 /* reg10 */;
+};
+
+struct ustorm_core_conn_ag_ctx {
+/*
+ * Ustorm per-connection aggregative context: 2 single-bit flags, 2-bit CF
+ * fields (CF0..CF6), per-CF enables and rule enables packed via the
+ * MASK/SHIFT pairs below, then raw storage. rx_producers (reg0) is the
+ * only field given a meaningful name. Firmware ABI — do not reorder.
+ */
+       u8 reserved /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT     0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT     2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT     4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK      0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT     6
+       u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT   3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT   4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT   5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK    0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT   6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+       u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK  0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+       u8 byte2 /* byte2 */;
+       u8 byte3 /* byte3 */;
+       __le16 word0 /* conn_dpi */;
+       __le16 word1 /* word1 */;
+       __le32 rx_producers /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+       __le16 word2 /* word2 */;
+       __le16 word3 /* word3 */;
+};
+
+/*
+ * The core storm context for the Mstorm
+ */
+struct mstorm_core_conn_st_ctx {
+       __le32 reserved[24]; /* 96 bytes, opaque to the driver */
+};
+
+/*
+ * The core storm context for the Ustorm
+ */
+struct ustorm_core_conn_st_ctx {
+       __le32 reserved[4]; /* 16 bytes, opaque to the driver */
+};
+
+/*
+ * core connection context
+ */
+struct core_conn_context {
+       /* Per-storm contexts laid out back-to-back; the regpair paddings
+        * presumably keep each storm section aligned — TODO confirm against
+        * the CDU context layout. Firmware ABI — do not reorder. */
+       struct ystorm_core_conn_st_ctx ystorm_st_context
+           /* ystorm storm context */;
+       struct regpair ystorm_st_padding[2] /* padding */;
+       struct pstorm_core_conn_st_ctx pstorm_st_context
+           /* pstorm storm context */;
+       struct regpair pstorm_st_padding[2] /* padding */;
+       struct xstorm_core_conn_st_ctx xstorm_st_context
+           /* xstorm storm context */;
+       struct xstorm_core_conn_ag_ctx xstorm_ag_context
+           /* xstorm aggregative context */;
+       struct tstorm_core_conn_ag_ctx tstorm_ag_context
+           /* tstorm aggregative context */;
+       struct ustorm_core_conn_ag_ctx ustorm_ag_context
+           /* ustorm aggregative context */;
+       struct mstorm_core_conn_st_ctx mstorm_st_context
+           /* mstorm storm context */;
+       struct ustorm_core_conn_st_ctx ustorm_st_context
+           /* ustorm storm context */;
+       struct regpair ustorm_st_padding[2] /* padding */;
+};
+
+/*
+ * How ll2 should deal with packet upon errors
+ */
+enum core_error_handle {
+       LL2_DROP_PACKET /* If error occurs drop packet */ ,
+       LL2_DO_NOTHING /* If error occurs do nothing */ ,
+       LL2_ASSERT /* If error occurs assert */ ,
+       MAX_CORE_ERROR_HANDLE /* sentinel — number of defined values */
+};
+
+/*
+ * opcodes for the event ring
+ */
+enum core_event_opcode {
+       /* Completion opcodes posted to the event ring for core (LL2)
+        * queue start/stop ramrods. */
+       CORE_EVENT_TX_QUEUE_START,
+       CORE_EVENT_TX_QUEUE_STOP,
+       CORE_EVENT_RX_QUEUE_START,
+       CORE_EVENT_RX_QUEUE_STOP,
+       MAX_CORE_EVENT_OPCODE /* sentinel — number of defined values */
+};
+
+/*
+ * The L4 pseudo checksum mode for Core
+ */
+enum core_l4_pseudo_checksum_mode {
+       CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH
+           /* Pseudo checksum calculated with the correct packet length —
+            * presumed from the symmetric value below; confirm vendor HSI. */ ,
+       CORE_L4_PSEUDO_CSUM_ZERO_LENGTH
+           /* Pseudo Checksum on packet is calculated with zero length. */ ,
+       MAX_CORE_L4_PSEUDO_CHECKSUM_MODE /* sentinel */
+};
+
+/*
+ * Light-L2 per-port statistics (GSI error counters)
+ */
+struct core_ll2_port_stats {
+       /* 64-bit GSI error counters, one per error class. */
+       struct regpair gsi_invalid_hdr;
+       struct regpair gsi_invalid_pkt_length;
+       struct regpair gsi_unsupported_pkt_typ;
+       struct regpair gsi_crcchksm_error;
+};
+
+/*
+ * Ethernet TX Per Queue Stats
+ */
+struct core_ll2_pstorm_per_queue_stat {
+       /* Per-queue LL2 TX counters: bytes then packets, split by
+        * unicast/multicast/broadcast. */
+       struct regpair sent_ucast_bytes
+           /* number of total bytes sent without errors */;
+       struct regpair sent_mcast_bytes
+           /* number of total bytes sent without errors */;
+       struct regpair sent_bcast_bytes
+           /* number of total bytes sent without errors */;
+       struct regpair sent_ucast_pkts
+           /* number of total packets sent without errors */;
+       struct regpair sent_mcast_pkts
+           /* number of total packets sent without errors */;
+       struct regpair sent_bcast_pkts
+           /* number of total packets sent without errors */;
+};
+
+/*
+ * Light-L2 RX Producers in Tstorm RAM
+ */
+struct core_ll2_rx_prod {
+       __le16 bd_prod /* BD Producer */;
+       __le16 cqe_prod /* CQE Producer */;
+       __le32 reserved; /* pads struct to 8 bytes */
+};
+
+struct core_ll2_tstorm_per_queue_stat {
+       /* Per-queue LL2 RX discard counters. */
+       struct regpair packet_too_big_discard
+           /* Number of packets discarded because they are bigger than MTU */;
+       struct regpair no_buff_discard
+           /* Number of packets discarded due to lack of host buffers */;
+};
+
+struct core_ll2_ustorm_per_queue_stat {
+       /* Per-queue LL2 RX counters: bytes then packets, split by
+        * unicast/multicast/broadcast. */
+       struct regpair rcv_ucast_bytes;
+       struct regpair rcv_mcast_bytes;
+       struct regpair rcv_bcast_bytes;
+       struct regpair rcv_ucast_pkts;
+       struct regpair rcv_mcast_pkts;
+       struct regpair rcv_bcast_pkts;
+};
+
+/*
+ * Core Ramrod Command IDs (light L2)
+ */
+enum core_ramrod_cmd_id {
+       CORE_RAMROD_UNUSED,
+       CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */ ,
+       CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */ ,
+       CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */ ,
+       CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */ ,
+       MAX_CORE_RAMROD_CMD_ID /* sentinel — number of defined values */
+};
+
+/*
+ * Specifies how ll2 should deal with packets errors: packet_too_big and
+ * no_buff
+ */
+struct core_rx_action_on_error {
+       /* One 2-bit core_error_handle value per error class, packed into a
+        * single byte via the MASK/SHIFT pairs below. */
+       u8 error_type;
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK  0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK         0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT        2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK        0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT       4
+};
+
+/*
+ * Core RX BD for Light L2
+ */
+struct core_rx_bd {
+       struct regpair addr;
+       __le16 reserved[4]; /* pads BD to 16 bytes */
+};
+
+/*
+ * Core RX CM offload BD for Light L2
+ */
+struct core_rx_bd_with_buff_len {
+       struct regpair addr;
+       __le16 buff_length;
+       __le16 reserved[3]; /* pads BD to 16 bytes */
+};
+
+/*
+ * Core RX CM offload BD for Light L2
+ */
+union core_rx_bd_union {
+       /* Both layouts are 16 bytes. */
+       struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
+       struct core_rx_bd_with_buff_len rx_bd_with_len
+           /* Core Rx Bd with dynamic buffer length */;
+};
+
+/*
+ * Opaque Data for Light L2 RX CQE .
+ */
+struct core_rx_cqe_opaque_data {
+       __le32 data[2] /* Opaque CQE Data */; /* 8 bytes, not interpreted by FW */
+};
+
+/*
+ * Core RX CQE Type for Light L2
+ */
+enum core_rx_cqe_type {
+       /* NOTE(review): "ILLIGAL" spelling preserved — ABI identifier from
+        * vendor HSI; renaming would break external references. */
+       CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */ ,
+       CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */ ,
+       CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */ ,
+       CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */ ,
+       MAX_CORE_RX_CQE_TYPE /* sentinel */
+};
+
+/*
+ * Core RX CQE for Light L2 .
+ */
+struct core_rx_fast_path_cqe {
+       u8 type /* CQE type */; /* value from enum core_rx_cqe_type */
+       u8 placement_offset
+           /* Offset (in bytes) of the packet from start of the buffer */;
+       struct parsing_and_err_flags parse_flags
+           /* Parsing and error flags from the parser */;
+       __le16 packet_length /* Total packet length (from the parser) */;
+       __le16 vlan /* 802.1q VLAN tag */;
+       struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+       __le32 reserved[4];
+};
+
+/*
+ * Core Rx CM offload CQE .
+ */
+struct core_rx_gsi_offload_cqe {
+       u8 type /* CQE type */; /* value from enum core_rx_cqe_type */
+       u8 data_length_error /* set if gsi data is bigger than buff */;
+       struct parsing_and_err_flags parse_flags
+           /* Parsing and error flags from the parser */;
+       __le16 data_length /* Total packet length (from the parser) */;
+       __le16 vlan /* 802.1q VLAN tag */;
+       __le32 src_mac_addrhi /* hi 4 bytes source mac address */;
+       __le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
+       u8 reserved1[2];
+       __le32 gid_dst[4] /* Gid destination address */;
+};
+
+/*
+ * Core RX CQE for Light L2 .
+ */
+struct core_rx_slow_path_cqe {
+       u8 type /* CQE type */; /* value from enum core_rx_cqe_type */
+       u8 ramrod_cmd_id; /* presumably enum core_ramrod_cmd_id — confirm */
+       __le16 echo; /* echo value returned with the completion — presumed */
+       __le32 reserved1[7];
+};
+
+/*
+ * Core RX CQE union for Light L2
+ */
+union core_rx_cqe_union {
+       /* All three layouts are 32 bytes; first byte (type) selects which. */
+       struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */;
+       struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */;
+       struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
+};
+
+/*
+ * Ramrod data for rx queue start ramrod
+ */
+struct core_rx_start_ramrod_data {
+       struct regpair bd_base /* bd address of the first bd page */;
+       struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
+       __le16 mtu /* Maximum transmission unit */;
+       __le16 sb_id /* Status block ID */;
+       u8 sb_index /* index of the protocol index */;
+       u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+       u8 complete_event_flg /* post completion to the event ring if set */;
+       u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
+       __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+       u8 inner_vlan_removal_en
+           /* if set, 802.1q tags will be removed and copied to CQE */;
+       u8 queue_id /* Light L2 RX Queue ID */;
+       u8 main_func_queue /* Is this the main queue for the PF */;
+       u8 mf_si_bcast_accept_all; /* accept all bcast in SI MF mode — presumed */
+       u8 mf_si_mcast_accept_all; /* accept all mcast in SI MF mode — presumed */
+       struct core_rx_action_on_error action_on_error;
+       u8 gsi_offload_flag
+           /* set when in GSI offload mode on ROCE connection */;
+       u8 reserved[7];
+};
+
+/*
+ * Ramrod data for rx queue stop ramrod
+ */
+struct core_rx_stop_ramrod_data {
+       u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+       u8 complete_event_flg /* post completion to the event ring if set */;
+       u8 queue_id /* Light L2 RX Queue ID */;
+       u8 reserved1;
+       __le16 reserved2[2]; /* pads struct to 8 bytes */
+};
+
+/*
+ * Flags for Core TX BD
+ */
+struct core_tx_bd_flags {
+       /* One byte of TX BD flags, accessed via the MASK/SHIFT pairs below. */
+       u8 as_bitfield;
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK      0x1
+#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT     0
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK       0x1
+#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT      1
+#define CORE_TX_BD_FLAGS_START_BD_MASK             0x1
+#define CORE_TX_BD_FLAGS_START_BD_SHIFT            2
+#define CORE_TX_BD_FLAGS_IP_CSUM_MASK              0x1
+#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT             3
+#define CORE_TX_BD_FLAGS_L4_CSUM_MASK              0x1
+#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT             4
+#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK             0x1
+#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT            5
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK          0x1
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT         6
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK  0x1
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+};
+
+/*
+ * Core TX BD for Light L2
+ */
+struct core_tx_bd {
+       struct regpair addr /* Buffer Address */;
+       __le16 nbytes /* Number of Bytes in Buffer */;
+       __le16 vlan /* VLAN to insert to packet (if insertion flag set) */;
+       u8 nbds /* Number of BDs that make up one packet */;
+       struct core_tx_bd_flags bd_flags /* BD Flags */;
+       __le16 l4_hdr_offset_w; /* L4 header offset in words — presumed from
+                                * the "_w" suffix; confirm vendor HSI */
+};
+
+/*
+ * Light L2 TX Destination
+ */
+enum core_tx_dest {
+       CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */ ,
+       CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */ ,
+       MAX_CORE_TX_DEST /* sentinel */
+};
+
+/*
+ * Ramrod data for tx queue start ramrod
+ */
+struct core_tx_start_ramrod_data {
+       struct regpair pbl_base_addr /* Address of the pbl page */;
+       __le16 mtu /* Maximum transmission unit */;
+       __le16 sb_id /* Status block ID */;
+       u8 sb_index /* Status block protocol index */;
+       u8 tx_dest /* TX Destination (either Network or LB) */;
+       u8 stats_en /* Statistics Enable */;
+       u8 stats_id /* Statistics Counter ID */;
+       __le16 pbl_size /* Number of BD pages pointed by PBL */;
+       __le16 qm_pq_id /* QM PQ ID */;
+       u8 conn_type /* connection type that loaded ll2 */;
+       u8 gsi_offload_flag
+           /* set when in GSI offload mode on ROCE connection */;
+       u8 resrved[2]; /* NOTE(review): "resrved" typo kept — field name is
+                       * part of the vendor HSI; renaming breaks references */
+};
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct core_tx_stop_ramrod_data {
+       __le32 reserved0[2]; /* ramrod carries no parameters; 8-byte pad */
+};
+
+struct eth_mstorm_per_queue_stat {
+       /* Per-queue RX discard counters followed by TPA (aggregation)
+        * counters; all 64-bit. */
+       struct regpair ttl0_discard;
+       struct regpair packet_too_big_discard;
+       struct regpair no_buff_discard;
+       struct regpair not_active_discard;
+       struct regpair tpa_coalesced_pkts;
+       struct regpair tpa_coalesced_events;
+       struct regpair tpa_aborts_num;
+       struct regpair tpa_coalesced_bytes;
+};
+
+/*
+ * Ethernet TX Per Queue Stats
+ */
+struct eth_pstorm_per_queue_stat {
+       /* Per-queue TX counters: bytes then packets, split by
+        * unicast/multicast/broadcast, plus error drops. */
+       struct regpair sent_ucast_bytes
+           /* number of total bytes sent without errors */;
+       struct regpair sent_mcast_bytes
+           /* number of total bytes sent without errors */;
+       struct regpair sent_bcast_bytes
+           /* number of total bytes sent without errors */;
+       struct regpair sent_ucast_pkts
+           /* number of total packets sent without errors */;
+       struct regpair sent_mcast_pkts
+           /* number of total packets sent without errors */;
+       struct regpair sent_bcast_pkts
+           /* number of total packets sent without errors */;
+       struct regpair error_drop_pkts
+           /* number of total packets dropped due to errors */;
+};
+
+/*
+ * ETH RX rate limiting parameters
+ */
+struct eth_rx_rate_limit {
+       __le16 mult; /* rate-limit multiplier — exact semantics not visible
+                     * here; confirm against vendor HSI */
+       __le16 cnst
+           /* Constant term to add (or subtract from number of cycles) */;
+       u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
+       u8 reserved0;
+       __le16 reserved1;
+};
+
+struct eth_ustorm_per_queue_stat {
+       /* Per-queue RX counters: bytes then packets, split by
+        * unicast/multicast/broadcast. */
+       struct regpair rcv_ucast_bytes;
+       struct regpair rcv_mcast_bytes;
+       struct regpair rcv_bcast_bytes;
+       struct regpair rcv_ucast_pkts;
+       struct regpair rcv_mcast_pkts;
+       struct regpair rcv_bcast_pkts;
+};
+
+/*
+ * Event Ring Next Page Address
+ */
+struct event_ring_next_addr {
+       struct regpair addr /* Next Page Address */;
+       __le32 reserved[2] /* Reserved */; /* pads element to 16 bytes */
+};
+
+/*
+ * Event Ring Element
+ */
+union event_ring_element {
+       /* presumably the last element of each ring page is a next_addr link
+        * — TODO confirm against event ring producer code */
+       struct event_ring_entry entry /* Event Ring Entry */;
+       struct event_ring_next_addr next_addr /* Event Ring Next Page Address */
+         ;
+};
+
+/*
+ * Ports mode
+ */
+enum fw_flow_ctrl_mode {
+       /* NOTE(review): lowercase enumerators preserved from vendor HSI. */
+       flow_ctrl_pause,
+       flow_ctrl_pfc,
+       MAX_FW_FLOW_CTRL_MODE /* sentinel */
+};
+
+/*
+ * Integration Phase
+ */
+enum integ_phase {
+       /* Explicit values are wire/firmware-visible — do not renumber. */
+       INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */ ,
+       INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */ ,
+       INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */ ,
+       MAX_INTEG_PHASE /* sentinel */
+};
+
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+       MALICIOUS_VF_NO_ERROR /* Zero placeholder value */ ,
+       VF_PF_CHANNEL_NOT_READY
+           /* Writing to VF/PF channel when it is not ready */ ,
+       VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */ ,
+       VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */ ,
+       ETH_PACKET_TOO_SMALL
+           /* TX packet is shorter then reported on BDs or from minimal size */
+           ,
+       ETH_ILLEGAL_VLAN_MODE
+           /* Tx packet with marked as insert VLAN when its illegal */ ,
+       ETH_MTU_VIOLATION /* TX packet is greater then MTU */ ,
+       ETH_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */ ,
+       ETH_VLAN_INSERT_AND_INBAND_VLAN /* Vlan cant be added to inband tag */ ,
+       ETH_ILLEGAL_NBDS /* indicated number of BDs for the packet is illegal */
+           ,
+       ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */ ,
+       ETH_INSUFFICIENT_BDS
+           /* There are not enough BDs for transmission of even one packet */ ,
+       ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */ ,
+       ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */ ,
+       ETH_ZERO_SIZE_BD
+           /* empty BD (which not contains control flags) is illegal  */ ,
+       ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit  */ ,
+       ETH_INSUFFICIENT_PAYLOAD
+           /* TX payload smaller than expected — presumed; original comment
+            * appears missing from the generator; confirm vendor HSI */
+           ,
+       ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */ ,
+       ETH_TUNN_IPV6_EXT_NBD_ERR
+           /* Tunneled packet with IPv6+Ext without a proper number of BDs */ ,
+       MAX_MALICIOUS_VF_ERROR_ID /* sentinel */
+};
+
+/*
+ * Mstorm non-triggering VF zone
+ */
+struct mstorm_non_trigger_vf_zone {
+       struct eth_mstorm_per_queue_stat eth_queue_stat
+           /* VF statistic bucket */;
+};
+
+/*
+ * Mstorm VF zone
+ */
+struct mstorm_vf_zone {
+       /* Only the non-triggering sub-zone is defined for Mstorm. */
+       struct mstorm_non_trigger_vf_zone non_trigger
+           /* non-interrupt-triggering zone */;
+};
+
+/*
+ * personality per PF
+ */
+enum personality_type {
+       BAD_PERSONALITY_TYP,
+       PERSONALITY_ISCSI /* iSCSI and LL2 */ ,
+       PERSONALITY_FCOE /* Fcoe and LL2 */ ,
+       PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */ ,
+       PERSONALITY_RDMA /* Roce and LL2 */ ,
+       PERSONALITY_CORE /* CORE(LL2) */ ,
+       PERSONALITY_ETH /* Ethernet */ ,
+       PERSONALITY_TOE /* Toe and LL2 */ ,
+       MAX_PERSONALITY_TYPE /* sentinel */
+};
+
+/*
+ * tunnel configuration
+ */
+struct pf_start_tunnel_config {
+       /* *_flg fields gate the UDP-port values at the end of the struct;
+        * tunnel_clss_* fields select the classification scheme per tunnel
+        * type; tx_enable_* fields switch each tunnel on in the TX path. */
+       u8 set_vxlan_udp_port_flg /* Set VXLAN tunnel UDP destination port. */;
+       u8 set_geneve_udp_port_flg /* Set GENEVE tunnel UDP destination port. */
+         ;
+       u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+       u8 tx_enable_l2geneve /* If set, enable l2 GENEVE tunnel in TX path. */
+         ;
+       u8 tx_enable_ipgeneve /* If set, enable IP GENEVE tunnel in TX path. */
+         ;
+       u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+       u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+       u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+       u8 tunnel_clss_l2geneve
+           /* Classification scheme for l2 GENEVE tunnel. */;
+       u8 tunnel_clss_ipgeneve
+           /* Classification scheme for ip GENEVE tunnel. */;
+       u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
+       u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+       __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+       __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+};
+
+/*
+ * Ramrod data for PF start ramrod
+ */
+struct pf_start_ramrod_data {
+       struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
+       struct regpair consolid_q_pbl_addr
+           /* PBL address of consolidation queue */;
+       struct pf_start_tunnel_config tunnel_config /* tunnel configuration. */
+         ;
+       __le16 event_ring_sb_id /* Status block ID */;
+       /* NOTE(review): merged a stray lone ';' (empty declarator, invalid C
+        * inside a struct) into base_vf_id below — the original comment line
+        * was evidently lost in mail transit; confirm text vs vendor HSI. */
+       u8 base_vf_id /* ID of the first VF owned by this PF */;
+       u8 num_vfs /* Amount of vfs owned by PF */;
+       u8 event_ring_num_pages /* Number of PBL pages in event ring */;
+       u8 event_ring_sb_index /* Status block index */;
+       u8 path_id /* HW path ID (engine ID) */;
+       u8 warning_as_error /* In FW asserts, treat warning as error */;
+       u8 dont_log_ramrods
+           /* If not set - throw a warning for each ramrod (for debug) */;
+       u8 personality /* define what type of personality is new PF */;
+       __le16 log_type_mask; /* mask of FW log types to enable — presumed */
+       u8 mf_mode /* Multi function mode */;
+       u8 integ_phase /* Integration phase */;
+       u8 allow_npar_tx_switching; /* enable TX switching in NPAR — presumed */
+       u8 inner_to_outer_pri_map[8]; /* inner-to-outer priority mapping */
+       u8 pri_map_valid
+           /* If inner_to_outer_pri_map is initialize then set pri_map_valid */
+         ;
+       __le32 outer_tag; /* outer (S-)tag value — presumed; confirm */
+       u8 reserved0[4];
+};
+
+/*
+ * Data for port update ramrod
+ */
+struct protocol_dcb_data {
+       u8 dcb_enable_flag /* dcbEnable flag value */;
+       u8 dcb_priority /* dcbPri flag value */;
+       u8 dcb_tc /* dcb TC value */;
+       u8 reserved; /* pads struct to 4 bytes */
+};
+
+/*
+ * tunnel configuration
+ */
+struct pf_update_tunnel_config {
+       /* Same layout idea as pf_start_tunnel_config, plus RX/TX
+        * classification-update gates at the front. */
+       u8 update_rx_pf_clss; /* update RX classification — presumed; confirm */
+       u8 update_tx_pf_clss; /* update TX classification — presumed; confirm */
+       u8 set_vxlan_udp_port_flg
+           /* Update VXLAN tunnel UDP destination port. */;
+       u8 set_geneve_udp_port_flg
+           /* Update GENEVE tunnel UDP destination port. */;
+       u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+       u8 tx_enable_l2geneve /* If set, enable l2 GENEVE tunnel in TX path. */
+         ;
+       u8 tx_enable_ipgeneve /* If set, enable IP GENEVE tunnel in TX path. */
+         ;
+       u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+       u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+       u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+       u8 tunnel_clss_l2geneve
+           /* Classification scheme for l2 GENEVE tunnel. */;
+       u8 tunnel_clss_ipgeneve
+           /* Classification scheme for ip GENEVE tunnel. */;
+       u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
+       u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+       __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
+       __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+       __le16 reserved[3];
+};
+
+/*
+ * Data for port update ramrod
+ */
+struct pf_update_ramrod_data {
+       u8 pf_id;
+       u8 update_eth_dcb_data_flag /* Update Eth DCB  data indication */;
+       u8 update_fcoe_dcb_data_flag /* Update FCOE DCB  data indication */;
+       u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB  data indication */;
+       u8 update_roce_dcb_data_flag /* Update ROCE DCB  data indication */;
+       u8 update_iwarp_dcb_data_flag /* Update IWARP DCB  data indication */;
+       u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
+       u8 reserved;
+       struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
+       struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
+       struct protocol_dcb_data iscsi_dcb_data /* core iscsi related fields */
+         ;
+       struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
+       struct protocol_dcb_data iwarp_dcb_data /* core iwarp related fields */
+         ;
+       __le16 mf_vlan /* new outer vlan id value */;
+       __le16 reserved2;
+       struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */
+         ;
+};
+
+/*
+ * Ports mode
+ */
+enum ports_mode {
+       ENGX2_PORTX1 /* 2 engines x 1 port */ ,
+       ENGX2_PORTX2 /* 2 engines x 2 ports */ ,
+       ENGX1_PORTX1 /* 1 engine  x 1 port */ ,
+       ENGX1_PORTX2 /* 1 engine  x 2 ports */ ,
+       ENGX1_PORTX4 /* 1 engine  x 4 ports */ ,
+       MAX_PORTS_MODE
+};
+
+/*
+ * RDMA TX Stats
+ */
+struct rdma_sent_stats {
+       struct regpair sent_bytes /* number of total RDMA bytes sent */;
+       struct regpair sent_pkts /* number of total RDMA packets sent */;
+};
+
+/*
+ * Pstorm non-triggering VF zone
+ */
+struct pstorm_non_trigger_vf_zone {
+       struct eth_pstorm_per_queue_stat eth_queue_stat
+           /* VF statistic bucket */;
+       struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
+};
+
+/*
+ * Pstorm VF zone
+ */
+struct pstorm_vf_zone {
+       struct pstorm_non_trigger_vf_zone non_trigger
+           /* non-interrupt-triggering zone */;
+       struct regpair reserved[7] /* vf_zone size must be power of 2 */;
+};
+
+/*
+ * Ramrod Header of SPQE
+ */
+struct ramrod_header {
+       __le32 cid /* Slowpath Connection CID */;
+       u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
+       u8 protocol_id /* Ramrod Protocol ID */;
+       __le16 echo /* Ramrod echo */;
+};
+
+/*
+ * RDMA RX Stats
+ */
+struct rdma_rcv_stats {
+       struct regpair rcv_bytes /* number of total RDMA bytes received */;
+       struct regpair rcv_pkts /* number of total RDMA packets received */;
+};
+
+/*
+ * Slowpath Element (SPQE)
+ */
+struct slow_path_element {
+       struct ramrod_header hdr /* Ramrod Header */;
+       struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
+};
+
+/*
+ * Tstorm non-triggering VF zone
+ */
+struct tstorm_non_trigger_vf_zone {
+       struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
+};
+
+struct tstorm_per_port_stat {
+       struct regpair trunc_error_discard
+           /* packet is dropped because it was truncated in NIG */;
+       struct regpair mac_error_discard
+           /* packet is dropped because of Ethernet FCS error */;
+       struct regpair mftag_filter_discard
+           /* packet is dropped because classification was unsuccessful */;
+       struct regpair eth_mac_filter_discard;
+       struct regpair ll2_mac_filter_discard;
+       struct regpair ll2_conn_disabled_discard;
+       struct regpair iscsi_irregular_pkt
+           /* packet is an ISCSI irregular packet */;
+       struct regpair fcoe_irregular_pkt
+           /* packet is an FCOE irregular packet */;
+       struct regpair roce_irregular_pkt
+           /* packet is an ROCE irregular packet */;
+       struct regpair eth_irregular_pkt /* packet is an ETH irregular packet */
+         ;
+       struct regpair toe_irregular_pkt /* packet is an TOE irregular packet */
+         ;
+       struct regpair preroce_irregular_pkt
+           /* packet is an PREROCE irregular packet */;
+};
+
+/*
+ * Tstorm VF zone
+ */
+struct tstorm_vf_zone {
+       struct tstorm_non_trigger_vf_zone non_trigger
+           /* non-interrupt-triggering zone */;
+};
+
+/*
+ * Tunnel classification scheme
+ */
+enum tunnel_clss {
+       TUNNEL_CLSS_MAC_VLAN =
+           0
+           /* Use MAC & VLAN from first L2 header for vport classification. */
+           ,
+       TUNNEL_CLSS_MAC_VNI
+           ,
+       TUNNEL_CLSS_INNER_MAC_VLAN
+           /* Use MAC and VLAN from last L2 header for vport classification */
+           ,
+       TUNNEL_CLSS_INNER_MAC_VNI
+           ,
+       MAX_TUNNEL_CLSS
+};
+
+/*
+ * Ustorm non-triggering VF zone
+ */
+struct ustorm_non_trigger_vf_zone {
+       struct eth_ustorm_per_queue_stat eth_queue_stat
+           /* VF statistic bucket */;
+       struct regpair vf_pf_msg_addr /* VF-PF message address */;
+};
+
+/*
+ * Ustorm triggering VF zone
+ */
+struct ustorm_trigger_vf_zone {
+       u8 vf_pf_msg_valid /* VF-PF message valid flag */;
+       u8 reserved[7];
+};
+
+/*
+ * Ustorm VF zone
+ */
+struct ustorm_vf_zone {
+       struct ustorm_non_trigger_vf_zone non_trigger
+           /* non-interrupt-triggering zone */;
+       struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
+};
+
+/*
+ * VF-PF channel data
+ */
+struct vf_pf_channel_data {
+       __le32 ready;
+       u8 valid;
+       u8 reserved0;
+       __le16 reserved1;
+};
+
+/*
+ * Ramrod data for VF start ramrod
+ */
+struct vf_start_ramrod_data {
+       u8 vf_id /* VF ID */;
+       u8 enable_flr_ack;
+       __le16 opaque_fid /* VF opaque FID */;
+       u8 personality /* define what type of personality is new VF */;
+       u8 reserved[3];
+};
+
+/*
+ * Ramrod data for VF stop ramrod
+ */
+struct vf_stop_ramrod_data {
+       u8 vf_id /* VF ID */;
+       u8 reserved0;
+       __le16 reserved1;
+       __le32 reserved2;
+};
+
+/*
+ * Attentions status block
+ */
+struct atten_status_block {
+       __le32 atten_bits;
+       __le32 atten_ack;
+       __le16 reserved0;
+       __le16 sb_index /* status block running index */;
+       __le32 reserved1;
+};
+
+enum block_addr {
+       GRCBASE_GRC = 0x50000,
+       GRCBASE_MISCS = 0x9000,
+       GRCBASE_MISC = 0x8000,
+       GRCBASE_DBU = 0xa000,
+       GRCBASE_PGLUE_B = 0x2a8000,
+       GRCBASE_CNIG = 0x218000,
+       GRCBASE_CPMU = 0x30000,
+       GRCBASE_NCSI = 0x40000,
+       GRCBASE_OPTE = 0x53000,
+       GRCBASE_BMB = 0x540000,
+       GRCBASE_PCIE = 0x54000,
+       GRCBASE_MCP = 0xe00000,
+       GRCBASE_MCP2 = 0x52000,
+       GRCBASE_PSWHST = 0x2a0000,
+       GRCBASE_PSWHST2 = 0x29e000,
+       GRCBASE_PSWRD = 0x29c000,
+       GRCBASE_PSWRD2 = 0x29d000,
+       GRCBASE_PSWWR = 0x29a000,
+       GRCBASE_PSWWR2 = 0x29b000,
+       GRCBASE_PSWRQ = 0x280000,
+       GRCBASE_PSWRQ2 = 0x240000,
+       GRCBASE_PGLCS = 0x0,
+       GRCBASE_DMAE = 0xc000,
+       GRCBASE_PTU = 0x560000,
+       GRCBASE_TCM = 0x1180000,
+       GRCBASE_MCM = 0x1200000,
+       GRCBASE_UCM = 0x1280000,
+       GRCBASE_XCM = 0x1000000,
+       GRCBASE_YCM = 0x1080000,
+       GRCBASE_PCM = 0x1100000,
+       GRCBASE_QM = 0x2f0000,
+       GRCBASE_TM = 0x2c0000,
+       GRCBASE_DORQ = 0x100000,
+       GRCBASE_BRB = 0x340000,
+       GRCBASE_SRC = 0x238000,
+       GRCBASE_PRS = 0x1f0000,
+       GRCBASE_TSDM = 0xfb0000,
+       GRCBASE_MSDM = 0xfc0000,
+       GRCBASE_USDM = 0xfd0000,
+       GRCBASE_XSDM = 0xf80000,
+       GRCBASE_YSDM = 0xf90000,
+       GRCBASE_PSDM = 0xfa0000,
+       GRCBASE_TSEM = 0x1700000,
+       GRCBASE_MSEM = 0x1800000,
+       GRCBASE_USEM = 0x1900000,
+       GRCBASE_XSEM = 0x1400000,
+       GRCBASE_YSEM = 0x1500000,
+       GRCBASE_PSEM = 0x1600000,
+       GRCBASE_RSS = 0x238800,
+       GRCBASE_TMLD = 0x4d0000,
+       GRCBASE_MULD = 0x4e0000,
+       GRCBASE_YULD = 0x4c8000,
+       GRCBASE_XYLD = 0x4c0000,
+       GRCBASE_PRM = 0x230000,
+       GRCBASE_PBF_PB1 = 0xda0000,
+       GRCBASE_PBF_PB2 = 0xda4000,
+       GRCBASE_RPB = 0x23c000,
+       GRCBASE_BTB = 0xdb0000,
+       GRCBASE_PBF = 0xd80000,
+       GRCBASE_RDIF = 0x300000,
+       GRCBASE_TDIF = 0x310000,
+       GRCBASE_CDU = 0x580000,
+       GRCBASE_CCFC = 0x2e0000,
+       GRCBASE_TCFC = 0x2d0000,
+       GRCBASE_IGU = 0x180000,
+       GRCBASE_CAU = 0x1c0000,
+       GRCBASE_UMAC = 0x51000,
+       GRCBASE_XMAC = 0x210000,
+       GRCBASE_DBG = 0x10000,
+       GRCBASE_NIG = 0x500000,
+       GRCBASE_WOL = 0x600000,
+       GRCBASE_BMBN = 0x610000,
+       GRCBASE_IPC = 0x20000,
+       GRCBASE_NWM = 0x800000,
+       GRCBASE_NWS = 0x700000,
+       GRCBASE_MS = 0x6a0000,
+       GRCBASE_PHY_PCIE = 0x620000,
+       GRCBASE_MISC_AEU = 0x8000,
+       GRCBASE_BAR0_MAP = 0x1c00000,
+       MAX_BLOCK_ADDR
+};
+
+enum block_id {
+       BLOCK_GRC,
+       BLOCK_MISCS,
+       BLOCK_MISC,
+       BLOCK_DBU,
+       BLOCK_PGLUE_B,
+       BLOCK_CNIG,
+       BLOCK_CPMU,
+       BLOCK_NCSI,
+       BLOCK_OPTE,
+       BLOCK_BMB,
+       BLOCK_PCIE,
+       BLOCK_MCP,
+       BLOCK_MCP2,
+       BLOCK_PSWHST,
+       BLOCK_PSWHST2,
+       BLOCK_PSWRD,
+       BLOCK_PSWRD2,
+       BLOCK_PSWWR,
+       BLOCK_PSWWR2,
+       BLOCK_PSWRQ,
+       BLOCK_PSWRQ2,
+       BLOCK_PGLCS,
+       BLOCK_DMAE,
+       BLOCK_PTU,
+       BLOCK_TCM,
+       BLOCK_MCM,
+       BLOCK_UCM,
+       BLOCK_XCM,
+       BLOCK_YCM,
+       BLOCK_PCM,
+       BLOCK_QM,
+       BLOCK_TM,
+       BLOCK_DORQ,
+       BLOCK_BRB,
+       BLOCK_SRC,
+       BLOCK_PRS,
+       BLOCK_TSDM,
+       BLOCK_MSDM,
+       BLOCK_USDM,
+       BLOCK_XSDM,
+       BLOCK_YSDM,
+       BLOCK_PSDM,
+       BLOCK_TSEM,
+       BLOCK_MSEM,
+       BLOCK_USEM,
+       BLOCK_XSEM,
+       BLOCK_YSEM,
+       BLOCK_PSEM,
+       BLOCK_RSS,
+       BLOCK_TMLD,
+       BLOCK_MULD,
+       BLOCK_YULD,
+       BLOCK_XYLD,
+       BLOCK_PRM,
+       BLOCK_PBF_PB1,
+       BLOCK_PBF_PB2,
+       BLOCK_RPB,
+       BLOCK_BTB,
+       BLOCK_PBF,
+       BLOCK_RDIF,
+       BLOCK_TDIF,
+       BLOCK_CDU,
+       BLOCK_CCFC,
+       BLOCK_TCFC,
+       BLOCK_IGU,
+       BLOCK_CAU,
+       BLOCK_UMAC,
+       BLOCK_XMAC,
+       BLOCK_DBG,
+       BLOCK_NIG,
+       BLOCK_WOL,
+       BLOCK_BMBN,
+       BLOCK_IPC,
+       BLOCK_NWM,
+       BLOCK_NWS,
+       BLOCK_MS,
+       BLOCK_PHY_PCIE,
+       BLOCK_MISC_AEU,
+       BLOCK_BAR0_MAP,
+       MAX_BLOCK_ID
+};
+
+/*
+ * Igu cleanup bit values to distinguish between clean or producer consumer
+ */
+enum command_type_bit {
+       IGU_COMMAND_TYPE_NOP = 0,
+       IGU_COMMAND_TYPE_SET = 1,
+       MAX_COMMAND_TYPE_BIT
+};
+
+/*
+ * DMAE command
+ */
+struct dmae_cmd {
+       __le32 opcode;
+#define DMAE_CMD_SRC_MASK              0x1
+#define DMAE_CMD_SRC_SHIFT             0
+#define DMAE_CMD_DST_MASK              0x3
+#define DMAE_CMD_DST_SHIFT             1
+#define DMAE_CMD_C_DST_MASK            0x1
+#define DMAE_CMD_C_DST_SHIFT           3
+#define DMAE_CMD_CRC_RESET_MASK        0x1
+#define DMAE_CMD_CRC_RESET_SHIFT       4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK   0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT  5
+#define DMAE_CMD_DST_ADDR_RESET_MASK   0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT  6
+#define DMAE_CMD_COMP_FUNC_MASK        0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT       7
+#define DMAE_CMD_COMP_WORD_EN_MASK     0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT    8
+#define DMAE_CMD_COMP_CRC_EN_MASK      0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT     9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK  0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK        0x1
+#define DMAE_CMD_RESERVED1_SHIFT       13
+#define DMAE_CMD_ENDIANITY_MODE_MASK   0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT  14
+#define DMAE_CMD_ERR_HANDLING_MASK     0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT    16
+#define DMAE_CMD_PORT_ID_MASK          0x3
+#define DMAE_CMD_PORT_ID_SHIFT         18
+#define DMAE_CMD_SRC_PF_ID_MASK        0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT       20
+#define DMAE_CMD_DST_PF_ID_MASK        0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT       24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK  0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK  0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK        0x3
+#define DMAE_CMD_RESERVED2_SHIFT       30
+       __le32 src_addr_lo
+           /* PCIe source address low in bytes or GRC source address in DW */;
+       __le32 src_addr_hi;
+       __le32 dst_addr_lo;
+       __le32 dst_addr_hi;
+       __le16 length /* Length in DW */;
+       __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK        0xFF
+#define DMAE_CMD_SRC_VF_ID_SHIFT       0
+#define DMAE_CMD_DST_VF_ID_MASK        0xFF
+#define DMAE_CMD_DST_VF_ID_SHIFT       8
+       __le32 comp_addr_lo /* PCIe completion address low or grc address */;
+       __le32 comp_addr_hi;
+       __le32 comp_val /* Value to write to completion address */;
+       __le32 crc32 /* crc32 result */;
+       __le32 crc_32_c /* crc32_c result */;
+       __le16 crc16 /* crc16 result */;
+       __le16 crc16_c /* crc16_c result */;
+       __le16 crc10 /* crc_t10 result */;
+       __le16 reserved;
+       __le16 xsum16 /* checksum16 result  */;
+       __le16 xsum8 /* checksum8 result  */;
+};
+
+struct fw_ver_num {
+       u8 major /* Firmware major version number */;
+       u8 minor /* Firmware minor version number */;
+       u8 rev /* Firmware revision version number */;
+       u8 eng /* Firmware engineering version number (for bootleg versions) */
+         ;
+};
+
+struct fw_ver_info {
+       __le16 tools_ver /* Tools version number */;
+       u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
+       u8 reserved1;
+       struct fw_ver_num num /* FW version number */;
+       __le32 timestamp /* FW Timestamp in unix time  (sec. since 1970) */;
+       __le32 reserved2;
+};
+
+struct storm_ram_section {
+       __le16 offset
+           /* The offset of the section in the RAM (in 64 bit units) */;
+       __le16 size /* The size of the section (in 64 bit units) */;
+};
+
+struct fw_info {
+       struct fw_ver_info ver /* FW version information */;
+       struct storm_ram_section fw_asserts_section
+           /* The FW Asserts offset/size in Storm RAM */;
+       __le32 reserved;
+};
+
+struct fw_info_location {
+       __le32 grc_addr /* GRC address where the fw_info struct is located. */;
+       __le32 size
+           /* Size of the fw_info structure (that's located at the grc_addr). */
+         ;
+};
+
+/*
+ * IGU cleanup command
+ */
+struct igu_cleanup {
+       __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK     0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT    0
+#define IGU_CLEANUP_CLEANUP_SET_MASK   0x1
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT  27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK  0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK  0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+       __le32 reserved1;
+};
+
+/*
+ * IGU firmware driver command
+ */
+union igu_command {
+       struct igu_prod_cons_update prod_cons_update;
+       struct igu_cleanup cleanup;
+};
+
+/*
+ * IGU firmware driver command
+ */
+struct igu_command_reg_ctrl {
+       __le16 opaque_fid;
+       __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK  0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+/*
+ * IGU mapping line structure
+ */
+struct igu_mapping_line {
+       __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK            0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT           0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK    0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT   1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK  0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK         0x1
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT        17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK        0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT       18
+#define IGU_MAPPING_LINE_RESERVED_MASK         0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT        24
+};
+
+/*
+ * IGU MSIX line structure
+ */
+struct igu_msix_vector {
+       struct regpair address;
+       __le32 data;
+       __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK      0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT     0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK     0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT    1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK  0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK     0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT    24
+};
+
+enum init_modes {
+       MODE_BB_A0,
+       MODE_BB_B0,
+       MODE_K2,
+       MODE_ASIC,
+       MODE_EMUL_REDUCED,
+       MODE_EMUL_FULL,
+       MODE_FPGA,
+       MODE_CHIPSIM,
+       MODE_SF,
+       MODE_MF_SD,
+       MODE_MF_SI,
+       MODE_PORTS_PER_ENG_1,
+       MODE_PORTS_PER_ENG_2,
+       MODE_PORTS_PER_ENG_4,
+       MODE_100G,
+       MODE_EAGLE_ENG1_WORKAROUND,
+       MAX_INIT_MODES
+};
+
+enum init_phases {
+       PHASE_ENGINE,
+       PHASE_PORT,
+       PHASE_PF,
+       PHASE_VF,
+       PHASE_QM_PF,
+       MAX_INIT_PHASES
+};
+
+struct mstorm_core_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+       __le16 word0 /* word0 */;
+       __le16 word1 /* word1 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+};
+
+/*
+ * per encapsulation type enabling flags
+ */
+struct prs_reg_encapsulation_type_en {
+       u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK     0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT    0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK      0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT     1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK            0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT           2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK            0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT           3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK  0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK   0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT  5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT               6
+};
+
+enum pxp_tph_st_hint {
+       TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */ ,
+       TPH_ST_HINT_REQUESTER /* Read/Write access by Device */ ,
+       TPH_ST_HINT_TARGET
+           /* Device Write and Host Read, or Host Write and Device Read */ ,
+       TPH_ST_HINT_TARGET_PRIO,
+       MAX_PXP_TPH_ST_HINT
+};
+
+/*
+ * QM hardware structure of enable bypass credit mask
+ */
+struct qm_rf_bypass_mask {
+       u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK    0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT   0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK  0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK      0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT     2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK      0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT     3
+#define QM_RF_BYPASS_MASK_PFRL_MASK       0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT      4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK    0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT   5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK    0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT   6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK  0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/*
+ * QM hardware structure of opportunistic credit mask
+ */
+struct qm_rf_opportunistic_mask {
+       __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT    0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT    1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK       0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT      2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK       0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT      3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK        0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT       4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT    5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK     0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT    6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK   0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT  7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK  0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK   0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT  9
+};
+
+/*
+ * QM hardware structure of QM map memory
+ */
+struct qm_rf_pq_map {
+       __le32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK          0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT         0
+#define QM_RF_PQ_MAP_RL_ID_MASK             0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT            1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK          0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT         9
+#define QM_RF_PQ_MAP_VOQ_MASK               0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT              18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK  0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK          0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT         25
+#define QM_RF_PQ_MAP_RESERVED_MASK          0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT         26
+};
+
+/*
+ * Completion params for aggregated interrupt completion
+ */
+struct sdm_agg_int_comp_params {
+       __le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK      0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT     0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK  0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK     0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT    7
+};
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen {
+       __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK  0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK   0xF
+#define SDM_OP_GEN_COMP_TYPE_SHIFT  16
+#define SDM_OP_GEN_RESERVED_MASK    0xFFF
+#define SDM_OP_GEN_RESERVED_SHIFT   20
+};
+
+struct ystorm_core_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK     0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT    0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK     0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT    1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK      0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT     2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK      0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT     4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK      0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT     6
+       u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK    0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT   0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK    0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT   1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK    0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT   2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK  0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK  0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK  0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK  0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK  0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+       u8 byte2 /* byte2 */;
+       u8 byte3 /* byte3 */;
+       __le16 word0 /* word0 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le16 word1 /* word1 */;
+       __le16 word2 /* word2 */;
+       __le16 word3 /* word3 */;
+       __le16 word4 /* word4 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+};
+
+#endif /* __ECORE_HSI_COMMON__ */
diff --git a/drivers/net/qede/ecore/ecore_hsi_eth.h b/drivers/net/qede/ecore/ecore_hsi_eth.h
new file mode 100644
index 0000000..fcd5e66
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_hsi_eth.h
@@ -0,0 +1,1912 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_ETH__
+#define __ECORE_HSI_ETH__
+/************************************************************************/
+/* Add include to common eth target for both eCore and protocol driver */
+/************************************************************************/
+#include "eth_common.h"
+
+/*
+ * The eth storm context for the Tstorm
+ */
+struct tstorm_eth_conn_st_ctx {
+       /* Opaque Tstorm-private storage (16 bytes); contents owned by FW. */
+       __le32 reserved[4];
+};
+
+/*
+ * The eth storm context for the Pstorm
+ */
+struct pstorm_eth_conn_st_ctx {
+       /* Opaque Pstorm-private storage (32 bytes); contents owned by FW. */
+       __le32 reserved[8];
+};
+
+/*
+ * The eth storm context for the Xstorm
+ */
+struct xstorm_eth_conn_st_ctx {
+       /* Opaque Xstorm-private storage (240 bytes); contents owned by FW. */
+       __le32 reserved[60];
+};
+
+/*
+ * Xstorm aggregative context of an eth connection.
+ * Field order, widths and the MASK/SHIFT bit encodings below are dictated
+ * by the device firmware ABI -- do not reorder, rename or resize anything.
+ * Access the flagsN bytes only through the matching *_MASK/*_SHIFT macros.
+ */
+struct xstorm_eth_conn_ag_ctx {
+       u8 reserved0 /* cdu_validation */;
+       u8 eth_state /* state */;
+       u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK               0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+       /* flags2..flags5: generic 2-bit completion-flag (CF) fields */
+       u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK                     0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK                    0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK                   0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK            0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK                0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK              0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK               0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT                  7
+       /* flags8..flags10: per-CF enable bits (CFxEN gates CFx above) */
+       u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK                   0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK                  0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT             7
+       /* flags11..flags13: aggregation rule enable bits */
+       u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK              0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK                 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK                0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK            0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+       /* flags14: EDPM (EDB doorbell mode) configuration bits */
+       u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK              0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+       u8 edpm_event_id /* byte2 */;
+       __le16 physical_q0 /* physical_q0 */;
+       __le16 word1 /* physical_q1 */;
+       __le16 edpm_num_bds /* physical_q2 */;
+       __le16 tx_bd_cons /* word3 */;
+       __le16 tx_bd_prod /* word4 */;
+       __le16 go_to_bd_cons /* word5 */;
+       __le16 conn_dpi /* conn_dpi */;
+       u8 byte3 /* byte3 */;
+       u8 byte4 /* byte4 */;
+       u8 byte5 /* byte5 */;
+       u8 byte6 /* byte6 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+       __le32 reg4 /* reg4 */;
+       __le32 reg5 /* cf_array0 */;
+       __le32 reg6 /* cf_array1 */;
+       __le16 word7 /* word7 */;
+       __le16 word8 /* word8 */;
+       __le16 word9 /* word9 */;
+       __le16 word10 /* word10 */;
+       __le32 reg7 /* reg7 */;
+       __le32 reg8 /* reg8 */;
+       __le32 reg9 /* reg9 */;
+       u8 byte7 /* byte7 */;
+       u8 byte8 /* byte8 */;
+       u8 byte9 /* byte9 */;
+       u8 byte10 /* byte10 */;
+       u8 byte11 /* byte11 */;
+       u8 byte12 /* byte12 */;
+       u8 byte13 /* byte13 */;
+       u8 byte14 /* byte14 */;
+       u8 byte15 /* byte15 */;
+       u8 byte16 /* byte16 */;
+       __le16 word11 /* word11 */;
+       __le32 reg10 /* reg10 */;
+       __le32 reg11 /* reg11 */;
+       __le32 reg12 /* reg12 */;
+       __le32 reg13 /* reg13 */;
+       __le32 reg14 /* reg14 */;
+       __le32 reg15 /* reg15 */;
+       __le32 reg16 /* reg16 */;
+       __le32 reg17 /* reg17 */;
+       __le32 reg18 /* reg18 */;
+       __le32 reg19 /* reg19 */;
+       __le16 word12 /* word12 */;
+       __le16 word13 /* word13 */;
+       __le16 word14 /* word14 */;
+       __le16 word15 /* word15 */;
+};
+
+/*
+ * The eth storm context for the Ystorm
+ */
+struct ystorm_eth_conn_st_ctx {
+       /* Opaque Ystorm-private storage (32 bytes); contents owned by FW. */
+       __le32 reserved[8];
+};
+
+/*
+ * Ystorm aggregative context of an eth connection.
+ * Layout and bit encodings are fixed by the firmware ABI; use the
+ * MASK/SHIFT macros to access the flagsN bytes.
+ */
+struct ystorm_eth_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK                  0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK                  0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK     0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT    2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK      0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT     4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK                   0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT                  6
+       u8 flags1;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK  0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK   0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT  1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK                 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT              3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT              4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT              5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT              6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK               0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT              7
+       u8 byte2 /* byte2 */;
+       u8 byte3 /* byte3 */;
+       __le16 word0 /* word0 */;
+       __le32 terminate_spqe /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le16 tx_bd_cons_upd /* word1 */;
+       __le16 word2 /* word2 */;
+       __le16 word3 /* word3 */;
+       __le16 word4 /* word4 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+};
+
+/*
+ * Tstorm aggregative context of an eth connection.
+ * Layout and bit encodings are fixed by the firmware ABI; use the
+ * MASK/SHIFT macros to access the flagsN bytes.
+ */
+struct tstorm_eth_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT     0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT     1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT     2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT     3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT     4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK      0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT     5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT      6
+       u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT      2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT      4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT      6
+       u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT      2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT      4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT      6
+       u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK       0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT      0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK      0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT     2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT    4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT    5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT    6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT    7
+       u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT    0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT    1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT    2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT    3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT    4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK     0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT    5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK    0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT   6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT  7
+       u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT  0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT  1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT  2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT  3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT  4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK  0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT  6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK   0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT  7
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+       __le32 reg4 /* reg4 */;
+       __le32 reg5 /* reg5 */;
+       __le32 reg6 /* reg6 */;
+       __le32 reg7 /* reg7 */;
+       __le32 reg8 /* reg8 */;
+       u8 byte2 /* byte2 */;
+       u8 byte3 /* byte3 */;
+       __le16 rx_bd_cons /* word0 */;
+       u8 byte4 /* byte4 */;
+       u8 byte5 /* byte5 */;
+       __le16 rx_bd_prod /* word1 */;
+       __le16 word2 /* conn_dpi */;
+       __le16 word3 /* word3 */;
+       __le32 reg9 /* reg9 */;
+       __le32 reg10 /* reg10 */;
+};
+
+/*
+ * Ustorm aggregative context of an eth connection.
+ * Layout and bit encodings are fixed by the firmware ABI; use the
+ * MASK/SHIFT macros to access the flagsN bytes.
+ */
+struct ustorm_eth_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT                   0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK                    0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT                   1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK     0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT    2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK     0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT    4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK                     0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT                    6
+       u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK                     0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT                    0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK               0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT              2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK               0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT              4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK       0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT      6
+       u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK  0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK  0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK                   0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT                  2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK                   0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT                  3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK            0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT           4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK            0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT           5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK    0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT   6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT                7
+       u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT                0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT                1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT                2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT                3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT                4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT                5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT                6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK                 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT                7
+       u8 byte2 /* byte2 */;
+       u8 byte3 /* byte3 */;
+       __le16 word0 /* conn_dpi */;
+       __le16 tx_bd_cons /* word1 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       /* NOTE: "coallecing" spelling is part of the frozen interface name */
+       __le32 tx_int_coallecing_timeset /* reg3 */;
+       __le16 tx_drv_bd_cons /* word2 */;
+       __le16 rx_drv_cqe_cons /* word3 */;
+};
+
+/*
+ * The eth storm context for the Ustorm
+ */
+struct ustorm_eth_conn_st_ctx {
+       /* Opaque Ustorm-private storage (160 bytes); contents owned by FW. */
+       __le32 reserved[40];
+};
+
+/*
+ * The eth storm context for the Mstorm
+ */
+struct mstorm_eth_conn_st_ctx {
+       /* Opaque Mstorm-private storage (32 bytes); contents owned by FW. */
+       __le32 reserved[8];
+};
+
+/*
+ * eth connection context
+ */
+/*
+ * Complete per-connection eth context image as laid out in device memory.
+ * The ordering of the storm sub-contexts (and the explicit tstorm padding)
+ * mirrors the hardware/firmware layout and must not be changed.
+ */
+struct eth_conn_context {
+       struct tstorm_eth_conn_st_ctx tstorm_st_context
+           /* tstorm storm context */;
+       struct regpair tstorm_st_padding[2] /* padding */;
+       struct pstorm_eth_conn_st_ctx pstorm_st_context
+           /* pstorm storm context */;
+       struct xstorm_eth_conn_st_ctx xstorm_st_context
+           /* xstorm storm context */;
+       struct xstorm_eth_conn_ag_ctx xstorm_ag_context
+           /* xstorm aggregative context */;
+       struct ystorm_eth_conn_st_ctx ystorm_st_context
+           /* ystorm storm context */;
+       struct ystorm_eth_conn_ag_ctx ystorm_ag_context
+           /* ystorm aggregative context */;
+       struct tstorm_eth_conn_ag_ctx tstorm_ag_context
+           /* tstorm aggregative context */;
+       struct ustorm_eth_conn_ag_ctx ustorm_ag_context
+           /* ustorm aggregative context */;
+       struct ustorm_eth_conn_st_ctx ustorm_st_context
+           /* ustorm storm context */;
+       struct mstorm_eth_conn_st_ctx mstorm_st_context
+           /* mstorm storm context */;
+};
+
+/*
+ * Return codes of the Ethernet Ramrod filter commands
+ */
+enum eth_error_code {
+       /* Values are assigned by the firmware; keep the declaration order. */
+       ETH_OK = 0x00 /* command succeeded */ ,
+       ETH_FILTERS_MAC_ADD_FAIL_FULL
+           /* mac add filters command failed due to cam full state */ ,
+       ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2
+           /* mac add filters command failed due to mtt2 full state */ ,
+       ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2
+           /* mac add filters command failed due to duplicate mac address */ ,
+       ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2
+           /* mac add filters command failed due to duplicate mac address */ ,
+       ETH_FILTERS_MAC_DEL_FAIL_NOF
+           /* mac delete filters command failed due to not found state */ ,
+       ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2
+           /* mac delete filters command failed due to not found state */ ,
+       ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2
+           /* mac delete filters command failed due to not found state */ ,
+       ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC
+           /* mac add filters command failed due to MAC Address of
+            * 00:00:00:00:00:00
+            */
+           ,
+       ETH_FILTERS_VLAN_ADD_FAIL_FULL
+           /* vlan add filters command failed due to cam full state */ ,
+       ETH_FILTERS_VLAN_ADD_FAIL_DUP
+           /* vlan add filters command failed due to duplicate VLAN filter */ ,
+       ETH_FILTERS_VLAN_DEL_FAIL_NOF
+           /* vlan delete filters command failed due to not found state */ ,
+       ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1
+           /* vlan delete filters command failed due to not found state */ ,
+       ETH_FILTERS_PAIR_ADD_FAIL_DUP
+           /* pair add filters command failed due to duplicate request */ ,
+       ETH_FILTERS_PAIR_ADD_FAIL_FULL
+           /* pair add filters command failed due to full state */ ,
+       ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC
+           /* pair add filters command failed due to full state */ ,
+       ETH_FILTERS_PAIR_DEL_FAIL_NOF
+           /* pair add filters command failed due not found state */ ,
+       ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1
+           /* pair add filters command failed due not found state */ ,
+       ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC
+           /* pair add filters command failed due to MAC Address of
+            * 00:00:00:00:00:00
+            */
+           ,
+       ETH_FILTERS_VNI_ADD_FAIL_FULL
+           /* vni add filters command failed due to cam full state */ ,
+       ETH_FILTERS_VNI_ADD_FAIL_DUP
+           /* vni add filters command failed due to duplicate VNI filter */ ,
+       MAX_ETH_ERROR_CODE
+};
+
+/*
+ * opcodes for the event ring
+ */
+enum eth_event_opcode {
+       /* Values must match the firmware's event-ring opcode numbering;
+        * do not reorder or insert entries.
+        */
+       ETH_EVENT_UNUSED,
+       ETH_EVENT_VPORT_START,
+       ETH_EVENT_VPORT_UPDATE,
+       ETH_EVENT_VPORT_STOP,
+       ETH_EVENT_TX_QUEUE_START,
+       ETH_EVENT_TX_QUEUE_STOP,
+       ETH_EVENT_RX_QUEUE_START,
+       ETH_EVENT_RX_QUEUE_UPDATE,
+       ETH_EVENT_RX_QUEUE_STOP,
+       ETH_EVENT_FILTERS_UPDATE,
+       ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+       ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+       ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
+       ETH_EVENT_RX_ADD_UDP_FILTER,
+       ETH_EVENT_RX_DELETE_UDP_FILTER,
+       ETH_EVENT_RX_ADD_GFT_FILTER,
+       ETH_EVENT_RX_DELETE_GFT_FILTER,
+       ETH_EVENT_RX_CREATE_GFT_ACTION,
+       MAX_ETH_EVENT_OPCODE
+};
+
+/*
+ * Ethernet classification filter actions: remove/add/remove-all
+ */
+enum eth_filter_action {
+       /* Used in eth_filter_cmd.action; values are firmware-defined. */
+       ETH_FILTER_ACTION_UNUSED,
+       ETH_FILTER_ACTION_REMOVE,
+       ETH_FILTER_ACTION_ADD,
+       ETH_FILTER_ACTION_REMOVE_ALL
+           /* Remove all filters of given type and vport ID. */ ,
+       MAX_ETH_FILTER_ACTION
+};
+
+/*
+ * Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_cmd {
+       u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
+       u8 vport_id /* the vport id */;
+       u8 action /* filter command action: add/remove/replace */;
+       u8 reserved0;
+       __le32 vni /* VNI value, used by VNI/MAC-VNI filter types */;
+       __le16 mac_lsb /* MAC address, least-significant 16 bits */;
+       __le16 mac_mid /* MAC address, middle 16 bits */;
+       __le16 mac_msb /* MAC address, most-significant 16 bits */;
+       __le16 vlan_id /* VLAN ID, used by VLAN/pair filter types */;
+};
+
+/*
+ *  $$KEEP_ENDIANNESS$$
+ */
+struct eth_filter_cmd_header {
+       u8 rx /* If set, apply these commands to the RX path */;
+       u8 tx /* If set, apply these commands to the TX path */;
+       u8 cmd_cnt /* Number of filter commands */;
+       /* If set, FW asserts (instead of returning an error code) when a
+        * filter command fails.
+        */
+       u8 assert_on_error;
+       u8 reserved1[4];
+};
+
+/*
+ * Ethernet filter types: mac/vlan/pair
+ */
+enum eth_filter_type {
+       /* Used in eth_filter_cmd.type; values are firmware-defined. */
+       ETH_FILTER_TYPE_UNUSED,
+       ETH_FILTER_TYPE_MAC /* Add/remove a MAC address */ ,
+       ETH_FILTER_TYPE_VLAN /* Add/remove a VLAN */ ,
+       ETH_FILTER_TYPE_PAIR /* Add/remove a MAC-VLAN pair */ ,
+       ETH_FILTER_TYPE_INNER_MAC /* Add/remove a inner MAC address */ ,
+       ETH_FILTER_TYPE_INNER_VLAN /* Add/remove a inner VLAN */ ,
+       ETH_FILTER_TYPE_INNER_PAIR /* Add/remove a inner MAC-VLAN pair */ ,
+       ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR /* Add/remove a inner MAC-VNI pair */
+           ,
+       ETH_FILTER_TYPE_MAC_VNI_PAIR /* Add/remove a MAC-VNI pair */ ,
+       ETH_FILTER_TYPE_VNI /* Add/remove a VNI */ ,
+       MAX_ETH_FILTER_TYPE
+};
+
+/*
+ * eth IPv4 Fragment Type
+ */
+enum eth_ipv4_frag_type {
+       ETH_IPV4_NOT_FRAG /* IPV4 Packet Not Fragmented */ ,
+       ETH_IPV4_FIRST_FRAG
+           /* First Fragment of IPv4 Packet (contains headers) */ ,
+       ETH_IPV4_NON_FIRST_FRAG
+           /* Non-First Fragment of IPv4 Packet (does not contain headers) */ ,
+       MAX_ETH_IPV4_FRAG_TYPE /* sentinel */
+};
+
+/*
+ * eth IP type (IPv4/IPv6)
+ */
+enum eth_ip_type {
+       ETH_IPV4 /* IPv4 */ ,
+       ETH_IPV6 /* IPv6 */ ,
+       MAX_ETH_IP_TYPE /* sentinel */
+};
+
+/*
+ * Ethernet Ramrod Command IDs
+ */
+enum eth_ramrod_cmd_id {
+       ETH_RAMROD_UNUSED,
+       ETH_RAMROD_VPORT_START /* VPort Start Ramrod */ ,
+       ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */ ,
+       ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */ ,
+       ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */ ,
+       ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */ ,
+       ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */ ,
+       ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */ ,
+       ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */ ,
+       ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */ ,
+       ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION
+           /* RX - Create an Openflow Action */ ,
+       ETH_RAMROD_RX_ADD_OPENFLOW_FILTER
+           /* RX - Add an Openflow Filter to the Searcher */ ,
+       ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER
+           /* RX - Delete an Openflow Filter from the Searcher */ ,
+       ETH_RAMROD_RX_ADD_UDP_FILTER /* RX - Add a UDP Filter to the Searcher */
+           ,
+       ETH_RAMROD_RX_DELETE_UDP_FILTER
+           /* RX - Delete a UDP Filter from the Searcher */ ,
+       ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create an Gft Action */ ,
+       ETH_RAMROD_RX_DELETE_GFT_FILTER
+           /* RX - Delete an GFT Filter from the Searcher */ ,
+       ETH_RAMROD_RX_ADD_GFT_FILTER
+           /* RX - Add an GFT Filter to the Searcher */ ,
+       MAX_ETH_RAMROD_CMD_ID /* sentinel */
+};
+
+/*
+ * return code from eth sp ramrods
+ */
+struct eth_return_code {
+       u8 value /* packed error code / RX-TX indication - see masks below */;
+#define ETH_RETURN_CODE_ERR_CODE_MASK  0x1F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK  0x3
+#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+#define ETH_RETURN_CODE_RX_TX_MASK     0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT    7
+};
+
+/*
+ * What to do in case an error occurs
+ */
+enum eth_tx_err {
+       ETH_TX_ERR_DROP /* Drop erroneous packet. */ ,
+       ETH_TX_ERR_ASSERT_MALICIOUS
+           /* Assert an interrupt for PF, declare as malicious for VF */ ,
+       MAX_ETH_TX_ERR /* sentinel */
+};
+
+/*
+ * Array of the different error type behaviors
+ */
+struct eth_tx_err_vals {
+       __le16 values /* one eth_tx_err bit per error type - see masks below */;
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK            0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT           0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK             0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT            1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK            0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT           2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK          0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT         3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK  0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK                0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT               5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK        0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT       6
+#define ETH_TX_ERR_VALS_RESERVED_MASK                     0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT                    7
+};
+
+/*
+ * vport rss configuration data
+ */
+struct eth_vport_rss_config {
+       __le16 capabilities /* RSS hash-type capabilities - see masks below */;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK        0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT       0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK        0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT       1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT   2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT   3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT   4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK    0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT   5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK  0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK              0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT             7
+       u8 rss_id /* RSS engine ID */;
+       u8 rss_mode /* The RSS mode for this function */;
+       u8 update_rss_key /* if set update the rss key */;
+       u8 update_rss_ind_table /* if set update the indirection table */;
+       u8 update_rss_capabilities /* if set update the capabilities */;
+       u8 tbl_size /* rss mask (Tbl size) */;
+       __le32 reserved2[2];
+       __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]
+           /* RSS indirection table */;
+       __le32 rss_key[ETH_RSS_KEY_SIZE_REGS] /* RSS key supplied to us by OS */
+          ;
+       __le32 reserved3[2];
+};
+
+/*
+ * eth vport RSS mode
+ */
+enum eth_vport_rss_mode {
+       ETH_VPORT_RSS_MODE_DISABLED /* RSS Disabled */ ,
+       ETH_VPORT_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */ ,
+       MAX_ETH_VPORT_RSS_MODE /* sentinel */
+};
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_rx_mode {
+       __le16 state /* Rx accept/drop classification flags - see masks below */;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT         0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT       1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK  0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK          0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT         3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT       4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK        0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT       5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK               0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT              6
+       __le16 reserved2[3];
+};
+
+/*
+ * Command for setting tpa parameters
+ */
+struct eth_vport_tpa_param {
+       u8 tpa_ipv4_en_flg /* Enable TPA for IPv4 packets */;
+       u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
+       u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
+       u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
+       u8 tpa_pkt_split_flg
+           /* presumably: start each TPA segment on a new BD - confirm */;
+       u8 tpa_hdr_data_split_flg
+           /* If set, put header of first TPA segment on bd and data on SGE */
+          ;
+       u8 tpa_gro_consistent_flg
+           /* If set, GRO data consistent will checked for TPA continue */;
+       u8 tpa_max_aggs_num
+           /* maximum number of opened aggregations per v-port  */;
+       __le16 tpa_max_size /* maximal size for the aggregated TPA packets */;
+       __le16 tpa_min_size_to_start
+           /* minimum TCP payload size for a packet to start aggregation */;
+       __le16 tpa_min_size_to_cont
+           /* minimum TCP payload size for a packet to continue aggregation */
+          ;
+       u8 max_buff_num
+           /* maximal number of buffers that can be used for one aggregation */
+          ;
+       u8 reserved;
+};
+
+/*
+ * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
+ */
+struct eth_vport_tx_mode {
+       __le16 state /* Tx accept/drop classification flags - see masks below */;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK    0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT   0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK    0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT   2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK  0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK         0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT        5
+       __le16 reserved2[3];
+};
+
+/*
+ * Ramrod data for rx add gft filter data
+ */
+struct rx_add_gft_filter_data {
+       struct regpair pkt_hdr_addr /* Packet Header That Defines GFT Filter */
+          ;
+       __le16 action_icid /* ICID of Action to run for this filter */;
+       __le16 pkt_hdr_length /* Packet Header Length */;
+       u8 reserved[4] /* pad structure to a 64-bit boundary */;
+};
+
+/*
+ * Ramrod data for rx add openflow filter
+ */
+struct rx_add_openflow_filter_data {
+       __le16 action_icid /* CID of Action to run for this filter */;
+       u8 priority /* Searcher String - Packet priority */;
+       u8 reserved0;
+       __le32 tenant_id /* Searcher String - Tenant ID */;
+       __le16 dst_mac_hi /* Searcher String - Destination Mac Bytes 0 to 1 */;
+       __le16 dst_mac_mid /* Searcher String - Destination Mac Bytes 2 to 3 */
+          ;
+       __le16 dst_mac_lo /* Searcher String - Destination Mac Bytes 4 to 5 */;
+       __le16 src_mac_hi /* Searcher String - Source Mac 0 to 1 */;
+       __le16 src_mac_mid /* Searcher String - Source Mac 2 to 3 */;
+       __le16 src_mac_lo /* Searcher String - Source Mac 4 to 5 */;
+       __le16 vlan_id /* Searcher String - Vlan ID */;
+       __le16 l2_eth_type /* Searcher String - Last L2 Ethertype */;
+       u8 ipv4_dscp /* Searcher String - IPv4 6 MSBs of the TOS Field */;
+       u8 ipv4_frag_type
+           /* Searcher String - IPv4 Fragmentation Type (eth_ipv4_frag_type) */;
+       u8 ipv4_over_ip /* Searcher String - IPv4 Over IP Type */;
+       u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
+       __le32 ipv4_dst_addr /* Searcher String - IPv4 Destination Address */;
+       __le32 ipv4_src_addr /* Searcher String - IPv4 Source Address */;
+       __le16 l4_dst_port /* Searcher String - TCP/UDP Destination Port */;
+       __le16 l4_src_port /* Searcher String - TCP/UDP Source Port */;
+};
+
+/*
+ * Ramrod data for rx create gft action
+ */
+struct rx_create_gft_action_data {
+       u8 vport_id /* Vport Id of GFT Action  */;
+       u8 reserved[7] /* pad structure to a 64-bit boundary */;
+};
+
+/*
+ * Ramrod data for rx create openflow action
+ */
+struct rx_create_openflow_action_data {
+       u8 vport_id /* ID of vport for this Openflow Action */;
+       u8 reserved[7] /* pad structure to a 64-bit boundary */;
+};
+
+/*
+ * Ramrod data for rx queue start ramrod
+ */
+struct rx_queue_start_ramrod_data {
+       __le16 rx_queue_id /* ID of RX queue */;
+       __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+       __le16 bd_max_bytes /* maximal bytes that can be places on the bd */;
+       __le16 sb_id /* Status block ID */;
+       u8 sb_index /* Status block protocol index */;
+       u8 vport_id /* ID of virtual port */;
+       u8 default_rss_queue_flg /* set queue as default rss queue if set */;
+       u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+       u8 complete_event_flg /* post completion to the event ring if set */;
+       u8 stats_counter_id /* Statistics counter ID */;
+       u8 pin_context /* Pin context in CCFC to improve performance */;
+       u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD/SGE fetch */;
+       u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet placement */
+          ;
+       u8 pxp_st_hint
+           /* PXP command Steering tag hint. Use enum pxp_tph_st_hint */;
+       __le16 pxp_st_index /* PXP command Steering tag index */;
+       u8 pmd_mode
+           /* Indicates that current queue belongs to poll-mode driver */;
+       u8 notify_en;
+       u8 toggle_val
+           /* Initial value for the toggle valid bit - used in PMD mode */;
+       u8 reserved[7];
+       __le16 reserved1 /* FW reserved. */;
+       struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
+       struct regpair bd_base /* bd address of the first bd page */;
+       struct regpair reserved2 /* FW reserved. */;
+};
+
+/*
+ * Ramrod data for rx queue stop ramrod
+ */
+struct rx_queue_stop_ramrod_data {
+       __le16 rx_queue_id /* ID of RX queue */;
+       u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+       u8 complete_event_flg /* post completion to the event ring if set */;
+       u8 vport_id /* ID of virtual port */;
+       u8 reserved[3] /* pad structure to a 64-bit boundary */;
+};
+
+/*
+ * Ramrod data for rx queue update ramrod
+ */
+struct rx_queue_update_ramrod_data {
+       __le16 rx_queue_id /* ID of RX queue */;
+       u8 complete_cqe_flg /* post completion to the CQE ring if set */;
+       u8 complete_event_flg /* post completion to the event ring if set */;
+       u8 vport_id /* ID of virtual port */;
+       u8 reserved[4];
+       u8 reserved1 /* FW reserved. */;
+       u8 reserved2 /* FW reserved. */;
+       u8 reserved3 /* FW reserved. */;
+       __le16 reserved4 /* FW reserved. */;
+       __le16 reserved5 /* FW reserved. */;
+       struct regpair reserved6 /* FW reserved. */;
+};
+
+/*
+ * Ramrod data for rx Add UDP Filter
+ */
+struct rx_udp_filter_data {
+       __le16 action_icid /* CID of Action to run for this filter */;
+       __le16 vlan_id /* Searcher String - Vlan ID */;
+       u8 ip_type /* Searcher String - IP Type */;
+       u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
+       __le16 reserved1;
+       /* Fix: declaration previously ended with a stray ';' before the
+        * comment, leaving a second, empty ';' declaration inside the
+        * struct - invalid in C90/C99 and rejected under -pedantic.
+        */
+       __le32 ip_dst_addr[4]
+           /* Searcher String-IP Dest Addr for IPv4 use ip_dst_addr[0] only */
+          ;
+       __le32 ip_src_addr[4]
+           /* Searcher String-IP Src Addr, for IPv4 use ip_src_addr[0] only */
+          ;
+       __le16 udp_dst_port /* Searcher String - UDP Destination Port */;
+       __le16 udp_src_port /* Searcher String - UDP Source Port */;
+       __le32 tenant_id /* Searcher String - Tenant ID */;
+};
+
+/*
+ * Ramrod data for tx queue start ramrod
+ */
+struct tx_queue_start_ramrod_data {
+       __le16 sb_id /* Status block ID */;
+       u8 sb_index /* Status block protocol index */;
+       u8 vport_id /* VPort ID */;
+       u8 reserved0 /* FW reserved. */;
+       u8 stats_counter_id /* Statistics counter ID to use */;
+       __le16 qm_pq_id /* QM PQ ID */;
+       u8 flags /* see bit-field defines below */;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK  0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK      0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT     1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK      0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT     2
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK               0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT              3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK              0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT             4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK            0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT           5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK              0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT             6
+       u8 pxp_st_hint /* PXP command Steering tag hint. Use enum pxp_tph_st_hint */;
+       u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD fetch */;
+       u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet fetch */;
+       __le16 pxp_st_index /* PXP command Steering tag index */;
+       __le16 comp_agg_size /* TX completion min agg size - for PMD queues */;
+       __le16 queue_zone_id /* queue zone ID to use */;
+       __le16 test_dup_count /* In Test Mode, number of duplications */;
+       __le16 pbl_size /* Number of BD pages pointed by PBL */;
+       __le16 tx_queue_id
+           /* unique Queue ID - currently used only by PMD flow */;
+       struct regpair pbl_base_addr /* address of the pbl page */;
+       struct regpair bd_cons_address
+           /* BD consumer address in host - for PMD queues */;
+};
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct tx_queue_stop_ramrod_data {
+       __le16 reserved[4] /* ramrod carries no parameters */;
+};
+
+/*
+ * Ramrod data for vport filter update ramrod
+ */
+struct vport_filter_update_ramrod_data {
+       struct eth_filter_cmd_header filter_cmd_hdr
+           /* Header for Filter Commands (RX/TX, Add/Remove/Replace, etc) */;
+       struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT]
+           /* Filter Commands - first filter_cmd_hdr.cmd_cnt entries valid */;
+};
+
+/*
+ * Ramrod data for vport start ramrod
+ */
+struct vport_start_ramrod_data {
+       u8 vport_id /* ID of vport to start */;
+       u8 sw_fid;
+       __le16 mtu /* VPort MTU */;
+       u8 drop_ttl0_en /* if set, drop packet with ttl=0 */;
+       u8 inner_vlan_removal_en /* if set, inner VLAN removal is enabled */;
+       struct eth_vport_rx_mode rx_mode /* Rx filter data */;
+       struct eth_vport_tx_mode tx_mode /* Tx filter data */;
+       struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */
+          ;
+       __le16 default_vlan /* Default Vlan value to be forced by FW */;
+       u8 tx_switching_en /* Tx switching is enabled for current Vport */;
+       u8 anti_spoofing_en
+           /* Anti-spoofing verification is set for current Vport */;
+       u8 default_vlan_en
+           /* If set, the default Vlan value is forced by the FW */;
+       u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */
+          ;
+       u8 silent_vlan_removal_en;
+       /* If enable then innerVlan will be striped and not written to cqe */
+       u8 untagged;
+       struct eth_tx_err_vals tx_err_behav
+           /* Desired behavior per TX error type */;
+       u8 zero_placement_offset;
+       u8 reserved[7];
+};
+
+/*
+ * Ramrod data for vport stop ramrod
+ */
+struct vport_stop_ramrod_data {
+       u8 vport_id /* ID of vport to stop */;
+       u8 reserved[7] /* pad structure to a 64-bit boundary */;
+};
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_update_ramrod_data_cmn {
+       u8 vport_id /* ID of vport to update */;
+       u8 update_rx_active_flg /* set if rx active flag should be handled */;
+       u8 rx_active_flg /* rx active flag value */;
+       u8 update_tx_active_flg /* set if tx active flag should be handled */;
+       u8 tx_active_flg /* tx active flag value */;
+       u8 update_rx_mode_flg /* set if rx state data should be handled */;
+       u8 update_tx_mode_flg /* set if tx state data should be handled */;
+       u8 update_approx_mcast_flg
+           /* set if approx. mcast data should be handled */;
+       u8 update_rss_flg /* set if rss data should be handled  */;
+       u8 update_inner_vlan_removal_en_flg
+           /* set if inner_vlan_removal_en should be handled */;
+       u8 inner_vlan_removal_en /* inner vlan removal en value */;
+       u8 update_tpa_param_flg /* set if tpa parameters should be handled */;
+       u8 update_tpa_en_flg /* set if tpa enable changes */;
+       u8 update_tx_switching_en_flg
+           /* set if tx switching en flag should be handled */;
+       u8 tx_switching_en /* tx switching en value */;
+       u8 update_anti_spoofing_en_flg
+           /* set if anti spoofing flag should be handled */;
+       u8 anti_spoofing_en /* Anti-spoofing verification en value */;
+       u8 update_handle_ptp_pkts
+           /* set if handle_ptp_pkts should be handled. */;
+       u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */
+          ;
+       u8 update_default_vlan_en_flg
+           /* If set, the default Vlan enable flag is updated */;
+       u8 default_vlan_en
+           /* If set, the default Vlan value is forced by the FW */;
+       u8 update_default_vlan_flg
+           /* If set, the default Vlan value is updated */;
+       __le16 default_vlan /* Default Vlan value to be forced by FW */;
+       u8 update_accept_any_vlan_flg
+           /* set if accept_any_vlan should be handled */;
+       u8 accept_any_vlan /* accept_any_vlan updated value */;
+       u8 silent_vlan_removal_en;
+       u8 update_mtu_flg
+           /* If set, MTU will be updated. Vport must be not active. */;
+       __le16 mtu /* New MTU value. Used if update_mtu_flg are set */;
+       u8 reserved[2];
+};
+
+/* Approximate multicast filter - bin bitmap (see approx_mcast usage below) */
+struct vport_update_ramrod_mcast {
+       __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS] /* multicast bins */;
+};
+
+/*
+ * Ramrod data for vport update ramrod
+ */
+struct vport_update_ramrod_data {
+       struct vport_update_ramrod_data_cmn common
+           /* Common data for all vport update ramrods */;
+       /* Sub-structures below are handled only when the corresponding
+        * common.update_*_flg field is set.
+        */
+       struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */;
+       struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */;
+       struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */
+          ;
+       struct vport_update_ramrod_mcast approx_mcast;
+       struct eth_vport_rss_config rss_config /* rss config data */;
+};
+
+/*
+ * GFT CAM line struct
+ */
+struct gft_cam_line {
+       __le32 camline /* raw CAM line value - see masks below */;
+#define GFT_CAM_LINE_VALID_MASK      0x1
+#define GFT_CAM_LINE_VALID_SHIFT     0
+#define GFT_CAM_LINE_DATA_MASK       0x3FFF
+#define GFT_CAM_LINE_DATA_SHIFT      1
+#define GFT_CAM_LINE_MASK_BITS_MASK  0x3FFF
+#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
+#define GFT_CAM_LINE_RESERVED1_MASK  0x7
+#define GFT_CAM_LINE_RESERVED1_SHIFT 29
+};
+
+/*
+ * GFT CAM line struct (for driversim use)
+ */
+struct gft_cam_line_mapped {
+       __le32 camline /* field-mapped CAM line value - see masks below */;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK                     0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT                    0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK                0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT               1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK         0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT        2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK       0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT      3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK               0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT              7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK                     0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT                    11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK           0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT          15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK    0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT   16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK  0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK          0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT         21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK                0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT               25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK                 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT                29
+};
+
+/* Two views of the same 32-bit GFT CAM line */
+union gft_cam_line_union {
+       struct gft_cam_line cam_line /* raw data/mask view */;
+       struct gft_cam_line_mapped cam_line_mapped /* per-field view */;
+};
+
+/*
+ * Used in gft_profile_key: Indication for ip version
+ */
+enum gft_profile_ip_version {
+       GFT_PROFILE_IPV4 = 0,
+       GFT_PROFILE_IPV6 = 1,
+       MAX_GFT_PROFILE_IP_VERSION /* sentinel */
+};
+
+/*
+ * Profile key struct for GFT logic in PRS
+ */
+struct gft_profile_key {
+       __le16 profile_key /* packed profile key - see masks below */;
+#define GFT_PROFILE_KEY_IP_VERSION_MASK           0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT          0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK    0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT   1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK  0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK          0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT         6
+#define GFT_PROFILE_KEY_PF_ID_MASK                0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT               10
+#define GFT_PROFILE_KEY_RESERVED0_MASK            0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT           14
+};
+
+/*
+ * Used in gft_profile_key: Indication for tunnel type
+ */
+enum gft_profile_tunnel_type {
+       GFT_PROFILE_NO_TUNNEL = 0,
+       GFT_PROFILE_VXLAN_TUNNEL = 1,
+       GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2,
+       GFT_PROFILE_GRE_IP_TUNNEL = 3,
+       GFT_PROFILE_GENEVE_MAC_TUNNEL = 4,
+       GFT_PROFILE_GENEVE_IP_TUNNEL = 5,
+       MAX_GFT_PROFILE_TUNNEL_TYPE /* sentinel */
+};
+
+/*
+ * Used in gft_profile_key: Indication for protocol type
+ */
+enum gft_profile_upper_protocol_type {
+       GFT_PROFILE_ROCE_PROTOCOL = 0,
+       GFT_PROFILE_RROCE_PROTOCOL = 1,
+       GFT_PROFILE_FCOE_PROTOCOL = 2,
+       GFT_PROFILE_ICMP_PROTOCOL = 3,
+       GFT_PROFILE_ARP_PROTOCOL = 4,
+       GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+       GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+       GFT_PROFILE_TCP_PROTOCOL = 7,
+       GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+       GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+       GFT_PROFILE_UDP_PROTOCOL = 10,
+       GFT_PROFILE_USER_IP_1_INNER = 11,
+       GFT_PROFILE_USER_IP_2_OUTER = 12,
+       GFT_PROFILE_USER_ETH_1_INNER = 13,
+       GFT_PROFILE_USER_ETH_2_OUTER = 14,
+       GFT_PROFILE_RAW = 15,
+       MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE /* sentinel */
+};
+
+/*
+ * GFT RAM line struct
+ */
+struct gft_ram_line {
+       __le32 low32bits /* low 32 bits of the RAM line - see masks below */;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK              0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT             0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK          0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT         2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK     0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT    3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK               0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT              4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK         0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT        5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK          0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT         6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK          0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT         7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK              0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT             8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK  0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK            0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT           10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK            0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT           11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK          0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT         12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK     0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT    13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK              0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT             14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK           0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT          15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK           0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT          16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK            0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT           17
+#define GFT_RAM_LINE_TTL_MASK                      0x1
+#define GFT_RAM_LINE_TTL_SHIFT                     18
+#define GFT_RAM_LINE_ETHERTYPE_MASK                0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT               19
+#define GFT_RAM_LINE_RESERVED0_MASK                0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT               20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT            21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT            22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT            23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT            24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT            25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT            26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT            27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK             0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT            28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK              0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT             29
+#define GFT_RAM_LINE_DST_PORT_MASK                 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT                30
+#define GFT_RAM_LINE_SRC_PORT_MASK                 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT                31
+       __le32 high32bits /* high 32 bits of the RAM line - see masks below */;
+#define GFT_RAM_LINE_DSCP_MASK                     0x1
+#define GFT_RAM_LINE_DSCP_SHIFT                    0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK         0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT        1
+#define GFT_RAM_LINE_DST_IP_MASK                   0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT                  2
+#define GFT_RAM_LINE_SRC_IP_MASK                   0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT                  3
+#define GFT_RAM_LINE_PRIORITY_MASK                 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT                4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK            0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT           5
+#define GFT_RAM_LINE_VLAN_MASK                     0x1
+#define GFT_RAM_LINE_VLAN_SHIFT                    6
+#define GFT_RAM_LINE_DST_MAC_MASK                  0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT                 7
+#define GFT_RAM_LINE_SRC_MAC_MASK                  0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT                 8
+#define GFT_RAM_LINE_TENANT_ID_MASK                0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT               9
+#define GFT_RAM_LINE_RESERVED1_MASK                0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT               10
+};
+
+/*
+ * Used in the first 2 bits for gft_ram_line: Indication for vlan mask
+ */
+enum gft_vlan_select {
+       INNER_PROVIDER_VLAN = 0,
+       INNER_VLAN = 1,
+       OUTER_PROVIDER_VLAN = 2,
+       OUTER_VLAN = 3,
+       MAX_GFT_VLAN_SELECT /* sentinel */
+};
+
+/* Mstorm aggregation context for ETH connections */
+struct mstorm_eth_conn_ag_ctx {
+       u8 byte0 /* cdu_validation */;
+       u8 byte1 /* state */;
+       u8 flags0 /* see bit-field defines below */;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK          0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK           0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK           0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK           0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+       u8 flags1 /* see bit-field defines below */;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK         0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK         0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK         0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK       0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+       __le16 word0 /* word0 */;
+       __le16 word1 /* word1 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+};
+
+/* @DPDK: xstormEthConnAgCtxDqExtLdPart */
+/*
+ * XStorm Ethernet connection aggregation context -- DQ extended load part.
+ * flags0..flags14 are bit-packed; each logical field is accessed with its
+ * _MASK/_SHIFT macro pair.  The layout matches xstorm_eth_hw_conn_ag_ctx up
+ * to conn_dpi and adds trailing scratch fields (byte3..byte6, reg0..reg4).
+ * Firmware ABI -- do not reorder or resize members.
+ */
+struct xstorm_eth_conn_ag_ctx_dq_ext_ld_part {
+       u8 reserved0 /* cdu_validation */;
+       u8 eth_state /* state */;
+       u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT           0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT              5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT              6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT              0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK               0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK          0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT           7
+       u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK                     0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT                   0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK                    0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK                   0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK            0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK                0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK              0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT             2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK               0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                  7
+       u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                  0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                  1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                  2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK                   0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK                  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK         0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT        1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK             0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT            2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK              0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT             3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK              0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT             6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK              0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT             7
+       u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK              0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT             0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK              0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT             1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK          0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK                 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK                 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK                 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK                 0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT               4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT               5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT               6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK                0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT           5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK            0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT           7
+       u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK          0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK        0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK              0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT             6
+       u8 edpm_event_id /* byte2 */;
+       __le16 physical_q0 /* physical_q0 */;
+       __le16 word1 /* physical_q1 */;
+       __le16 edpm_num_bds /* physical_q2 */;
+       __le16 tx_bd_cons /* word3 */;
+       __le16 tx_bd_prod /* word4 */;
+       __le16 go_to_bd_cons /* word5 */;
+       __le16 conn_dpi /* conn_dpi */;
+       u8 byte3 /* byte3 */;
+       u8 byte4 /* byte4 */;
+       u8 byte5 /* byte5 */;
+       u8 byte6 /* byte6 */;
+       __le32 reg0 /* reg0 */;
+       __le32 reg1 /* reg1 */;
+       __le32 reg2 /* reg2 */;
+       __le32 reg3 /* reg3 */;
+       __le32 reg4 /* reg4 */;
+};
+
+/*
+ * XStorm Ethernet connection aggregation context -- HW variant.
+ * Same bit-packed flags0..flags14 layout as
+ * xstorm_eth_conn_ag_ctx_dq_ext_ld_part (visible below/above), but the
+ * struct ends at conn_dpi with no trailing byte/reg scratch fields.
+ * Firmware ABI -- do not reorder or resize members.
+ */
+struct xstorm_eth_hw_conn_ag_ctx {
+       u8 reserved0 /* cdu_validation */;
+       u8 eth_state /* state */;
+       u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+       u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK               0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+       u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+       u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+       u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK                     0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+       u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK                    0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+       u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK        0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK                   0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK            0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+       u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK                0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK              0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK               0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+       u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK                   0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+       u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK                  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK     0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+       u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK  0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+       u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK              0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK                 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+       u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+       u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK                0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK            0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+       u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK      0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK    0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK          0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK        0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK              0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+       u8 edpm_event_id /* byte2 */;
+       __le16 physical_q0 /* physical_q0 */;
+       __le16 word1 /* physical_q1 */;
+       __le16 edpm_num_bds /* physical_q2 */;
+       __le16 tx_bd_cons /* word3 */;
+       __le16 tx_bd_prod /* word4 */;
+       __le16 go_to_bd_cons /* word5 */;
+       __le16 conn_dpi /* conn_dpi */;
+};
+
+#endif /* __ECORE_HSI_ETH__ */
diff --git a/drivers/net/qede/ecore/ecore_hsi_tools.h b/drivers/net/qede/ecore/ecore_hsi_tools.h
new file mode 100644
index 0000000..0f0cbdb
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_hsi_tools.h
@@ -0,0 +1,1081 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_TOOLS__
+#define __ECORE_HSI_TOOLS__
+/**********************************/
+/* Tools HSI constants and macros */
+/**********************************/
+
+/*********************************** Init ************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS                  23
+#define MAX_GRC_ADDR                   ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID                   0xffff
+
+/* init pattern size in bytes */
+#define INIT_PATTERN_SIZE_BITS 4
+#define MAX_INIT_PATTERN_SIZE  (1 << INIT_PATTERN_SIZE_BITS)
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE                        8192
+
+/* Global PXP window */
+#define NUM_OF_PXP_WIN                 19
+#define PXP_WIN_DWORD_SIZE_BITS        10
+#define PXP_WIN_DWORD_SIZE             (1 << PXP_WIN_DWORD_SIZE_BITS)
+#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
+#define PXP_WIN_BYTE_SIZE              (PXP_WIN_DWORD_SIZE * 4)
+
+/********************************* GRC Dump **********************************/
+
+/* width of GRC dump register sequence length in bits */
+#define DUMP_SEQ_LEN_BITS                      8
+#define DUMP_SEQ_LEN_MAX_VAL           ((1 << DUMP_SEQ_LEN_BITS) - 1)
+
+/* width of GRC dump memory length in bits */
+#define DUMP_MEM_LEN_BITS                      18
+#define DUMP_MEM_LEN_MAX_VAL           ((1 << DUMP_MEM_LEN_BITS) - 1)
+
+/* width of register type ID in bits */
+#define REG_TYPE_ID_BITS                       6
+#define REG_TYPE_ID_MAX_VAL                    ((1 << REG_TYPE_ID_BITS) - 1)
+
+/* width of block ID in bits */
+#define BLOCK_ID_BITS                          8
+#define BLOCK_ID_MAX_VAL                       ((1 << BLOCK_ID_BITS) - 1)
+
+/******************************** Idle Check *********************************/
+
+/* max number of idle check predicate immediates */
+#define MAX_IDLE_CHK_PRED_IMM          3
+
+/* max number of idle check argument registers */
+#define MAX_IDLE_CHK_READ_REGS         3
+
+/* max number of idle check loops */
+#define MAX_IDLE_CHK_LOOPS                     0x10000
+
+/* max idle check address increment */
+#define MAX_IDLE_CHK_INCREMENT         0x10000
+
+/* indicates an undefined idle check line index */
+#define IDLE_CHK_UNDEFINED_LINE_IDX    0xffffff
+
+/* max number of register values following the idle check header for LSI */
+#define IDLE_CHK_MAX_LSI_DUMP_REGS     2
+
+/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
+#define IDLE_CHK_QM_RD_WR_PTR          0
+#define IDLE_CHK_QM_RD_WR_BANK         1
+
+/**************************************/
+/* HSI Functions constants and macros */
+/**************************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES                 8
+
+/* the MCP Trace meta data signature is duplicated in the
+ * perl script that generates the NVRAM images
+ */
+#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
+
+/* Maximal number of RAM lines occupied by FW Asserts data */
+#define MAX_FW_ASSERTS_RAM_LINES               800
+
+/*
+ * Binary buffer header
+ */
+struct bin_buffer_hdr {
+       __le32 offset
+           /* buffer offset in bytes from the beginning of the binary file */;
+       __le32 length /* buffer length in bytes */;
+};
+
+/*
+ * binary buffer types
+ */
+enum bin_buffer_type {
+       BIN_BUF_FW_VER_INFO /* fw_ver_info struct */ ,
+       BIN_BUF_INIT_CMD /* init commands */ ,
+       BIN_BUF_INIT_VAL /* init data */ ,
+       BIN_BUF_INIT_MODE_TREE /* init modes tree */ ,
+       BIN_BUF_IRO /* internal RAM offsets array */ ,
+       MAX_BIN_BUFFER_TYPE
+};
+
+/*
+ * Chip IDs
+ */
+enum chip_ids {
+       CHIP_BB_A0 /* BB A0 chip ID */ ,
+       CHIP_BB_B0 /* BB B0 chip ID */ ,
+       CHIP_K2 /* AH chip ID */ ,
+       MAX_CHIP_IDS
+};
+
+/*
+ * memory dump descriptor
+ */
+struct dbg_dump_mem_desc {
+       __le32 dword0;
+#define DBG_DUMP_MEM_DESC_ADDRESS_MASK         0xFFFFFF
+#define DBG_DUMP_MEM_DESC_ADDRESS_SHIFT        0
+#define DBG_DUMP_MEM_DESC_ASIC_CHIP_MASK_MASK  0xF
+#define DBG_DUMP_MEM_DESC_ASIC_CHIP_MASK_SHIFT 24
+#define DBG_DUMP_MEM_DESC_SIM_CHIP_MASK_MASK   0xF
+#define DBG_DUMP_MEM_DESC_SIM_CHIP_MASK_SHIFT  28
+       __le32 dword1;
+#define DBG_DUMP_MEM_DESC_LENGTH_MASK          0x3FFFF
+#define DBG_DUMP_MEM_DESC_LENGTH_SHIFT         0
+#define DBG_DUMP_MEM_DESC_REG_TYPE_ID_MASK     0x3F
+#define DBG_DUMP_MEM_DESC_REG_TYPE_ID_SHIFT    18
+#define DBG_DUMP_MEM_DESC_BLOCK_ID_MASK        0xFF
+#define DBG_DUMP_MEM_DESC_BLOCK_ID_SHIFT       24
+};
+
+/*
+ * registers dump descriptor: chip
+ */
+struct dbg_dump_regs_chip_desc {
+       __le32 data;
+#define DBG_DUMP_REGS_CHIP_DESC_IS_CHIP_MASK_MASK    0x1
+#define DBG_DUMP_REGS_CHIP_DESC_IS_CHIP_MASK_SHIFT   0
+#define DBG_DUMP_REGS_CHIP_DESC_ASIC_CHIP_MASK_MASK  0x7FFFFF
+#define DBG_DUMP_REGS_CHIP_DESC_ASIC_CHIP_MASK_SHIFT 1
+#define DBG_DUMP_REGS_CHIP_DESC_SIM_CHIP_MASK_MASK   0xFF
+#define DBG_DUMP_REGS_CHIP_DESC_SIM_CHIP_MASK_SHIFT  24
+};
+
+/*
+ * registers dump descriptor: raw
+ */
+struct dbg_dump_regs_raw_desc {
+       __le32 data;
+#define DBG_DUMP_REGS_RAW_DESC_IS_CHIP_MASK_MASK  0x1
+#define DBG_DUMP_REGS_RAW_DESC_IS_CHIP_MASK_SHIFT 0
+#define DBG_DUMP_REGS_RAW_DESC_PARAM1_MASK        0x7FFFFF
+#define DBG_DUMP_REGS_RAW_DESC_PARAM1_SHIFT       1
+#define DBG_DUMP_REGS_RAW_DESC_PARAM2_MASK        0xFF
+#define DBG_DUMP_REGS_RAW_DESC_PARAM2_SHIFT       24
+};
+
+/*
+ * registers dump descriptor: sequence
+ */
+struct dbg_dump_regs_seq_desc {
+       __le32 data;
+#define DBG_DUMP_REGS_SEQ_DESC_IS_CHIP_MASK_MASK  0x1
+#define DBG_DUMP_REGS_SEQ_DESC_IS_CHIP_MASK_SHIFT 0
+#define DBG_DUMP_REGS_SEQ_DESC_ADDRESS_MASK       0x7FFFFF
+#define DBG_DUMP_REGS_SEQ_DESC_ADDRESS_SHIFT      1
+#define DBG_DUMP_REGS_SEQ_DESC_LENGTH_MASK        0xFF
+#define DBG_DUMP_REGS_SEQ_DESC_LENGTH_SHIFT       24
+};
+
+/*
+ * registers dump descriptor
+ */
+union dbg_dump_regs_desc {
+       struct dbg_dump_regs_raw_desc raw /* dumped registers raw descriptor */
+          ;
+       struct dbg_dump_regs_seq_desc seq /* dumped registers seq descriptor */
+          ;
+       struct dbg_dump_regs_chip_desc chip
+           /* dumped registers chip descriptor */;
+};
+
+/*
+ * idle check macro types
+ */
+enum idle_chk_macro_types {
+       IDLE_CHK_MACRO_TYPE_COMPARE /* parametric register comparison */ ,
+       IDLE_CHK_MACRO_TYPE_QM_RD_WR /* compare QM r/w pointers and banks */ ,
+       MAX_IDLE_CHK_MACRO_TYPES
+};
+
+/*
+ * Idle Check result header
+ */
+struct idle_chk_result_hdr {
+       __le16 rule_idx /* Idle check rule index in CSV file */;
+       __le16 loop_idx /* the loop index in which the failure occurred */;
+       __le16 num_fw_values
+           /* number of FW register values following this header - confirm */;
+       __le16 data;
+#define IDLE_CHK_RESULT_HDR_NUM_LSI_VALUES_MASK  0xF
+#define IDLE_CHK_RESULT_HDR_NUM_LSI_VALUES_SHIFT 0
+#define IDLE_CHK_RESULT_HDR_LOOP_VALID_MASK      0x1
+#define IDLE_CHK_RESULT_HDR_LOOP_VALID_SHIFT     4
+#define IDLE_CHK_RESULT_HDR_SEVERITY_MASK        0x7
+#define IDLE_CHK_RESULT_HDR_SEVERITY_SHIFT       5
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_MASK      0xF
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_SHIFT     8
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_ARG_MASK  0xF
+#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_ARG_SHIFT 12
+};
+
+/*
+ * Idle Check rule
+ */
+struct idle_chk_rule {
+       __le32 data;
+#define IDLE_CHK_RULE_ASIC_CHIP_MASK_MASK  0xF
+#define IDLE_CHK_RULE_ASIC_CHIP_MASK_SHIFT 0
+#define IDLE_CHK_RULE_SIM_CHIP_MASK_MASK   0xF
+#define IDLE_CHK_RULE_SIM_CHIP_MASK_SHIFT  4
+#define IDLE_CHK_RULE_BLOCK_ID_MASK        0xFF
+#define IDLE_CHK_RULE_BLOCK_ID_SHIFT       8
+#define IDLE_CHK_RULE_MACRO_TYPE_MASK      0xF
+#define IDLE_CHK_RULE_MACRO_TYPE_SHIFT     16
+#define IDLE_CHK_RULE_SEVERITY_MASK        0x7
+#define IDLE_CHK_RULE_SEVERITY_SHIFT       20
+#define IDLE_CHK_RULE_RESERVED_MASK        0x1
+#define IDLE_CHK_RULE_RESERVED_SHIFT       23
+#define IDLE_CHK_RULE_PRED_ID_MASK         0xFF
+#define IDLE_CHK_RULE_PRED_ID_SHIFT        24
+       __le16 loop;
+       __le16 increment
+           /* address increment of first argument register on each iteration */
+          ;
+       __le32 reg_addr[3];
+       __le32 pred_imm[3]
+           /* immediate values passed as arguments to the idle check rule */;
+};
+
+/*
+ * idle check severity types
+ */
+enum idle_chk_severity_types {
+       IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */ ,
+       IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC
+           /* failure is an error only when no traffic is pending - confirm */
+           ,
+       IDLE_CHK_SEVERITY_WARNING
+           /* idle check failure should cause a warning */ ,
+       MAX_IDLE_CHK_SEVERITY_TYPES
+};
+
+/*
+ * init array header: raw
+ */
+struct init_array_raw_hdr {
+       __le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK    0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT   0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK  0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+/*
+ * init array header: standard
+ */
+struct init_array_standard_hdr {
+       __le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK  0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK  0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: zipped
+ */
+struct init_array_zipped_hdr {
+       __le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK         0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT        0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK  0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: pattern
+ */
+struct init_array_pattern_hdr {
+       __le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK          0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT         0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK  0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK   0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT  8
+};
+
+/*
+ * init array header union
+ */
+union init_array_hdr {
+       struct init_array_raw_hdr raw /* raw init array header */;
+       struct init_array_standard_hdr standard /* standard init array header */
+          ;
+       struct init_array_zipped_hdr zipped /* zipped init array header */;
+       struct init_array_pattern_hdr pattern /* pattern init array header */;
+};
+
+/*
+ * init array types
+ */
+enum init_array_types {
+       INIT_ARR_STANDARD /* standard init array */ ,
+       INIT_ARR_ZIPPED /* zipped init array */ ,
+       INIT_ARR_PATTERN /* a repeated pattern */ ,
+       MAX_INIT_ARRAY_TYPES
+};
+
+/*
+ * init operation: callback
+ */
+struct init_callback_op {
+       __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK        0xF
+#define INIT_CALLBACK_OP_OP_SHIFT       0
+#define INIT_CALLBACK_OP_RESERVED_MASK  0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+       __le16 callback_id /* Callback ID */;
+       __le16 block_id /* Blocks ID */;
+};
+
+/*
+ * init operation: delay
+ */
+struct init_delay_op {
+       __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK        0xF
+#define INIT_DELAY_OP_OP_SHIFT       0
+#define INIT_DELAY_OP_RESERVED_MASK  0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+       __le32 delay /* delay in us */;
+};
+
+/*
+ * init operation: if_mode
+ */
+struct init_if_mode_op {
+       __le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK          0xF
+#define INIT_IF_MODE_OP_OP_SHIFT         0
+#define INIT_IF_MODE_OP_RESERVED1_MASK   0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT  4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK  0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+       __le16 reserved2;
+       __le16 modes_buf_offset
+           /* offset (in bytes) in modes expression buffer */;
+};
+
+/*
+ * init operation: if_phase
+ */
+struct init_if_phase_op {
+       __le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK           0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT          0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK  0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK    0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT   5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK   0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT  16
+       __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK        0xFF
+#define INIT_IF_PHASE_OP_PHASE_SHIFT       0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK    0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT   8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK     0xFFFF
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT    16
+};
+
+/*
+ * init mode operators
+ */
+enum init_mode_ops {
+       INIT_MODE_OP_NOT /* init mode not operator */ ,
+       INIT_MODE_OP_OR /* init mode or operator */ ,
+       INIT_MODE_OP_AND /* init mode and operator */ ,
+       MAX_INIT_MODE_OPS
+};
+
+/*
+ * init operation: raw
+ */
+struct init_raw_op {
+       __le32 op_data;
+#define INIT_RAW_OP_OP_MASK      0xF
+#define INIT_RAW_OP_OP_SHIFT     0
+#define INIT_RAW_OP_PARAM1_MASK  0xFFFFFFF
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+       __le32 param2 /* Init param 2 */;
+};
+
+/*
+ * init array params
+ */
+struct init_op_array_params {
+       __le16 size /* array size in dwords */;
+       __le16 offset /* array start offset in dwords */;
+};
+
+/*
+ * Write init operation arguments
+ */
+union init_write_args {
+       __le32 inline_val
+           /* value to write, used when init source is INIT_SRC_INLINE */;
+       __le32 zeros_count;
+       __le32 array_offset
+           /* array offset to write, used when init source is INIT_SRC_ARRAY */
+          ;
+       struct init_op_array_params runtime;
+};
+
+/*
+ * init operation: write
+ */
+struct init_write_op {
+       __le32 data;
+#define INIT_WRITE_OP_OP_MASK        0xF
+#define INIT_WRITE_OP_OP_SHIFT       0
+#define INIT_WRITE_OP_SOURCE_MASK    0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT   4
+#define INIT_WRITE_OP_RESERVED_MASK  0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK  0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK   0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT  9
+       union init_write_args args /* Write init operation arguments */;
+};
+
+/*
+ * init operation: read
+ */
+struct init_read_op {
+       __le32 op_data;
+#define INIT_READ_OP_OP_MASK         0xF
+#define INIT_READ_OP_OP_SHIFT        0
+#define INIT_READ_OP_POLL_TYPE_MASK  0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK   0x1
+#define INIT_READ_OP_RESERVED_SHIFT  8
+#define INIT_READ_OP_ADDRESS_MASK    0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT   9
+       __le32 expected_val
+           /* expected polling value, used only when polling is done */;
+};
+
+/*
+ * Init operations union
+ */
+union init_op {
+       struct init_raw_op raw /* raw init operation */;
+       struct init_write_op write /* write init operation */;
+       struct init_read_op read /* read init operation */;
+       struct init_if_mode_op if_mode /* if_mode init operation */;
+       struct init_if_phase_op if_phase /* if_phase init operation */;
+       struct init_callback_op callback /* callback init operation */;
+       struct init_delay_op delay /* delay init operation */;
+};
+
+/*
+ * Init command operation types
+ */
+enum init_op_types {
+       INIT_OP_READ /* GRC read init command */ ,
+       INIT_OP_WRITE /* GRC write init command */ ,
+       INIT_OP_IF_MODE
+           /* Skip init commands if the init modes expression doesn't match */ ,
+       INIT_OP_IF_PHASE
+           /* Skip init commands if the init phase doesn't match */
+           ,
+       INIT_OP_DELAY /* delay init command */ ,
+       INIT_OP_CALLBACK /* callback init command */ ,
+       MAX_INIT_OP_TYPES
+};
+
+/*
+ * init polling types
+ * NOTE(review): the original per-value comments were copy-pasted from
+ * enum init_source_types and did not describe polling; replaced below.
+ */
+enum init_poll_types {
+       INIT_POLL_NONE /* No polling */ ,
+       INIT_POLL_EQ /* poll until the read value equals expected_val */ ,
+       INIT_POLL_OR /* poll using a bitwise-OR comparison - confirm exact op */ ,
+       INIT_POLL_AND /* poll using a bitwise-AND comparison - confirm exact op */ ,
+       MAX_INIT_POLL_TYPES
+};
+
+/*
+ * init source types
+ */
+enum init_source_types {
+       INIT_SRC_INLINE /* init value is included in the init command */ ,
+       INIT_SRC_ZEROS /* init value is all zeros */ ,
+       INIT_SRC_ARRAY /* init value is an array of values */ ,
+       INIT_SRC_RUNTIME /* init value is provided during runtime */ ,
+       MAX_INIT_SOURCE_TYPES
+};
+
+/*
+ * Internal RAM Offsets macro data
+ */
+struct iro {
+       __le32 base /* RAM field offset */;
+       __le16 m1 /* multiplier 1 */;
+       __le16 m2 /* multiplier 2 */;
+       __le16 m3 /* multiplier 3 */;
+       __le16 size /* RAM field size */;
+};
+
+/*
+ * register descriptor
+ */
+struct reg_desc {
+       __le32 data;
+#define REG_DESC_ADDRESS_MASK  0xFFFFFF
+#define REG_DESC_ADDRESS_SHIFT 0
+#define REG_DESC_SIZE_MASK     0xFF
+#define REG_DESC_SIZE_SHIFT    24
+};
+
+/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block_data {
+       u8 enabled /* Indicates if the block is enabled for recording (0/1) */;
+       u8 hw_id /* HW ID associated with the block */;
+       u8 line_num /* Debug line number to select */;
+       u8 right_shift /* Number of units to right-shift the debug data (0-3) */;
+       u8 cycle_en /* 4-bit value: bit i set -> unit i is enabled. */;
+       u8 force_valid /* 4-bit value: bit i set -> unit i is forced valid. */;
+       u8 force_frame
+           /* 4-bit value: bit i set -> unit i frame bit is forced. */;
+       u8 reserved;
+};
+
+/*
+ * Debug Bus Clients
+ */
+enum dbg_bus_clients {
+       DBG_BUS_CLIENT_RBCN,
+       DBG_BUS_CLIENT_RBCP,
+       DBG_BUS_CLIENT_RBCR,
+       DBG_BUS_CLIENT_RBCT,
+       DBG_BUS_CLIENT_RBCU,
+       DBG_BUS_CLIENT_RBCF,
+       DBG_BUS_CLIENT_RBCX,
+       DBG_BUS_CLIENT_RBCS,
+       DBG_BUS_CLIENT_RBCH,
+       DBG_BUS_CLIENT_RBCZ,
+       DBG_BUS_CLIENT_OTHER_ENGINE,
+       DBG_BUS_CLIENT_TIMESTAMP,
+       DBG_BUS_CLIENT_CPU,
+       DBG_BUS_CLIENT_RBCY,
+       DBG_BUS_CLIENT_RBCQ,
+       DBG_BUS_CLIENT_RBCM,
+       DBG_BUS_CLIENT_RBCB,
+       DBG_BUS_CLIENT_RBCW,
+       DBG_BUS_CLIENT_RBCV,
+       MAX_DBG_BUS_CLIENTS
+};
+
+/*
+ * Debug Bus constraint operation types
+ */
+enum dbg_bus_constraint_ops {
+       DBG_BUS_CONSTRAINT_OP_EQ /* equal */ ,
+       DBG_BUS_CONSTRAINT_OP_NE /* not equal */ ,
+       DBG_BUS_CONSTRAINT_OP_LT /* less than */ ,
+       DBG_BUS_CONSTRAINT_OP_LTC /* less than (cyclic) */ ,
+       DBG_BUS_CONSTRAINT_OP_LE /* less than or equal */ ,
+       DBG_BUS_CONSTRAINT_OP_LEC /* less than or equal (cyclic) */ ,
+       DBG_BUS_CONSTRAINT_OP_GT /* greater than */ ,
+       DBG_BUS_CONSTRAINT_OP_GTC /* greater than (cyclic) */ ,
+       DBG_BUS_CONSTRAINT_OP_GE /* greater than or equal */ ,
+       DBG_BUS_CONSTRAINT_OP_GEC /* greater than or equal (cyclic) */ ,
+       MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+/*
+ * Debug Bus memory address
+ */
+struct dbg_bus_mem_addr {
+       __le32 lo;
+       __le32 hi;
+};
+
+/*
+ * Debug Bus PCI buffer data
+ */
+struct dbg_bus_pci_buf_data {
+       struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */;
+       struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */;
+       __le32 size /* PCI buffer size in bytes */;
+};
+
+/*
+ * Debug Bus Storm EID range filter params
+ */
+struct dbg_bus_storm_eid_range_params {
+       u8 min /* Minimal event ID to filter on */;
+       u8 max /* Maximal event ID to filter on */;
+};
+
+/*
+ * Debug Bus Storm EID mask filter params
+ */
+struct dbg_bus_storm_eid_mask_params {
+       u8 val /* Event ID value */;
+       u8 mask /* Event ID mask. 1s in the mask = don't-care bits. */;
+};
+
+/*
+ * Debug Bus Storm EID filter params
+ */
+union dbg_bus_storm_eid_params {
+       struct dbg_bus_storm_eid_range_params range
+           /* EID range filter params */;
+       struct dbg_bus_storm_eid_mask_params mask /* EID mask filter params */;
+};
+
+/*
+ * Debug Bus Storm data
+ */
+struct dbg_bus_storm_data {
+       u8 fast_enabled;
+       u8 fast_mode
+           /* Fast debug Storm mode, valid only if fast_enabled is set */;
+       u8 slow_enabled;
+       u8 slow_mode
+           /* Slow debug Storm mode, valid only if slow_enabled is set */;
+       u8 hw_id /* HW ID associated with the Storm */;
+       u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
+       u8 eid_range_not_mask;
+       u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
+       union dbg_bus_storm_eid_params eid_filter_params;
+       __le16 reserved;
+       __le32 cid /* CID to filter on. Valid only if cid_filter_en is set. */;
+};
+
+/*
+ * Debug Bus data
+ */
+struct dbg_bus_data {
+       __le32 app_version /* The tools version number of the application */;
+       u8 state /* The current debug bus state */;
+       u8 hw_dwords /* HW dwords per cycle */;
+       u8 next_hw_id /* Next HW ID to be associated with an input */;
+       u8 num_enabled_blocks /* Number of blocks enabled for recording */;
+       u8 num_enabled_storms /* Number of Storms enabled for recording */;
+       u8 target /* Output target */;
+       u8 next_trigger_state /* ID of next trigger state to be added */;
+       u8 next_constraint_id
+           /* ID of next filter/trigger constraint to be added */;
+       u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
+       u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
+       u8 timestamp_input_en
+           /* Indicates if timestamp recording is enabled (0/1) */;
+       u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
+       u8 trigger_en /* Indicates if the recording trigger is enabled (0/1) */
+          ;
+       u8 adding_filter;
+       u8 filter_pre_trigger;
+       u8 filter_post_trigger;
+       u8 unify_inputs;
+       u8 rcv_from_other_engine;
+       struct dbg_bus_pci_buf_data pci_buf;
+       __le16 reserved;
+       struct dbg_bus_block_data blocks[80] /* Debug Bus data for each block */
+          ;
+       struct dbg_bus_storm_data storms[6] /* Debug Bus data for each Storm */
+          ;
+};
+
+/*
+ * Debug bus filter types
+ */
+enum dbg_bus_filter_types {
+       DBG_BUS_FILTER_TYPE_OFF /* filter always off */ ,
+       DBG_BUS_FILTER_TYPE_PRE /* filter before trigger only */ ,
+       DBG_BUS_FILTER_TYPE_POST /* filter after trigger only */ ,
+       DBG_BUS_FILTER_TYPE_ON /* filter always on */ ,
+       MAX_DBG_BUS_FILTER_TYPES
+};
+
+/*
+ * Debug bus frame modes
+ */
+enum dbg_bus_frame_modes {
+       DBG_BUS_FRAME_MODE_0HW_4ST = 0 /* 0 HW dwords, 4 Storm dwords */ ,
+       DBG_BUS_FRAME_MODE_4HW_0ST = 3 /* 4 HW dwords, 0 Storm dwords */ ,
+       DBG_BUS_FRAME_MODE_8HW_0ST = 4 /* 8 HW dwords, 0 Storm dwords */ ,
+       MAX_DBG_BUS_FRAME_MODES
+};
+
+/*
+ * Debug bus input types
+ */
+enum dbg_bus_input_types {
+       DBG_BUS_INPUT_TYPE_STORM,
+       DBG_BUS_INPUT_TYPE_BLOCK,
+       MAX_DBG_BUS_INPUT_TYPES
+};
+
+/*
+ * Debug bus other engine mode
+ */
+enum dbg_bus_other_engine_modes {
+       DBG_BUS_OTHER_ENGINE_MODE_NONE,
+       DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+       DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+       DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+       DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+       MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+/*
+ * Debug bus post-trigger recording types
+ */
+enum dbg_bus_post_trigger_types {
+       DBG_BUS_POST_TRIGGER_RECORD /* start recording after trigger */ ,
+       DBG_BUS_POST_TRIGGER_DROP /* drop data after trigger */ ,
+       MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+/*
+ * Debug bus pre-trigger recording types
+ */
+enum dbg_bus_pre_trigger_types {
+       DBG_BUS_PRE_TRIGGER_START_FROM_ZERO /* start recording from time 0 */ ,
+       DBG_BUS_PRE_TRIGGER_NUM_CHUNKS
+           /* start recording some chunks before trigger */ ,
+       DBG_BUS_PRE_TRIGGER_DROP /* drop data before trigger */ ,
+       MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+/*
+ * Debug bus SEMI frame modes
+ */
+enum dbg_bus_semi_frame_modes {
+       DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST =
+           0 /* 0 slow dwords, 4 fast dwords */ ,
+       DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST =
+           3 /* 4 slow dwords, 0 fast dwords */ ,
+       MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
+/*
+ * Debug bus states
+ */
+enum dbg_bus_states {
+       DBG_BUS_STATE_BEFORE_RECORD /* before the debug bus recording starts */
+           ,
+       DBG_BUS_STATE_DURING_RECORD /* during debug bus recording */ ,
+       DBG_BUS_STATE_AFTER_RECORD /* after debug bus recording */ ,
+       MAX_DBG_BUS_STATES
+};
+
+/*
+ * Debug Bus Storm modes
+ */
+enum dbg_bus_storm_modes {
+       DBG_BUS_STORM_MODE_PRINTF /* store data (fast debug) */ ,
+       DBG_BUS_STORM_MODE_PRAM_ADDR /* pram address (fast debug) */ ,
+       DBG_BUS_STORM_MODE_DRA_RW /* DRA read/write data (fast debug) */ ,
+       DBG_BUS_STORM_MODE_DRA_W /* DRA write data (fast debug) */ ,
+       DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */ ,
+       DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */ ,
+       DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */ ,
+       DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */ ,
+       DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */ ,
+       MAX_DBG_BUS_STORM_MODES
+};
+
+/*
+ * Debug bus target IDs
+ */
+enum dbg_bus_targets {
+       DBG_BUS_TARGET_ID_INT_BUF
+           /* records debug bus to DBG block internal buffer */ ,
+       DBG_BUS_TARGET_ID_NIG /* records debug bus to the NW */ ,
+       DBG_BUS_TARGET_ID_PCI /* records debug bus to a PCI buffer */ ,
+       MAX_DBG_BUS_TARGETS
+};
+
+/*
+ * GRC Dump data
+ */
+struct dbg_grc_data {
+       u8 is_updated /* Indicates if the GRC Dump data is updated (0/1) */;
+       u8 chip_id /* Chip ID */;
+       u8 chip_mask /* Chip mask */;
+       u8 reserved;
+       __le32 max_dump_dwords /* Max GRC Dump size in dwords */;
+       __le32 param_val[40];
+       u8 param_set_by_user[40];
+};
+
+/*
+ * Debug GRC params
+ */
+enum dbg_grc_params {
+       DBG_GRC_PARAM_DUMP_TSTORM /* dump Tstorm memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_MSTORM /* dump Mstorm memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_USTORM /* dump Ustorm memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_XSTORM /* dump Xstorm memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_YSTORM /* dump Ystorm memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_PSTORM /* dump Pstorm memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_REGS /* dump non-memory registers (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_RAM /* dump Storm internal RAMs (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_PBUF /* dump Storm passive buffer (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_IOR /* dump Storm IORs (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_VFC /* dump VFC memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_CM_CTX /* dump CM contexts (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_PXP /* dump PXP memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_RSS /* dump RSS memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */ ,
+       DBG_GRC_PARAM_RESERVED /* reserved */ ,
+       DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_NIG /* dump NIG memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_DMAE /* dump DMAE memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_TM /* dump TM (timers) memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_SDM /* dump SDM memories (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */ ,
+       DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */ ,
+       DBG_GRC_PARAM_NUM_LCIDS /* number of LCIDs (0..320) */ ,
+       DBG_GRC_PARAM_NUM_LTIDS /* number of LTIDs (0..320) */ ,
+       DBG_GRC_PARAM_EXCLUDE_ALL
+           /* preset: exclude all memories from dump (1 only) */ ,
+       DBG_GRC_PARAM_CRASH
+           /* preset: include memories for crash dump (1 only) */ ,
+       DBG_GRC_PARAM_PARITY_SAFE
+           /* perform dump only if MFW is responding (0/1) */ ,
+       DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */ ,
+       MAX_DBG_GRC_PARAMS
+};
+
+/*
+ * Debug reset registers
+ */
+enum dbg_reset_regs {
+       DBG_RESET_REG_MISCS_PL_UA,
+       DBG_RESET_REG_MISCS_PL_HV,
+       DBG_RESET_REG_MISC_PL_UA,
+       DBG_RESET_REG_MISC_PL_HV,
+       DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+       DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+       DBG_RESET_REG_MISC_PL_PDA_VAUX,
+       MAX_DBG_RESET_REGS
+};
+
+/*
+ * Debug status codes
+ */
+enum dbg_status {
+       DBG_STATUS_OK,
+       DBG_STATUS_APP_VERSION_NOT_SET,
+       DBG_STATUS_UNSUPPORTED_APP_VERSION,
+       DBG_STATUS_DBG_BLOCK_NOT_RESET,
+       DBG_STATUS_INVALID_ARGS,
+       DBG_STATUS_OUTPUT_ALREADY_SET,
+       DBG_STATUS_INVALID_PCI_BUF_SIZE,
+       DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+       DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+       DBG_STATUS_TOO_MANY_INPUTS,
+       DBG_STATUS_INPUT_OVERLAP,
+       DBG_STATUS_HW_ONLY_RECORDING,
+       DBG_STATUS_STORM_ALREADY_ENABLED,
+       DBG_STATUS_STORM_NOT_ENABLED,
+       DBG_STATUS_BLOCK_ALREADY_ENABLED,
+       DBG_STATUS_BLOCK_NOT_ENABLED,
+       DBG_STATUS_NO_INPUT_ENABLED,
+       DBG_STATUS_NO_FILTER_TRIGGER_64B,
+       DBG_STATUS_FILTER_ALREADY_ENABLED,
+       DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+       DBG_STATUS_TRIGGER_NOT_ENABLED,
+       DBG_STATUS_CANT_ADD_CONSTRAINT,
+       DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+       DBG_STATUS_TOO_MANY_CONSTRAINTS,
+       DBG_STATUS_RECORDING_NOT_STARTED,
+       DBG_STATUS_DATA_DIDNT_TRIGGER,
+       DBG_STATUS_NO_DATA_RECORDED,
+       DBG_STATUS_DUMP_BUF_TOO_SMALL,
+       DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+       DBG_STATUS_UNKNOWN_CHIP,
+       DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+       DBG_STATUS_BLOCK_IN_RESET,
+       DBG_STATUS_INVALID_TRACE_SIGNATURE,
+       DBG_STATUS_INVALID_NVRAM_BUNDLE,
+       DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+       DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+       DBG_STATUS_NVRAM_READ_FAILED,
+       DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+       DBG_STATUS_MCP_TRACE_BAD_DATA,
+       DBG_STATUS_MCP_TRACE_NO_META,
+       DBG_STATUS_MCP_COULD_NOT_HALT,
+       DBG_STATUS_MCP_COULD_NOT_RESUME,
+       DBG_STATUS_DMAE_FAILED,
+       DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+       DBG_STATUS_IGU_FIFO_BAD_DATA,
+       DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+       DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+       DBG_STATUS_REG_FIFO_BAD_DATA,
+       DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+       MAX_DBG_STATUS
+};
+
+/*
+ * Debug Storms IDs
+ */
+enum dbg_storms {
+       DBG_TSTORM_ID,
+       DBG_MSTORM_ID,
+       DBG_USTORM_ID,
+       DBG_XSTORM_ID,
+       DBG_YSTORM_ID,
+       DBG_PSTORM_ID,
+       MAX_DBG_STORMS
+};
+
+/*
+ * Idle Check data
+ */
+struct idle_chk_data {
+       __le32 buf_size /* Idle check buffer size in dwords */;
+       u8 buf_size_set
+           /* Indicates if the idle check buffer size was set (0/1) */;
+       u8 reserved1;
+       __le16 reserved2;
+};
+
+/*
+ * Idle Check data
+ */
+struct mcp_trace_data {
+       __le32 buf_size /* MCP Trace buffer size in dwords */;
+       u8 buf_size_set
+           /* Indicates if the MCP Trace buffer size was set (0/1) */;
+       u8 reserved1;
+       __le16 reserved2;
+};
+
+/*
+ * Debug Tools data (per HW function)
+ */
+struct dbg_tools_data {
+       struct dbg_grc_data grc /* GRC Dump data */;
+       struct dbg_bus_data bus /* Debug Bus data */;
+       struct idle_chk_data idle_chk /* Idle Check data */;
+       struct mcp_trace_data mcp_trace /* MCP Trace data */;
+       u8 block_in_reset[80] /* Indicates if a block is in reset state (0/1) */
+          ;
+       u8 chip_id /* Chip ID (from enum chip_ids) */;
+       u8 chip_mask
+           /* Chip mask = bit index chip_id is set, the rest are cleared */;
+       u8 initialized /* Indicates if the data was initialized */;
+       u8 reset_state_updated
+           /* Indicates if blocks reset state is updated (0/1) */;
+};
+
+/*
+ * BRB RAM init requirements
+ */
+struct init_brb_ram_req {
+       __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
+       __le32 headroom_per_tc /* headroom size per TC, in bytes */;
+       __le32 min_pkt_size /* min packet size, in bytes */;
+       __le32 max_ports_per_engine /* max number of ports per engine */;
+       u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
+};
+
+/*
+ * ETS per-TC init requirements
+ */
+struct init_ets_tc_req {
+       u8 use_sp;
+       u8 use_wfq;
+       __le16 weight /* An arbitration weight. Valid only if use_wfq is set. */
+          ;
+};
+
+/*
+ * ETS init requirements
+ */
+struct init_ets_req {
+       __le32 mtu /* Max packet size (in bytes) */;
+       struct init_ets_tc_req tc_req[NUM_OF_TCS]
+           /* ETS initialization requirements per TC. */;
+};
+
+/*
+ * NIG LB RL init requirements
+ */
+struct init_nig_lb_rl_req {
+       __le16 lb_mac_rate;
+       __le16 lb_rate;
+       __le32 mtu /* Max packet size (in bytes) */;
+       __le16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+/*
+ * NIG TC mapping for each priority
+ */
+struct init_nig_pri_tc_map_entry {
+       u8 tc_id /* the mapped TC ID */;
+       u8 valid /* indicates if the mapping entry is valid */;
+};
+
+/*
+ * NIG priority to TC map init requirements
+ */
+struct init_nig_pri_tc_map_req {
+       struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
+/*
+ * QM per-port init parameters
+ */
+struct init_qm_port_params {
+       u8 active /* Indicates if this port is active */;
+       u8 num_active_phys_tcs /* number of physical TCs used by this port */;
+       __le16 num_pbf_cmd_lines
+           /* number of PBF command lines that can be used by this port */;
+       __le16 num_btb_blocks
+           /* number of BTB blocks that can be used by this port */;
+       __le16 reserved;
+};
+
+/*
+ * QM per-PQ init parameters
+ */
+struct init_qm_pq_params {
+       u8 vport_id /* VPORT ID */;
+       u8 tc_id /* TC ID */;
+       u8 wrr_group /* WRR group */;
+       u8 reserved;
+};
+
+/*
+ * QM per-vport init parameters
+ */
+struct init_qm_vport_params {
+       __le32 vport_rl;
+       __le16 vport_wfq;
+       __le16 first_tx_pq_id[NUM_OF_TCS]
+           /* the first Tx PQ ID associated with this VPORT for each TC. */;
+};
+
+#endif /* __ECORE_HSI_TOOLS__ */
diff --git a/drivers/net/qede/ecore/ecore_hw.c 
b/drivers/net/qede/ecore/ecore_hw.c
new file mode 100644
index 0000000..308a38a
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_hw.c
@@ -0,0 +1,1000 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore_hsi_common.h"
+#include "ecore_status.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "reg_addr.h"
+#include "ecore_utils.h"
+#include "ecore_iov_api.h"
+
+#ifndef ASIC_ONLY
+#define ECORE_EMUL_FACTOR 2000
+#define ECORE_FPGA_FACTOR 200
+#endif
+
+#define ECORE_BAR_ACQUIRE_TIMEOUT 1000
+
+/* Invalid values */
+#define ECORE_BAR_INVALID_OFFSET               -1
+
+/* A single "PF translation table" window - a movable view into the
+ * device's internal (GRC) address space, exposed through the BAR.
+ */
+struct ecore_ptt {
+       osal_list_entry_t list_entry;   /* link in the pool's free_list */
+       unsigned int idx;               /* index of this PTT window */
+       struct pxp_ptt_entry pxp;       /* host mirror of the HW PTT entry
+                                        * (window offset + pretend regs)
+                                        */
+};
+
+/* Per-hwfn pool of PTT windows; the first RESERVED_PTT_MAX entries are
+ * reserved by design and never put on the free list.
+ */
+struct ecore_ptt_pool {
+       osal_list_t free_list;          /* PTTs available for acquisition */
+       osal_spinlock_t lock;           /* protects free_list */
+       struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+/* Allocate and initialize the PTT pool for this hwfn.
+ * Returns ECORE_NOMEM on allocation failure, ECORE_SUCCESS otherwise.
+ */
+enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ptt_pool *p_pool;
+       int i;
+
+       p_pool = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                           sizeof(struct ecore_ptt_pool));
+       if (!p_pool)
+               return ECORE_NOMEM;
+
+       /* Mark every window invalid so first use re-programs the HW entry. */
+       OSAL_LIST_INIT(&p_pool->free_list);
+       for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+               p_pool->ptts[i].idx = i;
+               p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
+               p_pool->ptts[i].pxp.pretend.control = 0;
+
+               /* There are special PTT entries that are taken only by design.
+                * The rest are added to the list for general usage.
+                */
+               if (i >= RESERVED_PTT_MAX)
+                       OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
+                                           &p_pool->free_list);
+       }
+
+       p_hwfn->p_ptt_pool = p_pool;
+       /* NOTE(review): OSAL_SPIN_LOCK_ALLOC result is not checked - confirm
+        * it cannot fail on the supported OSAL implementations.
+        */
+       OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
+       OSAL_SPIN_LOCK_INIT(&p_pool->lock);
+
+       return ECORE_SUCCESS;
+}
+
+/* Invalidate every PTT window's cached offset so the next access through
+ * any window re-programs the corresponding HW entry.
+ */
+void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
+{
+       int idx;
+
+       for (idx = 0; idx < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; idx++)
+               p_hwfn->p_ptt_pool->ptts[idx].pxp.offset =
+                   ECORE_BAR_INVALID_OFFSET;
+}
+
+/* Tear down the PTT pool: release its lock (if the pool exists) and free
+ * the pool memory; the hwfn pointer is cleared in all cases.
+ */
+void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ptt_pool *p_pool = p_hwfn->p_ptt_pool;
+
+       if (p_pool)
+               OSAL_SPIN_LOCK_DEALLOC(&p_pool->lock);
+       OSAL_FREE(p_hwfn->p_dev, p_pool);
+       p_hwfn->p_ptt_pool = OSAL_NULL;
+}
+
+/* Acquire a free PTT window, polling up to ECORE_BAR_ACQUIRE_TIMEOUT ms.
+ * Returns OSAL_NULL if none became available in time.
+ * Note the lock discipline: the loop exits via `break` with the lock HELD,
+ * and via timeout with the lock RELEASED.
+ */
+struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_ptt *p_ptt;
+       unsigned int i;
+
+       /* Take the free PTT from the list */
+       for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
+               OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
+               if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list))
+                       break;
+               OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+               OSAL_MSLEEP(1);
+       }
+
+       /* We should not time-out, but it can happen... --> Lock isn't held */
+       if (i == ECORE_BAR_ACQUIRE_TIMEOUT) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate PTT\n");
+               return OSAL_NULL;
+       }
+
+       /* Lock is still held here - pop the first free entry. */
+       p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
+                                     struct ecore_ptt, list_entry);
+       OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
+                              &p_hwfn->p_ptt_pool->free_list);
+       OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "allocated ptt %d\n", p_ptt->idx);
+
+       return p_ptt;
+}
+
+/* Return an acquired PTT window to the pool's free list. */
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       struct ecore_ptt_pool *p_pool = p_hwfn->p_ptt_pool;
+
+       /* This PTT should not be set to pretend if it is being released */
+
+       OSAL_SPIN_LOCK(&p_pool->lock);
+       OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_pool->free_list);
+       OSAL_SPIN_UNLOCK(&p_pool->lock);
+}
+
+/* Return the GRC address this PTT window currently points at, in bytes. */
+u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       /* The HW entry stores the offset in dwords; callers expect bytes. */
+       u32 dword_offset = p_ptt->pxp.offset;
+
+       return dword_offset << 2;
+}
+
+/* Address of this PTT's configuration entry in the per-PF admin window. */
+static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
+{
+       u32 entry_offset = p_ptt->idx * sizeof(struct pxp_ptt_entry);
+
+       return PXP_PF_WINDOW_ADMIN_PER_PF_START + entry_offset;
+}
+
+/* BAR address at which this PTT's external window begins. */
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
+{
+       u32 window_offset =
+           p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+
+       return PXP_EXTERNAL_BAR_PF_WINDOW_START + window_offset;
+}
+
+/* Re-aim a PTT window at new_hw_addr (a byte-based GRC address).
+ * No-op when the window already points there.
+ */
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt, u32 new_hw_addr)
+{
+       u32 prev_hw_addr;
+
+       prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+       if (new_hw_addr == prev_hw_addr)
+               return;
+
+       /* Update PTT entry in admin window */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "Updating PTT entry %d to offset 0x%x\n",
+                  p_ptt->idx, new_hw_addr);
+
+       /* The HW is using DWORDS and the address is in Bytes */
+       p_ptt->pxp.offset = new_hw_addr >> 2;
+
+       REG_WR(p_hwfn,
+              ecore_ptt_config_addr(p_ptt) +
+              OFFSETOF(struct pxp_ptt_entry, offset), p_ptt->pxp.offset);
+}
+
+/* Translate a GRC byte address into a BAR address reachable through
+ * p_ptt, moving the window first if hw_addr falls outside it.
+ */
+static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
+                        struct ecore_ptt *p_ptt, u32 hw_addr)
+{
+       u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+       u32 offset;
+
+       offset = hw_addr - win_hw_addr;
+
+       /* Verify the address is within the window */
+       if (hw_addr < win_hw_addr ||
+           offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+               /* Out of window - re-aim it at hw_addr exactly. */
+               ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+               offset = 0;
+       }
+
+       return ecore_ptt_get_bar_addr(p_ptt) + offset;
+}
+
+/* Fetch one of the PTT windows reserved by design (first RESERVED_PTT_MAX
+ * pool slots). Returns OSAL_NULL for an out-of-range index.
+ */
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+                                        enum reserved_ptts ptt_idx)
+{
+       if (ptt_idx < RESERVED_PTT_MAX)
+               return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+
+       DP_NOTICE(p_hwfn, true,
+                 "Requested PTT %d is out of range\n", ptt_idx);
+       return OSAL_NULL;
+}
+
+/* Write a 32-bit value to a GRC byte address through the given PTT,
+ * moving the window as needed.
+ */
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+             struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
+{
+       u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+       REG_WR(p_hwfn, bar_addr, val);
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+                  bar_addr, hw_addr, val);
+
+#ifndef ASIC_ONLY
+       /* Slow (emul/FPGA) platforms need extra settling time. */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+               OSAL_UDELAY(100);
+#endif
+}
+
+/* Read a 32-bit value from a GRC byte address through the given PTT,
+ * moving the window as needed.
+ */
+u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
+{
+       u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+       u32 val = REG_RD(p_hwfn, bar_addr);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+                  bar_addr, hw_addr, val);
+
+#ifndef ASIC_ONLY
+       /* Slow (emul/FPGA) platforms need extra settling time. */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+               OSAL_UDELAY(100);
+#endif
+
+       return val;
+}
+
+/* Copy n bytes between host memory (addr) and GRC space (hw_addr),
+ * one PTT-window-sized chunk at a time, dword by dword.
+ * NOTE(review): dw_count = quota / 4 silently drops a trailing partial
+ * dword - presumably callers always pass dword-aligned sizes; confirm.
+ */
+static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           void *addr,
+                           u32 hw_addr, osal_size_t n, bool to_device)
+{
+       u32 dw_count, *host_addr, hw_offset;
+       osal_size_t quota, done = 0;
+       u32 OSAL_IOMEM *reg_addr;
+
+       while (done < n) {
+               /* Per-iteration chunk is bounded by the window size. */
+               quota = OSAL_MIN_T(osal_size_t, n - done,
+                                  PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+               if (IS_PF(p_hwfn->p_dev)) {
+                       /* PF: slide the PTT window over the current chunk. */
+                       ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+                       hw_offset = ecore_ptt_get_bar_addr(p_ptt);
+               } else {
+                       /* VF: BAR is accessed directly, no window. */
+                       hw_offset = hw_addr + done;
+               }
+
+               dw_count = quota / 4;
+               host_addr = (u32 *) ((u8 *) addr + done);
+               reg_addr = (u32 OSAL_IOMEM *) OSAL_REG_ADDR(p_hwfn, hw_offset);
+
+               if (to_device)
+                       while (dw_count--)
+                               DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
+               else
+                       while (dw_count--)
+                               *host_addr++ = DIRECT_REG_RD(p_hwfn,
+                                                            reg_addr++);
+
+               done += quota;
+       }
+}
+
+/* Copy n bytes from GRC space (hw_addr) into host buffer dest. */
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt,
+                      void *dest, u32 hw_addr, osal_size_t n)
+{
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+                  hw_addr, dest, hw_addr, (unsigned long)n);
+
+       ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+/* Copy n bytes from host buffer src into GRC space (hw_addr). */
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt,
+                    u32 hw_addr, void *src, osal_size_t n)
+{
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+                  hw_addr, hw_addr, src, (unsigned long)n);
+
+       ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+/* Configure this PTT window so accesses through it pretend to come from
+ * the function identified by the concrete fid.
+ */
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt, u16 fid)
+{
+       void *p_pretend;
+       u16 control = 0;
+
+       SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+       /* Every pretend undoes previous pretends, including
+        * previous port pretend.
+        */
+       SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+       p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+       /* Without a valid VF bit, pretend only to the PF portion of the fid. */
+       if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+               fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+       p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
+
+       p_pretend = &p_ptt->pxp.pretend;
+       REG_WR(p_hwfn,
+              ecore_ptt_config_addr(p_ptt) +
+              OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *) p_pretend);
+}
+
+/* Configure this PTT window so accesses through it pretend to originate
+ * from the given port.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - port to pretend to be
+ */
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, u8 port_id)
+{
+       void *p_pretend;
+       u16 control = 0;
+
+       SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+       SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+       /* Fix: the control word is a little-endian HW field; convert it as
+        * ecore_fid_pretend() does (the bare store was wrong on big-endian
+        * hosts).
+        */
+       p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+       p_pretend = &p_ptt->pxp.pretend;
+       REG_WR(p_hwfn,
+              ecore_ptt_config_addr(p_ptt) +
+              OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *) p_pretend);
+}
+
+/* Cancel any port pretend previously configured on this PTT window. */
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       void *p_pretend;
+       u16 control = 0;
+
+       SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+       SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+       /* Fix: the control word is a little-endian HW field; convert it as
+        * ecore_fid_pretend() does (the bare store was wrong on big-endian
+        * hosts).
+        */
+       p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
+       p_pretend = &p_ptt->pxp.pretend;
+       REG_WR(p_hwfn,
+              ecore_ptt_config_addr(p_ptt) +
+              OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *) p_pretend);
+}
+
+/* Build a concrete FID from the current PF id and the given VF id, with
+ * the VF-valid bit set.
+ */
+u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
+{
+       u32 concrete = 0;
+
+       SET_FIELD(concrete, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+       SET_FIELD(concrete, PXP_CONCRETE_FID_VFID, vfid);
+       SET_FIELD(concrete, PXP_CONCRETE_FID_VFVALID, 1);
+       return concrete;
+}
+
+#if 0
+/* Ecore HW lock
+ * =============
+ * Although the implementation is ready, today we don't have any flow that
+ * utilizes said locks - and we want to keep it this way.
+ * If this changes, this needs to be revisited.
+ */
+#define HW_LOCK_MAX_RETRIES 1000
+/* Try to take the HW resource lock bit(s) for this function; when `block`
+ * is set, poll every 5ms up to HW_LOCK_MAX_RETRIES attempts.
+ */
+enum _ecore_status_t ecore_hw_lock(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  u8 resource, bool block)
+{
+       u32 cnt, lock_status, hw_lock_cntr_reg;
+       enum _ecore_status_t ecore_status;
+
+       /* Locate the proper lock register for this function.
+        * Note This code assumes all the H/W lock registers are sequential
+        * in memory.
+        */
+       hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
+           p_hwfn->rel_pf_id * MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);
+
+       /* Validate that the resource is not already taken */
+       lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);
+
+       if (lock_status & resource) {
+               DP_NOTICE(p_hwfn, true,
+                         "Resc already locked: lock_status=0x%x res=0x%x\n",
+                         lock_status, resource);
+
+               return ECORE_BUSY;
+       }
+
+       /* Register for the lock */
+       ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg + sizeof(u32), resource);
+
+       /* Try for 5 seconds every 5ms */
+       for (cnt = 0; cnt < HW_LOCK_MAX_RETRIES; cnt++) {
+               lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);
+
+               if (lock_status & resource)
+                       return ECORE_SUCCESS;
+
+               if (!block) {
+                       ecore_status = ECORE_BUSY;
+                       break;
+               }
+
+               OSAL_MSLEEP(5);
+       }
+
+       if (cnt == HW_LOCK_MAX_RETRIES) {
+               DP_NOTICE(p_hwfn, true, "Lock timeout resource=0x%x\n",
+                         resource);
+               ecore_status = ECORE_TIMEOUT;
+       }
+
+       /* Clear the pending request */
+       ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);
+
+       return ecore_status;
+}
+
+/* Release the HW resource lock bit(s) previously taken by ecore_hw_lock. */
+enum _ecore_status_t ecore_hw_unlock(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt, u8 resource)
+{
+       u32 lock_status, hw_lock_cntr_reg;
+
+       /* Locate the proper lock register for this function.
+        * Note This code assumes all the H/W lock registers are sequential
+        * in memory.
+        */
+       hw_lock_cntr_reg = MISCS_REG_DRIVER_CONTROL_0 +
+           p_hwfn->rel_pf_id * MISCS_REG_DRIVER_CONTROL_0_SIZE * sizeof(u32);
+
+       /*  Validate that the resource is currently taken */
+       lock_status = ecore_rd(p_hwfn, p_ptt, hw_lock_cntr_reg);
+
+       if (!(lock_status & resource)) {
+               DP_NOTICE(p_hwfn, true,
+                         "resource 0x%x was not taken (lock status 0x%x)\n",
+                         resource, lock_status);
+
+               return ECORE_NODEV;
+       }
+
+       /* clear lock for resource */
+       ecore_wr(p_hwfn, p_ptt, hw_lock_cntr_reg, resource);
+       return ECORE_SUCCESS;
+}
+#endif /* HW locks logic */
+
+/* Ecore DMAE
+ * =============
+ */
+/* Build the DMAE command opcode/opcode_b words and store them
+ * (little-endian) into the hwfn's DMAE command buffer.
+ *
+ * @param p_hwfn
+ * @param is_src_type_grc - source is GRC (else PCIe)
+ * @param is_dst_type_grc - destination is GRC (else PCIe)
+ * @param p_params - flags and SRC/DST VF ids
+ */
+static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
+                             const u8 is_src_type_grc,
+                             const u8 is_dst_type_grc,
+                             struct ecore_dmae_params *p_params)
+{
+       u16 opcode_b = 0;
+       u32 opcode = 0;
+
+       /* Whether the source is the PCIe or the GRC.
+        * 0- The source is the PCIe
+        * 1- The source is the GRC.
+        */
+       opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
+                  : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
+       opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
+           DMAE_CMD_SRC_PF_ID_SHIFT;
+
+       /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+       opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
+                  : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
+       opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
+           DMAE_CMD_DST_PF_ID_SHIFT;
+
+       /* DMAE_E4_TODO need to check which value to specify here. */
+       /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
+
+       /* Whether to write a completion word to the completion destination:
+        * 0-Do not write a completion word
+        * 1-Write the completion word
+        */
+       opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
+
+       if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
+               opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
+
+       /* swapping mode 3 - big endian there should be a define ifdefed in
+        * the HSI somewhere. Since it is currently
+        */
+       opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
+
+       opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
+
+       /* reset source address in next go
+        * (was OR-ed in twice; dropped the redundant duplicate - OR-ing the
+        * same mask is idempotent, so behavior is unchanged)
+        */
+       opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
+
+       /* reset dest address in next go */
+       opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
+
+       /* SRC/DST VFID: all 1's - pf, otherwise VF id */
+       if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
+               opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
+               opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
+       } else {
+               opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
+                            DMAE_CMD_SRC_VF_ID_SHIFT);
+       }
+       if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
+               opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+               opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+       } else {
+               opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+       }
+
+       p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
+       p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
+}
+
+/* Address of the GO register for DMAE channel `idx`. */
+static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
+{
+       /* All 32 GO registers are laid out contiguously, one dword apart. */
+       OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
+
+       return DMAE_REG_GO_C0 + (u32)idx * sizeof(u32);
+}
+
+/* Copy the prepared DMAE command into the engine's command memory for
+ * this hwfn's channel and kick the GO register.
+ * Rejects commands whose source or destination address is entirely zero.
+ */
+static enum _ecore_status_t
+ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
+       enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+       u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+
+       /* verify address is not OSAL_NULL */
+       if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+            ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
+               /* Fix: "adress" -> "address" in the error message. */
+               DP_NOTICE(p_hwfn, true,
+                         "source or destination address 0 idx_cmd=%d\n"
+                         "opcode = [0x%08x,0x%04x] len=0x%x"
+                         " src=0x%x:%x dst=0x%x:%x\n",
+                         idx_cmd, (u32) p_command->opcode,
+                         (u16) p_command->opcode_b,
+                         (int)p_command->length,
+                         (int)p_command->src_addr_hi,
+                         (int)p_command->src_addr_lo,
+                         (int)p_command->dst_addr_hi,
+                         (int)p_command->dst_addr_lo);
+
+               return ECORE_INVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                  "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
+                  "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+                  idx_cmd, (u32) p_command->opcode,
+                  (u16) p_command->opcode_b,
+                  (int)p_command->length,
+                  (int)p_command->src_addr_hi,
+                  (int)p_command->src_addr_lo,
+                  (int)p_command->dst_addr_hi, (int)p_command->dst_addr_lo);
+
+       /* Copy the command to DMAE - need to do it before every call
+        * for source/dest address no reset.
+        * The number of commands have been increased to 16 (previous was 14)
+        * The first 9 DWs are the command registers, the 10 DW is the
+        * GO register, and
+        * the rest are result registers (which are read only by the client).
+        */
+       for (i = 0; i < DMAE_CMD_SIZE; i++) {
+               /* Zero-fill the tail past the command body proper. */
+               u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+                   *(((u32 *) p_command) + i) : 0;
+
+               ecore_wr(p_hwfn, p_ptt,
+                        DMAE_REG_CMD_MEM +
+                        (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+                        (i * sizeof(u32)), data);
+       }
+
+       /* Trigger the engine for this channel. */
+       ecore_wr(p_hwfn, p_ptt,
+                ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
+
+       return ecore_status;
+}
+
+/* Allocate the DMA-coherent buffers the DMAE engine needs: a completion
+ * word, the command structure, and an intermediate bounce buffer.
+ * On any failure, everything already allocated is freed and ECORE_NOMEM
+ * is returned.
+ */
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
+{
+       dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+       struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+       u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+       u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+       *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
+       if (*p_comp == OSAL_NULL) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `p_completion_word'\n");
+               ecore_dmae_info_free(p_hwfn);
+               return ECORE_NOMEM;
+       }
+
+       p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+       *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
+                                        sizeof(struct dmae_cmd));
+       if (*p_cmd == OSAL_NULL) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct dmae_cmd'\n");
+               ecore_dmae_info_free(p_hwfn);
+               return ECORE_NOMEM;
+       }
+
+       p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+       *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
+                                         sizeof(u32) * DMAE_MAX_RW_SIZE);
+       if (*p_buff == OSAL_NULL) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `intermediate_buffer'\n");
+               ecore_dmae_info_free(p_hwfn);
+               return ECORE_NOMEM;
+       }
+
+       /* DMAE_E4_TODO : Need to change this to reflect proper channel */
+       p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+       return ECORE_SUCCESS;
+}
+
+/* Free the DMAE coherent buffers; pointers are nulled so the function is
+ * safe to call on a partially-allocated dmae_info (as the alloc path does).
+ */
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
+{
+       dma_addr_t p_phys;
+
+       /* Just make sure no one is in the middle */
+       OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+       if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
+               p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_hwfn->dmae_info.p_completion_word,
+                                      p_phys, sizeof(u32));
+               p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
+       }
+
+       if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
+               p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_hwfn->dmae_info.p_dmae_cmd,
+                                      p_phys, sizeof(struct dmae_cmd));
+               p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
+       }
+
+       if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
+               p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_hwfn->dmae_info.p_intermediate_buffer,
+                                      p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
+               p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
+       }
+
+       OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+}
+
+/* Poll the DMA-written completion word until the engine signals done,
+ * or return ECORE_TIMEOUT after the (platform-scaled) wait budget.
+ * On success the completion word is reset to 0 for the next command.
+ */
+static enum _ecore_status_t
+ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
+{
+       enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+       u32 wait_cnt_limit = 10000, wait_cnt = 0;
+
+#ifndef ASIC_ONLY
+       /* Emulation/FPGA platforms are much slower; scale the timeout. */
+       u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
+                     ECORE_EMUL_FACTOR :
+                     (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
+                      ECORE_FPGA_FACTOR : 1));
+
+       wait_cnt_limit *= factor;
+#endif
+
+       /* DMAE_E4_TODO : TODO check if we have to call any other function
+        * other than BARRIER to sync the completion_word since we are not
+        * using the volatile keyword for this
+        */
+       OSAL_BARRIER(p_hwfn->p_dev);
+       while (*(p_hwfn->dmae_info.p_completion_word) != DMAE_COMPLETION_VAL) {
+               /* DMAE_E4_TODO : using OSAL_UDELAY instead of mm_wait since mm
+                * functions are getting deprecated. Need to review for future.
+                */
+               OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
+               if (++wait_cnt > wait_cnt_limit) {
+                       /* Fix: DP_NOTICE()'s second argument is a bool (as at
+                        * every other call site in this file), not a verbosity
+                        * mask like ECORE_MSG_HW.
+                        */
+                       DP_NOTICE(p_hwfn->p_dev, false,
+                                 "Timed-out waiting for operation to"
+                                 " complete. Completion word is 0x%08x"
+                                 " expected 0x%08x.\n",
+                                 *(p_hwfn->dmae_info.p_completion_word),
+                                 DMAE_COMPLETION_VAL);
+                       ecore_status = ECORE_TIMEOUT;
+                       break;
+               }
+               /* to sync the completion_word since we are not
+                * using the volatile keyword for p_completion_word
+                */
+               OSAL_BARRIER(p_hwfn->p_dev);
+       }
+
+       if (ecore_status == ECORE_SUCCESS)
+               *(p_hwfn->dmae_info.p_completion_word) = 0;
+
+       return ecore_status;
+}
+
+/* Execute a single DMAE transfer of `length` dwords. Virtual host
+ * addresses are staged through the coherent intermediate buffer; GRC and
+ * physical host addresses are programmed directly into the command.
+ */
+static enum _ecore_status_t
+ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt,
+                                u64 src_addr,
+                                u64 dst_addr,
+                                u8 src_type, u8 dst_type, u32 length)
+{
+       dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+       struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+       enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+
+       switch (src_type) {
+       case ECORE_DMAE_ADDRESS_GRC:
+       case ECORE_DMAE_ADDRESS_HOST_PHYS:
+               cmd->src_addr_hi = DMA_HI(src_addr);
+               cmd->src_addr_lo = DMA_LO(src_addr);
+               break;
+               /* for virt source addresses we use the intermediate buffer. */
+       case ECORE_DMAE_ADDRESS_HOST_VIRT:
+               cmd->src_addr_hi = DMA_HI(phys);
+               cmd->src_addr_lo = DMA_LO(phys);
+               OSAL_MEMCPY(&(p_hwfn->dmae_info.p_intermediate_buffer[0]),
+                           (void *)(osal_uintptr_t) src_addr,
+                           length * sizeof(u32));
+               break;
+       default:
+               return ECORE_INVAL;
+       }
+
+       switch (dst_type) {
+       case ECORE_DMAE_ADDRESS_GRC:
+       case ECORE_DMAE_ADDRESS_HOST_PHYS:
+               cmd->dst_addr_hi = DMA_HI(dst_addr);
+               cmd->dst_addr_lo = DMA_LO(dst_addr);
+               break;
+               /* for virt destination address we use the intermediate buff. */
+       case ECORE_DMAE_ADDRESS_HOST_VIRT:
+               cmd->dst_addr_hi = DMA_HI(phys);
+               cmd->dst_addr_lo = DMA_LO(phys);
+               break;
+       default:
+               return ECORE_INVAL;
+       }
+
+       cmd->length = (u16) length;
+
+       /* Sync the host source range for device access before posting. */
+       if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
+           src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
+               OSAL_DMA_SYNC(p_hwfn->p_dev,
+                             (void *)HILO_U64(cmd->src_addr_hi,
+                                              cmd->src_addr_lo),
+                             length * sizeof(u32), false);
+
+       ecore_dmae_post_command(p_hwfn, p_ptt);
+
+       ecore_status = ecore_dmae_operation_wait(p_hwfn);
+
+       /* TODO - is it true ? */
+       if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
+           src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
+               OSAL_DMA_SYNC(p_hwfn->p_dev,
+                             (void *)HILO_U64(cmd->src_addr_hi,
+                                              cmd->src_addr_lo),
+                             length * sizeof(u32), true);
+
+       if (ecore_status != ECORE_SUCCESS) {
+               /* Fix: DP_NOTICE()'s second argument is a bool, not a
+                * verbosity mask, and the message previously named the
+                * wrong function (ecore_dmae_host2grc).
+                */
+               DP_NOTICE(p_hwfn, false,
+                         "ecore_dmae_execute_sub_operation: Wait Failed."
+                         " source_addr 0x%lx, dest_addr 0x%lx,"
+                         " size_in_dwords 0x%x\n",
+                         (unsigned long)src_addr, (unsigned long)dst_addr,
+                         length);
+               return ecore_status;
+       }
+
+       /* Virtual destination: copy the DMAed data out of the bounce buf. */
+       if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
+               OSAL_MEMCPY((void *)(osal_uintptr_t) (dst_addr),
+                           &p_hwfn->dmae_info.p_intermediate_buffer[0],
+                           length * sizeof(u32));
+
+       return ECORE_SUCCESS;
+}
+
+/* Split a DMAE transfer of size_in_dwords into DMAE_MAX_RW_SIZE chunks
+ * and execute them sequentially. GRC offsets advance in dwords while host
+ * addresses advance in bytes (offset * 4). Stops and notifies on the
+ * first failing chunk.
+ */
+static enum _ecore_status_t
+ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt,
+                          u64 src_addr,
+                          u64 dst_addr,
+                          u8 src_type,
+                          u8 dst_type,
+                          u32 size_in_dwords,
+                          struct ecore_dmae_params *p_params)
+{
+       dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+       u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+       struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+       enum _ecore_status_t ecore_status = ECORE_SUCCESS;
+       u64 src_addr_split = 0, dst_addr_split = 0;
+       u16 length_limit = DMAE_MAX_RW_SIZE;
+       u32 offset = 0;
+
+       ecore_dmae_opcode(p_hwfn,
+                         (src_type == ECORE_DMAE_ADDRESS_GRC),
+                         (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
+
+       /* Completion word is DMAed back to this coherent address. */
+       cmd->comp_addr_lo = DMA_LO(phys);
+       cmd->comp_addr_hi = DMA_HI(phys);
+       cmd->comp_val = DMAE_COMPLETION_VAL;
+
+       /* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
+       cnt_split = size_in_dwords / length_limit;
+       length_mod = size_in_dwords % length_limit;
+
+       src_addr_split = src_addr;
+       dst_addr_split = dst_addr;
+
+       for (i = 0; i <= cnt_split; i++) {
+               offset = length_limit * i;
+
+               /* RW_REPL_SRC keeps re-reading the same source address. */
+               if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
+                       if (src_type == ECORE_DMAE_ADDRESS_GRC)
+                               src_addr_split = src_addr + offset;
+                       else
+                               src_addr_split = src_addr + (offset * 4);
+               }
+
+               if (dst_type == ECORE_DMAE_ADDRESS_GRC)
+                       dst_addr_split = dst_addr + offset;
+               else
+                       dst_addr_split = dst_addr + (offset * 4);
+
+               length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+               /* might be zero on last iteration */
+               if (!length_cur)
+                       continue;
+
+               ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
+                                                               p_ptt,
+                                                               src_addr_split,
+                                                               dst_addr_split,
+                                                               src_type,
+                                                               dst_type,
+                                                               length_cur);
+               if (ecore_status != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn, false,
+                                 "ecore_dmae_execute_sub_operation Failed"
+                                 " with error 0x%x. source_addr 0x%lx,"
+                                 " dest addr 0x%lx, size_in_dwords 0x%x\n",
+                                 ecore_status, src_addr, dst_addr, length_cur);
+
+                       ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
+                       break;
+               }
+       }
+
+       return ecore_status;
+}
+
+/* DMA a buffer from host virtual memory into GRC register space.
+ * grc_addr is a byte address; the DMAE engine itself is addressed in
+ * dwords, hence the conversion below. Serialized on the per-hwfn
+ * DMAE mutex since the hwfn owns a single DMAE channel.
+ */
+enum _ecore_status_t
+ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+                   struct ecore_ptt *p_ptt,
+                   u64 source_addr,
+                   u32 grc_addr, u32 size_in_dwords, u32 flags)
+{
+       struct ecore_dmae_params dmae_params;
+       enum _ecore_status_t status;
+       u32 grc_dw_addr = grc_addr / sizeof(u32); /* byte -> dword address */
+
+       OSAL_MEMSET(&dmae_params, 0, sizeof(dmae_params));
+       dmae_params.flags = flags;
+
+       OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+       status = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+                                           grc_dw_addr,
+                                           ECORE_DMAE_ADDRESS_HOST_VIRT,
+                                           ECORE_DMAE_ADDRESS_GRC,
+                                           size_in_dwords, &dmae_params);
+
+       OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+       return status;
+}
+
+/* DMA a region of GRC register space into a host buffer - the mirror
+ * of ecore_dmae_host2grc(). grc_addr is a byte address converted to
+ * the dword addressing the DMAE engine expects. Serialized on the
+ * per-hwfn DMAE mutex.
+ */
+enum _ecore_status_t
+ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+                   struct ecore_ptt *p_ptt,
+                   u32 grc_addr,
+                   dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+{
+       struct ecore_dmae_params dmae_params;
+       enum _ecore_status_t status;
+       u32 grc_dw_addr = grc_addr / sizeof(u32); /* byte -> dword address */
+
+       OSAL_MEMSET(&dmae_params, 0, sizeof(dmae_params));
+       dmae_params.flags = flags;
+
+       OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+       status = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_dw_addr,
+                                           dest_addr, ECORE_DMAE_ADDRESS_GRC,
+                                           ECORE_DMAE_ADDRESS_HOST_VIRT,
+                                           size_in_dwords, &dmae_params);
+
+       OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+       return status;
+}
+
+/* DMA between two host physical buffers. Unlike the GRC variants the
+ * caller supplies the full ecore_dmae_params; both endpoints are
+ * physical host addresses. Serialized on the per-hwfn DMAE mutex.
+ */
+enum _ecore_status_t
+ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt,
+                    dma_addr_t source_addr,
+                    dma_addr_t dest_addr,
+                    u32 size_in_dwords, struct ecore_dmae_params *p_params)
+{
+       enum _ecore_status_t status;
+
+       OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+
+       status = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+                                           dest_addr,
+                                           ECORE_DMAE_ADDRESS_HOST_PHYS,
+                                           ECORE_DMAE_ADDRESS_HOST_PHYS,
+                                           size_in_dwords, p_params);
+
+       OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+
+       return status;
+}
+
+/* Resolve the absolute QM physical queue (PQ) ID for a protocol.
+ * Only the union member matching 'proto' is consulted; protocols that
+ * need per-queue data (CORE/ETH/ISCSI) reject a NULL p_params and
+ * return 0. The relative PQ chosen per protocol is finally offset by
+ * CM_TX_PQ_BASE and this function's PQ resource base.
+ */
+u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
+                   enum protocol_type proto,
+                   union ecore_qm_pq_params *p_params)
+{
+       u16 rel_pq = 0;
+
+       if (!p_params && (proto == PROTOCOLID_CORE ||
+                         proto == PROTOCOLID_ETH ||
+                         proto == PROTOCOLID_ISCSI)) {
+               DP_NOTICE(p_hwfn, true,
+                         "Protocol %d received NULL PQ params\n", proto);
+               return 0;
+       }
+
+       switch (proto) {
+       case PROTOCOLID_CORE:
+               /* core queues are selected by traffic class */
+               if (p_params->core.tc == LB_TC)
+                       rel_pq = p_hwfn->qm_info.pure_lb_pq;
+               else if (p_params->core.tc == OOO_LB_TC)
+                       rel_pq = p_hwfn->qm_info.ooo_pq;
+               else
+                       rel_pq = p_hwfn->qm_info.offload_pq;
+               break;
+       case PROTOCOLID_ETH:
+               rel_pq = p_params->eth.tc;
+               /* TODO - multi-CoS for VFs? */
+               if (p_params->eth.is_vf)
+                       rel_pq += p_hwfn->qm_info.vf_queues_offset +
+                                 p_params->eth.vf_id;
+               break;
+       case PROTOCOLID_ISCSI:
+               /* q_idx 1 selects the pure-ACK PQ; others fall back to 0 */
+               if (p_params->iscsi.q_idx == 1)
+                       rel_pq = p_hwfn->qm_info.pure_ack_pq;
+               break;
+       case PROTOCOLID_ROCE:
+               rel_pq = p_hwfn->qm_info.offload_pq;
+               break;
+       default:
+               rel_pq = 0;
+       }
+
+       /* translate the relative PQ into an absolute PQ ID */
+       return CM_TX_PQ_BASE + rel_pq + RESC_START(p_hwfn, ECORE_PQ);
+}
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+                        enum ecore_hw_err_type err_type)
+{
+       /* Fan failure cannot be masked by handling of another HW error */
+       if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
+                          "Recovery is in progress."
+                          "Avoid notifying about HW error %d.\n",
+                          err_type);
+               return;
+       }
+
+       OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
+}
diff --git a/drivers/net/qede/ecore/ecore_hw.h 
b/drivers/net/qede/ecore/ecore_hw.h
new file mode 100644
index 0000000..07db11f
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_hw.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HW_H__
+#define __ECORE_HW_H__
+
+#include "ecore.h"
+#include "ecore_dev_api.h"
+
+/* Forward declaration */
+struct ecore_ptt;
+
+/* Indices of statically reserved PTT windows - these are not drawn from
+ * the dynamic PTT pool. NOTE(review): per-entry usage is inferred from
+ * the names; confirm against callers of ecore_get_reserved_ptt().
+ */
+enum reserved_ptts {
+       RESERVED_PTT_EDIAG,             /* presumably diagnostics tools */
+       RESERVED_PTT_USER_SPACE,        /* presumably user-space access */
+       RESERVED_PTT_MAIN,              /* main driver flows */
+       RESERVED_PTT_DPC,               /* presumably deferred/ISR context */
+       RESERVED_PTT_MAX                /* count of reserved entries */
+};
+
+/* @@@TMP - in earlier versions of the emulation, the HW lock started from 1
+ * instead of 0, this should be fixed in later HW versions.
+ */
+#ifndef MISC_REG_DRIVER_CONTROL_0
+#define MISC_REG_DRIVER_CONTROL_0      MISC_REG_DRIVER_CONTROL_1
+#endif
+#ifndef MISC_REG_DRIVER_CONTROL_0_SIZE
+#define MISC_REG_DRIVER_CONTROL_0_SIZE MISC_REG_DRIVER_CONTROL_1_SIZE
+#endif
+
+/* Destination-type encoding for the DMAE command 'dst' field */
+enum _dmae_cmd_dst_mask {
+       DMAE_CMD_DST_MASK_NONE = 0,
+       DMAE_CMD_DST_MASK_PCIE = 1,     /* destination over PCIe (host) */
+       DMAE_CMD_DST_MASK_GRC = 2       /* destination in GRC space */
+};
+
+/* Source-type encoding for the DMAE command 'src' field */
+enum _dmae_cmd_src_mask {
+       DMAE_CMD_SRC_MASK_PCIE = 0,     /* source over PCIe (host) */
+       DMAE_CMD_SRC_MASK_GRC = 1       /* source in GRC space */
+};
+
+/* Completion-CRC enable encoding for the DMAE command */
+enum _dmae_cmd_crc_mask {
+       DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
+       DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE  0x1
+
+#ifdef __BIG_ENDIAN
+#define DMAE_COMPLETION_VAL    0xAED10000
+#define DMAE_CMD_ENDIANITY     0x3
+#else
+#define DMAE_COMPLETION_VAL    0xD1AE
+#define DMAE_CMD_ENDIANITY     0x2
+#endif
+
+#define DMAE_CMD_SIZE  14
+/* size of DMAE command structure to fill.. DMAE_CMD_SIZE-5 */
+#define DMAE_CMD_SIZE_TO_FILL  (DMAE_CMD_SIZE - 5)
+/* Minimum wait for a DMAE operation to complete: 2 milliseconds */
+#define DMAE_MIN_WAIT_TIME     0x2
+#define DMAE_MAX_CLIENTS       32
+
+/**
+* @brief ecore_gtt_init - Initialize GTT windows
+*
+* @param p_hwfn
+*/
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
+ *
+ * @param p_hwfn
+ */
+void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_pool_alloc - Allocate and initialize PTT pool
+ *
+ * @param p_hwfn
+ *
+ * @return _ecore_status_t - success (0), negative - error.
+ */
+enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_pool_free -
+ *
+ * @param p_hwfn
+ */
+void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address
+ *
+ * @param p_hwfn
+ * @param new_hw_addr
+ * @param p_ptt
+ */
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt, u32 new_hw_addr);
+
+/**
+ * @brief ecore_get_reserved_ptt - Get a specific reserved PTT
+ *
+ * @param p_hwfn
+ * @param ptt_idx
+ *
+ * @return struct ecore_ptt *
+ */
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+                                        enum reserved_ptts ptt_idx);
+
+/**
+ * @brief ecore_wr - Write value to BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param val
+ * @param hw_addr
+ */
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+             struct ecore_ptt *p_ptt, u32 hw_addr, u32 val);
+
+/**
+ * @brief ecore_rd - Read value from BAR using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param val
+ * @param hw_addr
+ */
+u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr);
+
+/**
+ * @brief ecore_memcpy_from - copy n bytes from BAR using the given
+ *        ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param dest
+ * @param hw_addr
+ * @param n
+ */
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt,
+                      void *dest, u32 hw_addr, osal_size_t n);
+
+/**
+ * @brief ecore_memcpy_to - copy n bytes to BAR using the given
+ *        ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param hw_addr
+ * @param src
+ * @param n
+ */
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt,
+                    u32 hw_addr, void *src, osal_size_t n);
+/**
+ * @brief ecore_fid_pretend - pretend to another function when
+ *        accessing the ptt window. There is no way to unpretend
+ *        a function. The only way to cancel a pretend is to
+ *        pretend back to the original function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param fid - fid field of pxp_pretend structure. Can contain
+ *            either pf / vf, port/path fields are don't care.
+ */
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+                      struct ecore_ptt *p_ptt, u16 fid);
+
+/**
+ * @brief ecore_port_pretend - pretend to another port when
+ *        accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ */
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, u8 port_id);
+
+/**
+ * @brief ecore_port_unpretend - cancel any previously set port
+ *        pretend
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_vfid_to_concrete - build a concrete FID for a
+ *        given VF ID
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ */
+u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid);
+
+/**
+* @brief ecore_dmae_info_alloc - Init the dmae_info structure
+* which is part of p_hwfn.
+* @param p_hwfn
+*/
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+* @brief ecore_dmae_info_free - Free the dmae_info structure
+* which is part of p_hwfn
+*
+* @param p_hwfn
+*/
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
+
+/* Per-protocol parameters used by ecore_get_qm_pq() to resolve a QM
+ * physical queue; only the member matching the protocol is read.
+ */
+union ecore_qm_pq_params {
+       struct {
+               u8 q_idx;       /* queue index; 1 selects the pure-ACK PQ */
+       } iscsi;
+
+       struct {
+               u8 tc;          /* traffic class (LB_TC / OOO_LB_TC / other) */
+       } core;
+
+       struct {
+               u8 is_vf;       /* non-zero if the queue belongs to a VF */
+               u8 vf_id;       /* relative VF index (used when is_vf) */
+               u8 tc;          /* traffic class */
+       } eth;
+};
+
+u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
+                   enum protocol_type proto, union ecore_qm_pq_params *params);
+
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+                                       const u8 *fw_data);
+
+void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
+                        enum ecore_hw_err_type err_type);
+
+#endif /* __ECORE_HW_H__ */
diff --git a/drivers/net/qede/ecore/ecore_hw_defs.h 
b/drivers/net/qede/ecore/ecore_hw_defs.h
new file mode 100644
index 0000000..0874908
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_hw_defs.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _ECORE_IGU_DEF_H_
+#define _ECORE_IGU_DEF_H_
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN       (0x1<<0)     /* function enable        */
+#define IGU_PF_CONF_MSI_MSIX_EN   (0x1<<1)     /* MSI/MSIX enable        */
+#define IGU_PF_CONF_INT_LINE_EN   (0x1<<2)     /* INT enable             */
+#define IGU_PF_CONF_ATTN_BIT_EN   (0x1<<3)     /* attention enable       */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4)     /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE     (0x1<<5)     /* simd all ones mode     */
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN        (0x1<<0)    /* function enable        */
+#define IGU_VF_CONF_MSI_MSIX_EN    (0x1<<1)    /* MSI/MSIX enable        */
+#define IGU_VF_CONF_SINGLE_ISR_EN  (0x1<<4)    /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK    (0xF)       /* Parent PF              */
+#define IGU_VF_CONF_PARENT_SHIFT   5   /* Parent PF              */
+
+/* Igu control commands
+ */
+enum igu_ctrl_cmd {
+       IGU_CTRL_CMD_TYPE_RD,   /* read access through the IGU command reg */
+       IGU_CTRL_CMD_TYPE_WR,   /* write access through the IGU command reg */
+       MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+       u32 ctrl_data;  /* packed command word; field layout below */
+#define IGU_CTRL_REG_FID_MASK          0xFFFF  /* Opaque_FID     */
+#define IGU_CTRL_REG_FID_SHIFT         0
+#define IGU_CTRL_REG_PXP_ADDR_MASK     0xFFF   /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT    16
+#define IGU_CTRL_REG_RESERVED_MASK     0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT    28
+#define IGU_CTRL_REG_TYPE_MASK         0x1     /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT                31
+};
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_init_fw_funcs.c 
b/drivers/net/qede/ecore/ecore_init_fw_funcs.c
new file mode 100644
index 0000000..7db5071
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_init_fw_funcs.c
@@ -0,0 +1,1275 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore_hw.h"
+#include "ecore_init_ops.h"
+#include "reg_addr.h"
+#include "ecore_rt_defs.h"
+#include "ecore_hsi_common.h"
+#include "ecore_hsi_tools.h"
+#include "ecore_init_fw_funcs.h"
+
+/* @DPDK CmInterfaceEnum */
+/* Secondary (SEC) / primary (PRI) interface pairs of the CM blocks.
+ * NOTE(review): block abbreviations (MCM/UCM/TCM/YCM/XCM) inferred
+ * from the HSI naming convention; confirm against HW documentation.
+ */
+enum cm_interface_enum {
+       MCM_SEC,
+       MCM_PRI,
+       UCM_SEC,
+       UCM_PRI,
+       TCM_SEC,
+       TCM_PRI,
+       YCM_SEC,
+       YCM_PRI,
+       XCM_SEC,
+       XCM_PRI,
+       NUM_OF_CM_INTERFACES
+};
+/* general constants */
+#define QM_PQ_MEM_4KB(pq_size) \
+(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) \
+(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID                       0xffff
+/* feature enable */
+#define QM_BYPASS_EN                           1
+#define QM_BYTE_CRD_EN                         1
+/* other PQ constants */
+#define QM_OTHER_PQS_PER_PF                    4
+/* WFQ constants */
+#define QM_WFQ_UPPER_BOUND                     62500000
+#define QM_WFQ_VP_PQ_VOQ_SHIFT         0
+#define QM_WFQ_VP_PQ_PF_SHIFT          5
+#define QM_WFQ_INC_VAL(weight)         ((weight) * 0x9000)
+#define QM_WFQ_MAX_INC_VAL                     43750000
+/* RL constants */
+#define QM_RL_UPPER_BOUND                      62500000
+#define QM_RL_PERIOD                           5
+#define QM_RL_PERIOD_CLK_25M           (25 * QM_RL_PERIOD)
+#define QM_RL_INC_VAL(rate) \
+OSAL_MAX_T(u32, (((rate ? rate : 1000000) * QM_RL_PERIOD) / 8), 1)
+#define QM_RL_MAX_INC_VAL                      43750000
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF          1
+#define QM_OPPOR_FW_STOP_DEF           0
+#define QM_OPPOR_PQ_EMPTY_DEF          1
+#define EAGLE_WORKAROUND_TC                    7
+/* Command Queue constants */
+#define PBF_CMDQ_PURE_LB_LINES                 150
+#define PBF_CMDQ_EAGLE_WORKAROUND_LINES                8 /* eagle workaround CmdQ */
+#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
+(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
+- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
+(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+/* BTB: blocks constants (block size = 256B) */
+#define BTB_JUMBO_PKT_BLOCKS 38        /* 256B blocks in 9700B packet */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS       /* headroom per-port */
+#define BTB_EAGLE_WORKAROUND_BLOCKS    4       /* eagle workaround blocks */
+#define BTB_PURE_LB_FACTOR             10
+#define BTB_PURE_LB_RATIO              7 /* factored (hence really 0.7) */
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH                  32
+#define QM_STOP_CMD_ADDR                               0x2
+#define QM_STOP_CMD_STRUCT_SIZE                        2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET  0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT   0
+#define QM_STOP_CMD_PAUSE_MASK_MASK            -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET            1
+#define QM_STOP_CMD_GROUP_ID_SHIFT             16
+#define QM_STOP_CMD_GROUP_ID_MASK              15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET             1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT              24
+#define QM_STOP_CMD_PQ_TYPE_MASK               1
+#define QM_STOP_CMD_MAX_POLL_COUNT             100
+#define QM_STOP_CMD_POLL_PERIOD_US             500
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd)        cmd##_STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+/* QM: VOQ macros */
+#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
+((port) * (max_phys_tcs_per_port) + (tc))
+#define LB_VOQ(port)                           (MAX_PHYS_VOQS + (port))
+#define VOQ(port, tc, max_phys_tcs_per_port) \
+((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
+/******************** INTERNAL IMPLEMENTATION *********************/
+/* Prepare PF RL enable/disable runtime init values */
+static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
+{
+       /* global PF rate-limiter enable flag */
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+       if (pf_rl_en) {
+               /* enable RLs for all VOQs */
+               STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
+                            (1 << MAX_NUM_VOQS) - 1);
+               /* write RL period */
+               STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               /* set credit threshold for QM bypass flow */
+               if (QM_BYPASS_EN)
+                       STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+                                    QM_RL_UPPER_BOUND);
+       }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
+{
+       /* global PF WFQ enable flag */
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+       /* set credit threshold for QM bypass flow */
+       if (pf_wfq_en && QM_BYPASS_EN)
+               STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+                            QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare VPORT RL enable/disable runtime init values */
+static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
+{
+       /* global VPORT (per-vport) rate-limiter enable flag */
+       STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+                    vport_rl_en ? 1 : 0);
+       if (vport_rl_en) {
+               /* write RL period (use timer 0 only) */
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+                            QM_RL_PERIOD_CLK_25M);
+               /* set credit threshold for QM bypass flow */
+               if (QM_BYPASS_EN)
+                       STORE_RT_REG(p_hwfn,
+                                    QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+                                    QM_RL_UPPER_BOUND);
+       }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+/* The original patch line was wrapped by the mail client, leaving the
+ * continuation without a '+' marker and breaking the diff; the
+ * signature is rejoined onto a single line here.
+ */
+static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
+{
+       /* global VPORT WFQ enable flag */
+       STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+                    vport_wfq_en ? 1 : 0);
+       /* set credit threshold for QM bypass flow */
+       if (vport_wfq_en && QM_BYPASS_EN)
+               STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+                            QM_WFQ_UPPER_BOUND);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+   the specified VOQ
+*/
+static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
+                                        u8 voq, u16 cmdq_lines)
+{
+       u32 qm_line_crd;
+       bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
+       /* cap the per-VOQ line count on BB A0 hardware
+        * NOTE(review): 1022 looks like an A0 limit - confirm vs errata
+        */
+       if (is_bb_a0)
+               cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
+       /* derive the line-credit value from the line count */
+       qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+       OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+                        (u32) cmdq_lines);
+       /* current and initial credit get the same value */
+       STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
+       STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
+                    qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
+                                    u8 max_ports_per_engine,
+                                    u8 max_phys_tcs_per_port,
+                                    struct init_qm_port_params
+                                    port_params[MAX_NUM_PORTS])
+{
+       u8 tc, voq, port_id;
+       bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
+       /* clear PBF lines for all VOQs */
+       for (voq = 0; voq < MAX_NUM_VOQS; voq++)
+               STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               if (port_params[port_id].active) {
+                       u16 phys_lines, phys_lines_per_tc;
+                       /* lines left for physical TCs after reserving the
+                        * pure-LB share
+                        */
+                       phys_lines =
+                           port_params[port_id].num_pbf_cmd_lines -
+                           PBF_CMDQ_PURE_LB_LINES;
+                       if (eagle_workaround)
+                               phys_lines -= PBF_CMDQ_EAGLE_WORKAROUND_LINES;
+                       /* find #lines per active physical TC */
+                       phys_lines_per_tc =
+                           phys_lines /
+                           port_params[port_id].num_active_phys_tcs;
+                       /* init registers per active TC */
+                       for (tc = 0;
+                            tc < port_params[port_id].num_active_phys_tcs;
+                            tc++) {
+                               voq =
+                                   PHYS_VOQ(port_id, tc,
+                                            max_phys_tcs_per_port);
+                               ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+                                                            phys_lines_per_tc);
+                       }
+                       /* init registers for pure LB TC */
+                       ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+                                                    PBF_CMDQ_PURE_LB_LINES);
+                       /* init registers for eagle workaround */
+                       if (eagle_workaround) {
+                               voq =
+                                   PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
+                                            max_phys_tcs_per_port);
+                               ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+                                            PBF_CMDQ_EAGLE_WORKAROUND_LINES);
+                       }
+               }
+       }
+}
+
+/*
+Prepare runtime init values to allocate guaranteed BTB blocks for the specified
+port. The guaranteed BTB space is divided between the TCs as follows (shared
+space Is currently not used):
+1. Parameters:
+    B - BTB blocks for this port
+    C - Number of physical TCs for this port
+2. Calculation:
+    a. 38 blocks (9700B jumbo frame) are allocated for global per port headroom
+    b. B = B - 38 (remainder after global headroom allocation)
+    c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+    d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+    e. B/C blocks are allocated for each physical TC.
+Assumptions:
+- MTU is up to 9700 bytes (38 blocks)
+- All TCs are considered symmetrical (same rate and packet size)
+- No optimization for lossy TC (all are considered lossless). Shared space is
+  not enabled and allocated for each TC.
+*/
+static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
+                                    u8 max_ports_per_engine,
+                                    u8 max_phys_tcs_per_port,
+                                    struct init_qm_port_params
+                                    port_params[MAX_NUM_PORTS])
+{
+       u8 tc, voq, port_id;
+       u32 usable_blocks, pure_lb_blocks, phys_blocks;
+       bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               if (port_params[port_id].active) {
+                       /* subtract headroom blocks */
+                       usable_blocks =
+                           port_params[port_id].num_btb_blocks -
+                           BTB_HEADROOM_BLOCKS;
+                       if (eagle_workaround)
+                               usable_blocks -= BTB_EAGLE_WORKAROUND_BLOCKS;
+                       /* pure LB share = B / (C + 0.7), computed in
+                        * fixed point via BTB_PURE_LB_FACTOR/RATIO,
+                        * floored at one jumbo packet (38 blocks)
+                        */
+                       pure_lb_blocks =
+                           (usable_blocks * BTB_PURE_LB_FACTOR) /
+                           (port_params[port_id].num_active_phys_tcs *
+                            BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
+                       pure_lb_blocks =
+                           OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
+                                      pure_lb_blocks / BTB_PURE_LB_FACTOR);
+                       /* remainder split evenly among the physical TCs */
+                       phys_blocks =
+                           (usable_blocks -
+                            pure_lb_blocks) /
+                           port_params[port_id].num_active_phys_tcs;
+                       /* init physical TCs */
+                       for (tc = 0;
+                            tc < port_params[port_id].num_active_phys_tcs;
+                            tc++) {
+                               voq =
+                                   PHYS_VOQ(port_id, tc,
+                                            max_phys_tcs_per_port);
+                               STORE_RT_REG(p_hwfn,
+                                            PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+                                            phys_blocks);
+                       }
+                       /* init pure LB TC */
+                       STORE_RT_REG(p_hwfn,
+                                    PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ
+                                                                 (port_id)),
+                                    pure_lb_blocks);
+                       /* init eagle workaround */
+                       if (eagle_workaround) {
+                               voq =
+                                   PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
+                                            max_phys_tcs_per_port);
+                               STORE_RT_REG(p_hwfn,
+                                            PBF_BTB_GUARANTEED_RT_OFFSET(voq),
+                                            BTB_EAGLE_WORKAROUND_BLOCKS);
+                       }
+               }
+       }
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   u8 port_id,
+                                   u8 pf_id,
+                                   u8 max_phys_tcs_per_port,
+                                   bool is_first_pf,
+                                   u32 num_pf_cids,
+                                   u32 num_vf_cids,
+                                   u16 start_pq,
+                                   u16 num_pf_pqs,
+                                   u16 num_vf_pqs,
+                                   u8 start_vport,
+                                   u32 base_mem_addr_4kb,
+                                   struct init_qm_pq_params *pq_params,
+                                   struct init_qm_vport_params *vport_params)
+{
+       u16 i, pq_id, pq_group;
+       u16 num_pqs = num_pf_pqs + num_vf_pqs;
+       u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
+       u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
+       bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
+       /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+       u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+       u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
+       u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+       u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
+       u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
+       u32 mem_addr_4kb = base_mem_addr_4kb;
+       /* set mapping from PQ group to PF */
+       for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+               STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+                            (u32) (pf_id));
+       /* set PQ sizes */
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+                    QM_PQ_SIZE_256B(num_pf_cids));
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+                    QM_PQ_SIZE_256B(num_vf_cids));
+       /* go over all Tx PQs */
+       for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
+               struct qm_rf_pq_map tx_pq_map;
+               u8 voq =
+                   VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+               bool is_vf_pq = (i >= num_pf_pqs);
+               /* update first Tx PQ of VPORT/TC */
+               u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
+               u16 first_tx_pq_id =
+                   vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
+                                                               tc_id];
+               if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+                       /* create new VP PQ */
+                       vport_params[vport_id_in_pf].
+                           first_tx_pq_id[pq_params[i].tc_id] = pq_id;
+                       first_tx_pq_id = pq_id;
+                       /* map VP PQ to VOQ and PF */
+                       STORE_RT_REG(p_hwfn,
+                                    QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
+                                    (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
+                                                       QM_WFQ_VP_PQ_PF_SHIFT));
+               }
+               /* fill PQ map entry */
+               OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
+                         is_vf_pq ? 1 : 0);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
+                         is_vf_pq ? pq_params[i].vport_id : 0);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
+               SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
+                         pq_params[i].wrr_group);
+               /* write PQ map entry to CAM */
+               STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
+                            *((u32 *) &tx_pq_map));
+               /* set base address */
+               STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+                            mem_addr_4kb);
+               /* check if VF PQ */
+               if (is_vf_pq) {
+                       tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
+                           (1 << (pq_id % tx_pq_vf_mask_width));
+                       mem_addr_4kb += vport_pq_mem_4kb;
+               } else
+                       mem_addr_4kb += pq_mem_4kb;
+       }
+       /* store Tx PQ VF mask to size select register */
+       for (i = 0; i < num_tx_pq_vf_masks; i++) {
+               if (tx_pq_vf_mask[i]) {
+                       if (is_bb_a0) {
+                               u32 curr_mask =
+                                   is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
+                                                      QM_REG_MAXPQSIZETXSEL_0
+                                                              + i * 4);
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+                                            i, curr_mask | tx_pq_vf_mask[i]);
+                       } else
+                               STORE_RT_REG(p_hwfn,
+                                            QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+                                            i, tx_pq_vf_mask[i]);
+               }
+       }
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF.
+ * "Other" (non-Tx) PQs are sized for all PF connection IDs plus task IDs;
+ * their memory chunks are laid out consecutively from base_mem_addr_4kb
+ * in pq_mem_4kb units.
+ * NOTE(review): port_id is currently unused here.
+ */
+static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
+                                      u8 port_id,
+                                      u8 pf_id,
+                                      u32 num_pf_cids,
+                                      u32 num_tids, u32 base_mem_addr_4kb)
+{
+       u16 i, pq_id;
+       /* a single Other-PQ group per PF, indexed by pf_id */
+       u16 pq_group = pf_id;
+       /* each PQ is sized to hold both CID and TID contexts */
+       u32 pq_size = num_pf_cids + num_tids;
+       u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+       u32 mem_addr_4kb = base_mem_addr_4kb;
+       /* map PQ group to PF */
+       STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+                    (u32) (pf_id));
+       /* set PQ sizes */
+       STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+                    QM_PQ_SIZE_256B(pq_size));
+       /* set base address of each Other PQ; PQ IDs start at the PF's
+        * queue group and memory chunks are consecutive
+        */
+       for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+            i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+               STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+                            mem_addr_4kb);
+               mem_addr_4kb += pq_mem_4kb;
+       }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Resets the WFQ credit of every VOQ used by the PF's Tx PQs and programs
+ * the PF's WFQ upper bound and weight increment.
+ * Returns 0 on success, -1 if pf_wfq maps to an invalid increment value.
+ */
+static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
+                               u8 port_id,
+                               u8 pf_id,
+                               u16 pf_wfq,
+                               u8 max_phys_tcs_per_port,
+                               u16 num_tx_pqs,
+                               struct init_qm_pq_params *pq_params)
+{
+       u16 i;
+       u32 inc_val;
+       /* credit registers of PFs beyond MAX_NUM_PFS_BB live in the MSB
+        * register block; within a block, registers are strided per VOQ
+        */
+       u32 crd_reg_offset =
+           (pf_id <
+            MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
+            QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
+       inc_val = QM_WFQ_INC_VAL(pf_wfq);
+       /* a zero or out-of-range increment is rejected */
+       if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+               return -1;
+       }
+       /* write the credit sign bit for the VOQ of every Tx PQ; OVERWRITE
+        * is used because several PQs may share a VOQ (same offset)
+        */
+       for (i = 0; i < num_tx_pqs; i++) {
+               u8 voq =
+                   VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+               OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
+                                QM_WFQ_CRD_REG_SIGN_BIT);
+       }
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
+                    QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
+       return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Returns 0 on success, -1 if pf_rl maps to an invalid increment value.
+ */
+static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
+{
+       u32 rl_inc = QM_RL_INC_VAL(pf_rl);
+
+       if (rl_inc > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+               return -1;
+       }
+
+       /* init credit register (sign bit set), then upper bound and the
+        * per-period increment value
+        */
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+                    QM_RL_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+                    QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+       STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, rl_inc);
+       return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the PF's VPORTs.
+ * VPORTs with a zero vport_wfq weight are left unconfigured.
+ * Returns 0 on success, -1 on an invalid WFQ weight.
+ */
+static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
+                               u8 num_vports,
+                               struct init_qm_vport_params *vport_params)
+{
+       u8 tc, i;
+       u32 inc_val;
+       /* go over all PF VPORTs */
+       for (i = 0; i < num_vports; i++) {
+               if (vport_params[i].vport_wfq) {
+                       inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+                       if (inc_val > QM_WFQ_MAX_INC_VAL) {
+                               DP_NOTICE(p_hwfn, true,
+                                         "Invalid VPORT WFQ weight config");
+                               return -1;
+                       }
+                       /* program credit (sign bit) and weight for each TC
+                        * that has a mapped first Tx PQ
+                        */
+                       for (tc = 0; tc < NUM_OF_TCS; tc++) {
+                               u16 vport_pq_id =
+                                   vport_params[i].first_tx_pq_id[tc];
+                               if (vport_pq_id != QM_INVALID_PQ_ID) {
+                                       STORE_RT_REG(p_hwfn,
+                                                    QM_REG_WFQVPCRD_RT_OFFSET +
+                                                    vport_pq_id,
+                                                    QM_WFQ_CRD_REG_SIGN_BIT);
+                                       STORE_RT_REG(p_hwfn,
+                                               QM_REG_WFQVPWEIGHT_RT_OFFSET
+                                                    + vport_pq_id, inc_val);
+                               }
+                       }
+               }
+       }
+       return 0;
+}
+
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * vport_params[i] corresponds to absolute VPORT ID (start_vport + i).
+ * Returns 0 on success, -1 if any VPORT rate limit is invalid.
+ */
+static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
+                                 u8 start_vport,
+                                 u8 num_vports,
+                                 struct init_qm_vport_params *vport_params)
+{
+       u8 i, vport_id;
+       /* go over all PF VPORTs */
+       for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
+               u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
+               if (inc_val > QM_RL_MAX_INC_VAL) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Invalid VPORT rate-limit configuration");
+                       return -1;
+               }
+               /* init credit register (sign bit set), then upper bound and
+                * the increment value
+                */
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
+                            QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn,
+                            QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
+                            QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+               STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
+                            inc_val);
+       }
+       return 0;
+}
+
+/* Poll the QM SDM command-ready register until it reads non-zero.
+ * Returns true when the ready signal was observed, false on timeout.
+ */
+static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt)
+{
+       u32 reg_val, i;
+       for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+            i++) {
+               OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
+               reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+       }
+       /* check if timeout while waiting for SDM command ready. Testing
+        * reg_val (not the loop counter) avoids misreporting a timeout when
+        * the ready signal arrives exactly on the final poll iteration,
+        * since the counter reaches its maximum in that case as well.
+        */
+       if (reg_val == 0) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+                          "Timeout waiting for QM SDM cmd ready signal\n");
+               return false;
+       }
+       return true;
+}
+
+/* Send a single command to the QM through the SDM mailbox.
+ * Waits for the SDM ready signal, programs the command address and 64-bit
+ * data, pulses the GO bit, then waits for ready again.
+ * Returns false if either wait times out, true on success.
+ */
+static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
+{
+       if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+               return false;
+       ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+       ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+       ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+       /* pulse the GO bit to trigger command execution */
+       ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+       ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+       return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+/******************** INTERFACE IMPLEMENTATION *********************/
+/* Compute the QM PQ memory size required by a PF, in 4KB units.
+ * The total covers three PQ categories: the PF's own Tx PQs, the Tx PQs
+ * of its VFs, and the PF's "other" (non-Tx) PQs.
+ */
+u32 ecore_qm_pf_mem_size(u8 pf_id,
+                        u32 num_pf_cids,
+                        u32 num_vf_cids,
+                        u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
+{
+       u32 mem_4kb;
+
+       /* Tx PQs owned directly by the PF */
+       mem_4kb = QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs;
+       /* Tx PQs owned by the PF's VFs */
+       mem_4kb += QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs;
+       /* the PF's non-Tx ("other") PQs */
+       mem_4kb += QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+       return mem_4kb;
+}
+
+/* Prepare QM runtime init values that are common to all PFs.
+ * The four *_en flags globally enable/disable the corresponding QM
+ * arbitration features; port_params is indexed by in-engine port ID.
+ * Returns 0 on success, -1 if the port TC configuration conflicts with
+ * the Eagle eng1 workaround.
+ */
+int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
+                           u8 max_ports_per_engine,
+                           u8 max_phys_tcs_per_port,
+                           bool pf_rl_en,
+                           bool pf_wfq_en,
+                           bool vport_rl_en,
+                           bool vport_wfq_en,
+                           struct init_qm_port_params
+                           port_params[MAX_NUM_PORTS])
+{
+       u8 port_id;
+       /* init AFullOprtnstcCrdMask: each enabled feature contributes its
+        * bit to the opportunistic credit mask
+        */
+       u32 mask =
+           (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+           (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+           (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) | (vport_wfq_en
+                                          <<
+                                          QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT)
+           | (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) | (vport_rl_en
+                                                          <<
+                                  QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT)
+           | (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+           (QM_OPPOR_PQ_EMPTY_DEF <<
+            QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+       STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+       /* check eagle workaround: reject active ports whose TC count exceeds
+        * EAGLE_WORKAROUND_TC while the eng1 workaround is enabled
+        */
+       for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+               if (port_params[port_id].active &&
+                   port_params[port_id].num_active_phys_tcs >
+                   EAGLE_WORKAROUND_TC
+                   && ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Can't config 8 TCs with Eagle"
+                                 " eng1 workaround");
+                       return -1;
+               }
+       }
+       /* enable/disable PF RL */
+       ecore_enable_pf_rl(p_hwfn, pf_rl_en);
+       /* enable/disable PF WFQ */
+       ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
+       /* enable/disable VPORT RL */
+       ecore_enable_vport_rl(p_hwfn, vport_rl_en);
+       /* enable/disable VPORT WFQ */
+       ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
+       /* init PBF CMDQ line credit */
+       ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
+                                max_phys_tcs_per_port, port_params);
+       /* init BTB blocks in PBF */
+       ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
+                                max_phys_tcs_per_port, port_params);
+       return 0;
+}
+
+/* Prepare QM runtime init values for the specified PF:
+ * clears per-VPORT first-Tx-PQ maps, maps the PF's Other and Tx PQs
+ * (Tx PQ memory is placed right after the Other PQ memory), then programs
+ * PF WFQ (only if pf_wfq is non-zero), PF RL, VPORT WFQ and VPORT RL.
+ * Returns 0 on success, -1 if any WFQ/RL configuration is invalid.
+ */
+int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt,
+                       u8 port_id,
+                       u8 pf_id,
+                       u8 max_phys_tcs_per_port,
+                       bool is_first_pf,
+                       u32 num_pf_cids,
+                       u32 num_vf_cids,
+                       u32 num_tids,
+                       u16 start_pq,
+                       u16 num_pf_pqs,
+                       u16 num_vf_pqs,
+                       u8 start_vport,
+                       u8 num_vports,
+                       u16 pf_wfq,
+                       u32 pf_rl,
+                       struct init_qm_pq_params *pq_params,
+                       struct init_qm_vport_params *vport_params)
+{
+       u8 tc, i;
+       u32 other_mem_size_4kb =
+           QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+       /* clear first Tx PQ ID array for each VPORT */
+       for (i = 0; i < num_vports; i++)
+               for (tc = 0; tc < NUM_OF_TCS; tc++)
+                       vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+       /* map Other PQs (if any); their memory starts at offset 0 */
+#if QM_OTHER_PQS_PER_PF > 0
+       ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
+                                  num_tids, 0);
+#endif
+       /* map Tx PQs; their memory starts right after the Other PQ memory */
+       ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
+                               max_phys_tcs_per_port, is_first_pf, num_pf_cids,
+                               num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
+                               start_vport, other_mem_size_4kb, pq_params,
+                               vport_params);
+       /* init PF WFQ */
+       if (pf_wfq)
+               if (ecore_pf_wfq_rt_init
+                   (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
+                    num_pf_pqs + num_vf_pqs, pq_params) != 0)
+                       return -1;
+       /* init PF RL */
+       if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
+               return -1;
+       /* set VPORT WFQ */
+       if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
+               return -1;
+       /* set VPORT RL */
+       if (ecore_vport_rl_rt_init
+           (p_hwfn, start_vport, num_vports, vport_params) != 0)
+               return -1;
+       return 0;
+}
+
+/* Configure the WFQ weight of a PF at runtime (post-init).
+ * Returns 0 on success, -1 if pf_wfq maps to an invalid increment value.
+ */
+int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
+                     struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
+{
+       u32 wfq_inc = QM_WFQ_INC_VAL(pf_wfq);
+
+       /* zero and out-of-range increments are both rejected */
+       if (wfq_inc == 0 || wfq_inc > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+               return -1;
+       }
+
+       ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, wfq_inc);
+       return 0;
+}
+
+/* Configure the rate limit of a PF at runtime (post-init).
+ * Returns 0 on success, -1 if pf_rl maps to an invalid increment value.
+ */
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
+{
+       u32 rl_inc = QM_RL_INC_VAL(pf_rl);
+
+       if (rl_inc > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+               return -1;
+       }
+
+       /* write the credit register (sign bit) before the new increment */
+       ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
+                QM_RL_CRD_REG_SIGN_BIT);
+       ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, rl_inc);
+       return 0;
+}
+
+/* Configure the WFQ weight of a VPORT at runtime (post-init).
+ * Only TCs with a valid first Tx PQ are programmed.
+ * Returns 0 on success, -1 if vport_wfq maps to an invalid increment.
+ */
+int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
+                        struct ecore_ptt *p_ptt,
+                        u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+{
+       u32 wfq_inc = QM_WFQ_INC_VAL(vport_wfq);
+       u8 tc;
+
+       /* zero and out-of-range increments are both rejected */
+       if (wfq_inc == 0 || wfq_inc > QM_WFQ_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true,
+                         "Invalid VPORT WFQ weight configuration");
+               return -1;
+       }
+
+       for (tc = 0; tc < NUM_OF_TCS; tc++) {
+               u16 pq_id = first_tx_pq_id[tc];
+
+               if (pq_id == QM_INVALID_PQ_ID)
+                       continue;
+               ecore_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + pq_id * 4,
+                        wfq_inc);
+       }
+       return 0;
+}
+
+/* Configure the rate limit of a VPORT at runtime (post-init).
+ * Returns 0 on success, -1 if vport_rl maps to an invalid increment.
+ */
+int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+{
+       u32 rl_inc = QM_RL_INC_VAL(vport_rl);
+
+       if (rl_inc > QM_RL_MAX_INC_VAL) {
+               DP_NOTICE(p_hwfn, true,
+                         "Invalid VPORT rate-limit configuration");
+               return -1;
+       }
+
+       /* write the credit register (sign bit) before the new increment */
+       ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
+                QM_RL_CRD_REG_SIGN_BIT);
+       ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, rl_inc);
+       return 0;
+}
+
+/* Issue QM stop/release commands for a range of PQs.
+ * is_release_cmd - false pauses the PQs (their bits are set in the mask);
+ *                  true releases them (an all-zero mask is sent).
+ * is_tx_pq       - true for Tx PQs, false for Other PQs.
+ * Returns false if any SDM command times out, true otherwise.
+ */
+bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           bool is_release_cmd,
+                           bool is_tx_pq, u16 start_pq, u16 num_pqs)
+{
+       u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+       u32 pq_mask = 0, last_pq, pq_id;
+       /* an empty range would otherwise underflow last_pq and make the
+        * loop below iterate over (almost) the entire u32 space
+        */
+       if (num_pqs == 0)
+               return true;
+       last_pq = start_pq + num_pqs - 1;
+       /* set command's PQ type */
+       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+       /* go over requested PQs */
+       for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+               /* set PQ bit in mask (stop command only) */
+               if (!is_release_cmd)
+                       pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
+               /* if last PQ or end of PQ mask, write command */
+               if ((pq_id == last_pq)
+                   || (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+                       (QM_STOP_PQ_MASK_WIDTH - 1))) {
+                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
+                                        pq_mask);
+                       QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
+                                        pq_id / QM_STOP_PQ_MASK_WIDTH);
+                       if (!ecore_send_qm_cmd
+                           (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
+                            cmd_arr[1]))
+                               return false;
+                       pq_mask = 0;
+               }
+       }
+       return true;
+}
+
+/* NIG: ETS configuration constants */
+#define NIG_TX_ETS_CLIENT_OFFSET       4
+#define NIG_LB_ETS_CLIENT_OFFSET       1
+#define NIG_ETS_MIN_WFQ_BYTES          1600
+/* NIG: ETS constants */
+#define NIG_ETS_UP_BOUND(weight, mtu) \
+(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+/* NIG: RL constants */
+#define NIG_RL_BASE_TYPE                       1       /* byte base type */
+#define NIG_RL_PERIOD                          1       /* in us */
+#define NIG_RL_PERIOD_CLK_25M          (25 * NIG_RL_PERIOD)
+#define NIG_RL_INC_VAL(rate)           (((rate) * NIG_RL_PERIOD) / 8)
+#define NIG_RL_MAX_VAL(inc_val, mtu) \
+(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+/* NIG: packet prioritry configuration constants */
+#define NIG_PRIORITY_MAP_TC_BITS 4
+/* Init NIG ETS arbitration according to the ETS requirements in req.
+ * is_lb selects the loopback (LB) arbiter register set; otherwise the Tx
+ * arbiter is configured. WFQ weights are normalized so that the smallest
+ * requested weight maps to NIG_ETS_MIN_WFQ_BYTES bytes.
+ */
+void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt,
+                       struct init_ets_req *req, bool is_lb)
+{
+       u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
+       u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
+       /* TC clients start at a per-arbiter offset within the client map */
+       u8 tc_client_offset =
+           is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
+       u32 min_weight = 0xffffffff;
+       u32 tc_weight_base_addr =
+           is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+           NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+       /* register stride derived from consecutive per-TC registers */
+       u32 tc_weight_addr_diff =
+           is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
+           NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
+           NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+       u32 tc_bound_base_addr =
+           is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+           NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+       u32 tc_bound_addr_diff =
+           is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
+           NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+           NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
+           NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+       /* first pass: build SP/WFQ TC bitmaps and find the minimal WFQ
+        * weight (used below as the normalization base)
+        */
+       for (tc = 0; tc < num_tc; tc++) {
+               struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+               /* update SP map */
+               if (tc_req->use_sp)
+                       sp_tc_map |= (1 << tc);
+               if (tc_req->use_wfq) {
+                       /* update WFQ map */
+                       wfq_tc_map |= (1 << tc);
+                       /* find minimal weight */
+                       if (tc_req->weight < min_weight)
+                               min_weight = tc_req->weight;
+               }
+       }
+       /* write SP map */
+       ecore_wr(p_hwfn, p_ptt,
+                is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
+                NIG_REG_TX_ARB_CLIENT_IS_STRICT,
+                (sp_tc_map << tc_client_offset));
+       /* write WFQ map */
+       ecore_wr(p_hwfn, p_ptt,
+                is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
+                NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+                (wfq_tc_map << tc_client_offset));
+       /* write WFQ weights */
+       for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
+               struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+               if (tc_req->use_wfq) {
+                       /* translate weight to bytes */
+                       u32 byte_weight =
+                           (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+                           min_weight;
+                       /* write WFQ weight */
+                       ecore_wr(p_hwfn, p_ptt,
+                                tc_weight_base_addr +
+                                tc_weight_addr_diff * tc_client_offset,
+                                byte_weight);
+                       /* write WFQ upper bound */
+                       ecore_wr(p_hwfn, p_ptt,
+                                tc_bound_base_addr +
+                                tc_bound_addr_diff * tc_client_offset,
+                                NIG_ETS_UP_BOUND(byte_weight, req->mtu));
+               }
+       }
+}
+
+/* Init NIG rate limiters for loopback traffic: a global MAC+LB RL, a
+ * global LB-only RL, and one RL per physical TC. Each limiter is first
+ * written disabled (base type only) and is then configured and enabled
+ * only if its requested rate is non-zero.
+ */
+void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
+                         struct ecore_ptt *p_ptt,
+                         struct init_nig_lb_rl_req *req)
+{
+       u8 tc;
+       u32 ctrl, inc_val, reg_offset;
+       /* disable global MAC+LB RL */
+       ctrl =
+           NIG_RL_BASE_TYPE <<
+           NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
+       /* configure and enable global MAC+LB RL */
+       if (req->lb_mac_rate) {
+               /* configure  */
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
+                        NIG_RL_PERIOD_CLK_25M);
+               inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
+                        inc_val);
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
+                        NIG_RL_MAX_VAL(inc_val, req->mtu));
+               /* enable */
+               ctrl |=
+                   1 <<
+                   NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
+       }
+       /* disable global LB-only RL */
+       ctrl =
+           NIG_RL_BASE_TYPE <<
+           NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
+       /* configure and enable global LB-only RL */
+       if (req->lb_rate) {
+               /* configure  */
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
+                        NIG_RL_PERIOD_CLK_25M);
+               inc_val = NIG_RL_INC_VAL(req->lb_rate);
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
+                        inc_val);
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
+                        NIG_RL_MAX_VAL(inc_val, req->mtu));
+               /* enable */
+               ctrl |=
+                   1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
+       }
+       /* per-TC RLs: registers are strided 4 bytes per TC */
+       for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
+            tc++, reg_offset += 4) {
+               /* disable TC RL */
+               ctrl =
+                   NIG_RL_BASE_TYPE <<
+               NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
+               ecore_wr(p_hwfn, p_ptt,
+                        NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
+               /* configure and enable TC RL */
+               if (req->tc_rate[tc]) {
+                       /* configure */
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
+                                reg_offset, NIG_RL_PERIOD_CLK_25M);
+                       inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
+                                reg_offset, inc_val);
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
+                                reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
+                       /* enable */
+                       ctrl |=
+                           1 <<
+               NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
+                       ecore_wr(p_hwfn, p_ptt,
+                                NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
+                                ctrl);
+               }
+       }
+}
+
+/* Init NIG priority <-> TC mapping: builds the packed priority->TC word
+ * (NIG_PRIORITY_MAP_TC_BITS bits per priority) and, per TC, the mask of
+ * priorities mapped to it. Priorities marked invalid in req are skipped.
+ */
+void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt,
+                              struct init_nig_pri_tc_map_req *req)
+{
+       u8 pri, tc;
+       u32 pri_tc_mask = 0;
+       u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
+       for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
+               if (req->pri[pri].valid) {
+                       pri_tc_mask |=
+                           (req->pri[pri].
+                            tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
+                       tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
+               }
+       }
+       /* write priority -> TC mask */
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
+       /* write TC -> priority mask; both the per-TC priority register and
+        * the Rx TC priority mask are programmed with the same value
+        */
+       for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
+                        tc_pri_mask[tc]);
+               ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
+                        tc_pri_mask[tc]);
+       }
+}
+
+/* PRS: ETS configuration constants */
+#define PRS_ETS_MIN_WFQ_BYTES                  1600
+#define PRS_ETS_UP_BOUND(weight, mtu) \
+(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+/* Init PRS ETS arbitration according to the ETS requirements in req.
+ * Mirrors the NIG ETS init: builds strict-priority and WFQ TC maps, then
+ * normalizes WFQ weights so the smallest requested weight maps to
+ * PRS_ETS_MIN_WFQ_BYTES bytes.
+ */
+void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct init_ets_req *req)
+{
+       u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
+       u32 min_weight = 0xffffffff;
+       /* register strides derived from consecutive per-TC registers */
+       u32 tc_weight_addr_diff =
+           PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
+       u32 tc_bound_addr_diff =
+           PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
+           PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+       /* first pass: build SP/WFQ TC bitmaps and find the minimal WFQ
+        * weight (used below as the normalization base)
+        */
+       for (tc = 0; tc < NUM_OF_TCS; tc++) {
+               struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+               /* update SP map */
+               if (tc_req->use_sp)
+                       sp_tc_map |= (1 << tc);
+               if (tc_req->use_wfq) {
+                       /* update WFQ map */
+                       wfq_tc_map |= (1 << tc);
+                       /* find minimal weight */
+                       if (tc_req->weight < min_weight)
+                               min_weight = tc_req->weight;
+               }
+       }
+       /* write SP map */
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
+       /* write WFQ map */
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
+                wfq_tc_map);
+       /* write WFQ weights */
+       for (tc = 0; tc < NUM_OF_TCS; tc++) {
+               struct init_ets_tc_req *tc_req = &req->tc_req[tc];
+               if (tc_req->use_wfq) {
+                       /* translate weight to bytes */
+                       u32 byte_weight =
+                           (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+                           min_weight;
+                       /* write WFQ weight */
+                       ecore_wr(p_hwfn, p_ptt,
+                                PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
+                                tc * tc_weight_addr_diff, byte_weight);
+                       /* write WFQ upper bound */
+                       ecore_wr(p_hwfn, p_ptt,
+                                PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
+                                tc * tc_bound_addr_diff,
+                                PRS_ETS_UP_BOUND(byte_weight, req->mtu));
+               }
+       }
+}
+
+/* BRB: RAM configuration constants */
+#define BRB_TOTAL_RAM_BLOCKS_BB        4800
+#define BRB_TOTAL_RAM_BLOCKS_K2        5632
+#define BRB_BLOCK_SIZE                 128     /* in bytes */
+#define BRB_MIN_BLOCKS_PER_TC  9
+#define BRB_HYST_BYTES                 10240
+#define BRB_HYST_BLOCKS                        (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
+/*
+temporary big RAM allocation - should be updated
+*/
+/* Splits the BRB RAM blocks evenly among the ports that have at least one
+ * active TC, then programs per-port total/shared sizes and per-TC guaranteed
+ * sizes plus full/pause XON/XOFF thresholds.
+ */
+void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
+{
+       u8 port, active_ports = 0;
+       u32 active_port_blocks, reg_offset = 0;
+       /* convert byte requirements to 128-byte BRB blocks, rounding up */
+       u32 tc_headroom_blocks =
+           (u32) DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
+       u32 min_pkt_size_blocks =
+           (u32) DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
+       /* total RAM size differs between BB and K2 chips */
+       u32 total_blocks =
+           ECORE_IS_K2(p_hwfn->
+                       p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
+           BRB_TOTAL_RAM_BLOCKS_BB;
+       /* find number of active ports */
+       for (port = 0; port < MAX_NUM_PORTS; port++)
+               if (req->num_active_tcs[port])
+                       active_ports++;
+       /* NOTE(review): if no port has an active TC, active_ports is 0 and
+        * this divides by zero - confirm callers guarantee >= 1 active port.
+        */
+       active_port_blocks = (u32) (total_blocks / active_ports);
+       for (port = 0; port < req->max_ports_per_engine; port++) {
+               /* calculate per-port sizes */
+               u32 tc_guaranteed_blocks =
+                   (u32) DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
+               u32 port_blocks =
+                   req->num_active_tcs[port] ? active_port_blocks : 0;
+               u32 port_guaranteed_blocks =
+                   req->num_active_tcs[port] * tc_guaranteed_blocks;
+               u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
+               u32 full_xoff_th =
+                   req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
+               u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
+               u32 pause_xoff_th = tc_headroom_blocks;
+               u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+               u8 tc;
+               /* init total size per port */
+               ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
+                        port_blocks);
+               /* init shared size per port */
+               ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
+                        port_shared_blocks);
+               /* reg_offset advances across ports as well as TCs, so the
+                * per-TC registers are laid out port-major in the register
+                * file.
+                */
+               for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
+                       /* clear init values for non-active TCs */
+                       if (tc == req->num_active_tcs[port]) {
+                               tc_guaranteed_blocks = 0;
+                               full_xoff_th = 0;
+                               full_xon_th = 0;
+                               pause_xoff_th = 0;
+                               pause_xon_th = 0;
+                       }
+                       /* init guaranteed size per TC */
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_TC_GUARANTIED_0 + reg_offset,
+                                tc_guaranteed_blocks);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
+                                BRB_HYST_BLOCKS);
+                       /* init LB and main pause/full thresholds per TC */
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
+                                reg_offset, full_xoff_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
+                                reg_offset, full_xon_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
+                                reg_offset, pause_xoff_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
+                                reg_offset, pause_xon_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
+                                reg_offset, full_xoff_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
+                                reg_offset, full_xon_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
+                                reg_offset, pause_xoff_th);
+                       ecore_wr(p_hwfn, p_ptt,
+                                BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
+                                reg_offset, pause_xon_th);
+               }
+       }
+}
+
+/*In MF should be called once per engine to set EtherType of OuterTag*/
+/* Stores the outer-tag EtherType into the runtime-init (RT) array for the
+ * PRS, NIG and PBF offsets; the values reach HW when the RT array is
+ * flushed by the init-ops phase.  p_ptt is unused here - no direct
+ * register write is performed.
+ */
+void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt, u32 eth_type)
+{
+       /* update PRS register */
+       STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+       /* update NIG register */
+       STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+       /* update PBF register */
+       STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+}
+
+/*In MF should be called once per port to set EtherType of OuterTag*/
+/* Stores the per-port outer-tag EtherType for the DORQ block into the
+ * runtime-init array; p_ptt is unused (no direct register write).
+ */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt, u32 eth_type)
+{
+       /* update DORQ register */
+       STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, eth_type);
+}
+
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
+#define PRS_ETH_TUNN_FIC_FORMAT        -188897008
+/* Programs the VXLAN destination UDP port into the three blocks that
+ * classify/build VXLAN tunnel headers (PRS, NIG, PBF).
+ */
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt, u16 dest_port)
+{
+       /* update PRS register */
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+       /* update NIG register */
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+       /* update PBF register */
+       ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+/* Enables/disables VXLAN tunnel recognition in PRS and NIG via a
+ * read-modify-write of the encapsulation-enable bit, and toggles EDPM
+ * VXLAN support in DORQ.
+ */
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt, bool vxlan_enable)
+{
+       u32 reg_val;
+       /* update PRS register */
+       reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                          PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
+                          vxlan_enable);
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+       /* set the tunnel FIC output format while any encapsulation type
+        * (not only VXLAN) is still enabled in PRS
+        */
+       if (reg_val) {
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                        PRS_ETH_TUNN_FIC_FORMAT);
+       }
+       /* update NIG register */
+       reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                                  NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
+                                  vxlan_enable);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+       /* update DORQ register */
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+                vxlan_enable ? 1 : 0);
+}
+
+/* Enables/disables Ethernet-over-GRE and IP-over-GRE recognition in PRS
+ * and NIG (read-modify-write of the two enable bits), and toggles the
+ * corresponding EDPM support flags in DORQ.
+ */
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+                         struct ecore_ptt *p_ptt,
+                         bool eth_gre_enable, bool ip_gre_enable)
+{
+       u32 reg_val;
+       /* update PRS register */
+       reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
+                  eth_gre_enable);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
+                  ip_gre_enable);
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+       /* set tunnel FIC output format while any encapsulation is enabled */
+       if (reg_val) {
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                        PRS_ETH_TUNN_FIC_FORMAT);
+       }
+       /* update NIG register */
+       reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
+                  eth_gre_enable);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
+                  ip_gre_enable);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+       /* update DORQ registers */
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+                eth_gre_enable ? 1 : 0);
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+                ip_gre_enable ? 1 : 0);
+}
+
+/* Programs the GENEVE (NGE) destination UDP port into PRS, NIG and PBF.
+ * Silently does nothing on BB_A0, where GENEVE is unsupported.
+ */
+void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt, u16 dest_port)
+{
+       /* geneve tunnel not supported in BB_A0 */
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+               return;
+       /* update PRS register */
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+       /* update NIG register */
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+       /* update PBF register */
+       ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+/* Enables/disables Ethernet-over-GENEVE and IP-over-GENEVE in PRS and NIG,
+ * programs the NGE "comp ver" registers in NIG/PBF/PRS, and toggles EDPM
+ * GENEVE support in DORQ.  No-op on BB_A0 (GENEVE unsupported); the DORQ
+ * part is additionally skipped on BB_B0 (no EDPM-with-GENEVE support).
+ */
+void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt,
+                            bool eth_geneve_enable, bool ip_geneve_enable)
+{
+       u32 reg_val;
+       /* geneve tunnel not supported in BB_A0 */
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+               return;
+       /* update PRS register */
+       reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
+                  eth_geneve_enable);
+       SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
+                  PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
+                  ip_geneve_enable);
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+       /* set tunnel FIC output format while any encapsulation is enabled */
+       if (reg_val) {
+               ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+                        PRS_ETH_TUNN_FIC_FORMAT);
+       }
+       /* update NIG register */
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+                eth_geneve_enable ? 1 : 0);
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
+                ip_geneve_enable ? 1 : 0);
+       /* comp ver */
+       reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+       ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+       ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+       ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+       /* EDPM with geneve tunnel not supported in BB_B0 */
+       if (ECORE_IS_BB_B0(p_hwfn->p_dev))
+               return;
+       /* update DORQ registers */
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+                eth_geneve_enable ? 1 : 0);
+       ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+                ip_geneve_enable ? 1 : 0);
+}
diff --git a/drivers/net/qede/ecore/ecore_init_fw_funcs.h b/drivers/net/qede/ecore/ecore_init_fw_funcs.h
new file mode 100644
index 0000000..5529a68
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_init_fw_funcs.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef _INIT_FW_FUNCS_H
+#define _INIT_FW_FUNCS_H
+/* forward declarations */
+struct init_qm_pq_params;
+/**
+ * @brief ecore_qm_pf_mem_size - prepare QM ILT sizes
+ *
+ * Returns the required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
+ *
+ * @param pf_id                        - physical function ID
+ * @param num_pf_cids  - number of connections used by this PF
+ * @param num_vf_cids  - number of connections used by VFs of this PF
+ * @param num_tids             - number of tasks used by this PF
+ * @param num_pf_pqs   - number of PQs used by this PF
+ * @param num_vf_pqs   - number of PQs used by VFs of this PF
+ *
+ * @return The required host memory size in 4KB units.
+ */
+u32 ecore_qm_pf_mem_size(u8 pf_id,
+                        u32 num_pf_cids,
+                        u32 num_vf_cids,
+                        u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
+/**
+ * @brief ecore_qm_common_rt_init -
+ * Prepare QM runtime init values for the engine phase
+ *
+ * @param p_hwfn
+ * @param max_ports_per_engine - max number of ports per engine in HW
+ * @param max_phys_tcs_per_port        - max number of physical TCs per port in HW
+ * @param pf_rl_en                             - enable per-PF rate limiters
+ * @param pf_wfq_en                            - enable per-PF WFQ
+ * @param vport_rl_en                  - enable per-VPORT rate limiters
+ * @param vport_wfq_en                 - enable per-VPORT WFQ
+ * @param port_params- array of size MAX_NUM_PORTS with parameters for each port
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
+                           u8 max_ports_per_engine,
+                           u8 max_phys_tcs_per_port,
+                           bool pf_rl_en,
+                           bool pf_wfq_en,
+                           bool vport_rl_en,
+                           bool vport_wfq_en,
+                           struct init_qm_port_params
+                           port_params[MAX_NUM_PORTS]);
+
+int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt,
+                       u8 port_id,
+                       u8 pf_id,
+                       u8 max_phys_tcs_per_port,
+                       bool is_first_pf,
+                       u32 num_pf_cids,
+                       u32 num_vf_cids,
+                       u32 num_tids,
+                       u16 start_pq,
+                       u16 num_pf_pqs,
+                       u16 num_vf_pqs,
+                       u8 start_vport,
+                       u8 num_vports,
+                       u16 pf_wfq,
+                       u32 pf_rl,
+                       struct init_qm_pq_params *pq_params,
+                       struct init_qm_vport_params *vport_params);
+/**
+ * @brief ecore_init_pf_wfq  Initializes the WFQ weight of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt                - ptt window used for writing the registers
+ * @param pf_id                - PF ID
+ * @param pf_wfq       - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
+                     struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
+/**
+ * @brief ecore_init_pf_rl  Initializes the rate limit of the specified PF
+ *
+ * @param p_hwfn
+ * @param p_ptt        - ptt window used for writing the registers
+ * @param pf_id        - PF ID
+ * @param pf_rl        - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
+                    struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl);
+/**
+ * @brief ecore_init_vport_wfq Initializes the WFQ weight of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt                        - ptt window used for writing the registers
+ * @param first_tx_pq_id- An array containing the first Tx PQ ID associated
+ *                        with the VPORT for each TC. This array is filled by
+ *                        ecore_qm_pf_rt_init
+ * @param vport_wfq            - WFQ weight. Must be non-zero.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
+                        struct ecore_ptt *p_ptt,
+                        u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
+/**
+ * @brief ecore_init_vport_rl  Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt                - ptt window used for writing the registers
+ * @param vport_id     - VPORT ID
+ * @param vport_rl     - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+/**
+ * @brief ecore_send_qm_stop_cmd  Sends a stop command to the QM
+ *
+ * @param p_hwfn
+ * @param p_ptt                 - ptt window used for writing the registers
+ * @param is_release_cmd - true for release, false for stop.
+ * @param is_tx_pq       - true for Tx PQs, false for Other PQs.
+ * @param start_pq       - first PQ ID to stop
+ * @param num_pqs        - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if timeout occurred while
+ * waiting for QM command done.
+ */
+bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           bool is_release_cmd,
+                           bool is_tx_pq, u16 start_pq, u16 num_pqs);
+/**
+ * @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param req  - the NIG ETS initialization requirements.
+ * @param is_lb        - if set, the loopback port arbiter is initialized, otherwise
+ *               the physical port arbiter is initialized. The pure-LB TC
+ *               requirements are ignored when is_lb is cleared.
+ */
+void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt,
+                       struct init_ets_req *req, bool is_lb);
+/**
+ * @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
+ *
+ * Based on global and per-TC rate requirements
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param req  - the NIG LB RLs initialization requirements.
+ */
+void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
+                         struct ecore_ptt *p_ptt,
+                         struct init_nig_lb_rl_req *req);
+/**
+ * @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
+ *
+ * Assumes valid arguments.
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param req  - required mapping from priorities to TCs.
+ */
+void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt,
+                              struct init_nig_pri_tc_map_req *req);
+/**
+ * @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param req  - the PRS ETS initialization requirements.
+ */
+void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct init_ets_req *req);
+/**
+ * @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
+ *
+ * Based on weight/priority requirements per-TC.
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param req  - the BRB RAM initialization requirements.
+ */
+void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct init_brb_ram_req *req);
+/**
+ * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf
+ * and llh ethType Regs to  input ethType
+ * should Be called once per engine if engine is in BD mode.
+ *
+ * @param p_ptt    - ptt window used for writing the registers.
+ * @param ethType - etherType to configure
+ */
+void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt, u32 eth_type);
+/**
+ * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs
+ * to input ethType
+ * should Be called once per port.
+ *
+ * @param p_ptt    - ptt window used for writing the registers.
+ * @param ethType - etherType to configure
+ */
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt, u32 eth_type);
+/**
+ * @brief ecore_set_vxlan_dest_port - init vxlan tunnel destination udp port
+ *
+ * @param p_ptt     - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
+void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt, u16 dest_port);
+/**
+ * @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt        - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
+void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt, bool vxlan_enable);
+/**
+ * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt          - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable  - IP GRE enable flag.
+ */
+void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
+                         struct ecore_ptt *p_ptt,
+                         bool eth_gre_enable, bool ip_gre_enable);
+/**
+ * @brief ecore_set_geneve_dest_port - init geneve tunnel destination udp port
+ *
+ * @param p_ptt     - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
+ */
+void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt, u16 dest_port);
+/**
+ * @brief ecore_set_geneve_enable - enable or disable GENEVE tunnel in HW
+ *
+ * @param p_ptt             - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable  - IP GENEVE enable flag.
+ */
+void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt,
+                            bool eth_geneve_enable, bool ip_geneve_enable);
+#endif
diff --git a/drivers/net/qede/ecore/ecore_init_ops.c b/drivers/net/qede/ecore/ecore_init_ops.c
new file mode 100644
index 0000000..1d1bc2c
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_init_ops.c
@@ -0,0 +1,610 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/* include the precompiled configuration values - only once */
+#include "bcm_osal.h"
+#include "ecore_hsi_common.h"
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_status.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_fw_funcs.h"
+
+#if 0
+#ifndef CONFIG_ECORE_BINARY_FW
+#ifdef CONFIG_ECORE_ZIPPED_FW
+#include "ecore_init_values_zipped.h"
+#else
+#include "ecore_init_values.h"
+#endif
+#endif
+#endif
+
+#include "ecore_iro_values.h"
+#include "ecore_sriov.h"
+#include "ecore_gtt_values.h"
+#include "reg_addr.h"
+#include "ecore_init_ops.h"
+
+#define ECORE_INIT_MAX_POLL_COUNT      100
+#define ECORE_INIT_POLL_PERIOD_US      500
+
+/* Points the device at the compiled-in IRO (internal RAM offsets) table. */
+void ecore_init_iro_array(struct ecore_dev *p_dev)
+{
+       p_dev->iro_arr = iro_arr;
+}
+
+/* Runtime configuration helpers */
+/* Runtime configuration helpers */
+
+/* Marks every runtime-init entry invalid so stale values from a previous
+ * init cycle are not written to HW.
+ */
+void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
+{
+       int i;
+
+       for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
+               p_hwfn->rt_data.b_valid[i] = false;
+}
+
+/* Records a single runtime-init value at rt_offset and marks it valid,
+ * to be flushed to HW later by ecore_init_rt().
+ */
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
+{
+       p_hwfn->rt_data.init_val[rt_offset] = val;
+       p_hwfn->rt_data.b_valid[rt_offset] = true;
+}
+
+/* Records an aggregate of runtime-init values starting at rt_offset and
+ * marks each entry valid.  'size' is in bytes and is assumed to be a
+ * multiple of sizeof(u32); any remainder is silently dropped.
+ */
+void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
+                            u32 rt_offset, u32 *p_val, osal_size_t size)
+{
+       osal_size_t i;
+
+       for (i = 0; i < size / sizeof(u32); i++) {
+               p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
+               p_hwfn->rt_data.b_valid[rt_offset + i] = true;
+
+       }
+}
+
+/* Flushes 'size' runtime-init entries starting at rt_offset into GRC
+ * address 'addr'.  Invalid (never-stored) entries are skipped.  When
+ * b_must_dmae is set, contiguous runs of valid entries are pushed with a
+ * single DMAE transaction each; otherwise plain register writes are used.
+ */
+static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 addr,
+                                         u16 rt_offset,
+                                         u16 size, bool b_must_dmae)
+{
+       u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+       bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u16 i, segment;
+
+       /* Since not all RT entries are initialized, go over the RT and
+        * for each segment of initialized values use DMA.
+        */
+       for (i = 0; i < size; i++) {
+               if (!p_valid[i])
+                       continue;
+
+               /* In case there isn't any wide-bus configuration here,
+                * simply write the data instead of using dmae.
+                */
+               if (!b_must_dmae) {
+                       ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+                       continue;
+               }
+
+               /* Start of a new segment */
+               for (segment = 1; i + segment < size; segment++)
+                       if (!p_valid[i + segment])
+                               break;
+
+               rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                        (osal_uintptr_t) (p_init_val + i),
+                                        addr + (i << 2), segment, 0);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+
+               /* Jump over the entire segment, including invalid entry
+                * (the loop's i++ steps past the invalid one that ended it)
+                */
+               i += segment;
+       }
+
+       return rc;
+}
+
+/* Allocates the runtime-init shadow arrays (validity flags + values).
+ * VFs have no runtime-init data, so this is a no-op for them.  On partial
+ * allocation failure the already-allocated array is released.
+ *
+ * @return ECORE_SUCCESS or ECORE_NOMEM.
+ */
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
+
+       if (IS_VF(p_hwfn->p_dev))
+               return ECORE_SUCCESS;
+
+       rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                      sizeof(bool) * RUNTIME_ARRAY_SIZE);
+       if (!rt_data->b_valid)
+               return ECORE_NOMEM;
+
+       rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                       sizeof(u32) * RUNTIME_ARRAY_SIZE);
+       if (!rt_data->init_val) {
+               OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
+               return ECORE_NOMEM;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Releases the runtime-init shadow arrays allocated by ecore_init_alloc().
+ * NOTE(review): pointers are not NULLed after free - confirm this is never
+ * called twice on the same hwfn.
+ */
+void ecore_init_free(struct ecore_hwfn *p_hwfn)
+{
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
+}
+
+/* Copies 'size' dwords from p_buf (at dmae_data_offset) into GRC address
+ * 'addr'.  Small transfers (< 16 dwords) that don't require wide-bus
+ * access, and any transfer when DMAE is forbidden, fall back to register
+ * writes; emulation/FPGA (non-ASIC) builds also force the slow path for
+ * short transfers.
+ */
+static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt,
+                                                 u32 addr,
+                                                 u32 dmae_data_offset,
+                                                 u32 size, const u32 *p_buf,
+                                                 bool b_must_dmae,
+                                                 bool b_can_dmae)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       /* Perform DMAE only for lengthy enough sections or for wide-bus */
+#ifndef ASIC_ONLY
+       if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
+           !b_can_dmae || (!b_must_dmae && (size < 16))) {
+#else
+       if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+#endif
+               const u32 *data = p_buf + dmae_data_offset;
+               u32 i;
+
+               for (i = 0; i < size; i++)
+                       ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+       } else {
+               rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                        (osal_uintptr_t) (p_buf +
+                                                          dmae_data_offset),
+                                        addr, size, 0);
+       }
+
+       return rc;
+}
+
+/* Fills 'fill_count' dwords at GRC address 'addr' using a single DMAE
+ * transaction that replicates the first source dword.
+ *
+ * NOTE(review): the 'fill' parameter is ignored - the source buffer is
+ * always zeroed, so this only implements fill == 0; confirm no caller
+ * passes a non-zero fill.  The static buffer also makes this
+ * non-reentrant across hwfns - confirm init flows are serialized.
+ */
+static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt,
+                                                u32 addr, u32 fill,
+                                                u32 fill_count)
+{
+       static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+
+       OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+       return ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                  (osal_uintptr_t) (&(zero_buffer[0])),
+                                  addr, fill_count,
+                                  ECORE_DMAE_FLAG_RW_REPL_SRC);
+}
+
+/* Slow-path fill: writes 'fill' into 'fill_count' consecutive dword
+ * registers starting at 'addr' using individual register writes.
+ */
+static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
+                           struct ecore_ptt *p_ptt,
+                           u32 addr, u32 fill, u32 fill_count)
+{
+       u32 i;
+
+       for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+               ecore_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+/* Executes an init-ops "write from array" command: reads the array header
+ * at the command's offset in the firmware data blob and dispatches on its
+ * type - zipped (decompress then write), pattern (repeat a short sequence),
+ * or standard (straight copy).
+ *
+ * NOTE(review): an unrecognized header type falls through the switch and
+ * returns ECORE_SUCCESS without writing anything - confirm intentional.
+ */
+static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt,
+                                                struct init_write_op *cmd,
+                                                bool b_must_dmae,
+                                                bool b_can_dmae)
+{
+#ifdef CONFIG_ECORE_ZIPPED_FW
+       u32 offset, output_len, input_len, max_size;
+#endif
+       u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       union init_array_hdr *hdr;
+       const u32 *array_data;
+       u32 size, addr, data;
+
+       array_data = p_dev->fw_data->arr_data;
+       data = OSAL_LE32_TO_CPU(cmd->data);
+       /* destination GRC address is stored in dword units */
+       addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+
+       hdr = (union init_array_hdr *)
+               (uintptr_t) (array_data + dmae_array_offset);
+       data = OSAL_LE32_TO_CPU(hdr->raw.data);
+       switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+       case INIT_ARR_ZIPPED:
+#ifdef CONFIG_ECORE_ZIPPED_FW
+               /* payload starts one dword past the header */
+               offset = dmae_array_offset + 1;
+               input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+               max_size = MAX_ZIPPED_SIZE * 4;
+               OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);
+
+               output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
+                               (u8 *) (uintptr_t) &array_data[offset],
+                               max_size,
+                               (u8 *) p_hwfn->unzip_buf);
+               if (output_len) {
+                       rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+                                                  output_len,
+                                                  p_hwfn->unzip_buf,
+                                                  b_must_dmae, b_can_dmae);
+               } else {
+                       DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
+                       rc = ECORE_INVAL;
+               }
+#else
+               DP_NOTICE(p_hwfn, true,
+                         "Using zipped firmware without config enabled\n");
+               rc = ECORE_INVAL;
+#endif
+               break;
+       case INIT_ARR_PATTERN:
+               {
+                       u32 repeats = GET_FIELD(data,
+                                       INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+                       u32 i;
+
+                       size = GET_FIELD(data,
+                                        INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+                       /* write the same pattern 'repeats' times at
+                        * consecutive destination ranges
+                        */
+                       for (i = 0; i < repeats; i++, addr += size << 2) {
+                               rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+                                                          dmae_array_offset +
+                                                          1, size, array_data,
+                                                          b_must_dmae,
+                                                          b_can_dmae);
+                               if (rc)
+                                       break;
+                       }
+                       break;
+               }
+       case INIT_ARR_STANDARD:
+               size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+               rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
+                                          dmae_array_offset + 1,
+                                          size, array_data,
+                                          b_must_dmae, b_can_dmae);
+               break;
+       }
+
+       return rc;
+}
+
+/* init_ops write command */
+static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             struct init_write_op *p_cmd,
+                                             bool b_can_dmae)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       bool b_must_dmae;
+       u32 addr, data;
+
+       data = OSAL_LE32_TO_CPU(p_cmd->data);
+       b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+       addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+
+       /* Sanitize */
+       if (b_must_dmae && !b_can_dmae) {
+               DP_NOTICE(p_hwfn, true,
+                         "Need to write to %08x for Wide-bus but DMAE isn't"
+                         " allowed\n",
+                         addr);
+               return ECORE_INVAL;
+       }
+
+       switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+       case INIT_SRC_INLINE:
+               data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
+               ecore_wr(p_hwfn, p_ptt, addr, data);
+               break;
+       case INIT_SRC_ZEROS:
+               data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
+               if (b_must_dmae || (b_can_dmae && (data >= 64)))
+                       rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+               else
+                       ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
+               break;
+       case INIT_SRC_ARRAY:
+               rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
+                                         b_must_dmae, b_can_dmae);
+               break;
+       case INIT_SRC_RUNTIME:
+               ecore_init_rt(p_hwfn, p_ptt, addr,
+                             OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
+                             OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
+                             b_must_dmae);
+               break;
+       }
+
+       return rc;
+}
+
+/* Poll predicate: the value read from HW must equal the expected value. */
+static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
+{
+       return val == expected_val;
+}
+
+/* Poll predicate: every bit set in expected_val must also be set in val. */
+static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
+{
+       return (expected_val & val) == expected_val;
+}
+
+/* Poll predicate: satisfied when any bit of (val | expected_val) is set.
+ * NOTE(review): as written this is true whenever expected_val != 0,
+ * regardless of val - kept as-is; confirm the intent against the init
+ * tool's INIT_POLL_OR semantics.
+ */
+static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
+{
+       return (val | expected_val) != 0;
+}
+
+/* init_ops read/poll commands: read a register once and, unless the op is
+ * a plain read (INIT_POLL_NONE), re-read it every poll period until the
+ * selected comparison against the expected value succeeds or
+ * ECORE_INIT_MAX_POLL_COUNT iterations elapse.
+ */
+static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt, struct init_read_op *cmd)
+{
+       bool(*comp_check) (u32 val, u32 expected_val);
+       u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
+       u32 data, addr, poll;
+       int i;
+
+       data = OSAL_LE32_TO_CPU(cmd->op_data);
+       /* Address field is in dwords; convert to a byte address */
+       addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+       poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
+
+#ifndef ASIC_ONLY
+       /* Emulation is far slower than silicon - stretch the poll period */
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               delay *= 100;
+#endif
+
+       val = ecore_rd(p_hwfn, p_ptt, addr);
+
+       if (poll == INIT_POLL_NONE)
+               return;
+
+       switch (poll) {
+       case INIT_POLL_EQ:
+               comp_check = comp_eq;
+               break;
+       case INIT_POLL_OR:
+               comp_check = comp_or;
+               break;
+       case INIT_POLL_AND:
+               comp_check = comp_and;
+               break;
+       default:
+               DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+                      cmd->op_data);
+               return;
+       }
+
+       data = OSAL_LE32_TO_CPU(cmd->expected_val);
+       for (i = 0;
+            i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
+               OSAL_UDELAY(delay);
+               val = ecore_rd(p_hwfn, p_ptt, addr);
+       }
+
+       /* Fixed typo in log message: "comparsion" -> "comparison" */
+       if (i == ECORE_INIT_MAX_POLL_COUNT)
+               DP_ERR(p_hwfn,
+                      "Timeout when polling reg: 0x%08x [ Waiting-for: %08x"
+                      " Got: %08x (comparison %08x)]\n",
+                      addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
+                      OSAL_LE32_TO_CPU(cmd->op_data));
+}
+
+/* init_ops callbacks entry point - no callback ops are currently emitted
+ * by the init tool, so reaching here only logs a notice.
+ */
+static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             struct init_callback_op *p_cmd)
+{
+       DP_NOTICE(p_hwfn, true,
+                 "Currently init values have no need of callbacks\n");
+}
+
+/* Recursively evaluate the mode expression stored in prefix form in
+ * fw_data->modes_tree_buf at *p_offset, advancing *p_offset past the
+ * consumed sub-expression.  Internal nodes are the NOT/OR/AND operators;
+ * any other byte is a leaf encoding a mode-bit number offset by
+ * MAX_INIT_MODE_OPS.  Returns 1 when the expression matches 'modes',
+ * 0 otherwise.
+ */
+static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
+                                   u16 *p_offset, int modes)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       const u8 *modes_tree_buf;
+       u8 arg1, arg2, tree_val;
+
+       modes_tree_buf = p_dev->fw_data->modes_tree_buf;
+       tree_val = modes_tree_buf[(*p_offset)++];
+       switch (tree_val) {
+       case INIT_MODE_OP_NOT:
+               return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
+       case INIT_MODE_OP_OR:
+               arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+               arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+               return arg1 | arg2;
+       case INIT_MODE_OP_AND:
+               arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+               arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
+               return arg1 & arg2;
+       default:
+               /* Leaf - test the encoded mode bit against 'modes' */
+               tree_val -= MAX_INIT_MODE_OPS;
+               return (modes & (1 << tree_val)) ? 1 : 0;
+       }
+}
+
+/* Handle an INIT_OP_IF_MODE command: when the mode expression matches,
+ * the following commands execute (return 0 - skip nothing); otherwise
+ * return the number of commands to skip past the conditional section.
+ */
+static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
+                              struct init_if_mode_op *p_cmd, int modes)
+{
+       u16 tree_offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);
+
+       if (ecore_init_cmd_mode_match(p_hwfn, &tree_offset, modes))
+               return 0;
+
+       return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
+                        INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
+/* Handle an INIT_OP_IF_PHASE command: when the command's phase matches
+ * the current phase (and its phase-id matches or is ANY_PHASE_ID), the
+ * following commands execute (return 0); otherwise return the number of
+ * commands to skip past the conditional section.
+ */
+static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
+                               struct init_if_phase_op *p_cmd,
+                               u32 phase, u32 phase_id)
+{
+       u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
+       bool phase_ok, id_ok;
+
+       phase_ok = GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase;
+       id_ok = GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+               GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id;
+
+       if (phase_ok && id_ok)
+               return 0;
+
+       return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
+                        INIT_IF_PHASE_OP_CMD_OFFSET);
+}
+
+/* Run the init sequence for the given phase/phase_id under the given mode
+ * bit-mask.  Iterates the fw_data init_ops command array; IF_MODE and
+ * IF_PHASE commands advance cmd_num to conditionally skip command ranges.
+ * Returns the first non-success status from a write command, or
+ * ECORE_SUCCESS.
+ */
+enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   int phase, int phase_id, int modes)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u32 cmd_num, num_init_ops;
+       union init_op *init_ops;
+       bool b_dmae = false;
+
+       num_init_ops = p_dev->fw_data->init_ops_size;
+       init_ops = p_dev->fw_data->init_ops;
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+       /* Scratch buffer for decompressing zipped init arrays */
+       p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+                                       MAX_ZIPPED_SIZE * 4);
+       if (!p_hwfn->unzip_buf) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
+               return ECORE_NOMEM;
+       }
+#endif
+
+       for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+               union init_op *cmd = &init_ops[cmd_num];
+               u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);
+
+               switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+               case INIT_OP_WRITE:
+                       rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+                                              b_dmae);
+                       break;
+
+               case INIT_OP_READ:
+                       ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+                       break;
+
+               case INIT_OP_IF_MODE:
+                       /* Returns #commands to skip when modes don't match */
+                       cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
+                                                      modes);
+                       break;
+               case INIT_OP_IF_PHASE:
+                       /* Returns #commands to skip when phase doesn't match;
+                        * also latches whether DMAE is allowed in this phase.
+                        */
+                       cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
+                                                       phase, phase_id);
+                       b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
+                       break;
+               case INIT_OP_DELAY:
+                       /* ecore_init_run is always invoked from
+                        * sleep-able context
+                        */
+                       /* NOTE(review): despite the comment above, a busy
+                        * delay is used here - confirm OSAL_UDELAY is the
+                        * intended primitive.
+                        */
+                       OSAL_UDELAY(cmd->delay.delay);
+                       break;
+
+               case INIT_OP_CALLBACK:
+                       ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+                       break;
+               }
+
+               if (rc)
+                       break;
+       }
+#ifdef CONFIG_ECORE_ZIPPED_FW
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
+#endif
+       return rc;
+}
+
+/* Initialize the GTT global windows; on FPGA/EMUL platforms first trigger
+ * the HW PTT/GTT init and poll for its completion.
+ */
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
+{
+       u32 gtt_base;
+       u32 i;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               /* This is done by MFW on ASIC; regardless, this should only
+                * be done once per chip [i.e., common]. Implementation is
+                * not too bright, but it should work on the simple FPGA/EMUL
+                * scenarios.
+                */
+               bool initialized = false; /* @DPDK */
+               int poll_cnt = 500;
+               u32 val;
+
+               /* initialize PTT/GTT (poll for completion) */
+               /* NOTE(review): 'initialized' is a local that is always
+                * false here, so this guard never skips the write -
+                * presumably a remnant of a 'static' in the original code;
+                * confirm whether once-per-chip semantics are still needed.
+                */
+               if (!initialized) {
+                       ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+                                PGLUE_B_REG_START_INIT_PTT_GTT, 1);
+                       initialized = true;
+               }
+
+               do {
+                       /* ptt might be overridden by HW until this is done */
+                       OSAL_UDELAY(10);
+                       ecore_ptt_invalidate(p_hwfn);
+                       val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+                                      PGLUE_B_REG_INIT_DONE_PTT_GTT);
+               } while ((val != 1) && --poll_cnt);
+
+               if (!poll_cnt)
+                       DP_ERR(p_hwfn,
+                              "PGLUE_B_REG_INIT_DONE didn't complete\n");
+       }
+#endif
+
+       /* Set the global windows */
+       gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+       for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
+               if (pxp_global_win[i])
+                       REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+                              pxp_global_win[i]);
+}
+
+/* Parse the firmware image header and set up the fw_data pointers
+ * (fw version info, init ops, init values, modes tree).  With
+ * CONFIG_ECORE_BINARY_FW the pointers index into the caller-provided
+ * binary image ('data', which must outlive fw_data); otherwise they
+ * reference statically linked arrays.
+ */
+enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
+                                       const u8 *data)
+{
+       struct ecore_fw_data *fw = p_dev->fw_data;
+
+#ifdef CONFIG_ECORE_BINARY_FW
+       struct bin_buffer_hdr *buf_hdr;
+       u32 offset, len;
+
+       if (!data) {
+               DP_NOTICE(p_dev, true, "Invalid fw data\n");
+               return ECORE_INVAL;
+       }
+
+       /* The image starts with an array of section headers indexed by
+        * the BIN_BUF_* constants; each holds an offset into 'data'.
+        */
+       buf_hdr = (struct bin_buffer_hdr *) (uintptr_t) data;
+
+       offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+       fw->fw_ver_info = (struct fw_ver_info *) ((uintptr_t) (data + offset));
+
+       offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+       fw->init_ops = (union init_op *) ((uintptr_t) (data + offset));
+
+       offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+       fw->arr_data = (u32 *) ((uintptr_t) (data + offset));
+
+       offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+       fw->modes_tree_buf = (u8 *) ((uintptr_t) (data + offset));
+       /* Number of init commands = section byte length / raw op size */
+       len = buf_hdr[BIN_BUF_INIT_CMD].length;
+       fw->init_ops_size = len / sizeof(struct init_raw_op);
+#else
+       fw->init_ops = (union init_op *)init_ops;
+       fw->arr_data = (u32 *) init_val;
+       fw->modes_tree_buf = (u8 *) modes_tree_buf;
+       fw->init_ops_size = init_ops_size;
+#endif
+
+       return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/ecore/ecore_init_ops.h b/drivers/net/qede/ecore/ecore_init_ops.h
new file mode 100644
index 0000000..8a6fce4
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_init_ops.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INIT_OPS__
+#define __ECORE_INIT_OPS__
+
+#include "ecore.h"
+
+/**
+ * @brief ecore_init_iro_array - init iro_arr.
+ *
+ *
+ * @param p_dev
+ */
+void ecore_init_iro_array(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_init_run - Run the init-sequence.
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param phase
+ * @param phase_id
+ * @param modes
+ * @return _ecore_status_t
+ */
+enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   int phase, int phase_id, int modes);
+
+/**
+ * @brief ecore_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
+ *
+ *
+ * @param p_hwfn
+ *
+ * @return _ecore_status_t
+ */
+enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_hwfn_deallocate
+ *
+ *
+ * @param p_hwfn
+ */
+void ecore_init_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_clear_rt_data - Clears the runtime init array.
+ *
+ *
+ * @param p_hwfn
+ */
+void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_init_store_rt_reg - Store a configuration value in the RT array.
+ *
+ *
+ * @param p_hwfn
+ * @param rt_offset
+ * @param val
+ */
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val)                                \
+       ecore_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val)                    \
+       ecore_init_store_rt_reg(hwfn, offset, val)
+
+/**
+* @brief ecore_init_store_rt_agg - Store an aggregate (array) of
+*        configuration values in the RT array.
+*
+*
+* @param p_hwfn
+* @param rt_offset
+* @param val
+* @param size
+*/
+
+void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
+                            u32 rt_offset, u32 *val, osal_size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val)                    \
+       ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
+/**
+ * @brief
+ *      Initialize GTT global windows and set admin window
+ *      related params of GTT/PTT to default values.
+ *
+ * @param p_hwfn
+ */
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+#endif /* __ECORE_INIT_OPS__ */
diff --git a/drivers/net/qede/ecore/ecore_int.c b/drivers/net/qede/ecore/ecore_int.c
new file mode 100644
index 0000000..09aac90
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_int.c
@@ -0,0 +1,2234 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_spq.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_init_ops.h"
+#include "ecore_rt_defs.h"
+#include "ecore_int.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_sriov.h"
+#include "ecore_vf.h"
+#include "ecore_hw_defs.h"
+#include "ecore_hsi_common.h"
+#include "ecore_mcp.h"
+#include "ecore_attn_values.h"
+
+/* Completion callback plus opaque cookie for one protocol index (PI) */
+struct ecore_pi_info {
+       ecore_int_comp_cb_t comp_cb;
+       void *cookie;           /* Will be sent to the compl cb function */
+};
+
+/* Slowpath status block together with its per-PI callback table */
+struct ecore_sb_sp_info {
+       struct ecore_sb_info sb_info;
+       /* per protocol index data */
+       struct ecore_pi_info pi_info_arr[PIS_PER_SB];
+};
+
+/* Distinguishes 'real' attention bits from parity bits */
+enum ecore_attention_type {
+       ECORE_ATTN_TYPE_ATTN,
+       ECORE_ATTN_TYPE_PARITY,
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+       ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+/* Description of a single bit (or multi-bit field) in an AEU invert
+ * register.  NOTE: the ATTENTION_LENGTH macro below was split across two
+ * lines by mail-client wrapping (the continuation line lost its '+' diff
+ * prefix); it has been rejoined here so the patch applies and compiles.
+ */
+struct aeu_invert_reg_bit {
+       char bit_name[30];
+
+#define ATTENTION_PARITY               (1 << 0)
+
+#define ATTENTION_LENGTH_MASK          (0x00000ff0)
+#define ATTENTION_LENGTH_SHIFT         (4)
+#define ATTENTION_LENGTH(flags)                (((flags) & ATTENTION_LENGTH_MASK) >> \
+                                        ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE               (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_PAR                  (ATTENTION_SINGLE | ATTENTION_PARITY)
+#define ATTENTION_PAR_INT              ((2 << ATTENTION_LENGTH_SHIFT) | \
+                                        ATTENTION_PARITY)
+
+/* Multiple bits start with this offset */
+#define ATTENTION_OFFSET_MASK          (0x000ff000)
+#define ATTENTION_OFFSET_SHIFT         (12)
+
+#define        ATTENTION_CLEAR_ENABLE          (1 << 28)
+#define        ATTENTION_FW_DUMP               (1 << 29)
+#define        ATTENTION_PANIC_DUMP            (1 << 30)
+       unsigned int flags;
+
+       /* Callback to call if attention will be triggered */
+       enum _ecore_status_t (*cb) (struct ecore_hwfn *p_hwfn);
+
+       enum block_id block_index;
+};
+
+/* One AEU invert register: descriptors for each of its 32 bits */
+struct aeu_invert_reg {
+       struct aeu_invert_reg_bit bits[32];
+};
+
+#define MAX_ATTN_GRPS          (8)
+#define NUM_ATTN_REGS          (9)
+
+/* MCP attention callback: log the MCP CPU state and mask all further MCP
+ * events so the attention does not keep firing.
+ */
+static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+       u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
+
+       DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
+       ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
+
+       return ECORE_SUCCESS;
+}
+
+/* Field layout of the PSWHST "VF disabled" / "incorrect access" detail
+ * registers.  Note: fixed a typo in the WRITE_SHIFT macro name
+ * (ATTNETION -> ATTENTION) at both its definition and its only use.
+ */
+#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK                (0x3c000)
+#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT       (14)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK                (0x03fc0)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT       (6)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK     (0x00020)
+#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT    (5)
+#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK    (0x0001e)
+#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT   (1)
+#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK     (0x1)
+#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT    (0)
+#define ECORE_PSWHST_ATTENTION_VF_DISABLED             (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS                (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK        (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT       (0)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK    (0x1e)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT   (1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK  (0x20)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK     (0x3fc0)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT    (6)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK     (0x3c000)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT    (14)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK   (0x3fc0000)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT  (18)
+/* PSWHST attention callback: decode and log "disabled VF access" and
+ * "incorrect access" events; always returns success.
+ */
+static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+       u32 tmp =
+           ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                    PSWHST_REG_VF_DISABLED_ERROR_VALID);
+
+       /* Disabled VF access */
+       if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
+               u32 addr, data;
+
+               addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                               PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
+               data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                               PSWHST_REG_VF_DISABLED_ERROR_DATA);
+               DP_INFO(p_hwfn->p_dev,
+                       "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
+                       " Write [0x%02x] Addr [0x%08x]\n",
+                       (u8) ((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
+                             >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
+                       (u8) ((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
+                             >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
+                       (u8) ((data &
+                              ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
+                             ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
+                       (u8) ((data &
+                              ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
+                             ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
+                       (u8) ((data &
+                              ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
+                             ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
+                       addr);
+       }
+
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                      PSWHST_REG_INCORRECT_ACCESS_VALID);
+       if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
+               u32 addr, data, length;
+
+               addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                               PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
+               data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                               PSWHST_REG_INCORRECT_ACCESS_DATA);
+               length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                 PSWHST_REG_INCORRECT_ACCESS_LENGTH);
+
+               DP_INFO(p_hwfn->p_dev,
+                       "Incorrect access to %08x of length %08x - PF [%02x]"
+                       " VF [%04x] [valid %02x] client [%02x] write [%02x]"
+                       " Byte-Enable [%04x] [%08x]\n",
+                       addr, length,
+                       (u8) ((data &
+                     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK)
+                     >>
+                     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
+                       (u8) ((data &
+                      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK)
+                             >>
+                     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
+                       (u8) ((data &
+                      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK)
+                             >>
+                     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
+                       (u8) ((data &
+                      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK)
+                             >>
+                     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
+                       (u8) ((data &
+                      ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK)
+                             >>
+                     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
+                       (u8) ((data &
+                     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK)
+                             >>
+                     ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
+                       data);
+       }
+
+       /* TODO - We know 'some' of these are legal due to virtualization,
+        * but is it true for all of them?
+        */
+       return ECORE_SUCCESS;
+}
+
+/* Field layout of the GRC timeout attention data registers */
+#define ECORE_GRC_ATTENTION_VALID_BIT          (1 << 0)
+#define ECORE_GRC_ATTENTION_ADDRESS_MASK       (0x7fffff << 0)
+#define ECORE_GRC_ATTENTION_RDWR_BIT           (1 << 23)
+#define ECORE_GRC_ATTENTION_MASTER_MASK                (0xf << 24)
+#define ECORE_GRC_ATTENTION_MASTER_SHIFT       (24)
+#define ECORE_GRC_ATTENTION_PF_MASK            (0xf)
+#define ECORE_GRC_ATTENTION_VF_MASK            (0xff << 4)
+#define ECORE_GRC_ATTENTION_VF_SHIFT           (4)
+#define ECORE_GRC_ATTENTION_PRIV_MASK          (0x3 << 14)
+#define ECORE_GRC_ATTENTION_PRIV_SHIFT         (14)
+#define ECORE_GRC_ATTENTION_PRIV_VF            (0)
+/* Map the GRC timeout 'master' field to a printable name for logging.
+ * Fixed typo in the default string: "Unkown" -> "Unknown".
+ */
+static const char *grc_timeout_attn_master_to_str(u8 master)
+{
+       switch (master) {
+       case 1:
+               return "PXP";
+       case 2:
+               return "MCP";
+       case 3:
+               return "MSDM";
+       case 4:
+               return "PSDM";
+       case 5:
+               return "YSDM";
+       case 6:
+               return "USDM";
+       case 7:
+               return "TSDM";
+       case 8:
+               return "XSDM";
+       case 9:
+               return "DBU";
+       case 10:
+               return "DMAE";
+       default:
+               return "Unknown";
+       }
+}
+
+/* GRC attention callback: if the timeout-attention validity bit is set,
+ * decode and log the timed-out access (address, direction, master,
+ * PF/VF), then clear the validity bit.
+ */
+static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+       u32 tmp, tmp2;
+
+       /* We've already cleared the timeout interrupt register, so we learn
+        * of interrupts via the validity register
+        */
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                      GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
+       if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
+               goto out;
+
+       /* Read the GRC timeout information */
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                      GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
+       tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                       GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
+
+       DP_INFO(p_hwfn->p_dev,
+               "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
+               " [PF: %02x %s %02x]\n",
+               tmp2, tmp,
+               (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
+               (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
+               grc_timeout_attn_master_to_str((tmp &
+                                       ECORE_GRC_ATTENTION_MASTER_MASK)
+                                              >>
+                                      ECORE_GRC_ATTENTION_MASTER_SHIFT),
+               (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
+               (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
+                 ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
+                ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
+               (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
+               ECORE_GRC_ATTENTION_VF_SHIFT);
+
+out:
+       /* Regardless of anything else, clean the validity bit */
+       ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+                GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
+       return ECORE_SUCCESS;
+}
+
+#define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
+#define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
+#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
+#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
+#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_BME     (1 << 22)
+#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
+#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
+#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
+#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
+/* PGLUE_B attention callback: mask the PCIe sub-attention, then decode
+ * and log any pending illegal write/read, ICPL, master-ZLR and VF-ILT
+ * errors, and finally clear the latched-error indication.
+ * Log typos fixed: "eror" -> "error", and the master-ZLR branch was
+ * mislabeled "ICPL" although it reads PGLUE_B_REG_MASTER_ZLR_ERR_*.
+ * (Function name 'pglub' is kept - it may be referenced elsewhere.)
+ */
+static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+       u32 tmp, reg_addr;
+
+       reg_addr =
+           attn_blocks[BLOCK_PGLUE_B].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
+           int_regs[0]->mask_addr;
+
+       /* Mask unnecessary attentions - at TBD move to MFW */
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
+       tmp |= (1 << 19);       /* Was PGL_PCIE_ATTN */
+       ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
+
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                      PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+       if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
+               u32 addr_lo, addr_hi, details;
+
+               addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
+               addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
+               details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_TX_ERR_WR_DETAILS);
+
+               DP_INFO(p_hwfn,
+                       "Illegal write by chip to [%08x:%08x] blocked."
+                       "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
+                       " Details2 %08x [Was_error %02x BME deassert %02x"
+                       " FID_enable deassert %02x]\n",
+                       addr_hi, addr_lo, details,
+                       (u8) ((details &
+                              ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+                             ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+                       (u8) ((details &
+                              ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+                             ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+                       (u8) ((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
+                             ? 1 : 0), tmp,
+                       (u8) ((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
+                             : 0),
+                       (u8) ((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
+                             0),
+                       (u8) ((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
+                             : 0));
+       }
+
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                      PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+       if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
+               u32 addr_lo, addr_hi, details;
+
+               addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
+               addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
+               details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_TX_ERR_RD_DETAILS);
+
+               DP_INFO(p_hwfn,
+                       "Illegal read by chip from [%08x:%08x] blocked."
+                       " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
+                       " Details2 %08x [Was_error %02x BME deassert %02x"
+                       " FID_enable deassert %02x]\n",
+                       addr_hi, addr_lo, details,
+                       (u8) ((details &
+                              ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+                             ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+                       (u8) ((details &
+                              ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+                             ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+                       (u8) ((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
+                             ? 1 : 0), tmp,
+                       (u8) ((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
+                             : 0),
+                       (u8) ((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
+                             0),
+                       (u8) ((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
+                             : 0));
+       }
+
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                      PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+       if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
+               DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
+
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                      PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+       if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
+               u32 addr_hi, addr_lo;
+
+               addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
+               addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
+
+               DP_INFO(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
+                       tmp, addr_hi, addr_lo);
+       }
+
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                      PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+       if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
+               u32 addr_hi, addr_lo, details;
+
+               addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
+               addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
+               details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  PGLUE_B_REG_VF_ILT_ERR_DETAILS);
+
+               DP_INFO(p_hwfn,
+                       "ILT error - Details %08x Details2 %08x"
+                       " [Address %08x:%08x]\n",
+                       details, tmp, addr_hi, addr_lo);
+       }
+
+       /* Clear the indications */
+       ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+                PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+
+       return ECORE_SUCCESS;
+}
+
+/* NIG attention callback - instead of servicing the NIG block's many
+ * interrupt registers, mask off the known-noisy pause-too-long sources
+ * in interrupt registers 3 and 5 and report success.
+ */
+static enum _ecore_status_t ecore_nig_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+       u32 tmp, reg_addr;
+
+       /* Mask unnecessary attentions - at TBD move to MFW */
+       reg_addr =
+           attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
+           int_regs[3]->mask_addr;
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
+       tmp |= (1 << 0);        /* Was 3_P0_TX_PAUSE_TOO_LONG_INT */
+       tmp |= NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT;
+       ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
+
+       /* Same treatment for the port-1 pause-too-long source */
+       reg_addr =
+           attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
+           int_regs[5]->mask_addr;
+       tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
+       tmp |= (1 << 0);        /* Was 5_P1_TX_PAUSE_TOO_LONG_INT */
+       ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
+
+       /* TODO - a bit risky to return success here; But alternative is to
+        * actually read the multitude of interrupt registers of the block.
+        */
+       return ECORE_SUCCESS;
+}
+
+/* Firmware assertion attention - report it, notify the upper layer of the
+ * fatal condition and return failure so the attention is treated as fatal.
+ */
+static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
+{
+       enum _ecore_status_t rc = ECORE_INVAL;
+
+       DP_NOTICE(p_hwfn, false, "FW assertion!\n");
+
+       /* Propagate the fatal firmware assertion to the OSAL layer */
+       ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
+
+       return rc;
+}
+
+/* General attention 35 - informational only; log it and carry on */
+static enum _ecore_status_t
+ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
+{
+       DP_INFO(p_hwfn, "General attention 35!\n");
+       return ECORE_SUCCESS;
+}
+
+#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define ECORE_DORQ_ATTENTION_SIZE_MASK  (0x7f)
+#define ECORE_DORQ_ATTENTION_SIZE_SHIFT         (16)
+
+/* DORQ attention callback - logs the details of a dropped doorbell.
+ * Always returns ECORE_INVAL since a doorbell drop is treated as fatal.
+ */
+static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+       u32 reason;
+
+       /* A non-zero drop reason means at least one doorbell was dropped */
+       reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
+           ECORE_DORQ_ATTENTION_REASON_MASK;
+       if (reason) {
+               u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                      DORQ_REG_DB_DROP_DETAILS);
+
+               /* The size field is bits [22:16] of the details register -
+                * shift down first, then mask (masking 0x7f before a 16-bit
+                * shift would always yield zero). HW counts in 4-byte units.
+                */
+               DP_INFO(p_hwfn->p_dev,
+                       "DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
+                       " Size [bytes] 0x%08x Reason: 0x%08x\n",
+                       ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                DORQ_REG_DB_DROP_DETAILS_ADDRESS),
+                       (u16) (details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
+                       ((details >> ECORE_DORQ_ATTENTION_SIZE_SHIFT) &
+                        ECORE_DORQ_ATTENTION_SIZE_MASK) * 4, reason);
+       }
+
+       return ECORE_INVAL;
+}
+
+/* TM attention callback. On emulation (B0) the pending-scan interrupts are
+ * expected side effects of clock ratios: log, mask them and succeed. Any
+ * other TM interrupt - or any TM attention on real silicon - is fatal.
+ */
+static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
+               u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                  TM_REG_INT_STS_1);
+
+               /* Anything beyond the two pending-scan bits is a real error */
+               if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
+                           TM_REG_INT_STS_1_PEND_CONN_SCAN))
+                       return ECORE_INVAL;
+
+               if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
+                          TM_REG_INT_STS_1_PEND_CONN_SCAN))
+                       DP_INFO(p_hwfn,
+                               "TM attention on emulation - most likely"
+                               " results of clock-ratios\n");
+               /* Mask the benign pending-scan sources so they stop firing */
+               val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
+               val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
+                   TM_REG_INT_MASK_1_PEND_TASK_SCAN;
+               ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
+
+               return ECORE_SUCCESS;
+       }
+#endif
+
+       return ECORE_INVAL;
+}
+
+/* Descriptor table for the AEU "after invert" registers. Each entry holds:
+ * a printable name (may contain %d for multi-bit sources), flags encoding
+ * the bit-field's length/offset and its single/parity/interrupt nature, an
+ * optional callback invoked on deassertion, and the HW block whose
+ * interrupt/parity registers should be dumped (MAX_BLOCK_ID = none).
+ */
+/* Notice aeu_invert_reg must be defined in the same order of bits as HW;  */
+static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
+       {
+        {                      /* After Invert 1 */
+         {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+          MAX_BLOCK_ID},
+         }
+        },
+
+       {
+        {                      /* After Invert 2 */
+         {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
+          BLOCK_PGLUE_B},
+         {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"SW timers #%d",
+          (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
+          OSAL_NULL, MAX_BLOCK_ID},
+         {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+          BLOCK_PGLCS},
+         }
+        },
+
+       {
+        {                      /* After Invert 3 */
+         {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+          MAX_BLOCK_ID},
+         }
+        },
+
+       {
+        {                      /* After Invert 4 */
+         {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
+          ecore_fw_assertion, MAX_BLOCK_ID},
+         {"General Attention %d",
+          (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
+          OSAL_NULL, MAX_BLOCK_ID},
+         {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
+          ecore_general_attention_35, MAX_BLOCK_ID},
+         {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+          BLOCK_CNIG},
+         {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
+         {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+         {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+         {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+          MAX_BLOCK_ID},
+         {"NIG", ATTENTION_PAR_INT, ecore_nig_attn_cb, BLOCK_NIG},
+         {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
+         {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
+         {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
+         {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
+         }
+        },
+
+       {
+        {                      /* After Invert 5 */
+         {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
+         {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
+         {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
+         {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
+         {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
+         {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
+         {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
+         {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
+         {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
+         {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
+         {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
+         {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
+         {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
+         {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
+         {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
+         {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
+         }
+        },
+
+       {
+        {                      /* After Invert 6 */
+         {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
+         {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
+         {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
+         {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
+         {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
+         {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
+         {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
+         {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
+         {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
+         {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
+         {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
+         {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
+         {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
+         {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
+         {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
+         {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
+         }
+        },
+
+       {
+        {                      /* After Invert 7 */
+         {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
+         {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
+         {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
+         {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
+         {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
+         {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
+         {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
+         {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
+         {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
+         {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
+         {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
+         {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
+         {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
+         {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
+         {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
+         {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+         {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
+         }
+        },
+
+       {
+        {                      /* After Invert 8 */
+         {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
+         {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
+         {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
+         {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
+         {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
+         {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
+         {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
+         {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
+         {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
+         {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
+         {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+         {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+         {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+         {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+         {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+         {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+         {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
+         {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
+         {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
+         {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+          MAX_BLOCK_ID},
+         }
+        },
+
+       {
+        {                      /* After Invert 9 */
+         {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+         {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
+          MAX_BLOCK_ID},
+         {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+         {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
+         {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
+          MAX_BLOCK_ID},
+         }
+        },
+
+};
+
+/* All attention state bits carried in the attention status block */
+#define ATTN_STATE_BITS                (0xfff)
+/* Subset of attention bits that may be masked via IGU_REG_ATTENTION_ENABLE */
+#define ATTN_BITS_MASKABLE     (0x3ff)
+/* Per-hwfn software context for tracking the attention status block */
+struct ecore_sb_attn_info {
+       /* Virtual & Physical address of the SB */
+       struct atten_status_block *sb_attn;
+       dma_addr_t sb_phys;
+
+       /* Last seen running index */
+       u16 index;
+
+       /* A mask of the AEU bits resulting in a parity error */
+       u32 parity_mask[NUM_ATTN_REGS];
+
+       /* A pointer to the attention description structure */
+       struct aeu_invert_reg *p_aeu_desc;
+
+       /* Previously asserted attentions, which are still unasserted */
+       u16 known_attn;
+
+       /* Cleanup address for the link's general hw attention */
+       u32 mfw_attn_addr;
+};
+
+/* Sample the attention SB running index; returns ECORE_SB_ATT_IDX when it
+ * advanced since the last call (i.e. a new attention is pending), else 0.
+ */
+static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
+                                struct ecore_sb_attn_info *p_sb_desc)
+{
+       u16 hw_index;
+       u16 result = 0;
+
+       /* Ensure the HW update of the SB is observed before sampling */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+
+       hw_index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
+       if (hw_index != p_sb_desc->index) {
+               p_sb_desc->index = hw_index;
+               result = ECORE_SB_ATT_IDX;
+       }
+
+       OSAL_MMIOWB(p_hwfn->p_dev);
+
+       return result;
+}
+
+/**
+ * @brief ecore_int_assertion - handles asserted attention bits
+ *
+ * Masks the newly asserted (maskable) sources in the IGU, records them in
+ * the software known-attention state, services MCP events and acks the
+ * assertion back to the IGU.
+ *
+ * @param p_hwfn
+ * @param asserted_bits newly asserted bits
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
+                                               u16 asserted_bits)
+{
+       struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+       u32 igu_mask;
+
+       /* Mask the source of the attention in the IGU */
+       igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                           IGU_REG_ATTENTION_ENABLE);
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+                  igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+       igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+       ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+       /* Remember these bits until their matching deassertion arrives */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                  "inner known ATTN state: 0x%04x --> 0x%04x\n",
+                  sb_attn_sw->known_attn,
+                  sb_attn_sw->known_attn | asserted_bits);
+       sb_attn_sw->known_attn |= asserted_bits;
+
+       /* Handle MCP events (bit 8 is the MFW indication) */
+       if (asserted_bits & 0x100) {
+               ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+               /* Clean the MCP attention */
+               ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+                        sb_attn_sw->mfw_attn_addr, 0);
+       }
+
+       /* Ack the assertion to the IGU via the attention-bit-set command */
+       /* FIXME - this will change once we'll have GOOD gtt definitions */
+       DIRECT_REG_WR(p_hwfn,
+                     (u8 OSAL_IOMEM *) p_hwfn->regview +
+                     GTT_BAR0_MAP_REG_IGU_CMD +
+                     ((IGU_CMD_ATTN_BIT_SET_UPPER -
+                       IGU_CMD_INT_ACK_BASE) << 3), (u32) asserted_bits);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
+                  asserted_bits);
+
+       return ECORE_SUCCESS;
+}
+
+/* Logs every set bit of an interrupt/parity status register. When built
+ * with ATTN_DESC the per-bit textual description is printed as well;
+ * masked bits are annotated with " [MASKED]".
+ */
+static void ecore_int_deassertion_print_bit(struct ecore_hwfn *p_hwfn,
+                                           struct attn_hw_reg *p_reg_desc,
+                                           struct attn_hw_block *p_block,
+                                           enum ecore_attention_type type,
+                                           u32 val, u32 mask)
+{
+       int j;
+#ifdef ATTN_DESC
+       const char **description;
+
+       /* Pick the interrupt or parity description table of the block */
+       if (type == ECORE_ATTN_TYPE_ATTN)
+               description = p_block->int_desc;
+       else
+               description = p_block->prty_desc;
+#endif
+
+       for (j = 0; j < p_reg_desc->num_of_bits; j++) {
+               if (val & (1 << j)) {
+#ifdef ATTN_DESC
+                       DP_NOTICE(p_hwfn, false,
+                                 "%s (%s): %s [reg %d [0x%08x], bit %d]%s\n",
+                                 p_block->name,
+                                 type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
+                                 "Parity",
+                                 description[p_reg_desc->bit_attn_idx[j]],
+                                 p_reg_desc->reg_idx,
+                                 p_reg_desc->sts_addr, j,
+                                 (mask & (1 << j)) ? " [MASKED]" : "");
+#else
+                       DP_NOTICE(p_hwfn->p_dev, false,
+                                 "%s (%s): [reg %d [0x%08x], bit %d]%s\n",
+                                 p_block->name,
+                                 type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
+                                 "Parity",
+                                 p_reg_desc->reg_idx,
+                                 p_reg_desc->sts_addr, j,
+                                 (mask & (1 << j)) ? " [MASKED]" : "");
+#endif
+               }
+       }
+}
+
+/**
+ * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
+ * cause of the attention
+ *
+ * Invokes the AEU bit's callback (if any), dumps the owning HW block's
+ * interrupt registers, notifies on fatal attentions and optionally
+ * disables the source in the AEU enable register.
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the attention
+ * @param aeu_en_reg - register offset of the AEU enable reg. which configured
+ *  this bit to this group.
+ * @param bitmask - the AEU bits (within aeu_en_reg) that fired
+ *
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t
+ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
+                             struct aeu_invert_reg_bit *p_aeu,
+                             u32 aeu_en_reg, u32 bitmask)
+{
+       enum _ecore_status_t rc = ECORE_INVAL;
+       u32 val, mask;
+
+#ifndef REMOVE_DBG
+       u32 interrupts[20];     /* TODO- change into HSI define once supplied */
+
+       OSAL_MEMSET(interrupts, 0, sizeof(u32) * 20);   /* FIXME real size) */
+#endif
+
+       DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
+               p_aeu->bit_name, bitmask);
+
+       /* Call callback before clearing the interrupt status */
+       if (p_aeu->cb) {
+               DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
+                       p_aeu->bit_name);
+               rc = p_aeu->cb(p_hwfn);
+       }
+
+       /* Handle HW block interrupt registers */
+       if (p_aeu->block_index != MAX_BLOCK_ID) {
+               u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev);
+               struct attn_hw_block *p_block;
+               int i;
+
+               p_block = &attn_blocks[p_aeu->block_index];
+
+               /* Handle each interrupt register */
+               for (i = 0;
+                    i < p_block->chip_regs[chip_type].num_of_int_regs; i++) {
+                       struct attn_hw_reg *p_reg_desc;
+                       u32 sts_addr;
+
+                       p_reg_desc = p_block->chip_regs[chip_type].int_regs[i];
+
+                       /* In case of fatal attention, don't clear the status
+                        * so it would appear in idle check.
+                        */
+                       if (rc == ECORE_SUCCESS)
+                               sts_addr = p_reg_desc->sts_clr_addr;
+                       else
+                               sts_addr = p_reg_desc->sts_addr;
+
+                       val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
+                       mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                       p_reg_desc->mask_addr);
+                       ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+                                                       p_block,
+                                                       ECORE_ATTN_TYPE_ATTN,
+                                                       val, mask);
+
+#ifndef REMOVE_DBG
+                       interrupts[i] = val;
+#endif
+               }
+       }
+
+       /* Reach assertion if attention is fatal */
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
+                         p_aeu->bit_name);
+
+               ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
+       }
+
+       /* Prevent this Attention from being asserted in the future */
+       if (p_aeu->flags & ATTENTION_CLEAR_ENABLE) {
+               /* Use distinct names here - the outer `val'/`mask' locals
+                * are still live and must not be shadowed.
+                */
+               u32 aeu_en_val, keep_mask;
+
+               keep_mask = ~bitmask;
+               aeu_en_val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+               ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg,
+                        (aeu_en_val & keep_mask));
+               DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+                       p_aeu->bit_name);
+       }
+
+       if (p_aeu->flags & (ATTENTION_FW_DUMP | ATTENTION_PANIC_DUMP)) {
+               /* @@@TODO - what to dump? <yuvalmin 04/02/13> */
+               DP_ERR(p_hwfn->p_dev, "`%s' - Dumps aren't implemented yet\n",
+                      p_aeu->bit_name);
+               return ECORE_NOTIMPL;
+       }
+
+       return rc;
+}
+
+/* Reads and logs all parity status registers of a HW block for a single
+ * parity AEU source, delegating per-bit printing to
+ * ecore_int_deassertion_print_bit(). Reading sts_clr_addr also clears
+ * the latched parity status.
+ */
+static void ecore_int_parity_print(struct ecore_hwfn *p_hwfn,
+                                  struct aeu_invert_reg_bit *p_aeu,
+                                  struct attn_hw_block *p_block, u8 bit_index)
+{
+       u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev);
+       int i;
+
+       for (i = 0; i < p_block->chip_regs[chip_type].num_of_prty_regs; i++) {
+               struct attn_hw_reg *p_reg_desc;
+               u32 val, mask;
+
+               p_reg_desc = p_block->chip_regs[chip_type].prty_regs[i];
+
+               val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                              p_reg_desc->sts_clr_addr);
+               mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                               p_reg_desc->mask_addr);
+               /* Rejoined format string - it had been broken across lines
+                * by mail line-wrapping, leaving a raw newline inside the
+                * literal.
+                */
+               DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                          "%s[%d] - parity register[%d] is %08x"
+                          " [mask is %08x]\n",
+                          p_aeu->bit_name, bit_index, i, val, mask);
+               ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc,
+                                               p_block,
+                                               ECORE_ATTN_TYPE_PARITY,
+                                               val, mask);
+       }
+}
+
+/**
+ * @brief ecore_int_deassertion_parity - handle a single parity AEU source
+ *
+ * @param p_hwfn
+ * @param p_aeu - descriptor of an AEU bit which caused the
+ *              parity
+ * @param bit_index
+ */
+static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
+                                        struct aeu_invert_reg_bit *p_aeu,
+                                        u8 bit_index)
+{
+       u32 block_id = p_aeu->block_index;
+
+       DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
+               p_aeu->bit_name, bit_index);
+
+       /* MAX_BLOCK_ID means no HW block registers to dump for this source */
+       if (block_id != MAX_BLOCK_ID) {
+               ecore_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
+                                      bit_index);
+
+               /* In A0, there's a single parity bit for several blocks */
+               if (block_id == BLOCK_BTB) {
+                       ecore_int_parity_print(p_hwfn, p_aeu,
+                                              &attn_blocks[BLOCK_OPTE],
+                                              bit_index);
+                       ecore_int_parity_print(p_hwfn, p_aeu,
+                                              &attn_blocks[BLOCK_MCP],
+                                              bit_index);
+               }
+       }
+}
+
+/**
+ * @brief - handles deassertion of previously asserted attentions.
+ *
+ * Reads the AEU after-invert registers, services parity sources first,
+ * then walks every attention group to locate and handle the non-parity
+ * causes, and finally acks/unmasks the deasserted bits in the IGU.
+ *
+ * @param p_hwfn
+ * @param deasserted_bits - newly deasserted bits
+ * @return enum _ecore_status_t
+ *
+ */
+static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
+                                                 u16 deasserted_bits)
+{
+       struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+       u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
+       bool b_parity = false;
+       u8 i, j, k, bit_idx;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       /* Read the attention registers in the AEU */
+       for (i = 0; i < NUM_ATTN_REGS; i++) {
+               aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                         MISC_REG_AEU_AFTER_INVERT_1_IGU +
+                                         i * 0x4);
+               DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                          "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
+       }
+
+       /* Handle parity attentions first */
+       for (i = 0; i < NUM_ATTN_REGS; i++) {
+               struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
+               u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                                 MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+                                 i * sizeof(u32));
+
+               u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+
+               /* Skip register in which no parity bit is currently set */
+               if (!parities)
+                       continue;
+
+               /* bit_idx advances by each descriptor's bit-field length,
+                * so j indexes descriptors while bit_idx tracks HW bits.
+                */
+               for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+                       struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
+
+                       if ((p_bit->flags & ATTENTION_PARITY) &&
+                           !!(parities & (1 << bit_idx))) {
+                               ecore_int_deassertion_parity(p_hwfn, p_bit,
+                                                            bit_idx);
+                               b_parity = true;
+                       }
+
+                       bit_idx += ATTENTION_LENGTH(p_bit->flags);
+               }
+       }
+
+       /* Find non-parity cause for attention and act */
+       for (k = 0; k < MAX_ATTN_GRPS; k++) {
+               struct aeu_invert_reg_bit *p_aeu;
+
+               /* Handle only groups whose attention is currently deasserted */
+               if (!(deasserted_bits & (1 << k)))
+                       continue;
+
+               for (i = 0; i < NUM_ATTN_REGS; i++) {
+                       u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+                           i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
+                       u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+                       u32 bits = aeu_inv_arr[i] & en;
+
+                       /* Skip if no bit from this group is currently set */
+                       if (!bits)
+                               continue;
+
+                       /* Find all set bits from current register which belong
+                        * to current group, making them responsible for the
+                        * previous assertion.
+                        */
+                       for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+                               u8 bit, bit_len;
+                               u32 bitmask;
+
+                               p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
+
+                               /* No need to handle attention-only bits */
+                               if (p_aeu->flags == ATTENTION_PAR)
+                                       continue;
+
+                               bit = bit_idx;
+                               bit_len = ATTENTION_LENGTH(p_aeu->flags);
+                               if (p_aeu->flags & ATTENTION_PAR_INT) {
+                                       /* Skip Parity */
+                                       bit++;
+                                       bit_len--;
+                               }
+
+                               bitmask = bits & (((1 << bit_len) - 1) << bit);
+                               if (bitmask) {
+                                       /* Handle source of the attention */
+                                       ecore_int_deassertion_aeu_bit(p_hwfn,
+                                                                     p_aeu,
+                                                                     aeu_en,
+                                                                     bitmask);
+                               }
+
+                               bit_idx += ATTENTION_LENGTH(p_aeu->flags);
+                       }
+               }
+       }
+
+       /* Clear IGU indication for the deasserted bits */
+       /* FIXME - this will change once we'll have GOOD gtt definitions */
+       DIRECT_REG_WR(p_hwfn,
+                     (u8 OSAL_IOMEM *) p_hwfn->regview +
+                     GTT_BAR0_MAP_REG_IGU_CMD +
+                     ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+                       IGU_CMD_INT_ACK_BASE) << 3), ~((u32) deasserted_bits));
+
+       /* Unmask deasserted attentions in IGU */
+       aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+                           IGU_REG_ATTENTION_ENABLE);
+       aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+       ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+       /* Clear deassertion from inner state */
+       sb_attn_sw->known_attn &= ~deasserted_bits;
+
+       return rc;
+}
+
+/* Top-level attention handler: snapshots a consistent view of the attention
+ * status block, derives newly asserted/deasserted bits relative to the known
+ * software state and dispatches to the assertion/deassertion handlers.
+ */
+static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+       struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+       u16 index = 0, asserted_bits, deasserted_bits;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u32 attn_bits = 0, attn_acks = 0;
+
+       /* Read current attention bits/acks - safeguard against attentions
+        * by guaranteeing work on a synchronized timeframe
+        */
+       do {
+               index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
+               attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
+               attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
+       } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
+       /* NOTE(review): this writes the CPU-order index back into the LE
+        * sb_index field - looks wrong on big-endian hosts; confirm.
+        */
+       p_sb_attn->sb_index = index;
+
+       /* Attention / Deassertion are meaningful (and in correct state)
+        * only when they differ and consistent with known state - deassertion
+        * when previous attention & current ack, and assertion when current
+        * attention with no previous attention
+        */
+       asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+           ~p_sb_attn_sw->known_attn;
+       deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+           p_sb_attn_sw->known_attn;
+
+       /* Bit 8 (0x100) is the MFW indication and is only logged verbosely;
+        * format string rejoined after being broken by mail line-wrapping.
+        */
+       if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
+               DP_INFO(p_hwfn,
+                       "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x,"
+                       " asserted: 0x%04x, De-asserted 0x%04x"
+                       " [Prev. known: 0x%04x]\n",
+                       index, attn_bits, attn_acks, asserted_bits,
+                       deasserted_bits, p_sb_attn_sw->known_attn);
+       else if (asserted_bits == 0x100)
+               DP_INFO(p_hwfn, "MFW indication via attention\n");
+       else
+               DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                          "MFW indication [deassertion]\n");
+
+       if (asserted_bits) {
+               rc = ecore_int_assertion(p_hwfn, asserted_bits);
+               if (rc)
+                       return rc;
+       }
+
+       if (deasserted_bits)
+               rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
+
+       return rc;
+}
+
+/* Acks the attention segment of the status block to the IGU by writing a
+ * producer/consumer update command (attention segment, no interrupt state
+ * change) with the given consumer value.
+ */
+static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
+                             void OSAL_IOMEM *igu_addr, u32 ack_cons)
+{
+       struct igu_prod_cons_update igu_ack = { 0 };
+
+       igu_ack.sb_id_and_flags =
+           ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+            (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+            (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+            (IGU_SEG_ACCESS_ATTN <<
+             IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+       DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
+
+       /* Both segments (interrupts & acks) are written to same place address;
+        * Need to guarantee all commands will be received (in-order) by HW.
+        */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+       OSAL_BARRIER(p_hwfn->p_dev);
+}
+
+/* Slowpath DPC - ack the slowpath status block, dispatch registered
+ * protocol-index callbacks, and process/ack attentions signalled via
+ * the IGU.
+ */
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
+{
+       struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
+       struct ecore_pi_info *pi_info = OSAL_NULL;
+       struct ecore_sb_attn_info *sb_attn;
+       struct ecore_sb_info *sb_info;
+       int arr_size;
+       u16 rc = 0;
+
+       /* Cannot log here - with a NULL cookie there is no valid device
+        * pointer to report against; the old DP_ERR(p_hwfn->p_dev, ...)
+        * dereferenced the NULL pointer it was diagnosing.
+        */
+       if (!p_hwfn)
+               return;
+
+       if (!p_hwfn->p_sp_sb) {
+               DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
+               return;
+       }
+
+       /* sb_info is the address of an embedded member and can never be
+        * NULL once p_sp_sb is validated, so no further check is needed.
+        */
+       sb_info = &p_hwfn->p_sp_sb->sb_info;
+       arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+
+       if (!p_hwfn->p_sb_attn) {
+               DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
+               return;
+       }
+       sb_attn = p_hwfn->p_sb_attn;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+                  p_hwfn, p_hwfn->my_id);
+
+       /* Disable ack for def status block. Required both for msix +
+        * inta in non-mask mode, in inta does no harm.
+        */
+       ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+       /* Gather Interrupts/Attentions information */
+       if (!sb_info->sb_virt) {
+               DP_ERR(p_hwfn->p_dev,
+                      "Interrupt Status block is NULL -"
+                      " cannot check for new interrupts!\n");
+       } else {
+               u32 tmp_index = sb_info->sb_ack;
+               rc = ecore_sb_update_sb_idx(sb_info);
+               DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
+                          "Interrupt indices: 0x%08x --> 0x%08x\n",
+                          tmp_index, sb_info->sb_ack);
+       }
+
+       /* sb_attn itself is known valid here; only its DMA memory may be
+        * missing.
+        */
+       if (!sb_attn->sb_attn) {
+               DP_ERR(p_hwfn->p_dev,
+                      "Attentions Status block is NULL -"
+                      " cannot check for new attentions!\n");
+       } else {
+               u16 tmp_index = sb_attn->index;
+
+               rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
+               DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
+                          "Attention indices: 0x%08x --> 0x%08x\n",
+                          tmp_index, sb_attn->index);
+       }
+
+       /* Check if we expect interrupts at this time. if not just ack them */
+       if (!(rc & ECORE_SB_EVENT_MASK)) {
+               ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+               return;
+       }
+
+       /* Check the validity of the DPC ptt. If not ack interrupts and fail */
+       if (!p_hwfn->p_dpc_ptt) {
+               DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
+               ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+               return;
+       }
+
+       if (rc & ECORE_SB_ATT_IDX)
+               ecore_int_attentions(p_hwfn);
+
+       if (rc & ECORE_SB_IDX) {
+               int pi;
+
+               /* Since we only looked at the SB index, it's possible more
+                * than a single protocol-index on the SB incremented.
+                * Iterate over all configured protocol indices and check
+                * whether something happened for each.
+                */
+               for (pi = 0; pi < arr_size; pi++) {
+                       pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+                       if (pi_info->comp_cb != OSAL_NULL)
+                               pi_info->comp_cb(p_hwfn, pi_info->cookie);
+               }
+       }
+
+       if (rc & ECORE_SB_ATT_IDX) {
+               /* This should be done before the interrupts are enabled,
+                * since otherwise a new attention will be generated.
+                */
+               ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+       }
+
+       ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+/* Release the attention status block: free its DMA ring (if allocated)
+ * and the bookkeeping structure, then clear the hwfn's pointer so a
+ * repeated teardown or late access cannot touch freed memory.
+ */
+static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+       if (!p_sb)
+               return;
+
+       if (p_sb->sb_attn) {
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
+                                      p_sb->sb_phys,
+                                      SB_ATTN_ALIGNED_SIZE(p_hwfn));
+       }
+       OSAL_FREE(p_hwfn->p_dev, p_sb);
+
+       /* Don't leave a dangling pointer behind */
+       p_hwfn->p_sb_attn = OSAL_NULL;
+}
+
+/* Reset the attention SB software state and point the IGU at its DMA
+ * address.
+ */
+static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt)
+{
+       struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+       /* Wipe the attention SB memory and forget any prior state */
+       OSAL_MEMSET(p_sb->sb_attn, 0, sizeof(*p_sb->sb_attn));
+       p_sb->index = 0;
+       p_sb->known_attn = 0;
+
+       /* Program the attention message address into the IGU */
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+                DMA_LO(p_sb->sb_phys));
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+                DMA_HI(p_sb->sb_phys));
+}
+
+/* Bind the attention SB to its DMA memory, precompute per-register parity
+ * masks from the AEU descriptor table, record the MFW cleanup address, and
+ * program the IGU via ecore_int_sb_attn_setup().
+ */
+static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  void *sb_virt_addr, dma_addr_t sb_phy_addr)
+{
+       struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+       int i, j, k;
+
+       sb_info->sb_attn = sb_virt_addr;
+       sb_info->sb_phys = sb_phy_addr;
+
+       /* Set the pointer to the AEU descriptors */
+       sb_info->p_aeu_desc = aeu_descs;
+
+       /* Calculate Parity Masks */
+       OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
+       for (i = 0; i < NUM_ATTN_REGS; i++) {
+               /* j is array index, k is bit index */
+               for (j = 0, k = 0; k < 32; j++) {
+                       unsigned int flags = aeu_descs[i].bits[j].flags;
+
+                       /* A descriptor may span several bits
+                        * (ATTENTION_LENGTH); mark only the first bit of a
+                        * parity-type descriptor in this register's mask.
+                        */
+                       if (flags & ATTENTION_PARITY)
+                               sb_info->parity_mask[i] |= 1 << k;
+
+                       k += ATTENTION_LENGTH(flags);
+               }
+               DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                          "Attn Mask [Reg %d]: 0x%08x\n",
+                          i, sb_info->parity_mask[i]);
+       }
+
+       /* Set the address of cleanup for the mcp attention */
+       sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+           MISC_REG_AEU_GENERAL_ATTN_0;
+
+       ecore_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+/* Allocate the attention status block (bookkeeping + DMA ring) and hand it
+ * to ecore_int_sb_attn_init() for HW programming.
+ */
+static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       struct ecore_sb_attn_info *p_attn;
+       dma_addr_t phys_addr = 0;
+       void *virt_addr;
+
+       /* Bookkeeping structure first */
+       p_attn = OSAL_ALLOC(p_dev, GFP_KERNEL,
+                           sizeof(struct ecore_sb_attn_info));
+       if (!p_attn) {
+               DP_NOTICE(p_dev, true,
+                         "Failed to allocate `struct ecore_sb_attn_info'");
+               return ECORE_NOMEM;
+       }
+
+       /* DMA-coherent ring backing the attention SB */
+       virt_addr = OSAL_DMA_ALLOC_COHERENT(p_dev, &phys_addr,
+                                           SB_ATTN_ALIGNED_SIZE(p_hwfn));
+       if (!virt_addr) {
+               DP_NOTICE(p_dev, true,
+                         "Failed to allocate status block (attentions)");
+               OSAL_FREE(p_dev, p_attn);
+               return ECORE_NOMEM;
+       }
+
+       /* Publish the structure, then initialize it */
+       p_hwfn->p_sb_attn = p_attn;
+       ecore_int_sb_attn_init(p_hwfn, p_ptt, virt_addr, phys_addr);
+
+       return ECORE_SUCCESS;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+
+/* Default Rx interrupt-coalescing period (usecs); may be overridden at
+ * build time via RTE_LIBRTE_QEDE_RX_COAL_US.
+ */
+#ifdef RTE_LIBRTE_QEDE_RX_COAL_US
+#define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
+#else
+#define ECORE_CAU_DEF_RX_USECS 24
+#endif
+
+/* Default Tx interrupt-coalescing period (usecs); may be overridden at
+ * build time via RTE_LIBRTE_QEDE_TX_COAL_US.
+ */
+#ifdef RTE_LIBRTE_QEDE_TX_COAL_US
+#define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
+#else
+#define ECORE_CAU_DEF_TX_USECS 48
+#endif
+
+/* Fill a CAU SB entry for the given function: owner identification, fixed
+ * timer resolutions, and the coalescing state machines (enabled only when
+ * the device requests interrupt coalescing; default usec values are applied
+ * to the device if none were configured).
+ */
+void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
+                            struct cau_sb_entry *p_sb_entry,
+                            u8 pf_id, u16 vf_number, u8 vf_valid)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       u32 state = CAU_HC_DISABLE_STATE;
+
+       OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+       /* Identify the owning function (PF, optionally a VF) */
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+       /* Fixed timer resolution for both state machines */
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
+                 ECORE_CAU_DEF_RX_TIMER_RES);
+       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
+                 ECORE_CAU_DEF_TX_TIMER_RES);
+
+       if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
+               state = CAU_HC_ENABLE_STATE;
+
+               /* Apply the build-time defaults when nothing was set */
+               if (!p_dev->rx_coalesce_usecs) {
+                       p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
+                       DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
+                               p_dev->rx_coalesce_usecs);
+               }
+               if (!p_dev->tx_coalesce_usecs) {
+                       p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
+                       DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
+                               p_dev->tx_coalesce_usecs);
+               }
+       }
+
+       SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, state);
+       SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, state);
+}
+
+/* Program the CAU entry (SB address + variables) for @igu_sb_id - via DMAE
+ * when HW init is already done, otherwise through runtime-init registers -
+ * and optionally configure Rx/Tx protocol-index coalescing.
+ */
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt,
+                          dma_addr_t sb_phys, u16 igu_sb_id,
+                          u16 vf_number, u8 vf_valid)
+{
+       struct cau_sb_entry sb_entry;
+
+       ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+                               vf_number, vf_valid);
+
+       if (p_hwfn->hw_init_done) {
+               /* Wide-bus, initialize via DMAE */
+               u64 phys_addr = (u64) sb_phys;
+
+               /* Each CAU entry is a u64, written as two 32-bit words */
+               ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                   (u64) (osal_uintptr_t) &phys_addr,
+                                   CAU_REG_SB_ADDR_MEMORY +
+                                   igu_sb_id * sizeof(u64), 2, 0);
+               ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                   (u64) (osal_uintptr_t) &sb_entry,
+                                   CAU_REG_SB_VAR_MEMORY +
+                                   igu_sb_id * sizeof(u64), 2, 0);
+       } else {
+               /* Initialize Status Block Address */
+               STORE_RT_REG_AGG(p_hwfn,
+                                CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+                                igu_sb_id * 2, sb_phys);
+
+               STORE_RT_REG_AGG(p_hwfn,
+                                CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+                                igu_sb_id * 2, sb_entry);
+       }
+
+       /* Configure pi coalescing if set */
+       if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
+               u8 num_tc = 1;  /* @@@TBD aelior ECORE_MULTI_COS */
+               /* timeset = usecs >> (timer_res + 1); see the coalescing
+                * formula above the ECORE_CAU_DEF_*_USECS defines.
+                */
+               u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
+                   (ECORE_CAU_DEF_RX_TIMER_RES + 1);
+               u8 i;
+
+               ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+                                     ECORE_COAL_RX_STATE_MACHINE, timeset);
+
+               timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
+                   (ECORE_CAU_DEF_TX_TIMER_RES + 1);
+
+               /* One Tx protocol index per traffic class */
+               for (i = 0; i < num_tc; i++) {
+                       ecore_int_cau_conf_pi(p_hwfn, p_ptt,
+                                             igu_sb_id, TX_PI(i),
+                                             ECORE_COAL_TX_STATE_MACHINE,
+                                             timeset);
+               }
+       }
+}
+
+/* Program a single CAU protocol-index entry (timeset + FSM selection) for
+ * the given SB, either directly or via the runtime-init array.
+ */
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt,
+                          u16 igu_sb_id, u32 pi_index,
+                          enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
+{
+       struct cau_pi_entry pi_entry;
+       u32 pi_offset;
+
+       if (IS_VF(p_hwfn->p_dev))
+               return;         /* @@@TBD MichalK- VF CAU... */
+
+       OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+       /* FSM 0 is the Rx state machine, FSM 1 the Tx one */
+       SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+       SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL,
+                 (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE) ? 0 : 1);
+
+       /* PI entries of a single SB are consecutive in CAU memory */
+       pi_offset = (u32)igu_sb_id * PIS_PER_SB + pi_index;
+       if (p_hwfn->hw_init_done)
+               ecore_wr(p_hwfn, p_ptt,
+                        CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+                        *((u32 *) &(pi_entry)));
+       else
+               STORE_RT_REG(p_hwfn,
+                            CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+                            *((u32 *) &(pi_entry)));
+}
+
+/* Reset a status block's software state and, for PFs, configure its CAU
+ * entry.
+ */
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
+{
+       /* zero status block and ack counter */
+       sb_info->sb_ack = 0;
+       OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+       /* Only PFs program the CAU */
+       if (!IS_PF(p_hwfn->p_dev))
+               return;
+
+       ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+                             sb_info->igu_sb_id, 0, 0);
+}
+
+/**
+ * @brief ecore_get_igu_sb_id - map a driver-relative SB id to the
+ *        corresponding IGU SB id
+ *
+ * @param p_hwfn
+ * @param sb_id - ECORE_SP_SB_ID for the slowpath SB, else a fastpath index
+ *
+ * @return u16 - the IGU status block id
+ */
+static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+{
+       u16 igu_sb_id;
+
+       /* Assuming continuous set of IGU SBs dedicated for given PF */
+       if (sb_id == ECORE_SP_SB_ID) {
+               igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+               DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                          "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+       } else if (IS_PF(p_hwfn->p_dev)) {
+               igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+               DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                          "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
+       } else {
+               igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
+               DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                          "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
+       }
+
+       return igu_sb_id;
+}
+
+/* Initialize an SB descriptor: bind it to its memory, resolve its IGU id
+ * and ack address, register it with the hwfn (fastpath SBs only), and run
+ * the HW setup.
+ */
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_sb_info *sb_info,
+                                      void *sb_virt_addr,
+                                      dma_addr_t sb_phy_addr, u16 sb_id)
+{
+       sb_info->sb_virt = sb_virt_addr;
+       sb_info->sb_phys = sb_phy_addr;
+       sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
+
+       /* Track every non-slowpath SB in the hwfn's table */
+       if (sb_id != ECORE_SP_SB_ID) {
+               p_hwfn->sbs_info[sb_id] = sb_info;
+               p_hwfn->num_sbs++;
+       }
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+       sb_info->p_hwfn = p_hwfn;
+#endif
+       sb_info->p_dev = p_hwfn->p_dev;
+
+       /* The igu address will hold the absolute address that needs to be
+        * written to for a specific status block
+        */
+       if (IS_PF(p_hwfn->p_dev))
+               sb_info->igu_addr = (u8 OSAL_IOMEM *) p_hwfn->regview +
+                   GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
+       else
+               sb_info->igu_addr = (u8 OSAL_IOMEM *) p_hwfn->regview +
+                   PXP_VF_BAR0_START_IGU +
+                   ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
+
+       sb_info->flags |= ECORE_SB_INFO_INIT;
+
+       ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+       return ECORE_SUCCESS;
+}
+
+/* Detach a fastpath SB from the hwfn: refuse the slowpath SB, zero the SB
+ * memory and ack counter, and remove it from the SB table.
+ */
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_sb_info *sb_info,
+                                         u16 sb_id)
+{
+       /* The slowpath SB has its own teardown path */
+       if (sb_id == ECORE_SP_SB_ID) {
+               DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+               return ECORE_INVAL;
+       }
+
+       /* zero status block and ack counter */
+       sb_info->sb_ack = 0;
+       OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+       /* Drop the table entry if it is still registered */
+       if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
+               p_hwfn->sbs_info[sb_id] = OSAL_NULL;
+               p_hwfn->num_sbs--;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Release the slowpath status block: free its DMA ring (if allocated) and
+ * the bookkeeping structure, then clear the hwfn's pointer so a repeated
+ * teardown or late access cannot reach the freed memory.
+ */
+static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+       if (!p_sb)
+               return;
+
+       if (p_sb->sb_info.sb_virt) {
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_sb->sb_info.sb_virt,
+                                      p_sb->sb_info.sb_phys,
+                                      SB_ALIGNED_SIZE(p_hwfn));
+       }
+
+       OSAL_FREE(p_hwfn->p_dev, p_sb);
+
+       /* Don't leave a dangling pointer behind */
+       p_hwfn->p_sp_sb = OSAL_NULL;
+}
+
+/* Allocate and initialize the slowpath status block (bookkeeping + DMA
+ * ring) and clear its protocol-index callback table.
+ */
+static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt)
+{
+       struct ecore_sb_sp_info *p_sp;
+       dma_addr_t phys = 0;
+       void *virt;
+
+       /* Bookkeeping structure first */
+       p_sp = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                         sizeof(struct ecore_sb_sp_info));
+       if (!p_sp) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_sb_info'");
+               return ECORE_NOMEM;
+       }
+
+       /* DMA-coherent status block ring */
+       virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &phys,
+                                      SB_ALIGNED_SIZE(p_hwfn));
+       if (!virt) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
+               OSAL_FREE(p_hwfn->p_dev, p_sp);
+               return ECORE_NOMEM;
+       }
+
+       /* Publish and initialize the slowpath SB */
+       p_hwfn->p_sp_sb = p_sp;
+       ecore_int_sb_init(p_hwfn, p_ptt, &p_sp->sb_info,
+                         virt, phys, ECORE_SP_SB_ID);
+
+       /* No protocol-index callbacks registered yet */
+       OSAL_MEMSET(p_sp->pi_info_arr, 0, sizeof(p_sp->pi_info_arr));
+
+       return ECORE_SUCCESS;
+}
+
+/* Register a slowpath completion callback in the first free protocol-index
+ * slot. On success, returns the chosen index via @sb_idx and the address of
+ * the firmware consumer via @p_fw_cons; fails with ECORE_NOMEM when all
+ * slots are taken.
+ */
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+                                          ecore_int_comp_cb_t comp_cb,
+                                          void *cookie,
+                                          u8 *sb_idx, __le16 **p_fw_cons)
+{
+       struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+       u8 pi;
+
+       /* Claim the first slot that has no callback installed */
+       for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+               struct ecore_pi_info *p_info = &p_sp_sb->pi_info_arr[pi];
+
+               if (p_info->comp_cb != OSAL_NULL)
+                       continue;
+
+               p_info->comp_cb = comp_cb;
+               p_info->cookie = cookie;
+               *sb_idx = pi;
+               *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+               return ECORE_SUCCESS;
+       }
+
+       /* Every protocol-index slot is occupied */
+       return ECORE_NOMEM;
+}
+
+/* Remove the callback registered at protocol-index @pi; fails with
+ * ECORE_NOMEM when the slot was never registered.
+ */
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
+{
+       struct ecore_pi_info *p_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+
+       if (p_info->comp_cb == OSAL_NULL)
+               return ECORE_NOMEM;
+
+       p_info->comp_cb = OSAL_NULL;
+       p_info->cookie = OSAL_NULL;
+       return ECORE_SUCCESS;
+}
+
+/* Return the IGU id of the slowpath status block (resolved at SB init) */
+u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+
+       return p_sp_sb->sb_info.igu_sb_id;
+}
+
+/* Write the PF configuration register in the IGU according to the requested
+ * interrupt mode (INTa / MSI / MSI-X / polling). Attention generation is
+ * enabled except on FPGA emulation.
+ */
+void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             enum ecore_int_mode int_mode)
+{
+       u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+               DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
+       else
+#endif
+               igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;
+
+       p_hwfn->p_dev->int_mode = int_mode;
+       switch (p_hwfn->p_dev->int_mode) {
+       case ECORE_INT_MODE_INTA:
+               igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+               igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+               break;
+
+       case ECORE_INT_MODE_MSI:
+               igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+               igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+               break;
+
+       case ECORE_INT_MODE_MSIX:
+               igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+               break;
+       case ECORE_INT_MODE_POLL:
+               break;
+       default:
+               /* Flag unexpected values instead of silently programming
+                * the function-enable bit alone.
+                */
+               DP_NOTICE(p_hwfn, true, "Unknown interrupt mode %d\n",
+                         int_mode);
+               break;
+       }
+
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+/* Enable attentions toward the IGU: configure the AEU edge latches and the
+ * attention-enable register, then unmask the AEU signals. Skipped entirely
+ * on FPGA emulation.
+ */
+static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt)
+{
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               DP_INFO(p_hwfn,
+                       "FPGA - Don't enable Attentions in IGU and MISC\n");
+               return;
+       }
+#endif
+
+       /* Configure AEU signal change to produce attentions */
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
+
+       /* Make sure the IGU configuration lands before unmasking the AEU */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+
+       /* Unmask AEU signals toward IGU */
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+/* Full interrupt-enable sequence: apply the temporary AEU workaround masks,
+ * enable attentions, request the slowpath IRQ where needed, and finally
+ * enable interrupt generation in the IGU.
+ */
+enum _ecore_status_t
+ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                    enum ecore_int_mode int_mode)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u32 tmp, reg_addr;
+
+       /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
+       tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
+       tmp |= 0xf;
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
+
+       /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
+        * attentions. Since we're waiting for BRCM answer regarding this
+        * attention, in the meanwhile we simply mask it.
+        */
+       tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
+       tmp &= ~0x800;
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
+
+       /* @@@tmp - Mask interrupt sources - should move to init tool;
+        * Also, correct for A0 [might still change in B0.
+        */
+       reg_addr =
+           attn_blocks[BLOCK_BRB].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
+           int_regs[0]->mask_addr;
+       tmp = ecore_rd(p_hwfn, p_ptt, reg_addr);
+       tmp |= (1 << 21);       /* Was PKT4_LEN_ERROR */
+       ecore_wr(p_hwfn, p_ptt, reg_addr, tmp);
+
+       ecore_int_igu_enable_attn(p_hwfn, p_ptt);
+
+       /* For INTa, only the leading hwfn requests the slowpath IRQ */
+       if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+               rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
+               if (rc != ECORE_SUCCESS) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Slowpath IRQ request failed\n");
+                       return ECORE_NORESOURCES;
+               }
+               p_hwfn->b_int_requested = true;
+       }
+
+       /* Enable interrupt Generation */
+       ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+
+       p_hwfn->b_int_enabled = 1;
+
+       return rc;
+}
+
+/* Mark interrupts as disabled; PFs additionally clear the IGU PF
+ * configuration register (VFs do not own that register).
+ */
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt)
+{
+       p_hwfn->b_int_enabled = 0;
+
+       if (IS_VF(p_hwfn->p_dev))
+               return;
+
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+/* Max polling iterations (5ms apart) when waiting for cleanup completion */
+#define IGU_CLEANUP_SLEEP_LENGTH               (1000)
+
+/**
+ * @brief ecore_int_igu_cleanup_sb - issue an IGU cleanup command for a
+ *        status block via the IGU command register, then poll the matching
+ *        cleanup-status register until the bit reflects the requested state.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - IGU status block id to clean
+ * @param cleanup_set - true to set the cleanup bit, false to clear it
+ * @param opaque_fid - function id the command is issued on behalf of
+ */
+void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             u32 sb_id, bool cleanup_set, u16 opaque_fid)
+{
+       u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
+       u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+       u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+       u8 type = 0;            /* FIXME MichalS type??? */
+
+       OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
+                          IGU_REG_CLEANUP_STATUS_0) != 0x200);
+
+       /* USE Control Command Register to perform cleanup. There is an
+        * option to do this using IGU bar, but then it can't be used for VFs.
+        */
+
+       /* Set the data field */
+       SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+       SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
+       SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+       /* Set the control register */
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+       SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+       /* The data write must reach the IGU before the control write fires */
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+       OSAL_BARRIER(p_hwfn->p_dev);
+
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+       OSAL_MMIOWB(p_hwfn->p_dev);
+
+       /* calculate where to read the status bit from */
+       sb_bit = 1 << (sb_id % 32);
+       sb_bit_addr = sb_id / 32 * sizeof(u32);
+
+       sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
+
+       /* Now wait for the command to complete */
+       while (--sleep_cnt) {
+               val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
+               if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+                       break;
+               OSAL_MSLEEP(5);
+       }
+
+       /* Rejoined the DP_NOTICE format string that had been broken across
+        * two lines (invalid C string literal).
+        */
+       if (!sleep_cnt)
+               DP_NOTICE(p_hwfn, true,
+                         "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+                         val, sb_id);
+}
+
+/* Run the set/clear cleanup sequence on one SB and zero its CAU
+ * protocol-index entries.
+ */
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      u32 sb_id, u16 opaque, bool b_set)
+{
+       int pi;
+
+       /* Optionally set, then always clear, the SB's cleanup flag */
+       if (b_set)
+               ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+       ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+
+       /* Zero all 12 CAU PI entries belonging to this SB */
+       for (pi = 0; pi < 12; pi++) {
+               u32 addr = CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4;
+
+               ecore_wr(p_hwfn, p_ptt, addr, 0);
+       }
+}
+
+/* Clean every IGU SB owned by this PF (and optionally its slowpath SB),
+ * after tweaking the IGU block configuration.
+ */
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt,
+                               bool b_set, bool b_slowpath)
+{
+       u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
+       u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
+       u32 igu_sb_id, cfg;
+
+       /* @@@TBD MichalK temporary... should be moved to init-tool... */
+       cfg = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+       cfg |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+       cfg &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, cfg);
+       /* end temporary */
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                  "IGU cleaning SBs [%d,...,%d]\n",
+                  igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+
+       /* Clean the PF's fastpath SB range */
+       for (igu_sb_id = igu_base_sb;
+            igu_sb_id < igu_base_sb + igu_sb_cnt; igu_sb_id++)
+               ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
+                                                 p_hwfn->hw_info.opaque_fid,
+                                                 b_set);
+
+       if (!b_slowpath)
+               return;
+
+       /* And the slowpath (default) SB as well */
+       igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                  "IGU cleaning slowpath SB [%d]\n", igu_sb_id);
+       ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
+                                         p_hwfn->hw_info.opaque_fid, b_set);
+}
+
+/* Read one IGU CAM line and, when it is a valid entry, record its owner
+ * (function id, PF/VF, vector number) in the hwfn's IGU block table.
+ * Returns the raw CAM value so the caller can decide whether to stop.
+ */
+static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt, u16 sb_id)
+{
+       struct ecore_igu_block *p_block;
+       u32 val;
+
+       val = ecore_rd(p_hwfn, p_ptt,
+                      IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+       p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+
+       /* stop scanning when hit first invalid PF entry */
+       if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+           GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+               return val;
+
+       /* Fill the block information */
+       p_block->status = ECORE_IGU_STATUS_VALID;
+       p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
+       p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+       p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                  "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
+                  " is_pf = %d vector_num = 0x%x\n",
+                  sb_id, val, p_block->function_id, p_block->is_pf,
+                  p_block->vector_number);
+
+       return val;
+}
+
+/* Walk the IGU CAM and build this hwfn's IGU shadow info: the default
+ * (slowpath) SB, the PF's own consecutive SB range, and the SBs free for
+ * use by this PF's VFs.
+ *
+ * Returns ECORE_NOMEM on allocation failure, ECORE_INVAL if the CAM did
+ * not yield a default SB / base SB / non-zero SB count, ECORE_SUCCESS
+ * otherwise.
+ */
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt)
+{
+       struct ecore_igu_info *p_igu_info;
+       struct ecore_igu_block *p_block;
+       u16 sb_id, last_iov_sb_id = 0;
+       u32 min_vf, max_vf, val;
+       u16 prev_sb_id = 0xFF;
+
+       p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
+                                               GFP_KERNEL,
+                                               sizeof(*p_igu_info));
+       if (!p_hwfn->hw_info.p_igu_info)
+               return ECORE_NOMEM;
+
+       OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
+
+       p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+       /* Initialize base sb / sb cnt for PFs and VFs; 0xffff marks
+        * "not found yet".
+        */
+       p_igu_info->igu_base_sb = 0xffff;
+       p_igu_info->igu_sb_cnt = 0;
+       p_igu_info->igu_dsb_id = 0xffff;
+       p_igu_info->igu_base_sb_iov = 0xffff;
+
+#ifdef CONFIG_ECORE_SRIOV
+       min_vf = p_hwfn->hw_info.first_vf_in_pf;
+       max_vf = p_hwfn->hw_info.first_vf_in_pf +
+           p_hwfn->p_dev->sriov_info.total_vfs;
+#else
+       min_vf = 0;
+       max_vf = 0;
+#endif
+
+       for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+            sb_id++) {
+
+               p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+               val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
+               /* NOTE(review): scan stops at the first entry that is
+                * PF-valid but not line-valid - presumably the end of the
+                * initialized part of the CAM; confirm against HW spec.
+                */
+               if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
+                   GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
+                       break;
+
+               if (p_block->is_pf) {
+                       if (p_block->function_id == p_hwfn->rel_pf_id) {
+                               p_block->status |= ECORE_IGU_STATUS_PF;
+
+                               /* vector 0 of our PF is the default SB */
+                               if (p_block->vector_number == 0) {
+                                       if (p_igu_info->igu_dsb_id == 0xffff)
+                                               p_igu_info->igu_dsb_id = sb_id;
+                               } else {
+                                       if (p_igu_info->igu_base_sb == 0xffff) {
+                                               p_igu_info->igu_base_sb = sb_id;
+                                       } else if (prev_sb_id != sb_id - 1) {
+                                               DP_NOTICE(p_hwfn->p_dev, false,
+                                                         "consecutive igu"
+                                                         " vectors for HWFN"
+                                                         " %x broken",
+                                                         p_hwfn->rel_pf_id);
+                                               break;
+                                       }
+                                       prev_sb_id = sb_id;
+                                       /* we don't count the default */
+                                       (p_igu_info->igu_sb_cnt)++;
+                               }
+                       }
+               } else {
+                       if ((p_block->function_id >= min_vf) &&
+                           (p_block->function_id < max_vf)) {
+                               /* Available for VFs of this PF */
+                               if (p_igu_info->igu_base_sb_iov == 0xffff) {
+                                       p_igu_info->igu_base_sb_iov = sb_id;
+                               } else if (last_iov_sb_id != sb_id - 1) {
+                                       if (!val)
+                                               DP_VERBOSE(p_hwfn->p_dev,
+                                                          ECORE_MSG_INTR,
+                                                          "First uninited IGU"
+                                                          " CAM entry at"
+                                                          " index 0x%04x\n",
+                                                          sb_id);
+                                       else
+                                               DP_NOTICE(p_hwfn->p_dev, false,
+                                                         "Consecutive igu"
+                                                         " vectors for HWFN"
+                                                         " %x vfs is broken"
+                                                         " [jumps from %04x"
+                                                         " to %04x]\n",
+                                                         p_hwfn->rel_pf_id,
+                                                         last_iov_sb_id,
+                                                         sb_id);
+                                       break;
+                               }
+                               p_block->status |= ECORE_IGU_STATUS_FREE;
+                               /* use the local alias for consistency with
+                                * the rest of this function
+                                */
+                               p_igu_info->free_blks++;
+                               last_iov_sb_id = sb_id;
+                       }
+               }
+       }
+       p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+
+       /* Note the trailing space inside the first literal: adjacent string
+        * literals concatenate with no separator.
+        */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+                  "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
+                  "igu_dsb_id=0x%x\n",
+                  p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
+                  p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
+                  p_igu_info->igu_dsb_id);
+
+       if (p_igu_info->igu_base_sb == 0xffff ||
+           p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
+               DP_NOTICE(p_hwfn, true,
+                         "IGU CAM returned invalid values igu_base_sb=0x%x "
+                         "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
+                         p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
+                         p_igu_info->igu_dsb_id);
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/**
+ * @brief Initialize igu runtime registers
+ *
+ * @param p_hwfn
+ */
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
+{
+       /* Only the function-enable bit is stored at runtime-init time;
+        * interrupt-mode bits are presumably configured later (see
+        * ecore_int_igu_enable) - confirm against the init flow.
+        */
+       u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
+
+       STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+/* Offsets of the single-ISR mode-DPC wmask registers, expressed relative
+ * to the interrupt-ack command base (scaled by 8 bytes per entry below).
+ */
+#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
+                         IGU_CMD_INT_ACK_BASE)
+#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
+                         IGU_CMD_INT_ACK_BASE)
+/* Read the 64-bit SISR value as two 32-bit register reads (low, then
+ * high) and combine them.
+ */
+u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
+{
+       u32 intr_status_hi = 0, intr_status_lo = 0;
+       u64 intr_status = 0;
+
+       intr_status_lo = REG_RD(p_hwfn,
+                               GTT_BAR0_MAP_REG_IGU_CMD +
+                               LSB_IGU_CMD_ADDR * 8);
+       intr_status_hi = REG_RD(p_hwfn,
+                               GTT_BAR0_MAP_REG_IGU_CMD +
+                               MSB_IGU_CMD_ADDR * 8);
+       intr_status = ((u64) intr_status_hi << 32) + (u64) intr_status_lo;
+
+       return intr_status;
+}
+
+/* Bind the slowpath DPC to this hwfn and mark it as enabled */
+static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
+{
+       OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
+       p_hwfn->b_sp_dpc_enabled = true;
+}
+
+/* Allocate the slowpath DPC object; ECORE_NOMEM on allocation failure */
+static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
+{
+       p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
+       if (!p_hwfn->sp_dpc)
+               return ECORE_NOMEM;
+
+       return ECORE_SUCCESS;
+}
+
+/* Counterpart of ecore_int_sp_dpc_alloc() */
+static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
+{
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
+}
+
+/* Allocate all interrupt-related memory for a hwfn: slowpath DPC,
+ * slowpath status block and attention status block.  Returns the first
+ * failing allocator's status, ECORE_SUCCESS otherwise.  Freed by
+ * ecore_int_free().
+ */
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt)
+{
+       enum _ecore_status_t rc;
+
+       rc = ecore_int_sp_dpc_alloc(p_hwfn);
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
+               return rc;
+       }
+
+       rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
+               return rc;
+       }
+
+       rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
+
+       return rc;
+}
+
+/* Release all interrupt-related memory allocated by ecore_int_alloc() */
+void ecore_int_free(struct ecore_hwfn *p_hwfn)
+{
+       ecore_int_sp_sb_free(p_hwfn);
+       ecore_int_sb_attn_free(p_hwfn);
+       ecore_int_sp_dpc_free(p_hwfn);
+}
+
+/* Set up the slowpath SB, the attention SB and the slowpath DPC.
+ * Silently returns if the required structures were never allocated.
+ */
+void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
+               return;
+
+       ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+       ecore_int_sb_attn_setup(p_hwfn, p_ptt);
+       ecore_int_sp_dpc_setup(p_hwfn);
+}
+
+/* Report the SB counts gathered by ecore_int_igu_read_cam().
+ * A NULL igu-info or output pointer makes this a silent no-op.
+ */
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+                          struct ecore_sb_cnt_info *p_sb_cnt_info)
+{
+       struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+       if (!info || !p_sb_cnt_info)
+               return;
+
+       p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
+       p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
+       p_sb_cnt_info->sb_free_blk = info->free_blks;
+}
+
+/* Translate an absolute IGU SB id to a zero-based queue index: the PF's
+ * own SB range maps to [0, igu_sb_cnt), the VF range maps directly after
+ * it.  Returns 0 (and warns) for an SB id outside both ranges.
+ */
+u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+{
+       struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+
+       /* Determine origin of SB id */
+       if ((sb_id >= p_info->igu_base_sb) &&
+           (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
+               return sb_id - p_info->igu_base_sb;
+       } else if ((sb_id >= p_info->igu_base_sb_iov) &&
+                  (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
+               return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+       } else {
+               DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
+                         sb_id);
+               return 0;
+       }
+}
+
+/* Post-ISR-release cleanup: clear b_int_requested on every hwfn of the
+ * device.
+ */
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
+{
+       int i;
+
+       for_each_hwfn(p_dev, i)
+           p_dev->hwfns[i].b_int_requested = false;
+}
diff --git a/drivers/net/qede/ecore/ecore_int.h 
b/drivers/net/qede/ecore/ecore_int.h
new file mode 100644
index 0000000..17c9521
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_int.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INT_H__
+#define __ECORE_INT_H__
+
+#include "ecore.h"
+#include "ecore_int_api.h"
+
+#define ECORE_CAU_DEF_RX_TIMER_RES 0
+#define ECORE_CAU_DEF_TX_TIMER_RES 0
+
+#define ECORE_SB_ATT_IDX       0x0001
+#define ECORE_SB_EVENT_MASK    0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn)                                        \
+       ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+/* Shadow of a single IGU CAM entry, filled by ecore_int_igu_read_cam() */
+struct ecore_igu_block {
+       u8 status;              /* combination of ECORE_IGU_STATUS_* flags */
+#define ECORE_IGU_STATUS_FREE  0x01
+#define ECORE_IGU_STATUS_VALID 0x02
+#define ECORE_IGU_STATUS_PF    0x04
+
+       u8 vector_number;       /* vector within the owning function; 0 is
+                                * treated as the default SB
+                                */
+       u8 function_id;         /* owning PF/VF id */
+       u8 is_pf;               /* set when the entry belongs to a PF */
+};
+
+struct ecore_igu_map {
+       struct ecore_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+};
+
+/* Per-hwfn summary of the IGU CAM, built by ecore_int_igu_read_cam() */
+struct ecore_igu_info {
+       struct ecore_igu_map igu_map;
+       u16 igu_dsb_id;         /* id of the default (slowpath) SB */
+       u16 igu_base_sb;        /* first non-default SB of this PF */
+       u16 igu_base_sb_iov;    /* first SB usable by this PF's VFs */
+       u16 igu_sb_cnt;         /* #SBs of this PF, default excluded */
+       u16 igu_sb_cnt_iov;     /* #SBs available for VF usage */
+       u16 free_blks;          /* #blocks marked ECORE_IGU_STATUS_FREE */
+};
+
+/* TODO Names of function may change... */
+void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt,
+                               bool b_set, bool b_slowpath);
+
+void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_int_igu_read_cam - Reads the IGU CAM.
+ *     This function needs to be called during hardware
+ *     prepare. It reads the info from igu cam to know which
+ *     status block is the default / base status block etc.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt);
+
+typedef enum _ecore_status_t (*ecore_int_comp_cb_t) (struct ecore_hwfn *p_hwfn,
+                                                    void *cookie);
+/**
+ * @brief ecore_int_register_cb - Register callback func for
+ *      slowhwfn statusblock.
+ *
+ *     Every protocol that uses the slowhwfn status block
+ *     should register a callback function that will be called
+ *     once there is an update of the sp status block.
+ *
+ * @param p_hwfn
+ * @param comp_cb - function to be called when there is an
+ *                  interrupt on the sp sb
+ *
+ * @param cookie  - passed to the callback function
+ * @param sb_idx  - OUT parameter which gives the chosen index
+ *                  for this protocol.
+ * @param p_fw_cons  - pointer to the actual address of the
+ *                     consumer for this protocol.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
+                                          ecore_int_comp_cb_t comp_cb,
+                                          void *cookie,
+                                          u8 *sb_idx, __le16 **p_fw_cons);
+/**
+ * @brief ecore_int_unregister_cb - Unregisters callback
+ *      function from sp sb.
+ *      Partner of ecore_int_register_cb -> should be called
+ *      when no longer required.
+ *
+ * @param p_hwfn
+ * @param pi
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi);
+
+/**
+ * @brief ecore_int_get_sp_sb_id - Get the slowhwfn sb id.
+ *
+ * @param p_hwfn
+ *
+ * @return u16
+ */
+u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ *        block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id                - igu status block id
+ * @param cleanup_set  - set(1) / clear(0)
+ * @param opaque_fid    - the function for which to perform
+ *                     cleanup, for example a PF on behalf of
+ *                     its VFs.
+ */
+void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             u32 sb_id, bool cleanup_set, u16 opaque_fid);
+
+/**
+ * @brief Status block cleanup. Should be called for each status
+ *        block that will be used -> both PF / VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id                - igu status block id
+ * @param opaque       - opaque fid of the sb owner.
+ * @param cleanup_set  - set(1) / clear(0)
+ */
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      u32 sb_id, u16 opaque, bool b_set);
+
+/**
+ * @brief ecore_int_cau_conf - configure cau for a given status
+ *        block
+ *
+ * @param p_hwfn
+ * @param ptt
+ * @param sb_phys
+ * @param igu_sb_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt,
+                          dma_addr_t sb_phys,
+                          u16 igu_sb_id, u16 vf_number, u8 vf_valid);
+
+/**
+* @brief ecore_int_alloc
+*
+* @param p_hwfn
+ * @param p_ptt
+*
+* @return enum _ecore_status_t
+*/
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt);
+
+/**
+* @brief ecore_int_free
+*
+* @param p_hwfn
+*/
+void ecore_int_free(struct ecore_hwfn *p_hwfn);
+
+/**
+* @brief ecore_int_setup
+*
+* @param p_hwfn
+* @param p_ptt
+*/
+void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Returns an Rx queue index appropriate for usage with given SB.
+ *
+ * @param p_hwfn
+ * @param sb_id - absolute index of SB
+ *
+ * @return index of Rx queue
+ */
+u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief - Enable Interrupt & Attention for hw function
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode
+ *
+* @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_igu_enable(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         enum ecore_int_mode int_mode);
+
+/**
+ * @brief - Initialize CAU status block entry
+ *
+ * @param p_hwfn
+ * @param p_sb_entry
+ * @param pf_id
+ * @param vf_number
+ * @param vf_valid
+ */
+void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
+                            struct cau_sb_entry *p_sb_entry, u8 pf_id,
+                            u16 vf_number, u8 vf_valid);
+
+#ifndef ASIC_ONLY
+#define ECORE_MAPPING_MEMORY_SIZE(dev) \
+       ((CHIP_REV_IS_SLOW(dev) && (!(dev)->b_is_emul_full)) ? \
+        136 : NUM_OF_SBS(dev))
+#else
+#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
+#endif
+
+#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/ecore/ecore_int_api.h 
b/drivers/net/qede/ecore/ecore_int_api.h
new file mode 100644
index 0000000..077449d
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_int_api.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_INT_API_H__
+#define __ECORE_INT_API_H__
+
+#ifndef __EXTRACT__LINUX__
+#define ECORE_SB_IDX           0x0002
+
+#define RX_PI          0
+#define TX_PI(tc)      (RX_PI + 1 + tc)
+
+#ifndef ECORE_INT_MODE
+#define ECORE_INT_MODE
+enum ecore_int_mode {
+       ECORE_INT_MODE_INTA,
+       ECORE_INT_MODE_MSIX,
+       ECORE_INT_MODE_MSI,
+       ECORE_INT_MODE_POLL,
+};
+#endif
+
+/* Driver-side state of a single status block */
+struct ecore_sb_info {
+       struct status_block *sb_virt;   /* virtual address of the SB memory */
+       dma_addr_t sb_phys;             /* DMA address of the SB memory */
+       u32 sb_ack;             /* Last given ack */
+       u16 igu_sb_id;
+       void OSAL_IOMEM *igu_addr;      /* address the prod/cons ack command
+                                        * is written to (see ecore_sb_ack)
+                                        */
+       u8 flags;
+#define ECORE_SB_INFO_INIT     0x1
+#define ECORE_SB_INFO_SETUP    0x2
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+       struct ecore_hwfn *p_hwfn;
+#endif
+       struct ecore_dev *p_dev;
+};
+
+struct ecore_sb_cnt_info {
+       int sb_cnt;
+       int sb_iov_cnt;
+       int sb_free_blk;
+};
+
+/* Sample the SB producer index written by the chip; if it advanced since
+ * the last ack, record it in sb_ack and return ECORE_SB_IDX, otherwise
+ * return 0.
+ */
+static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
+{
+       u32 prod = 0;
+       u16 rc = 0;
+
+       /* barrier(); status block is written to by the chip */
+       /* FIXME: need some sort of barrier. */
+       prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
+           STATUS_BLOCK_PROD_INDEX_MASK;
+       if (sb_info->sb_ack != prod) {
+               sb_info->sb_ack = prod;
+               rc |= ECORE_SB_IDX;
+       }
+
+       OSAL_MMIOWB(sb_info->p_dev);
+       return rc;
+}
+
+/**
+ *
+ * @brief This function creates an update command for interrupts that is
+ *        written to the IGU.
+ *
+ * @param sb_info      - This is the structure allocated and
+ *        initialized per status block. Assumption is
+ *        that it was initialized using ecore_sb_init
+ * @param int_cmd      - Enable/Disable/Nop
+ * @param upd_flg      - whether igu consumer should be
+ *        updated.
+ *
+ * @return OSAL_INLINE void
+ */
+static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
+                                    enum igu_int_cmd int_cmd, u8 upd_flg)
+{
+       struct igu_prod_cons_update igu_ack = { 0 };
+
+       /* Pack the last-acked SB index, the update flag and the interrupt
+        * enable/disable command into a single IGU command word.
+        */
+       igu_ack.sb_id_and_flags =
+           ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+            (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+            (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+            (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+       DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
+                     igu_ack.sb_id_and_flags);
+#else
+       DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
+#endif
+       /* Both segments (interrupts & acks) are written to same place address;
+        * Need to guarantee all commands will be received (in-order) by HW.
+        */
+       OSAL_MMIOWB(sb_info->p_dev);
+       OSAL_BARRIER(sb_info->p_dev);
+}
+
+/* Copy 'size' bytes into internal RAM as a sequence of 32-bit register
+ * writes.  'size' is assumed to be a multiple of sizeof(u32) - the loop
+ * truncates, so trailing remainder bytes are never written.
+ */
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
+                                         void OSAL_IOMEM *addr,
+                                         int size, u32 *data)
+#else
+static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
+                                         void OSAL_IOMEM *addr,
+                                         int size, u32 *data)
+#endif
+{
+       unsigned int i;
+
+       for (i = 0; i < size / sizeof(*data); i++)
+               DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *) addr)[i], data[i]);
+}
+
+/* Public wrapper that hides the hwfn-parameter difference between the
+ * direct-hwfn and regular configurations.
+ */
+#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
+                                       void OSAL_IOMEM *addr,
+                                       int size, u32 *data)
+{
+       __internal_ram_wr(p_hwfn, addr, size, data);
+}
+#else
+static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
+                                       int size, u32 *data)
+{
+       __internal_ram_wr(OSAL_NULL, addr, size, data);
+}
+#endif
+#endif
+
+struct ecore_hwfn;
+struct ecore_ptt;
+
+enum ecore_coalescing_fsm {
+       ECORE_COAL_RX_STATE_MACHINE,
+       ECORE_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * @brief ecore_int_cau_conf_pi - configure cau for a given
+ *        status block
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param igu_sb_id
+ * @param pi_index
+ * @param state
+ * @param timeset
+ */
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt,
+                          u16 igu_sb_id,
+                          u32 pi_index,
+                          enum ecore_coalescing_fsm coalescing_fsm,
+                          u8 timeset);
+
+/**
+ *
+ * @brief ecore_int_igu_enable_int - enable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param int_mode - interrupt mode to use
+ */
+void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
+                             struct ecore_ptt *p_ptt,
+                             enum ecore_int_mode int_mode);
+
+/**
+ *
+ * @brief ecore_int_igu_disable_int - disable device interrupts
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt);
+
+/**
+ *
+ * @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc
+ *        register from igu.
+ *
+ * @param p_hwfn
+ *
+ * @return u64
+ */
+u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
+
+#define ECORE_SP_SB_ID 0xffff
+/**
+ * @brief ecore_int_sb_init - Initializes the sb_info structure.
+ *
+ * once the structure is initialized it can be passed to sb related functions.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info      points to an uninitialized (but
+ *                     allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id                the sb_id to be used (zero based in driver)
+ *                     should use ECORE_SP_SB_ID for SP Status block
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_sb_info *sb_info,
+                                      void *sb_virt_addr,
+                                      dma_addr_t sb_phy_addr, u16 sb_id);
+/**
+ * @brief ecore_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info      initialized sb_info structure
+ */
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);
+
+/**
+ * @brief ecore_int_sb_release - releases the sb_info structure.
+ *
+ * once the structure is released, it's memory can be freed
+ *
+ * @param p_hwfn
+ * @param sb_info      points to an allocated sb_info structure
+ * @param sb_id                the sb_id to be used (zero based in driver)
+ *                     should never be equal to ECORE_SP_SB_ID
+ *                     (SP Status block)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_sb_info *sb_info,
+                                         u16 sb_id);
+
+/**
+ * @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
+ *        default status block.
+ *
+ * @param p_hwfn - pointer to hwfn
+ *
+ */
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);
+
+/**
+ * @brief ecore_int_get_num_sbs - get the number of status
+ *        blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_sb_cnt_info
+ *
+ * @return
+ */
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+                          struct ecore_sb_cnt_info *p_sb_cnt_info);
+
+/**
+ * @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
+ *        release. The API need to be called after releasing all slowpath IRQs
+ *        of the device.
+ *
+ * @param p_dev
+ *
+ */
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_iov_api.h 
b/drivers/net/qede/ecore/ecore_iov_api.h
new file mode 100644
index 0000000..c823442
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_iov_api.h
@@ -0,0 +1,931 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SRIOV_API_H__
+#define __ECORE_SRIOV_API_H__
+
+#include "ecore_status.h"
+
+#define ECORE_VF_ARRAY_LENGTH (3)
+
+#define IS_VF(p_dev)           ((p_dev)->b_is_vf)
+#define IS_PF(p_dev)           (!((p_dev)->b_is_vf))
+#ifdef CONFIG_ECORE_SRIOV
+#define IS_PF_SRIOV(p_hwfn)    (!!((p_hwfn)->p_dev->sriov_info.total_vfs))
+#else
+#define IS_PF_SRIOV(p_hwfn)    (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn)      (!!((p_hwfn)->pf_iov_info))
+#define IS_PF_PDA(p_hwfn)      0       /* @@TBD Michalk */
+
+/* @@@ TBD MichalK - what should this number be*/
+#define ECORE_MAX_VF_CHAINS_PER_PF 16
+
+/* vport update extended feature tlvs flags */
+enum ecore_iov_vport_update_flag {
+       ECORE_IOV_VP_UPDATE_ACTIVATE = 0,
+       ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1,
+       ECORE_IOV_VP_UPDATE_TX_SWITCH = 2,
+       ECORE_IOV_VP_UPDATE_MCAST = 3,
+       ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4,
+       ECORE_IOV_VP_UPDATE_RSS = 5,
+       ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6,
+       ECORE_IOV_VP_UPDATE_SGE_TPA = 7,
+       ECORE_IOV_VP_UPDATE_MAX = 8,
+};
+
+struct ecore_mcp_link_params;
+struct ecore_mcp_link_state;
+struct ecore_mcp_link_capabilities;
+
+/* These defines are used by the hw-channel; should never change order */
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+
+struct ecore_vf_acquire_sw_info {
+       u32 driver_version;
+       u8 os_type;
+};
+
+struct ecore_public_vf_info {
+       /* These copies will later be reflected in the bulletin board,
+        * but this copy should be newer.
+        */
+       u8 forced_mac[ETH_ALEN];
+       u16 forced_vlan;
+};
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* This is SW channel related only... */
+enum mbx_state {
+       VF_PF_UNKNOWN_STATE = 0,
+       VF_PF_WAIT_FOR_START_REQUEST = 1,
+       VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2,
+       VF_PF_REQUEST_IN_PROCESSING = 3,
+       VF_PF_RESPONSE_READY = 4,
+};
+
+struct ecore_iov_sw_mbx {
+       enum mbx_state mbx_state;
+
+       u32 request_size;
+       u32 request_offset;
+
+       u32 response_size;
+       u32 response_offset;
+};
+
+/**
+ * @brief Get the vf sw mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return struct ecore_iov_sw_mbx*
+ */
+struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
+                                                u16 rel_vf_id);
+#endif
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief mark/clear all VFs before/after an incoming PCIe sriov
+ *        disable.
+ *
+ * @param p_hwfn
+ * @param to_disable
+ */
+void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable);
+
+/**
+ * @brief mark/clear chosen VFs before/after an incoming PCIe
+ *        sriov disable.
+ *
+ * @param p_hwfn
+ * @param to_disable
+ */
+void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
+                                u16 rel_vf_id, u8 to_disable);
+
+/**
+ *
+ * @brief ecore_iov_init_hw_for_vf - initialize the HW for
+ *        enabling access of a VF. Also includes preparing the
+ *        IGU for VF access. This needs to be called AFTER hw is
+ *        initialized and BEFORE VF is loaded inside the VM.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param rel_vf_id
+ * @param num_rx_queues
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u16 rel_vf_id, u16 num_rx_queues);
+
+/**
+ * @brief ecore_iov_process_mbx_req - process a request received
+ *        from the VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ */
+void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt, int vfid);
+
+/**
+ * @brief ecore_iov_release_hw_for_vf - called once upper layer
+ *        knows VF is done with - can release any resources
+ *        allocated for VF at this point. this must be done once
+ *        we know VF is no longer loaded in VM.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param rel_vf_id
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt,
+                                                u16 rel_vf_id);
+
+#ifndef LINUX_REMOVE
+/**
+ * @brief ecore_iov_set_vf_ctx - set a context for a given VF
+ *
+ * @param p_hwfn
+ * @param vf_id
+ * @param ctx
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
+                                         u16 vf_id, void *ctx);
+#endif
+
+/**
+ * @brief FLR cleanup for all VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt);
+
+/**
+ * @brief FLR cleanup for single VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param rel_vf_id
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt, u16 rel_vf_id);
+
+/**
+ * @brief Update the bulletin with link information. Notice this does NOT
+ *        send a bulletin update, only updates the PF's bulletin.
+ *
+ * @param p_hwfn
+ * @param p_vf
+ * @param params - the link params to use for the VF link configuration
+ * @param link - the link output to use for the VF link configuration
+ * @param p_caps - the link default capabilities.
+ */
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+                       u16 vfid,
+                       struct ecore_mcp_link_params *params,
+                       struct ecore_mcp_link_state *link,
+                       struct ecore_mcp_link_capabilities *p_caps);
+
+/**
+ * @brief Returns link information as perceived by VF.
+ *
+ * @param p_hwfn
+ * @param p_vf
+ * @param p_params - the link params visible to vf.
+ * @param p_link - the link state visible to vf.
+ * @param p_caps - the link default capabilities visible to vf.
+ */
+void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
+                       u16 vfid,
+                       struct ecore_mcp_link_params *params,
+                       struct ecore_mcp_link_state *link,
+                       struct ecore_mcp_link_capabilities *p_caps);
+
+/**
+ * @brief return if the VF is pending FLR
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return bool
+ */
+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief Check if given VF ID @vfid is valid
+ *        w.r.t. @b_enabled_only value
+ *        if b_enabled_only = true - only enabled VF id is valid
+ *        else any VF id less than max_vfs is valid
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ *
+ * @return bool - true for valid VF ID
+ */
+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
+                            int rel_vf_id, bool b_enabled_only);
+
+/**
+ * @brief Get VF's public info structure
+ *
+ * @param p_hwfn
+ * @param vfid - Relative VF ID
+ * @param b_enabled_only - false if want to access even if vf is disabled
+ *
+ * @return struct ecore_public_vf_info *
+ */
+struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn
+                                                         *p_hwfn, u16 vfid,
+                                                         bool b_enabled_only);
+
+/**
+ * @brief Set pending events bitmap for given @vfid
+ *
+ * @param p_hwfn
+ * @param vfid
+ */
+void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid);
+
+/**
+ * @brief Copy pending events bitmap in @events and clear
+ *       original copy of events
+ *
+ * @param p_hwfn
+ */
+void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
+                                              u64 *events);
+
+/**
+ * @brief Copy VF's message to PF's buffer
+ *
+ * @param p_hwfn
+ * @param ptt
+ * @param vfid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_ptt *ptt, int vfid);
+/**
+ * @brief Set forced MAC address in PFs copy of bulletin board
+ *        and configures FW/HW to support the configuration.
+ *
+ * @param p_hwfn
+ * @param mac
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
+                                      u8 *mac, int vfid);
+
+/**
+ * @brief Set MAC address in PFs copy of bulletin board without
+ *        configuring FW/HW.
+ *
+ * @param p_hwfn
+ * @param mac
+ * @param vfid
+ */
+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
+                                               u8 *mac, int vfid);
+
+/**
+ * @brief Set forced VLAN [pvid] in PFs copy of bulletin board
+ *        and configures FW/HW to support the configuration.
+ *        Setting of pvid 0 would clear the feature.
+ * @param p_hwfn
+ * @param pvid
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+                                       u16 pvid, int vfid);
+
+/**
+ * @brief Set default behaviour of VF in case no vlans are configured for it
+ *        whether to accept only untagged traffic or all.
+ *        Must be called prior to the VF vport-start.
+ *
+ * @param p_hwfn
+ * @param b_untagged_only
+ * @param vfid
+ *
+ * @return ECORE_SUCCESS if configuration would stick.
+ */
+enum _ecore_status_t
+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
+                                              bool b_untagged_only, int vfid);
+/**
+ * @brief Get VFs opaque fid.
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param opaque_fid
+ */
+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
+                                 u16 *opaque_fid);
+
+/**
+ * @brief Get VFs VPORT id.
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param vport id
+ */
+void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
+                               u8 *p_vport_id);
+
+/**
+ * @brief Check if VF has VPORT instance. This can be used
+ *       to check if VPORT is active.
+ *
+ * @param p_hwfn
+ */
+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief PF posts the bulletin to the VF
+ *
+ * @param p_hwfn
+ * @param p_vf
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
+                                               int vfid,
+                                               struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Check if given VF (@vfid) is marked as stopped
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool : true if stopped
+ */
+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Configure VF anti spoofing
+ *
+ * @param p_hwfn
+ * @param vfid
+ * @param val - spoofchk value - true/false
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+                                           int vfid, bool val);
+
+/**
+ * @brief Get VF's configured spoof value.
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool - spoofchk value - true/false
+ */
+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Check for SRIOV sanity by PF.
+ *
+ * @param p_hwfn
+ * @param vfid
+ *
+ * @return bool - true if sanity checks passes, else false
+ */
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief Get the num of VF chains.
+ *
+ * @param p_hwfn
+ *
+ * @return u8
+ */
+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Get vf request mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param pp_req_virt_addr
+ * @param p_req_virt_size
+ */
+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+                                         u16 rel_vf_id,
+                                         void **pp_req_virt_addr,
+                                         u16 *p_req_virt_size);
+
+/**
+ * @brief Get vf mailbox params
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ * @param pp_reply_virt_addr
+ * @param p_reply_virt_size
+ */
+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+                                           u16 rel_vf_id,
+                                           void **pp_reply_virt_addr,
+                                           u16 *p_reply_virt_size);
+
+/**
+ * @brief Validate if the given length is a valid vfpf message
+ *        length
+ *
+ * @param length
+ *
+ * @return bool
+ */
+bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
+
+/**
+ * @brief Return the max pfvf message length
+ *
+ * @return u32
+ */
+u32 ecore_iov_pfvf_msg_length(void);
+
+/**
+ * @brief Returns forced MAC address if one is configured
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return OSAL_NULL if mac isn't forced; Otherwise, returns MAC.
+ */
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief Returns pvid if one is configured
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return 0 if no pvid is configured, otherwise the pvid.
+ */
+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
+                                      u16 rel_vf_id);
+/**
+ * @brief Configure VFs tx rate
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ * @param val - tx rate value in Mb/sec.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt,
+                                                int vfid, int val);
+
+/**
+ * @brief - Retrieves the statistics associated with a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfid
+ * @param p_stats - this will be filled with the VF statistics
+ *
+ * @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.
+ */
+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           int vfid,
+                                           struct ecore_eth_stats *p_stats);
+
+/**
+ * @brief - Retrieves num of rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return num of rxqs chains.
+ */
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves num of active rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves ctx pointer
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves VF`s num sbs
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is waiting for acquire
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is acquired but not initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+                                             u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is acquired and initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Get VF's vport min rate configured.
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return - rate in Mbps
+ */
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief - Configure min rate for VF's vport.
+ * @param p_dev
+ * @param vfid
+ * @param - rate in Mbps
+ *
+ * @return
+ */
+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
+                                                    int vfid, u32 rate);
+#else
+static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn,
+                                                    u8 to_disable)
+{
+}
+
+static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
+                                                   u16 rel_vf_id,
+                                                   u8 to_disable)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_init_hw_for_vf(struct
+                                                                ecore_hwfn
+                                                                *p_hwfn,
+                                                                struct
+                                                                ecore_ptt
+                                                                *p_ptt,
+                                                                u16 rel_vf_id,
+                                                                u16
+                                                                num_rx_queues)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt,
+                                                 int vfid)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_release_hw_for_vf(struct
+                                                                   ecore_hwfn
+                                                                   *p_hwfn,
+                                                                   struct
+                                                                   ecore_ptt
+                                                                   *p_ptt,
+                                                                   u16
+                                                                   rel_vf_id)
+{
+       return ECORE_SUCCESS;
+}
+
+#ifndef LINUX_REMOVE
+static OSAL_INLINE enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn
+                                                            *p_hwfn, u16 vf_id,
+                                                            void *ctx)
+{
+       return ECORE_INVAL;
+}
+#endif
+static OSAL_INLINE enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct
+                                                                ecore_hwfn
+                                                                *p_hwfn,
+                                                                struct
+                                                                ecore_ptt
+                                                                *p_ptt)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(
+       struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, u16 vfid,
+                                          struct ecore_mcp_link_params *params,
+                                          struct ecore_mcp_link_state *link,
+                                          struct ecore_mcp_link_capabilities
+                                          *p_caps)
+{
+}
+
+static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, u16 vfid,
+                                          struct ecore_mcp_link_params *params,
+                                          struct ecore_mcp_link_state *link,
+                                          struct ecore_mcp_link_capabilities
+                                          *p_caps)
+{
+}
+
+static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
+                                                   u16 rel_vf_id)
+{
+       return false;
+}
+
+static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
+                                               int rel_vf_id,
+                                               bool b_enabled_only)
+{
+       return false;
+}
+
+static OSAL_INLINE struct ecore_public_vf_info *
+       ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, u16 vfid,
+                                 bool b_enabled_only)
+{
+       return OSAL_NULL;
+}
+
+static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn
+                                                       *p_hwfn, u8 vfid)
+{
+}
+
+static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct
+                                                                 ecore_hwfn
+                                                                 *p_hwfn,
+                                                                 u64 *events)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn
+                                                             *p_hwfn,
+                                                             struct ecore_ptt
+                                                             *ptt, int vfid)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn
+                                                         *p_hwfn, u8 *mac,
+                                                         int vfid)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct
+                                                                  ecore_hwfn
+                                                                  *p_hwfn,
+                                                                  u8 *mac,
+                                                                  int vfid)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn
+                                                          *p_hwfn, u16 pvid,
+                                                          int vfid)
+{
+}
+
+static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn,
+                                                    int vfid, u16 *opaque_fid)
+{
+}
+
+static OSAL_INLINE void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn,
+                                                  int vfid, u8 *p_vport_id)
+{
+}
+
+static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn
+                                                       *p_hwfn, int vfid)
+{
+       return false;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct
+                                                                  ecore_hwfn
+                                                                  *p_hwfn,
+                                                                  int vfid,
+                                                                  struct
+                                                                  ecore_ptt
+                                                                  *p_ptt)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn,
+                                               int vfid)
+{
+       return false;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn
+                                                              *p_hwfn,
+                                                              int vfid,
+                                                              bool val)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn,
+                                              int vfid)
+{
+       return false;
+}
+
+static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn,
+                                                 int vfid)
+{
+       return false;
+}
+
+static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
+{
+       return 0;
+}
+
+static OSAL_INLINE void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn
+                                                            *p_hwfn,
+                                                            u16 rel_vf_id,
+                                                            void
+                                                            **pp_req_virt_addr,
+                                                            u16 *
+                                                            p_req_virt_size)
+{
+}
+
+static OSAL_INLINE void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn
+                                                              *p_hwfn,
+                                                              u16 rel_vf_id,
+                                                              void
+                                                      **pp_reply_virt_addr,
+                                                              u16 *
+                                                      p_reply_virt_size)
+{
+}
+
+static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
+{
+       return false;
+}
+
+static OSAL_INLINE u32 ecore_iov_pfvf_msg_length(void)
+{
+       return 0;
+}
+
+static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn
+                                                        *p_hwfn, u16 rel_vf_id)
+{
+       return OSAL_NULL;
+}
+
+static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn
+                                                         *p_hwfn,
+                                                         u16 rel_vf_id)
+{
+       return 0;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct
+                                                                   ecore_hwfn
+                                                                   *p_hwfn,
+                                                                   struct
+                                                                   ecore_ptt
+                                                                   *p_ptt,
+                                                                   int vfid,
+                                                                   int val)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
+                                               u16 rel_vf_id)
+{
+       return 0;
+}
+
+static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn
+                                                      *p_hwfn, u16 rel_vf_id)
+{
+       return 0;
+}
+
+static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
+                                             u16 rel_vf_id)
+{
+       return OSAL_NULL;
+}
+
+static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
+                                              u16 rel_vf_id)
+{
+       return 0;
+}
+
+static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn
+                                                        *p_hwfn, u16 rel_vf_id)
+{
+       return false;
+}
+
+static OSAL_INLINE bool ecore_iov_is_vf_acquired_not_initialized(struct
+                                                                ecore_hwfn
+                                                                *p_hwfn,
+                                                                u16 rel_vf_id)
+{
+       return false;
+}
+
+static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
+                                                   u16 rel_vf_id)
+{
+       return false;
+}
+
+static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn,
+                                                int vfid)
+{
+       return 0;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(
+       struct ecore_dev *p_dev, int vfid, u32 rate)
+{
+       return ECORE_INVAL;
+}
+#endif
+#endif
diff --git a/drivers/net/qede/ecore/ecore_iro.h b/drivers/net/qede/ecore/ecore_iro.h
new file mode 100644
index 0000000..51bca18
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_iro.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __IRO_H__
+#define __IRO_H__
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET                (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE          (IRO[0].size)
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+(IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE                  (IRO[1].size)
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+(IRO[2].base + ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE              (IRO[2].size)
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+(IRO[3].base + ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE                (IRO[3].size)
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+(IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE              (IRO[4].size)
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
+(IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE                   (IRO[5].size)
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
+(IRO[6].base + ((global_queue_id) * IRO[6].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE          (IRO[6].size)
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET          (IRO[7].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE            (IRO[7].size)
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET          (IRO[8].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE            (IRO[8].size)
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET          (IRO[9].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE            (IRO[9].size)
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET          (IRO[10].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE            (IRO[10].size)
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET          (IRO[11].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE            (IRO[11].size)
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET          (IRO[12].base)
+#define USTORM_INTEG_TEST_DATA_SIZE            (IRO[12].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
+(IRO[13].base + ((core_rx_queue_id) * IRO[13].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE               (IRO[13].size)
+/* Tstorm LightL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+(IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE    (IRO[14].size)
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+(IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE    (IRO[15].size)
+/* Pstorm LiteL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+(IRO[16].base + ((core_tx_stats_id) * IRO[16].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE    (IRO[16].size)
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+(IRO[17].base + ((stat_counter_id) * IRO[17].m1))
+#define MSTORM_QUEUE_STAT_SIZE                 (IRO[17].size)
+/* Mstorm producers */
+#define MSTORM_PRODS_OFFSET(queue_id) \
+(IRO[18].base + ((queue_id) * IRO[18].m1))
+#define MSTORM_PRODS_SIZE                      (IRO[18].size)
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET           (IRO[19].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE             (IRO[19].size)
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+(IRO[20].base + ((stat_counter_id) * IRO[20].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
+/* Ustorm queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+(IRO[21].base + ((queue_id) * IRO[21].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE             (IRO[21].size)
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+(IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define PSTORM_QUEUE_STAT_SIZE                 (IRO[22].size)
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET            (IRO[23].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE              (IRO[23].size)
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+(IRO[24].base + ((pf_id) * IRO[24].m1))
+#define ETH_RX_RATE_LIMIT_SIZE                 (IRO[24].size)
+/* Ystorm queue zone */
+#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+(IRO[25].base + ((queue_id) * IRO[25].m1))
+#define YSTORM_ETH_QUEUE_ZONE_SIZE             (IRO[25].size)
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+(IRO[26].base + ((rss_id) * IRO[26].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE                        (IRO[26].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+(IRO[27].base + ((rss_id) * IRO[27].m1))
+#define USTORM_TOE_CQ_PROD_SIZE                        (IRO[27].size)
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+(IRO[28].base + ((pf_id) * IRO[28].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE               (IRO[28].size)
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
+(IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE             (IRO[29].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+(IRO[30].base + ((func_id) * IRO[30].m1) + ((bdq_id) * IRO[30].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE          (IRO[30].size)
+/* Mstorm rq-cons of given queue-id */
+#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
+(IRO[31].base + ((rq_queue_id) * IRO[31].m1))
+#define MSTORM_SCSI_RQ_CONS_SIZE               (IRO[31].size)
+/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
+(IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE          (IRO[32].size)
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+(IRO[33].base + ((pf_id) * IRO[33].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE             (IRO[33].size)
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+(IRO[34].base + ((pf_id) * IRO[34].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE             (IRO[34].size)
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
+(IRO[35].base + ((pf_id) * IRO[35].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE             (IRO[35].size)
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+(IRO[36].base + ((pf_id) * IRO[36].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE \
+(IRO[36].size)
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+(IRO[37].base + ((pf_id) * IRO[37].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE             (IRO[37].size)
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
+(IRO[38].base + ((pf_id) * IRO[38].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE             (IRO[38].size)
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+(IRO[42].base + ((rdma_stat_counter_id) * IRO[42].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE            (IRO[42].size)
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+(IRO[43].base + ((rdma_stat_counter_id) * IRO[43].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE            (IRO[43].size)
+
+#endif /* __IRO_H__ */
diff --git a/drivers/net/qede/ecore/ecore_iro_values.h b/drivers/net/qede/ecore/ecore_iro_values.h
new file mode 100644
index 0000000..c818b58
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_iro_values.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __IRO_VALUES_H__
+#define __IRO_VALUES_H__
+
+static const struct iro iro_arr[44] = {
+       {0x0, 0x0, 0x0, 0x0, 0x8},
+       {0x4db0, 0x60, 0x0, 0x0, 0x60},
+       {0x6418, 0x20, 0x0, 0x0, 0x20},
+       {0x500, 0x8, 0x0, 0x0, 0x4},
+       {0x480, 0x8, 0x0, 0x0, 0x4},
+       {0x0, 0x8, 0x0, 0x0, 0x2},
+       {0x80, 0x8, 0x0, 0x0, 0x2},
+       {0x4938, 0x0, 0x0, 0x0, 0x78},
+       {0x3df0, 0x0, 0x0, 0x0, 0x78},
+       {0x29b0, 0x0, 0x0, 0x0, 0x78},
+       {0x4d38, 0x0, 0x0, 0x0, 0x78},
+       {0x56c8, 0x0, 0x0, 0x0, 0x78},
+       {0x7e48, 0x0, 0x0, 0x0, 0x78},
+       {0xa28, 0x8, 0x0, 0x0, 0x8},
+       {0x61f8, 0x10, 0x0, 0x0, 0x10},
+       {0xb500, 0x30, 0x0, 0x0, 0x30},
+       {0x95b8, 0x30, 0x0, 0x0, 0x30},
+       {0x5898, 0x40, 0x0, 0x0, 0x40},
+       {0x1f8, 0x10, 0x0, 0x0, 0x8},
+       {0xa228, 0x0, 0x0, 0x0, 0x4},
+       {0x8050, 0x40, 0x0, 0x0, 0x30},
+       {0xcf8, 0x8, 0x0, 0x0, 0x8},
+       {0x2b48, 0x80, 0x0, 0x0, 0x38},
+       {0xadf0, 0x0, 0x0, 0x0, 0xf0},
+       {0xaee0, 0x8, 0x0, 0x0, 0x8},
+       {0x80, 0x8, 0x0, 0x0, 0x8},
+       {0xac0, 0x8, 0x0, 0x0, 0x8},
+       {0x2578, 0x8, 0x0, 0x0, 0x8},
+       {0x24f8, 0x8, 0x0, 0x0, 0x8},
+       {0x0, 0x8, 0x0, 0x0, 0x8},
+       {0x200, 0x10, 0x8, 0x0, 0x8},
+       {0x17f8, 0x8, 0x0, 0x0, 0x2},
+       {0x19f8, 0x10, 0x8, 0x0, 0x2},
+       {0xd988, 0x38, 0x0, 0x0, 0x24},
+       {0x11040, 0x10, 0x0, 0x0, 0x8},
+       {0x11670, 0x38, 0x0, 0x0, 0x18},
+       {0xaeb8, 0x30, 0x0, 0x0, 0x10},
+       {0x86f8, 0x28, 0x0, 0x0, 0x18},
+       {0xebf8, 0x10, 0x0, 0x0, 0x10},
+       {0xde08, 0x40, 0x0, 0x0, 0x30},
+       {0x121a0, 0x38, 0x0, 0x0, 0x8},
+       {0xf060, 0x20, 0x0, 0x0, 0x20},
+       {0x2b80, 0x80, 0x0, 0x0, 0x10},
+       {0x50a0, 0x10, 0x0, 0x0, 0x10},
+};
+
+#endif /* __IRO_VALUES_H__ */
diff --git a/drivers/net/qede/ecore/ecore_l2.c b/drivers/net/qede/ecore/ecore_l2.c
new file mode 100644
index 0000000..8e2bd06
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_l2.c
@@ -0,0 +1,1801 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_chain.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_l2.h"
+#include "ecore_sp_commands.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_hw.h"
+#include "ecore_vf.h"
+#include "ecore_sriov.h"
+#include "ecore_mcp.h"
+
+#define ECORE_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41
+
+enum _ecore_status_t
+ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
+                        struct ecore_sp_vport_start_params *p_params)
+{
+       struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+       u8 abs_vport_id = 0;
+       u16 rx_mode = 0;
+
+       rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_params->opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  ETH_RAMROD_VPORT_START,
+                                  PROTOCOLID_ETH, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.vport_start;
+       p_ramrod->vport_id = abs_vport_id;
+
+       p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
+       p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
+       p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
+       p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+       p_ramrod->untagged = p_params->only_untagged;
+       p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
+
+       SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+       SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+       p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);
+
+       /* TPA related fields */
+       OSAL_MEMSET(&p_ramrod->tpa_param, 0,
+                   sizeof(struct eth_vport_tpa_param));
+       p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+
+       switch (p_params->tpa_mode) {
+       case ECORE_TPA_MODE_GRO:
+               p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+               p_ramrod->tpa_param.tpa_max_size = (u16) -1;
+               p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
+               p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
+               p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
+               p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
+               p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
+               p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+               break;
+       default:
+               break;
+       }
+
+       p_ramrod->tx_switching_en = p_params->tx_switching;
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+               p_ramrod->tx_switching_en = 0;
+#endif
+
+       /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+       p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
+                                                   p_params->concrete_fid);
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t
+ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
+                    struct ecore_sp_vport_start_params *p_params)
+{
+       if (IS_VF(p_hwfn->p_dev))
+               return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+                                              p_params->mtu,
+                                              p_params->remove_inner_vlan,
+                                              p_params->tpa_mode,
+                                              p_params->max_buffers_per_cqe,
+                                              p_params->only_untagged);
+
+       return ecore_sp_eth_vport_start(p_hwfn, p_params);
+}
+
+static enum _ecore_status_t
+ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
+                         struct vport_update_ramrod_data *p_ramrod,
+                         struct ecore_rss_params *p_rss)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct eth_vport_rss_config *p_config;
+       u16 abs_l2_queue = 0;
+       int i;
+
+       if (!p_rss) {
+               p_ramrod->common.update_rss_flg = 0;
+               return rc;
+       }
+       p_config = &p_ramrod->rss_config;
+
+       OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
+                         ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+       rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+       p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+       p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+       p_config->update_rss_key = p_rss->update_rss_key;
+
+       p_config->rss_mode = p_rss->rss_enable ?
+           ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;
+
+       p_config->capabilities = 0;
+
+       SET_FIELD(p_config->capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+                 !!(p_rss->rss_caps & ECORE_RSS_IPV4));
+       SET_FIELD(p_config->capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+                 !!(p_rss->rss_caps & ECORE_RSS_IPV6));
+       SET_FIELD(p_config->capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+                 !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
+       SET_FIELD(p_config->capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+                 !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
+       SET_FIELD(p_config->capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+                 !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
+       SET_FIELD(p_config->capabilities,
+                 ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+                 !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
+       p_config->tbl_size = p_rss->rss_table_size_log;
+       p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+                  "update rss flag %d, rss_mode = %d, update_caps = %d, 
capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+                  p_ramrod->common.update_rss_flg,
+                  p_config->rss_mode,
+                  p_config->update_rss_capabilities,
+                  p_config->capabilities,
+                  p_config->update_rss_ind_table, p_config->update_rss_key);
+
+       for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+               rc = ecore_fw_l2_queue(p_hwfn,
+                                      (u8) p_rss->rss_ind_table[i],
+                                      &abs_l2_queue);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+
+               p_config->indirection_table[i] = OSAL_CPU_TO_LE16(abs_l2_queue);
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "i= %d, queue = %d\n",
+                          i, p_config->indirection_table[i]);
+       }
+
+       for (i = 0; i < 10; i++)
+               p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);
+
+       return rc;
+}
+
+static void
+ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
+                           struct vport_update_ramrod_data *p_ramrod,
+                           struct ecore_filter_accept_flags flags)
+{
+       p_ramrod->common.update_rx_mode_flg = flags.update_rx_mode_config;
+       p_ramrod->common.update_tx_mode_flg = flags.update_tx_mode_config;
+
+#ifndef ASIC_ONLY
+       /* On B0 emulation we cannot enable Tx, since this would cause writes
+        * to PVFC HW block which isn't implemented in emulation.
+        */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                          "Non-Asic - prevent Tx mode in vport update\n");
+               p_ramrod->common.update_tx_mode_flg = 0;
+       }
+#endif
+
+       /* Set Rx mode accept flags */
+       if (p_ramrod->common.update_rx_mode_flg) {
+               __le16 *state = &p_ramrod->rx_mode.state;
+               u8 accept_filter = flags.rx_accept_filter;
+
+/*
+               SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+                         !!(accept_filter & ECORE_ACCEPT_NONE));
+*/
+/*
+               SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL,
+                         (!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) &&
+                          !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+*/
+               SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+                         !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
+                           !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+
+               SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+                         !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));
+/*
+               SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+                         !!(accept_filter & ECORE_ACCEPT_NONE));
+*/
+               SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+                         !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
+                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+               SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+                         (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
+                          !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+               SET_FIELD(*state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+                         !!(accept_filter & ECORE_ACCEPT_BCAST));
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                          "p_ramrod->rx_mode.state = 0x%x\n",
+                          p_ramrod->rx_mode.state);
+       }
+
+       /* Set Tx mode accept flags */
+       if (p_ramrod->common.update_tx_mode_flg) {
+               __le16 *state = &p_ramrod->tx_mode.state;
+               u8 accept_filter = flags.tx_accept_filter;
+
+               SET_FIELD(*state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+                         !!(accept_filter & ECORE_ACCEPT_NONE));
+
+               SET_FIELD(*state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+                         !!(accept_filter & ECORE_ACCEPT_NONE));
+
+               SET_FIELD(*state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+                         (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
+                          !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
+
+               SET_FIELD(*state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+                         !!(accept_filter & ECORE_ACCEPT_BCAST));
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                          "p_ramrod->tx_mode.state = 0x%x\n",
+                          p_ramrod->tx_mode.state);
+       }
+}
+
+static void
+ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
+                             struct vport_update_ramrod_data *p_ramrod,
+                             struct ecore_sge_tpa_params *p_params)
+{
+       struct eth_vport_tpa_param *p_tpa;
+
+       if (!p_params) {
+               p_ramrod->common.update_tpa_param_flg = 0;
+               p_ramrod->common.update_tpa_en_flg = 0;
+               p_ramrod->common.update_tpa_param_flg = 0;
+               return;
+       }
+
+       p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+       p_tpa = &p_ramrod->tpa_param;
+       p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+       p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+       p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+       p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+       p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+       p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+       p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+       p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+       p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+       p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+       p_tpa->tpa_max_size = p_params->tpa_max_size;
+       p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+       p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
+static void
+ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
+                         struct vport_update_ramrod_data *p_ramrod,
+                         struct ecore_sp_vport_update_params *p_params)
+{
+       int i;
+
+       OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
+                   sizeof(p_ramrod->approx_mcast.bins));
+
+       if (!p_params->update_approx_mcast_flg)
+               return;
+
+       p_ramrod->common.update_approx_mcast_flg = 1;
+       for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+               u32 *p_bins = (u32 *) p_params->bins;
+
+               p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+       }
+}
+
+enum _ecore_status_t
+ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
+                     struct ecore_sp_vport_update_params *p_params,
+                     enum spq_mode comp_mode,
+                     struct ecore_spq_comp_cb *p_comp_data)
+{
+       struct ecore_rss_params *p_rss_params = p_params->rss_params;
+       struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+       u8 abs_vport_id = 0, val;
+       u16 wordval;
+
+       if (IS_VF(p_hwfn->p_dev)) {
+               rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
+               return rc;
+       }
+
+       rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_params->opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  ETH_RAMROD_VPORT_UPDATE,
+                                  PROTOCOLID_ETH, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Copy input params to ramrod according to FW struct */
+       p_ramrod = &p_ent->ramrod.vport_update;
+
+       p_ramrod->common.vport_id = abs_vport_id;
+
+       p_ramrod->common.rx_active_flg = p_params->vport_active_rx_flg;
+       p_ramrod->common.tx_active_flg = p_params->vport_active_tx_flg;
+       val = p_params->update_vport_active_rx_flg;
+       p_ramrod->common.update_rx_active_flg = val;
+       val = p_params->update_vport_active_tx_flg;
+       p_ramrod->common.update_tx_active_flg = val;
+       val = p_params->update_inner_vlan_removal_flg;
+       p_ramrod->common.update_inner_vlan_removal_en_flg = val;
+       val = p_params->inner_vlan_removal_flg;
+       p_ramrod->common.inner_vlan_removal_en = val;
+       val = p_params->silent_vlan_removal_flg;
+       p_ramrod->common.silent_vlan_removal_en = val;
+       val = p_params->update_tx_switching_flg;
+       p_ramrod->common.update_tx_switching_en_flg = val;
+       val = p_params->update_default_vlan_enable_flg;
+       p_ramrod->common.update_default_vlan_en_flg = val;
+       p_ramrod->common.default_vlan_en = p_params->default_vlan_enable_flg;
+       val = p_params->update_default_vlan_flg;
+       p_ramrod->common.update_default_vlan_flg = val;
+       wordval = p_params->default_vlan;
+       p_ramrod->common.default_vlan = OSAL_CPU_TO_LE16(wordval);
+
+       p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+               if (p_ramrod->common.tx_switching_en ||
+                   p_ramrod->common.update_tx_switching_en_flg) {
+                       DP_NOTICE(p_hwfn, false,
+                                 "FPGA - why are we seeing tx-switching? 
Overriding it\n");
+                       p_ramrod->common.tx_switching_en = 0;
+                       p_ramrod->common.update_tx_switching_en_flg = 1;
+               }
+#endif
+
+       val = p_params->update_anti_spoofing_en_flg;
+       p_ramrod->common.update_anti_spoofing_en_flg = val;
+       p_ramrod->common.anti_spoofing_en = p_params->anti_spoofing_en;
+       p_ramrod->common.accept_any_vlan = p_params->accept_any_vlan;
+       val = p_params->update_accept_any_vlan_flg;
+       p_ramrod->common.update_accept_any_vlan_flg = val;
+
+       rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+       if (rc != ECORE_SUCCESS) {
+               /* Return spq entry which is taken in ecore_sp_init_request() */
+               ecore_spq_return_entry(p_hwfn, p_ent);
+               return rc;
+       }
+
+       /* Update mcast bins for VFs, PF doesn't use this functionality */
+       ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+       ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+       ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
+                                     p_params->sge_tpa_params);
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
+                                        u16 opaque_fid, u8 vport_id)
+{
+       struct vport_stop_ramrod_data *p_ramrod;
+       struct ecore_sp_init_data init_data;
+       struct ecore_spq_entry *p_ent;
+       enum _ecore_status_t rc;
+       u8 abs_vport_id = 0;
+
+       if (IS_VF(p_hwfn->p_dev))
+               return ecore_vf_pf_vport_stop(p_hwfn);
+
+       rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  ETH_RAMROD_VPORT_STOP,
+                                  PROTOCOLID_ETH, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.vport_stop;
+       p_ramrod->vport_id = abs_vport_id;
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t
+ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
+                        struct ecore_filter_accept_flags *p_accept_flags)
+{
+       struct ecore_sp_vport_update_params s_params;
+
+       OSAL_MEMSET(&s_params, 0, sizeof(s_params));
+       OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
+                   sizeof(struct ecore_filter_accept_flags));
+
+       return ecore_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
+enum _ecore_status_t
+ecore_filter_accept_cmd(struct ecore_dev *p_dev,
+                       u8 vport,
+                       struct ecore_filter_accept_flags accept_flags,
+                       u8 update_accept_any_vlan,
+                       u8 accept_any_vlan,
+                       enum spq_mode comp_mode,
+                       struct ecore_spq_comp_cb *p_comp_data)
+{
+       struct ecore_sp_vport_update_params update_params;
+       int i, rc;
+
+       /* Prepare and send the vport rx_mode change */
+       OSAL_MEMSET(&update_params, 0, sizeof(update_params));
+       update_params.vport_id = vport;
+       update_params.accept_flags = accept_flags;
+       update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
+       update_params.accept_any_vlan = accept_any_vlan;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+               if (IS_VF(p_dev)) {
+                       rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
+                       if (rc != ECORE_SUCCESS)
+                               return rc;
+                       continue;
+               }
+
+               rc = ecore_sp_vport_update(p_hwfn, &update_params,
+                                          comp_mode, p_comp_data);
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
+                       return rc;
+               }
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                          "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+                          accept_flags.rx_accept_filter,
+                          accept_flags.tx_accept_filter);
+
+               if (update_accept_any_vlan)
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                                  "accept_any_vlan=%d configured\n",
+                                  accept_any_vlan);
+       }
+
+       return 0;
+}
+
+static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_hw_cid_data *p_cid_data)
+{
+       if (!p_cid_data->b_cid_allocated)
+               return;
+
+       ecore_cxt_release_cid(p_hwfn, p_cid_data->cid);
+       p_cid_data->b_cid_allocated = false;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+                             u16 opaque_fid,
+                             u32 cid,
+                             u16 rx_queue_id,
+                             u8 vport_id,
+                             u8 stats_id,
+                             u16 sb,
+                             u8 sb_index,
+                             u16 bd_max_bytes,
+                             dma_addr_t bd_chain_phys_addr,
+                             dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+{
+       struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+       struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+       u16 abs_rx_q_id = 0;
+       u8 abs_vport_id = 0;
+
+       /* Store information for the stop */
+       p_rx_cid->cid = cid;
+       p_rx_cid->opaque_fid = opaque_fid;
+       p_rx_cid->vport_id = vport_id;
+
+       rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, 
sb_id=0x%x\n",
+                  opaque_fid, cid, rx_queue_id, vport_id, sb);
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = cid;
+       init_data.opaque_fid = opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  ETH_RAMROD_RX_QUEUE_START,
+                                  PROTOCOLID_ETH, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+       p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
+       p_ramrod->sb_index = sb_index;
+       p_ramrod->vport_id = abs_vport_id;
+       p_ramrod->stats_counter_id = stats_id;
+       p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+       p_ramrod->complete_cqe_flg = 0;
+       p_ramrod->complete_event_flg = 1;
+
+       p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
+       DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
+
+       p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
+       DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+                                                u16 opaque_fid,
+                                                u8 rx_queue_id,
+                                                u8 vport_id,
+                                                u8 stats_id,
+                                                u16 sb,
+                                                u8 sb_index,
+                                                u16 bd_max_bytes,
+                                                dma_addr_t bd_chain_phys_addr,
+                                                dma_addr_t cqe_pbl_addr,
+                                                u16 cqe_pbl_size,
+                                                void OSAL_IOMEM **pp_prod)
+{
+       struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+       u8 abs_stats_id = 0;
+       u16 abs_l2_queue = 0;
+       enum _ecore_status_t rc;
+       u64 init_prod_val = 0;
+
+       if (IS_VF(p_hwfn->p_dev)) {
+               return ecore_vf_pf_rxq_start(p_hwfn,
+                                            rx_queue_id,
+                                            sb,
+                                            sb_index,
+                                            bd_max_bytes,
+                                            bd_chain_phys_addr,
+                                            cqe_pbl_addr,
+                                            cqe_pbl_size, pp_prod);
+       }
+
+       rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_l2_queue);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       *pp_prod = (u8 OSAL_IOMEM *) p_hwfn->regview +
+           GTT_BAR0_MAP_REG_MSDM_RAM + MSTORM_PRODS_OFFSET(abs_l2_queue);
+
+       /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+       __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+                         (u32 *) (&init_prod_val));
+
+       /* Allocate a CID for the queue */
+       rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+               return rc;
+       }
+       p_rx_cid->b_cid_allocated = true;
+
+       rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
+                                          opaque_fid,
+                                          p_rx_cid->cid,
+                                          rx_queue_id,
+                                          vport_id,
+                                          abs_stats_id,
+                                          sb,
+                                          sb_index,
+                                          bd_max_bytes,
+                                          bd_chain_phys_addr,
+                                          cqe_pbl_addr, cqe_pbl_size);
+
+       if (rc != ECORE_SUCCESS)
+               ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+
+       return rc;
+}
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
+                             u16 rx_queue_id,
+                             u8 num_rxqs,
+                             u8 complete_cqe_flg,
+                             u8 complete_event_flg,
+                             enum spq_mode comp_mode,
+                             struct ecore_spq_comp_cb *p_comp_data)
+{
+       struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+       struct ecore_hw_cid_data *p_rx_cid;
+       u16 qid, abs_rx_q_id = 0;
+       u8 i;
+
+       if (IS_VF(p_hwfn->p_dev))
+               return ecore_vf_pf_rxqs_update(p_hwfn,
+                                              rx_queue_id,
+                                              num_rxqs,
+                                              complete_cqe_flg,
+                                              complete_event_flg);
+
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       for (i = 0; i < num_rxqs; i++) {
+               qid = rx_queue_id + i;
+               p_rx_cid = &p_hwfn->p_rx_cids[qid];
+
+               /* Get SPQ entry */
+               init_data.cid = p_rx_cid->cid;
+               init_data.opaque_fid = p_rx_cid->opaque_fid;
+
+               rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                          ETH_RAMROD_RX_QUEUE_UPDATE,
+                                          PROTOCOLID_ETH, &init_data);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+
+               p_ramrod = &p_ent->ramrod.rx_queue_update;
+
+               ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+               ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+               p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+               p_ramrod->complete_cqe_flg = complete_cqe_flg;
+               p_ramrod->complete_event_flg = complete_event_flg;
+
+               rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+               if (rc)
+                       return rc;
+       }
+
+       return rc;
+}
+
/* Stop an Rx queue previously started on @rx_queue_id and release its
 * CID.  VFs relay the request to the PF.  The completion-flag logic
 * below chooses between CQE and event-queue completion based on whether
 * the queue belongs to this PF (opaque_fid match) and the caller's
 * eq_completion_only/cqe_completion preferences.
 */
enum _ecore_status_t
ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			   u16 rx_queue_id,
			   bool eq_completion_only, bool cqe_completion)
{
	struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	struct ecore_sp_init_data init_data;
	u16 abs_rx_q_id = 0;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
					    cqe_completion);

	/* Get SPQ entry; reuse the CID/fid recorded at queue start */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_rx_cid->cid;
	init_data.opaque_fid = p_rx_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;

	/* NOTE(review): the return values of these two id translations are
	 * ignored here, unlike elsewhere in this file - confirm they cannot
	 * fail for a queue that was successfully started.
	 */
	ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
	ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
					  p_hwfn->hw_info.opaque_fid) &&
				      !eq_completion_only) || cqe_completion;
	p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
					 p_hwfn->hw_info.opaque_fid) ||
	    eq_completion_only;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Queue is gone in firmware; give the CID back to the pool */
	ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);

	return rc;
}
+
+/**
+ * ecore_sp_eth_txq_start_ramrod - Post the TX_QUEUE_START ramrod.
+ *
+ * Fills a tx_queue_start ramrod with the absolute vport/queue ids, the
+ * status-block coordinates and the PBL describing the Tx ring, selects a
+ * QM physical queue, and posts the request on the SPQ.  The cid and
+ * opaque_fid are cached in the per-queue CID data for the later stop.
+ *
+ * @pbl_addr/@pbl_size: DMA address and size of the Tx ring's PBL
+ * @p_pq_params:        parameters used to pick the QM physical queue
+ */
+enum _ecore_status_t
+ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+                             u16 opaque_fid,
+                             u16 tx_queue_id,
+                             u32 cid,
+                             u8 vport_id,
+                             u8 stats_id,
+                             u16 sb,
+                             u8 sb_index,
+                             dma_addr_t pbl_addr,
+                             u16 pbl_size,
+                             union ecore_qm_pq_params *p_pq_params)
+{
+       struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+       struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+       u16 pq_id, abs_tx_q_id = 0;
+       u8 abs_vport_id;
+
+       /* Store information for the stop */
+       p_tx_cid->cid = cid;
+       p_tx_cid->opaque_fid = opaque_fid;
+
+       rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       rc = ecore_fw_l2_queue(p_hwfn, tx_queue_id, &abs_tx_q_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = cid;
+       init_data.opaque_fid = opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  ETH_RAMROD_TX_QUEUE_START,
+                                  PROTOCOLID_ETH, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.tx_queue_start;
+       p_ramrod->vport_id = abs_vport_id;
+
+       /* Status block on which the queue's completions are reported */
+       p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
+       p_ramrod->sb_index = sb_index;
+       p_ramrod->stats_counter_id = stats_id;
+
+       p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);
+
+       /* PBL (page list) describing the Tx ring memory */
+       p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
+       p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
+       p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
+
+       /* Bind the queue to a QM physical queue */
+       pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
+       p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+/**
+ * ecore_sp_eth_tx_queue_start - Acquire a CID and start a Tx queue.
+ *
+ * PF flow: acquires an ETH CID, posts the TX_QUEUE_START ramrod via
+ * ecore_sp_eth_txq_start_ramrod() and computes the queue's doorbell
+ * address into *pp_doorbell.  VF flow: the whole request is relayed to
+ * the PF.  On ramrod failure the acquired CID is released again.
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+                                                u16 opaque_fid,
+                                                u16 tx_queue_id,
+                                                u8 vport_id,
+                                                u8 stats_id,
+                                                u16 sb,
+                                                u8 sb_index,
+                                                dma_addr_t pbl_addr,
+                                                u16 pbl_size,
+                                                void OSAL_IOMEM **pp_doorbell)
+{
+       struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+       union ecore_qm_pq_params pq_params;
+       enum _ecore_status_t rc;
+       u8 abs_stats_id = 0;
+
+       if (IS_VF(p_hwfn->p_dev)) {
+               return ecore_vf_pf_txq_start(p_hwfn,
+                                            tx_queue_id,
+                                            sb,
+                                            sb_index,
+                                            pbl_addr, pbl_size, pp_doorbell);
+       }
+
+       rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
+       OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+
+       /* Allocate a CID for the queue */
+       rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+               return rc;
+       }
+       p_tx_cid->b_cid_allocated = true;
+
+       /* NOTE: format string rejoined - it had been broken across lines by
+        * mail-client wrapping, which is invalid C.
+        */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+                  opaque_fid, p_tx_cid->cid, tx_queue_id, vport_id, sb);
+
+       /* TODO - set tc in the pq_params for multi-cos */
+       rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
+                                          opaque_fid,
+                                          tx_queue_id,
+                                          p_tx_cid->cid,
+                                          vport_id,
+                                          abs_stats_id,
+                                          sb,
+                                          sb_index,
+                                          pbl_addr, pbl_size, &pq_params);
+
+       /* Doorbell address for the queue's producer updates */
+       *pp_doorbell = (u8 OSAL_IOMEM *) p_hwfn->doorbells +
+           DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
+
+       /* Undo the CID acquisition if the ramrod failed */
+       if (rc != ECORE_SUCCESS)
+               ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+
+       return rc;
+}
+
+/* Updating an active Tx queue is not implemented yet. */
+enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn)
+{
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+       return rc;
+}
+
+/**
+ * ecore_sp_eth_tx_queue_stop - Stop the Tx queue 'tx_queue_id'.
+ *
+ * Posts an ETH_RAMROD_TX_QUEUE_STOP ramrod on the queue's CID (cached at
+ * start time) and, on success, releases the CID.  For a VF the request
+ * is relayed to the PF.
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+                                               u16 tx_queue_id)
+{
+       struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+       struct tx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+
+       /* VFs cannot post ramrods themselves - relay the stop to the PF */
+       if (IS_VF(p_hwfn->p_dev))
+               return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = p_tx_cid->cid;
+       init_data.opaque_fid = p_tx_cid->opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  ETH_RAMROD_TX_QUEUE_STOP,
+                                  PROTOCOLID_ETH, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* The stop ramrod carries no payload beyond the header */
+       p_ramrod = &p_ent->ramrod.tx_queue_stop;
+
+       rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Ramrod posted successfully - the queue's CID can be freed */
+       ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+       return rc;
+}
+
+/* Translate an ecore filter opcode into the matching HSI filter action.
+ * Opcodes with no direct single-command mapping (e.g. MOVE/REPLACE)
+ * yield MAX_ETH_FILTER_ACTION.
+ */
+static enum eth_filter_action
+ecore_filter_action(enum ecore_filter_opcode opcode)
+{
+       if (opcode == ECORE_FILTER_ADD)
+               return ETH_FILTER_ACTION_ADD;
+
+       if (opcode == ECORE_FILTER_REMOVE)
+               return ETH_FILTER_ACTION_REMOVE;
+
+       if (opcode == ECORE_FILTER_FLUSH)
+               return ETH_FILTER_ACTION_REMOVE_ALL;
+
+       return MAX_ETH_FILTER_ACTION;
+}
+
+/* Pack a 6-byte MAC address into the three 16-bit firmware fields,
+ * swapping the two bytes within each pair (mac[0..1] -> msb, etc.).
+ */
+static void ecore_set_fw_mac_addr(__le16 *fw_msb,
+                                 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
+{
+       u8 *p_msb = (u8 *) fw_msb;
+       u8 *p_mid = (u8 *) fw_mid;
+       u8 *p_lsb = (u8 *) fw_lsb;
+
+       p_msb[0] = mac[1];
+       p_msb[1] = mac[0];
+       p_mid[0] = mac[3];
+       p_mid[1] = mac[2];
+       p_lsb[0] = mac[5];
+       p_lsb[1] = mac[4];
+}
+
+/**
+ * ecore_filter_ucast_common - Prepare (but do not post) a unicast filter
+ * update ramrod.
+ *
+ * Translates the opcode/type in @p_filter_cmd into one or two
+ * eth_filter_cmd entries of a VPORT_FILTER_UPDATE ramrod.  MOVE and
+ * REPLACE require two commands (remove + add); other opcodes use one.
+ * On success *pp_ent / *pp_ramrod point at the initialized SPQ entry,
+ * which the caller is expected to post.
+ *
+ * Fix: corrected the "not supported" error-message typo ("suppported").
+ */
+static enum _ecore_status_t
+ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
+                         u16 opaque_fid,
+                         struct ecore_filter_ucast *p_filter_cmd,
+                         struct vport_filter_update_ramrod_data **pp_ramrod,
+                         struct ecore_spq_entry **pp_ent,
+                         enum spq_mode comp_mode,
+                         struct ecore_spq_comp_cb *p_comp_data)
+{
+       struct vport_filter_update_ramrod_data *p_ramrod;
+       u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+       struct eth_filter_cmd *p_first_filter;
+       struct eth_filter_cmd *p_second_filter;
+       struct ecore_sp_init_data init_data;
+       enum eth_filter_action action;
+       enum _ecore_status_t rc;
+
+       /* Translate the relative vport indices into absolute FW ids */
+       rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+                           &vport_to_remove_from);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+                           &vport_to_add_to);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       rc = ecore_sp_init_request(p_hwfn, pp_ent,
+                                  ETH_RAMROD_FILTERS_UPDATE,
+                                  PROTOCOLID_ETH, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+       p_ramrod = *pp_ramrod;
+       p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+       p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                          "Non-Asic - prevent Tx filters\n");
+               p_ramrod->filter_cmd_hdr.tx = 0;
+       }
+#endif
+
+       /* MOVE and REPLACE are composed of two filter commands */
+       switch (p_filter_cmd->opcode) {
+       case ECORE_FILTER_REPLACE:
+       case ECORE_FILTER_MOVE:
+               p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
+               break;
+       default:
+               p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
+               break;
+       }
+
+       p_first_filter = &p_ramrod->filter_cmds[0];
+       p_second_filter = &p_ramrod->filter_cmds[1];
+
+       /* Map the abstract filter type onto the HSI filter type */
+       switch (p_filter_cmd->type) {
+       case ECORE_FILTER_MAC:
+               p_first_filter->type = ETH_FILTER_TYPE_MAC;
+               break;
+       case ECORE_FILTER_VLAN:
+               p_first_filter->type = ETH_FILTER_TYPE_VLAN;
+               break;
+       case ECORE_FILTER_MAC_VLAN:
+               p_first_filter->type = ETH_FILTER_TYPE_PAIR;
+               break;
+       case ECORE_FILTER_INNER_MAC:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
+               break;
+       case ECORE_FILTER_INNER_VLAN:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
+               break;
+       case ECORE_FILTER_INNER_PAIR:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
+               break;
+       case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+               p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+               break;
+       case ECORE_FILTER_MAC_VNI_PAIR:
+               p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
+               break;
+       case ECORE_FILTER_VNI:
+               p_first_filter->type = ETH_FILTER_TYPE_VNI;
+               break;
+       }
+
+       /* Fill the MAC bytes for every MAC-based filter type */
+       if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
+               ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
+                                     &p_first_filter->mac_mid,
+                                     &p_first_filter->mac_lsb,
+                                     (u8 *) p_filter_cmd->mac);
+
+       /* Fill the VLAN id for every VLAN-based filter type */
+       if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+               p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
+
+       /* Fill the VNI for every VNI-based filter type */
+       if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+           (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+               p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
+
+       if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
+               /* MOVE: remove from one vport, add the same key to another */
+               p_second_filter->type = p_first_filter->type;
+               p_second_filter->mac_msb = p_first_filter->mac_msb;
+               p_second_filter->mac_mid = p_first_filter->mac_mid;
+               p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+               p_second_filter->vlan_id = p_first_filter->vlan_id;
+               p_second_filter->vni = p_first_filter->vni;
+
+               p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+               p_first_filter->vport_id = vport_to_remove_from;
+
+               p_second_filter->action = ETH_FILTER_ACTION_ADD;
+               p_second_filter->vport_id = vport_to_add_to;
+       } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
+               /* REPLACE: flush all filters on the vport, then add the key */
+               p_first_filter->vport_id = vport_to_add_to;
+               OSAL_MEMCPY(p_second_filter, p_first_filter,
+                           sizeof(*p_second_filter));
+               p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
+               p_second_filter->action = ETH_FILTER_ACTION_ADD;
+       } else {
+               action = ecore_filter_action(p_filter_cmd->opcode);
+
+               if (action == MAX_ETH_FILTER_ACTION) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "%d is not supported yet\n",
+                                 p_filter_cmd->opcode);
+                       return ECORE_NOTIMPL;
+               }
+
+               p_first_filter->action = action;
+               p_first_filter->vport_id =
+                   (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
+                   vport_to_remove_from : vport_to_add_to;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/**
+ * ecore_sp_eth_filter_ucast - Configure a unicast filter on one hwfn.
+ *
+ * Builds the filter-update ramrod via ecore_filter_ucast_common(), sets
+ * the assert-on-error policy from the command, posts the ramrod and logs
+ * the configured filter at verbose level.
+ *
+ * Fix: the two DP_VERBOSE format strings had been broken across lines by
+ * mail-client wrapping (invalid C); rejoined into single literals.
+ */
+enum _ecore_status_t
+ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
+                         u16 opaque_fid,
+                         struct ecore_filter_ucast *p_filter_cmd,
+                         enum spq_mode comp_mode,
+                         struct ecore_spq_comp_cb *p_comp_data)
+{
+       struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       struct eth_filter_cmd_header *p_header;
+       enum _ecore_status_t rc;
+
+       rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+                                      &p_ramrod, &p_ent,
+                                      comp_mode, p_comp_data);
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+               return rc;
+       }
+       p_header = &p_ramrod->filter_cmd_hdr;
+       p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+       rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
+               return rc;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+                  (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
+                  ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
+                   "REMOVE" :
+                   ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
+                    "MOVE" : "REPLACE")),
+                  (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
+                  ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
+                   "VLAN" : "MAC & VLAN"),
+                  p_ramrod->filter_cmd_hdr.cmd_cnt,
+                  p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+                  p_filter_cmd->vport_to_add_to,
+                  p_filter_cmd->vport_to_remove_from,
+                  p_filter_cmd->mac[0], p_filter_cmd->mac[1],
+                  p_filter_cmd->mac[2], p_filter_cmd->mac[3],
+                  p_filter_cmd->mac[4], p_filter_cmd->mac[5],
+                  p_filter_cmd->vlan);
+
+       return ECORE_SUCCESS;
+}
+
+/*******************************************************************************
+ * Description:
+ *         Calculates crc 32 on a buffer
+ *         Note: crc32_length MUST be aligned to 8
+ * Return:
+ *         The resulting CRC; on invalid input (NULL buffer, zero length or
+ *         length not a multiple of 8) the seed is returned unchanged.
+ ******************************************************************************/
+static u32 ecore_calc_crc32c(u8 *crc32_packet,
+                            u32 crc32_length, u32 crc32_seed, u8 complement)
+{
+       /* NOTE(review): the 'complement' parameter is never read here -
+        * presumably kept for interface symmetry; confirm against callers.
+        */
+       u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+       u8 msb = 0, current_byte = 0;
+
+       if ((crc32_packet == OSAL_NULL) ||
+           (crc32_length == 0) || ((crc32_length % 8) != 0)) {
+               return crc32_result;
+       }
+
+       /* Bitwise CRC: shift in each byte LSB-first, XOR the polynomial
+        * whenever the outgoing MSB differs from the incoming bit.
+        */
+       for (byte = 0; byte < crc32_length; byte++) {
+               current_byte = crc32_packet[byte];
+               for (bit = 0; bit < 8; bit++) {
+                       msb = (u8) (crc32_result >> 31);
+                       crc32_result = crc32_result << 1;
+                       if (msb != (0x1 & (current_byte >> bit))) {
+                               crc32_result = crc32_result ^ CRC32_POLY;
+                               crc32_result |= 1;
+                       }
+               }
+       }
+
+       return crc32_result;
+}
+
+/* CRC32c over a 6-byte MAC address, zero-padded to 8 bytes as required
+ * by ecore_calc_crc32c().  The 'len' argument is currently unused.
+ */
+static OSAL_INLINE u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
+{
+       u32 packet_buf[2] = { 0 };
+
+       OSAL_MEMCPY((u8 *) packet_buf, mac, 6);
+
+       return ecore_calc_crc32c((u8 *) packet_buf, 8, seed, 0);
+}
+
+/* Map a multicast MAC address to its approximate-match bin (0-255). */
+u8 ecore_mcast_bin_from_mac(u8 *mac)
+{
+       u32 crc;
+
+       crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+                             mac, ETH_ALEN);
+
+       return (u8) (crc & 0xff);
+}
+
+/**
+ * ecore_sp_eth_filter_mcast - Program the approximate-multicast bins.
+ *
+ * Posts a VPORT_UPDATE ramrod that rewrites the whole approx_mcast bin
+ * vector of the vport.  For ECORE_FILTER_ADD the vector is rebuilt from
+ * the MAC list (each MAC hashed to a bin); for REMOVE the vector is left
+ * all-zero, i.e. all multicast filters are cleared.
+ */
+static enum _ecore_status_t
+ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
+                         u16 opaque_fid,
+                         struct ecore_filter_mcast *p_filter_cmd,
+                         enum spq_mode comp_mode,
+                         struct ecore_spq_comp_cb *p_comp_data)
+{
+       struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+       long unsigned bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       struct ecore_sp_init_data init_data;
+       enum _ecore_status_t rc;
+       u8 abs_vport_id = 0;
+       int i;
+
+       /* ADD targets vport_to_add_to; REMOVE targets vport_to_remove_from */
+       rc = ecore_fw_vport(p_hwfn,
+                           (p_filter_cmd->opcode == ECORE_FILTER_ADD) ?
+                           p_filter_cmd->vport_to_add_to :
+                           p_filter_cmd->vport_to_remove_from, &abs_vport_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  ETH_RAMROD_VPORT_UPDATE,
+                                  PROTOCOLID_ETH, &init_data);
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+               return rc;
+       }
+
+       p_ramrod = &p_ent->ramrod.vport_update;
+       p_ramrod->common.update_approx_mcast_flg = 1;
+
+       /* explicitly clear out the entire vector */
+       OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
+                   0, sizeof(p_ramrod->approx_mcast.bins));
+       OSAL_MEMSET(bins, 0, sizeof(long unsigned) *
+                   ETH_MULTICAST_MAC_BINS_IN_REGS);
+
+       if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
+               /* filter ADD op is explicit set op and it removes
+                *  any existing filters for the vport.
+                */
+               for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+                       u32 bit;
+
+                       bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+                       OSAL_SET_BIT(bit, bins);
+               }
+
+               /* Convert to correct endianity */
+               for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+                       struct vport_update_ramrod_mcast *p_ramrod_bins;
+                       u32 *p_bins = (u32 *) bins;
+
+                       p_ramrod_bins = &p_ramrod->approx_mcast;
+                       p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+               }
+       }
+
+       p_ramrod->common.vport_id = abs_vport_id;
+
+       rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
+
+       return rc;
+}
+
+/* Apply a multicast filter command on every hw-function of the device.
+ * Only ADD and REMOVE opcodes are accepted; the MAC count must not
+ * exceed ECORE_MAX_MC_ADDRS.  VFs forward the command to their PF.
+ */
+enum _ecore_status_t
+ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
+                      struct ecore_filter_mcast *p_filter_cmd,
+                      enum spq_mode comp_mode,
+                      struct ecore_spq_comp_cb *p_comp_data)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       int i;
+
+       /* only ADD and REMOVE operations are supported for multi-cast */
+       if (p_filter_cmd->opcode != ECORE_FILTER_ADD &&
+           p_filter_cmd->opcode != ECORE_FILTER_REMOVE)
+               return ECORE_INVAL;
+
+       if (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)
+               return ECORE_INVAL;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               if (IS_VF(p_dev)) {
+                       /* VFs cannot post ramrods - relay to the PF */
+                       ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+                       continue;
+               }
+
+               rc = ecore_sp_eth_filter_mcast(p_hwfn,
+                                              p_hwfn->hw_info.opaque_fid,
+                                              p_filter_cmd,
+                                              comp_mode, p_comp_data);
+               if (rc != ECORE_SUCCESS)
+                       break;
+       }
+
+       return rc;
+}
+
+/* Apply a unicast filter command on every hw-function of the device.
+ * VFs forward the command to their PF; PFs post the filter ramrod
+ * directly.  Stops and returns on the first failure.
+ *
+ * Fix: a failure on the VF path was previously only noted in 'rc' and
+ * then silently overwritten by later hwfn iterations; now the loop
+ * breaks on it like the PF path does.
+ */
+enum _ecore_status_t
+ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
+                      struct ecore_filter_ucast *p_filter_cmd,
+                      enum spq_mode comp_mode,
+                      struct ecore_spq_comp_cb *p_comp_data)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       int i;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               if (IS_VF(p_dev)) {
+                       rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+                       if (rc != ECORE_SUCCESS)
+                               break;
+                       continue;
+               }
+
+               rc = ecore_sp_eth_filter_ucast(p_hwfn,
+                                              p_hwfn->hw_info.opaque_fid,
+                                              p_filter_cmd,
+                                              comp_mode, p_comp_data);
+               if (rc != ECORE_SUCCESS)
+                       break;
+       }
+
+       return rc;
+}
+
+/* IOV related */
+/**
+ * ecore_sp_vf_start - Post the COMMON_RAMROD_VF_START ramrod for a VF.
+ *
+ * Extracts the VF index from the concrete vfid, fills in the opaque fid
+ * and the personality derived from the hwfn, and posts the ramrod.
+ * Returns ECORE_INVAL for personalities without a VF mapping.
+ *
+ * Fix: corrected the error-message typo "Unkown" -> "Unknown".
+ */
+enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
+                                      u32 concrete_vfid, u16 opaque_vfid)
+{
+       struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = opaque_vfid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_VF_START,
+                                  PROTOCOLID_COMMON, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.vf_start;
+
+       p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+       p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(opaque_vfid);
+
+       /* The VF inherits the PF's personality */
+       switch (p_hwfn->hw_info.personality) {
+       case ECORE_PCI_ETH:
+               p_ramrod->personality = PERSONALITY_ETH;
+               break;
+       case ECORE_PCI_ETH_ROCE:
+               p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
+                         p_hwfn->hw_info.personality);
+               return ECORE_INVAL;
+       }
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+/* Updating a started VF is not implemented yet. */
+enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn)
+{
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+       return rc;
+}
+
+/**
+ * ecore_sp_vf_stop - Post the COMMON_RAMROD_VF_STOP ramrod for a VF.
+ *
+ * The ramrod carries only the VF index extracted from the concrete vfid.
+ */
+enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
+                                     u32 concrete_vfid, u16 opaque_vfid)
+{
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       struct ecore_sp_init_data init_data;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = opaque_vfid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_VF_STOP,
+                                  PROTOCOLID_COMMON, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.vf_stop;
+
+       p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+/* Statistics related code */
+/* Resolve the address/length of the Pstorm per-queue statistics.
+ * PF: computed from the PSDM RAM offset of the statistics bin.
+ * VF: taken from the acquire response received from the PF.
+ */
+static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
+                                            u32 *p_addr, u32 *p_len,
+                                            u16 statistics_bin)
+{
+       if (!IS_PF(p_hwfn->p_dev)) {
+               struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+               *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+               return;
+       }
+
+       *p_addr = BAR0_MAP_REG_PSDM_RAM +
+           PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+       *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+}
+
+/* Read the Pstorm (Tx) per-queue statistics for 'statistics_bin' and
+ * accumulate them into p_stats.  Counters are additive so multiple
+ * bins/hwfns can be folded into the same structure.
+ */
+static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt,
+                                    struct ecore_eth_stats *p_stats,
+                                    u16 statistics_bin)
+{
+       struct eth_pstorm_per_queue_stat pstats;
+       u32 pstats_addr = 0, pstats_len = 0;
+
+       __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
+                                        statistics_bin);
+
+       OSAL_MEMSET(&pstats, 0, sizeof(pstats));
+       ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+       p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+       p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+       p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+       p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+       p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+       p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+       p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+}
+
+/* Read the Tstorm per-port statistics and accumulate the filter-discard
+ * counters into p_stats.  Note these are per-port (not per-bin); the
+ * statistics_bin argument is unused here.
+ */
+static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt,
+                                    struct ecore_eth_stats *p_stats,
+                                    u16 statistics_bin)
+{
+       struct tstorm_per_port_stat tstats;
+       u32 tstats_addr, tstats_len;
+
+       /* PF reads TSDM RAM directly; VF uses the address supplied by the
+        * PF in the acquire response.
+        */
+       if (IS_PF(p_hwfn->p_dev)) {
+               tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+                   TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+               tstats_len = sizeof(struct tstorm_per_port_stat);
+       } else {
+               struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+               tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+       }
+
+       OSAL_MEMSET(&tstats, 0, sizeof(tstats));
+       ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
+
+       p_stats->mftag_filter_discards +=
+           HILO_64_REGPAIR(tstats.mftag_filter_discard);
+       p_stats->mac_filter_discards +=
+           HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+}
+
+/* Resolve the address/length of the Ustorm per-queue statistics.
+ * PF: computed from the USDM RAM offset of the statistics bin.
+ * VF: taken from the acquire response received from the PF.
+ */
+static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
+                                            u32 *p_addr, u32 *p_len,
+                                            u16 statistics_bin)
+{
+       if (!IS_PF(p_hwfn->p_dev)) {
+               struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+               *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+               return;
+       }
+
+       *p_addr = BAR0_MAP_REG_USDM_RAM +
+           USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+       *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+}
+
+/* Read the Ustorm (Rx) per-queue statistics for 'statistics_bin' and
+ * accumulate them into p_stats.
+ */
+static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt,
+                                    struct ecore_eth_stats *p_stats,
+                                    u16 statistics_bin)
+{
+       struct eth_ustorm_per_queue_stat ustats;
+       u32 ustats_addr = 0, ustats_len = 0;
+
+       __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
+                                        statistics_bin);
+
+       OSAL_MEMSET(&ustats, 0, sizeof(ustats));
+       ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+       p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+       p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+       p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+       p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+       p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+       p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+/* Resolve the address/length of the Mstorm per-queue statistics.
+ * PF: computed from the MSDM RAM offset of the statistics bin.
+ * VF: taken from the acquire response received from the PF.
+ */
+static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
+                                            u32 *p_addr, u32 *p_len,
+                                            u16 statistics_bin)
+{
+       if (IS_PF(p_hwfn->p_dev)) {
+               *p_addr = BAR0_MAP_REG_MSDM_RAM +
+                   MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+               *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+       } else {
+               struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+               struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+               *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+               *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+       }
+}
+
+/* Read the MSTORM per-queue statistics block for @statistics_bin and
+ * accumulate discard and TPA counters into @p_stats (additive, so results
+ * aggregate across hwfns/bins).
+ */
+static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt,
+                                    struct ecore_eth_stats *p_stats,
+                                    u16 statistics_bin)
+{
+       struct eth_mstorm_per_queue_stat mstats;
+       u32 mstats_addr = 0, mstats_len = 0;
+
+       __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
+                                        statistics_bin);
+
+       OSAL_MEMSET(&mstats, 0, sizeof(mstats));
+       ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
+
+       /* All counters are hi/lo register pairs in HW */
+       p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
+       p_stats->packet_too_big_discard +=
+           HILO_64_REGPAIR(mstats.packet_too_big_discard);
+       p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+       p_stats->tpa_coalesced_pkts +=
+           HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+       p_stats->tpa_coalesced_events +=
+           HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+       p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
+       p_stats->tpa_coalesced_bytes +=
+           HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+}
+
+/* Read the MAC (PMM) and BRB port statistics from the MCP's per-port
+ * shmem area and accumulate them into @p_stats. PF-only: requires a valid
+ * mcp_info->port_addr (caller checks this).
+ */
+static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        struct ecore_eth_stats *p_stats)
+{
+       struct port_stats port_stats;
+       int j;
+
+       OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
+
+       /* Snapshot the whole stats section of the public_port shmem struct */
+       ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
+                         p_hwfn->mcp_info->port_addr +
+                         OFFSETOF(struct public_port, stats),
+                         sizeof(port_stats));
+
+       p_stats->rx_64_byte_packets += port_stats.pmm.r64;
+       p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
+       p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
+       p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
+       p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
+       p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
+       p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
+       p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
+       p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
+       p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
+       p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
+       p_stats->rx_crc_errors += port_stats.pmm.rfcs;
+       p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
+       p_stats->rx_pause_frames += port_stats.pmm.rxpf;
+       p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
+       p_stats->rx_align_errors += port_stats.pmm.raln;
+       p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
+       p_stats->rx_oversize_packets += port_stats.pmm.rovr;
+       p_stats->rx_jabbers += port_stats.pmm.rjbr;
+       p_stats->rx_undersize_packets += port_stats.pmm.rund;
+       p_stats->rx_fragments += port_stats.pmm.rfrg;
+       p_stats->tx_64_byte_packets += port_stats.pmm.t64;
+       p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
+       p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
+       p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
+       p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
+       p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
+       p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
+       p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
+       p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
+       p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
+       p_stats->tx_pause_frames += port_stats.pmm.txpf;
+       p_stats->tx_pfc_frames += port_stats.pmm.txpp;
+       p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
+       p_stats->tx_total_collisions += port_stats.pmm.tncl;
+       p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
+       p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
+       p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
+       p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
+       p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
+       p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
+       p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
+       p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
+       p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
+       p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+       /* Sum the 8 per-index BRB truncate/discard counters into a single
+        * total (index presumably per priority/TC — TODO confirm vs. HSI).
+        */
+       for (j = 0; j < 8; j++) {
+               p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
+               p_stats->brb_discards += port_stats.brb.brb_discard[j];
+       }
+}
+
+/* Gather all storm (M/U/T/P) per-queue statistics for @statistics_bin into
+ * @stats, and optionally the MCP port statistics as well. All helpers are
+ * additive, so @stats accumulates across calls. Port stats are skipped on
+ * emulation platforms and when no mcp_info exists.
+ */
+void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt,
+                            struct ecore_eth_stats *stats,
+                            u16 statistics_bin, bool b_get_port_stats)
+{
+       __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
+       __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
+       __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+       __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
+
+#ifndef ASIC_ONLY
+       /* Avoid getting PORT stats for emulation. */
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               return;
+#endif
+
+       if (b_get_port_stats && p_hwfn->mcp_info)
+               __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
+}
+
+/* Zero @stats and accumulate per-hwfn vport statistics into it. For a PF,
+ * a ptt window is acquired per hwfn and port stats are gathered too; a VF
+ * passes a NULL ptt and only reads the storm counters.
+ *
+ * Fix vs. original: the NULL-ptt guard now precedes the fw_vport lookup,
+ * so the "goto out" error path can no longer reach ecore_ptt_release()
+ * with a NULL ptt when ecore_ptt_acquire() had failed.
+ */
+static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
+                                  struct ecore_eth_stats *stats)
+{
+       u8 fw_vport = 0;
+       int i;
+
+       OSAL_MEMSET(stats, 0, sizeof(*stats));
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+               struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
+                   ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+
+               /* Bail out before any HW access if no ptt is available */
+               if (IS_PF(p_dev) && !p_ptt) {
+                       DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+                       continue;
+               }
+
+               if (IS_PF(p_dev)) {
+                       /* The main vport index is relative first */
+                       if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
+                               DP_ERR(p_hwfn, "No vport available!\n");
+                               goto out;
+                       }
+               }
+
+               __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+                                       IS_PF(p_dev) ? true : false);
+
+out:
+               if (IS_PF(p_dev))
+                       ecore_ptt_release(p_hwfn, p_ptt);
+       }
+}
+
+/* Public entry: fill @stats with the current vport statistics, minus the
+ * baseline captured at the last stats reset (if one was allocated).
+ * A NULL @p_dev yields zeroed stats.
+ */
+void ecore_get_vport_stats(struct ecore_dev *p_dev,
+                          struct ecore_eth_stats *stats)
+{
+       u32 i;
+
+       if (!p_dev) {
+               OSAL_MEMSET(stats, 0, sizeof(*stats));
+               return;
+       }
+
+       _ecore_get_vport_stats(p_dev, stats);
+
+       if (!p_dev->reset_stats)
+               return;
+
+       /* Reduce the statistics baseline; treats ecore_eth_stats as a flat
+        * array of u64 counters (the sizeof division relies on this layout).
+        */
+       for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
+               ((u64 *) stats)[i] -= ((u64 *) p_dev->reset_stats)[i];
+}
+
+/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
+/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
+void ecore_reset_vport_stats(struct ecore_dev *p_dev)
+{
+       int i;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+               struct eth_mstorm_per_queue_stat mstats;
+               struct eth_ustorm_per_queue_stat ustats;
+               struct eth_pstorm_per_queue_stat pstats;
+               struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
+                   ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+               u32 addr = 0, len = 0;
+
+               if (IS_PF(p_dev) && !p_ptt) {
+                       DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+                       continue;
+               }
+
+               /* Clear the storm statistics areas (bin 0 only) by writing
+                * zeroed structures back to their HW locations.
+                */
+               OSAL_MEMSET(&mstats, 0, sizeof(mstats));
+               __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
+               ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
+
+               OSAL_MEMSET(&ustats, 0, sizeof(ustats));
+               __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
+               ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
+
+               OSAL_MEMSET(&pstats, 0, sizeof(pstats));
+               __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
+               ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
+
+               if (IS_PF(p_dev))
+                       ecore_ptt_release(p_hwfn, p_ptt);
+       }
+
+       /* PORT statistics are not necessarily reset, so we need to
+        * read and create a baseline for future statistics.
+        */
+       if (!p_dev->reset_stats)
+               DP_INFO(p_dev, "Reset stats not allocated\n");
+       else
+               _ecore_get_vport_stats(p_dev, p_dev->reset_stats);
+}
diff --git a/drivers/net/qede/ecore/ecore_l2.h 
b/drivers/net/qede/ecore/ecore_l2.h
new file mode 100644
index 0000000..b0850ca
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_l2.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_L2_H__
+#define __ECORE_L2_H__
+
+#include "ecore.h"
+#include "ecore_hw.h"
+#include "ecore_spq.h"
+#include "ecore_l2_api.h"
+
+/**
+ * @brief ecore_sp_vf_start -  VF Function Start
+ *
+ * This ramrod is sent when a virtual function (VF) is loaded, to initialize it.
+ * It will configure the function related parameters.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ * @param concrete_vfid                                VF ID
+ * @param opaque_vfid
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
+                                      u32 concrete_vfid, u16 opaque_vfid);
+
+/**
+ * @brief ecore_sp_vf_update - VF Function Update Ramrod
+ *
+ * This ramrod performs updates of a virtual function (VF).
+ * It currently contains no functionality.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_vf_stop - VF Function Stop Ramrod
+ *
+ * This ramrod is sent to unload a virtual function (VF).
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ * @param concrete_vfid
+ * @param opaque_vfid
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
+                                     u32 concrete_vfid, u16 opaque_vfid);
+
+/**
+ * @brief ecore_sp_eth_tx_queue_update -
+ *
+ * This ramrod updates a TX queue. It is used for setting the active
+ * state of the queue.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn);
+
+enum _ecore_status_t
+ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
+                        struct ecore_sp_vport_start_params *p_params);
+
+/**
+ * @brief - Starts an Rx queue; Should be used where contexts are handled
+ * outside of the ramrod area [specifically iov scenarios]
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param cid
+ * @param rx_queue_id
+ * @param vport_id
+ * @param stats_id
+ * @param sb
+ * @param sb_index
+ * @param bd_max_bytes
+ * @param bd_chain_phys_addr
+ * @param cqe_pbl_addr
+ * @param cqe_pbl_size
+ * @param leading
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+                             u16 opaque_fid,
+                             u32 cid,
+                             u16 rx_queue_id,
+                             u8 vport_id,
+                             u8 stats_id,
+                             u16 sb,
+                             u8 sb_index,
+                             u16 bd_max_bytes,
+                             dma_addr_t bd_chain_phys_addr,
+                             dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+/**
+ * @brief - Starts a Tx queue; Should be used where contexts are handled
+ * outside of the ramrod area [specifically iov scenarios]
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param tx_queue_id
+ * @param cid
+ * @param vport_id
+ * @param stats_id
+ * @param sb
+ * @param sb_index
+ * @param pbl_addr
+ * @param pbl_size
+ * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+                             u16 opaque_fid,
+                             u16 tx_queue_id,
+                             u32 cid,
+                             u8 vport_id,
+                             u8 stats_id,
+                             u16 sb,
+                             u8 sb_index,
+                             dma_addr_t pbl_addr,
+                             u16 pbl_size,
+                             union ecore_qm_pq_params *p_pq_params);
+
+u8 ecore_mcast_bin_from_mac(u8 *mac);
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_l2_api.h 
b/drivers/net/qede/ecore/ecore_l2_api.h
new file mode 100644
index 0000000..1e01b57
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_l2_api.h
@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_L2_API_H__
+#define __ECORE_L2_API_H__
+
+#include "ecore_status.h"
+#include "ecore_sp_api.h"
+
+#ifndef __EXTRACT__LINUX__
+/* RSS hashing capability bits; OR-able flag values */
+enum ecore_rss_caps {
+       ECORE_RSS_IPV4 = 0x1,
+       ECORE_RSS_IPV6 = 0x2,
+       ECORE_RSS_IPV4_TCP = 0x4,
+       ECORE_RSS_IPV6_TCP = 0x8,
+       ECORE_RSS_IPV4_UDP = 0x10,
+       ECORE_RSS_IPV6_UDP = 0x20,
+};
+
+/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
+#define ECORE_RSS_IND_TABLE_SIZE 128
+#define ECORE_RSS_KEY_SIZE 10  /* size in 32b chunks */
+#endif
+
+/* RSS configuration; update_* flags select which fields are applied */
+struct ecore_rss_params {
+       u8 update_rss_config;
+       u8 rss_enable;
+       u8 rss_eng_id;
+       u8 update_rss_capabilities;
+       u8 update_rss_ind_table;
+       u8 update_rss_key;
+       u8 rss_caps;            /* presumably enum ecore_rss_caps bits — confirm */
+       u8 rss_table_size_log;  /* The table size is 2 ^ rss_table_size_log */
+       u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+       u32 rss_key[ECORE_RSS_KEY_SIZE];
+};
+
+/* TPA (aggregation) / SGE tuning; the two update_* flags gate whether the
+ * enable bits and the parameter group, respectively, are applied.
+ */
+struct ecore_sge_tpa_params {
+       u8 max_buffers_per_cqe;
+
+       u8 update_tpa_en_flg;
+       u8 tpa_ipv4_en_flg;
+       u8 tpa_ipv6_en_flg;
+       u8 tpa_ipv4_tunn_en_flg;
+       u8 tpa_ipv6_tunn_en_flg;
+
+       u8 update_tpa_param_flg;
+       u8 tpa_pkt_split_flg;
+       u8 tpa_hdr_data_split_flg;
+       u8 tpa_gro_consistent_flg;
+       u8 tpa_max_aggs_num;
+       u16 tpa_max_size;
+       u16 tpa_min_size_to_start;
+       u16 tpa_min_size_to_cont;
+};
+
+/* Operations for unicast/multicast filter commands */
+enum ecore_filter_opcode {
+       ECORE_FILTER_ADD,
+       ECORE_FILTER_REMOVE,
+       ECORE_FILTER_MOVE,
+       ECORE_FILTER_REPLACE,   /* Delete all MACs and add new one instead */
+       ECORE_FILTER_FLUSH,     /* Removes all filters */
+};
+
+/* Which key(s) a unicast filter matches on (MAC, VLAN, VNI, combinations) */
+enum ecore_filter_ucast_type {
+       ECORE_FILTER_MAC,
+       ECORE_FILTER_VLAN,
+       ECORE_FILTER_MAC_VLAN,
+       ECORE_FILTER_INNER_MAC,
+       ECORE_FILTER_INNER_VLAN,
+       ECORE_FILTER_INNER_PAIR,
+       ECORE_FILTER_INNER_MAC_VNI_PAIR,
+       ECORE_FILTER_MAC_VNI_PAIR,
+       ECORE_FILTER_VNI,
+};
+
+/* A single unicast filter command; which of mac/vlan/vni is meaningful
+ * depends on @type.
+ */
+struct ecore_filter_ucast {
+       enum ecore_filter_opcode opcode;
+       enum ecore_filter_ucast_type type;
+       u8 is_rx_filter;
+       u8 is_tx_filter;
+       u8 vport_to_add_to;
+       u8 vport_to_remove_from;
+       unsigned char mac[ETH_ALEN];
+       u8 assert_on_error;
+       u16 vlan;
+       u32 vni;
+};
+
+/* A multicast filter command carrying up to ECORE_MAX_MC_ADDRS MACs */
+struct ecore_filter_mcast {
+       /* MOVE is not supported for multicast */
+       enum ecore_filter_opcode opcode;
+       u8 vport_to_add_to;
+       u8 vport_to_remove_from;
+       u8 num_mc_addrs;        /* number of valid entries in mac[] */
+#define ECORE_MAX_MC_ADDRS     64
+       unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+/* Rx/Tx accept-mode configuration; the *_accept_filter fields hold ORed
+ * ECORE_ACCEPT_* bits, applied only when the matching update_* flag is set.
+ */
+struct ecore_filter_accept_flags {
+       u8 update_rx_mode_config;
+       u8 update_tx_mode_config;
+       u8 rx_accept_filter;
+       u8 tx_accept_filter;
+#define        ECORE_ACCEPT_NONE               0x01
+#define ECORE_ACCEPT_UCAST_MATCHED     0x02
+#define ECORE_ACCEPT_UCAST_UNMATCHED   0x04
+#define ECORE_ACCEPT_MCAST_MATCHED     0x08
+#define ECORE_ACCEPT_MCAST_UNMATCHED   0x10
+#define ECORE_ACCEPT_BCAST             0x20
+};
+
+/* Add / remove / move / remove-all unicast MAC-VLAN filters.
+ * FW will assert in the following cases, so driver should take care...:
+ * 1. Adding a filter to a full table.
+ * 2. Adding a filter which already exists on that vport.
+ * 3. Removing a filter which doesn't exist.
+ */
+
+enum _ecore_status_t
+ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
+                      struct ecore_filter_ucast *p_filter_cmd,
+                      enum spq_mode comp_mode,
+                      struct ecore_spq_comp_cb *p_comp_data);
+
+/* Add / remove / move multicast MAC filters. */
+enum _ecore_status_t
+ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
+                      struct ecore_filter_mcast *p_filter_cmd,
+                      enum spq_mode comp_mode,
+                      struct ecore_spq_comp_cb *p_comp_data);
+
+/* Set "accept" filters */
+enum _ecore_status_t
+ecore_filter_accept_cmd(struct ecore_dev *p_dev,
+                       u8 vport,
+                       struct ecore_filter_accept_flags accept_flags,
+                       u8 update_accept_any_vlan,
+                       u8 accept_any_vlan,
+                       enum spq_mode comp_mode,
+                       struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
+ *
+ * This ramrod initializes an RX Queue for a VPort. An Assert is generated if
+ * the VPort ID is not currently initialized.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param rx_queue_id          RX Queue ID: Zero based, per VPort, allocated
+ *                             by assignment (=rssId)
+ * @param vport_id             VPort ID
+ * @param stats_id              VPort ID which the queue stats
+ *                             will be added to
+ * @param sb                   Status Block of the Function Event Ring
+ * @param sb_index             Index into the status block of the
+ *                     Function Event Ring
+ * @param bd_max_bytes         Maximum bytes that can be placed on a BD
+ * @param bd_chain_phys_addr   Physical address of BDs for receive.
+ * @param cqe_pbl_addr         Physical address of the CQE PBL Table.
+ * @param cqe_pbl_size         Size of the CQE PBL Table
+ * @param pp_prod              Pointer to place producer's
+ *                              address for the Rx Q (May be
+ *                             NULL).
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+                                                u16 opaque_fid,
+                                                u8 rx_queue_id,
+                                                u8 vport_id,
+                                                u8 stats_id,
+                                                u16 sb,
+                                                u8 sb_index,
+                                                u16 bd_max_bytes,
+                                                dma_addr_t bd_chain_phys_addr,
+                                                dma_addr_t cqe_pbl_addr,
+                                                u16 cqe_pbl_size,
+                                                void OSAL_IOMEM **pp_prod);
+
+/**
+ * @brief ecore_sp_eth_rx_queue_stop -
+ *
+ * This ramrod closes an RX queue. It sends RX queue stop ramrod
+ * + CFC delete ramrod
+ *
+ * @param p_hwfn
+ * @param rx_queue_id          RX Queue ID
+ * @param eq_completion_only   If True completion will be on
+ *                             EQe, if False completion will be
+ *                             on EQe if p_hwfn opaque
+ *                             different from the RXQ opaque
+ *                             otherwise on CQe.
+ * @param cqe_completion       If True completion will be
+ *                             received on CQe.
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+                          u16 rx_queue_id,
+                          bool eq_completion_only, bool cqe_completion);
+
+/**
+ * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
+ *
+ * This ramrod initializes a TX Queue for a VPort. An Assert is generated if
+ * the VPort is not currently initialized.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param tx_queue_id          TX Queue ID
+ * @param vport_id             VPort ID
+ * @param stats_id              VPort ID which the queue stats
+ *                             will be added to
+ * @param sb                   Status Block of the Function Event Ring
+ * @param sb_index             Index into the status block of the Function
+ *                             Event Ring
+ * @param pbl_addr             address of the pbl array
+ * @param pbl_size             number of entries in pbl
+ * @param pp_doorbell          Pointer to place doorbell pointer (May be NULL).
+ *                     This address should be used with the
+ *                             DIRECT_REG_WR macro.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+                                                u16 opaque_fid,
+                                                u16 tx_queue_id,
+                                                u8 vport_id,
+                                                u8 stats_id,
+                                                u16 sb,
+                                                u8 sb_index,
+                                                dma_addr_t pbl_addr,
+                                                u16 pbl_size,
+                                                void OSAL_IOMEM **
+                                                pp_doorbell);
+
+/**
+ * @brief ecore_sp_eth_tx_queue_stop -
+ *
+ * This ramrod closes a TX queue. It sends TX queue stop ramrod
+ * + CFC delete ramrod
+ *
+ * @param p_hwfn
+ * @param tx_queue_id          TX Queue ID
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+                                               u16 tx_queue_id);
+
+/* TPA aggregation mode for a vport (none / RSC / GRO) */
+enum ecore_tpa_mode {
+       ECORE_TPA_MODE_NONE,
+       ECORE_TPA_MODE_RSC,
+       ECORE_TPA_MODE_GRO,
+       ECORE_TPA_MODE_MAX
+};
+
+/* Parameters for the VPORT start ramrod (ecore_sp_vport_start) */
+struct ecore_sp_vport_start_params {
+       enum ecore_tpa_mode tpa_mode;
+       bool remove_inner_vlan; /* Inner VLAN removal is enabled */
+       bool tx_switching;      /* Vport supports tx-switching */
+       bool handle_ptp_pkts;   /* Handle PTP packets */
+       bool only_untagged;     /* Untagged pkt control */
+       bool drop_ttl0;         /* Drop packets with TTL = 0 */
+       u8 max_buffers_per_cqe;
+       u32 concrete_fid;
+       u16 opaque_fid;
+       u8 vport_id;            /* VPORT ID */
+       u16 mtu;                /* VPORT MTU */
+       bool zero_placement_offset;
+};
+
+/**
+ * @brief ecore_sp_vport_start -
+ *
+ * This ramrod initializes a VPort. An Assert is generated if the Function ID
+ * of the VPort is not enabled.
+ *
+ * @param p_hwfn
+ * @param p_params             VPORT start params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
+                    struct ecore_sp_vport_start_params *p_params);
+
+/* Parameters for the VPORT update ramrod; every field guarded by an
+ * update_*_flg is applied only when that flag is set, so callers can
+ * change fields independently. rss_params/sge_tpa_params may be NULL.
+ */
+struct ecore_sp_vport_update_params {
+       u16 opaque_fid;
+       u8 vport_id;
+       u8 update_vport_active_rx_flg;
+       u8 vport_active_rx_flg;
+       u8 update_vport_active_tx_flg;
+       u8 vport_active_tx_flg;
+       u8 update_inner_vlan_removal_flg;
+       u8 inner_vlan_removal_flg;
+       u8 silent_vlan_removal_flg;
+       u8 update_default_vlan_enable_flg;
+       u8 default_vlan_enable_flg;
+       u8 update_default_vlan_flg;
+       u16 default_vlan;
+       u8 update_tx_switching_flg;
+       u8 tx_switching_flg;
+       u8 update_approx_mcast_flg;
+       u8 update_anti_spoofing_en_flg;
+       u8 anti_spoofing_en;
+       u8 update_accept_any_vlan_flg;
+       u8 accept_any_vlan;
+       unsigned long bins[8];  /* presumably approx-mcast bin bitmap — confirm */
+       struct ecore_rss_params *rss_params;
+       struct ecore_filter_accept_flags accept_flags;
+       struct ecore_sge_tpa_params *sge_tpa_params;
+};
+
+/**
+ * @brief ecore_sp_vport_update -
+ *
+ * This ramrod updates the parameters of the VPort. Every field can be updated
+ * independently, according to flags.
+ *
+ * This ramrod is also used to set the VPort state to active after creation.
+ * An Assert is generated if the VPort does not contain an RX queue.
+ *
+ * @param p_hwfn
+ * @param p_params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
+                     struct ecore_sp_vport_update_params *p_params,
+                     enum spq_mode comp_mode,
+                     struct ecore_spq_comp_cb *p_comp_data);
+/**
+ * @brief ecore_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
+                                        u16 opaque_fid, u8 vport_id);
+
+enum _ecore_status_t
+ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
+                         u16 opaque_fid,
+                         struct ecore_filter_ucast *p_filter_cmd,
+                         enum spq_mode comp_mode,
+                         struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_sp_rx_eth_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ * @param rx_queue_id          RX Queue ID
+ * @param num_rxqs              Allow to update multiple rx
+ *                             queues, from rx_queue_id to
+ *                             (rx_queue_id + num_rxqs)
+ * @param complete_cqe_flg     Post completion to the CQE Ring if set
+ * @param complete_event_flg   Post completion to the Event Ring if set
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t
+ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
+                             u16 rx_queue_id,
+                             u8 num_rxqs,
+                             u8 complete_cqe_flg,
+                             u8 complete_event_flg,
+                             enum spq_mode comp_mode,
+                             struct ecore_spq_comp_cb *p_comp_data);
+
+void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt,
+                            struct ecore_eth_stats *stats,
+                            u16 statistics_bin, bool b_get_port_stats);
+
+void ecore_get_vport_stats(struct ecore_dev *p_dev,
+                          struct ecore_eth_stats *stats);
+
+void ecore_reset_vport_stats(struct ecore_dev *p_dev);
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_mcp.c 
b/drivers/net/qede/ecore/ecore_mcp.c
new file mode 100644
index 0000000..f2206b6
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_mcp.c
@@ -0,0 +1,1952 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_mcp.h"
+#include "mcp_public.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_sriov.h"
+#include "ecore_iov_api.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "ecore_dcbx.h"
+
+#define CHIP_MCP_RESP_ITER_US 10
+#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
+
+#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)  /* Account for 5 sec */
+#define ECORE_MCP_RESET_RETRIES (50 * 1000)    /* Account for 500 msec */
+
+/* Raw read/write helpers for fields located relative to a pointer kept
+ * in mcp_info (_ptr names the mcp_info member holding the base address).
+ */
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
+       ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+                _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+       ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+
+/* Read/write a named field of this function's driver mailbox.
+ * Note: DRV_MB_WR previously expanded the bare identifier 'p_hwfn'
+ * instead of its _p_hwfn argument; fixed for macro hygiene.
+ */
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
+       DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
+                    OFFSETOF(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
+       DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+                    OFFSETOF(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+       DRV_ID_PDA_COMP_VER_SHIFT)
+
+#define MCP_BYTES_PER_MBIT_SHIFT 17
+
+#ifndef ASIC_ONLY
+static int loaded;
+static int loaded_port[MAX_NUM_PORTS] = { 0 };
+#endif
+
+/* The MCP interface is usable only once mcp_info has been allocated and
+ * the shared-memory public base has been discovered.
+ */
+bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
+{
+       return (p_hwfn->mcp_info != OSAL_NULL) &&
+              (p_hwfn->mcp_info->public_base != 0);
+}
+
+/* Locate this PF's port section inside the MFW public shared memory and
+ * cache its address for later port-scoped accesses.
+ */
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_PORT);
+       u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+
+       p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+                                                  MFW_PORT(p_hwfn));
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "port_addr = 0x%x, port_id 0x%02x\n",
+                  p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+/* Copy the current MFW mailbox messages from shared memory into the
+ * local 'mfw_mb_cur' buffer, converting each dword from big-endian to
+ * CPU byte order.
+ */
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+       OSAL_BE32 tmp;
+       u32 i;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
+               return;
+#endif
+
+       /* No MFW detected - nothing to read */
+       if (!p_hwfn->mcp_info->public_base)
+               return;
+
+       for (i = 0; i < length; i++) {
+               /* '+ sizeof(u32)' skips the leading length dword of the
+                * MFW mailbox (read as mfw_mb_length at init time).
+                */
+               tmp = ecore_rd(p_hwfn, p_ptt,
+                              p_hwfn->mcp_info->mfw_mb_addr +
+                              (i << 2) + sizeof(u32));
+
+               ((u32 *) p_hwfn->mcp_info->mfw_mb_cur)[i] =
+                   OSAL_BE32_TO_CPU(tmp);
+       }
+}
+
+/* Release all MCP-related resources; safe to call when mcp_info was
+ * never allocated. Always succeeds.
+ */
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+
+       if (p_info != OSAL_NULL) {
+               OSAL_FREE(p_hwfn->p_dev, p_info->mfw_mb_cur);
+               OSAL_FREE(p_hwfn->p_dev, p_info->mfw_mb_shadow);
+               OSAL_SPIN_LOCK_DEALLOC(&p_info->lock);
+       }
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+       p_hwfn->mcp_info = OSAL_NULL;
+
+       return ECORE_SUCCESS;
+}
+
+/* Discover the MFW shared-memory layout: public base, driver and MFW
+ * mailbox addresses, the current mailbox/pulse sequence numbers, and the
+ * MCP history counter (used later to detect an MCP reset).
+ *
+ * @return ECORE_INVAL when no MFW is present (public base not set),
+ *         ECORE_SUCCESS otherwise.
+ */
+static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
+                                                  struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+       u32 drv_mb_offsize, mfw_mb_offsize;
+       u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
+               p_info->public_base = 0;
+               return ECORE_INVAL;
+       }
+#endif
+
+       p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+       if (!p_info->public_base)
+               return ECORE_INVAL;
+
+       p_info->public_base |= GRCBASE_MCP;
+
+       /* Calculate the driver and MFW mailbox address */
+       drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+                                 SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                                      PUBLIC_DRV_MB));
+       p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+                  drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+       /* Set the MFW MB address; the mailbox's first dword holds its
+        * length.
+        */
+       mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+                                 SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                                      PUBLIC_MFW_MB));
+       p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+       p_info->mfw_mb_length = (u16) ecore_rd(p_hwfn, p_ptt,
+                                              p_info->mfw_mb_addr);
+
+       /* Get the current driver mailbox sequence before sending
+        * the first command
+        */
+       p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+           DRV_MSG_SEQ_NUMBER_MASK;
+
+       /* Get current FW pulse sequence */
+       p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+           DRV_PULSE_SEQ_MASK;
+
+       /* Snapshot the POR counter; a later change indicates an MCP reset */
+       p_info->mcp_hist = (u16) ecore_rd(p_hwfn, p_ptt,
+                                         MISCS_REG_GENERIC_POR_0);
+
+       return ECORE_SUCCESS;
+}
+
+/* Allocate and initialize the MCP mailbox state for this hwfn.
+ * When no MFW is detected, mcp_info is kept (public_base == 0 marks the
+ * MCP as uninitialized) and ECORE_SUCCESS is returned.
+ *
+ * @return ECORE_SUCCESS, or ECORE_NOMEM on allocation failure.
+ */
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_info *p_info;
+       u32 size;
+
+       /* Allocate mcp_info structure */
+       p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+                                      sizeof(*p_hwfn->mcp_info));
+       if (!p_hwfn->mcp_info)
+               goto err;
+       p_info = p_hwfn->mcp_info;
+
+       if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
+               /* Do not free mcp_info here, since public_base indicate that
+                * the MCP is not initialized
+                */
+               return ECORE_SUCCESS;
+       }
+
+       /* Allocate the current and shadow copies of the MFW mailbox.
+        * Bug fix: the original checked mfw_mb_addr instead of mfw_mb_cur,
+        * so a failed mfw_mb_cur allocation went undetected.
+        */
+       size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+       p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+       p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+       if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow ||
+           !p_info->mfw_mb_addr)
+               goto err;
+
+       /* Initialize the MFW spinlock */
+       OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
+       OSAL_SPIN_LOCK_INIT(&p_info->lock);
+
+       return ECORE_SUCCESS;
+
+err:
+       DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
+       ecore_mcp_free(p_hwfn);
+       return ECORE_NOMEM;
+}
+
+/* Request an MCP reset from the MFW, then poll the POR counter until it
+ * changes (reset occurred) or the retry budget runs out.
+ *
+ * @return ECORE_SUCCESS, or ECORE_AGAIN on timeout.
+ */
+enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt)
+{
+       u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
+       u32 delay = CHIP_MCP_RESP_ITER_US;
+       u32 org_mcp_reset_seq, cnt = 0;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               delay = EMUL_MCP_RESP_ITER_US;
+#endif
+
+       OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+
+       /* Set drv command along with the updated sequence */
+       org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
+
+       do {
+               /* Wait for MFW response */
+               OSAL_UDELAY(delay);
+               /* Give the MFW up to 500 msec (50*1000 iterations of 10usec) */
+       } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
+                                               MISCS_REG_GENERIC_POR_0)) &&
+                (cnt++ < ECORE_MCP_RESET_RETRIES));
+
+       if (org_mcp_reset_seq !=
+           ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                          "MCP was reset after %d usec\n", cnt * delay);
+       } else {
+               DP_ERR(p_hwfn, "Failed to reset MCP\n");
+               rc = ECORE_AGAIN;
+       }
+
+       OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+
+       return rc;
+}
+
+/* Perform one mailbox transaction with the MFW: write param + command
+ * (tagged with a fresh sequence number), then poll for the matching
+ * response. Should be called while the dedicated spinlock is acquired.
+ *
+ * @return ECORE_SUCCESS, or ECORE_AGAIN when the MFW never answers.
+ */
+static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 cmd, u32 param,
+                                            u32 *o_mcp_resp,
+                                            u32 *o_mcp_param)
+{
+       u32 delay = CHIP_MCP_RESP_ITER_US;
+       u32 seq, cnt = 1, actual_mb_seq;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               delay = EMUL_MCP_RESP_ITER_US;
+#endif
+
+       /* Get actual driver mailbox sequence */
+       actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+           DRV_MSG_SEQ_NUMBER_MASK;
+
+       /* Use MCP history register to check if MCP reset occurred between
+        * init time and now.
+        */
+       if (p_hwfn->mcp_info->mcp_hist !=
+           ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
+               ecore_load_mcp_offsets(p_hwfn, p_ptt);
+               ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+       }
+       seq = ++p_hwfn->mcp_info->drv_mb_seq;
+
+       /* Set drv param */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+
+       /* Set drv command along with the updated sequence */
+       DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "wrote command (%x) to MFW MB param 0x%08x\n",
+                  (cmd | seq), param);
+
+       do {
+               /* Wait for MFW response */
+               OSAL_UDELAY(delay);
+               *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+
+               /* Give the FW up to 5 seconds (500*1000 iterations of 10usec) */
+       } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
+                (cnt++ < ECORE_DRV_MB_MAX_RETRIES));
+
+       /* NOTE(review): cnt * delay is in usec although the text says ms */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+                  cnt * delay, *o_mcp_resp, seq);
+
+       /* Is this a reply to our command? */
+       if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
+               *o_mcp_resp &= FW_MSG_CODE_MASK;
+               /* Get the MCP param */
+               *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+       } else {
+               /* FW BUG! */
+               DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
+                      cmd, param);
+               *o_mcp_resp = 0;
+               rc = ECORE_AGAIN;
+               ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
+       }
+       return rc;
+}
+
+/* Send a simple mailbox command (no union_data payload) to the MFW. */
+enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt, u32 cmd, u32 param,
+                                  u32 *o_mcp_resp, u32 *o_mcp_param)
+{
+#ifndef ASIC_ONLY
+       /* Emulation has no MFW; only track the unload bookkeeping */
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+                       loaded_port[p_hwfn->port_id]--;
+                       loaded--;
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
+                                  loaded);
+               }
+               return ECORE_SUCCESS;
+       }
+#endif
+
+       return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, OSAL_NULL,
+                                      o_mcp_resp, o_mcp_param);
+}
+
+/* Send a mailbox command, optionally copying a union_data payload into
+ * the driver mailbox beforehand. Access to the mailbox is serialized by
+ * the mcp_info spinlock.
+ *
+ * @return ECORE_BUSY when the MCP is not initialized, otherwise the
+ *         result of the mailbox transaction.
+ */
+enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 cmd, u32 param,
+                                            union drv_union_data *p_union_data,
+                                            u32 *o_mcp_resp,
+                                            u32 *o_mcp_param)
+{
+       u32 union_data_addr;
+       enum _ecore_status_t rc;
+
+       /* MCP not initialized */
+       if (!ecore_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+               return ECORE_BUSY;
+       }
+
+       /* Acquiring a spinlock is needed to ensure that only a single thread
+        * is accessing the mailbox at a certain time.
+        */
+       OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+
+       if (p_union_data != OSAL_NULL) {
+               union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+                   OFFSETOF(struct public_drv_mb, union_data);
+               ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, p_union_data,
+                               sizeof(*p_union_data));
+       }
+
+       rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
+                             o_mcp_param);
+
+       OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+
+       return rc;
+}
+
+/* Send an NVM write command whose payload (i_buf, i_txn_size bytes) is
+ * carried in the mailbox union_data area.
+ *
+ * @return ECORE_INVAL when i_txn_size exceeds the mailbox payload area,
+ *         otherwise the result of the mailbox transaction.
+ */
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 cmd,
+                                         u32 param,
+                                         u32 *o_mcp_resp,
+                                         u32 *o_mcp_param,
+                                         u32 i_txn_size, u32 *i_buf)
+{
+       union drv_union_data union_data;
+
+       /* Bug fix: guard the copy - a caller-supplied size larger than the
+        * union would overflow the stack buffer.
+        */
+       if (i_txn_size > sizeof(union_data.raw_data))
+               return ECORE_INVAL;
+
+       OSAL_MEMCPY(&union_data.raw_data, i_buf, i_txn_size);
+
+       return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, &union_data,
+                                      o_mcp_resp, o_mcp_param);
+}
+
+/* Send an NVM read command and copy the returned payload out of the
+ * mailbox union_data area into o_buf.
+ *
+ * NOTE(review): o_buf must be able to hold *o_txn_size bytes, which is
+ * only known after the command completes - callers must size it for the
+ * maximum transaction; confirm against callers.
+ */
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 cmd,
+                                         u32 param,
+                                         u32 *o_mcp_resp,
+                                         u32 *o_mcp_param,
+                                         u32 *o_txn_size, u32 *o_buf)
+{
+       enum _ecore_status_t rc;
+       u32 i;
+
+       /* MCP not initialized */
+       if (!ecore_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+               return ECORE_BUSY;
+       }
+
+       OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+       rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
+                             o_mcp_param);
+       if (rc != ECORE_SUCCESS)
+               goto out;
+
+       /* Get payload after operation completes successfully; the MFW
+        * reports the payload size via the mcp param.
+        */
+       *o_txn_size = *o_mcp_param;
+       for (i = 0; i < *o_txn_size; i += 4)
+               o_buf[i / sizeof(u32)] = DRV_MB_RD(p_hwfn, p_ptt,
+                                                  union_data.raw_data[i]);
+
+out:
+       OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+       return rc;
+}
+
+#ifndef ASIC_ONLY
+/* Emulation-only: fabricate a load response based on the file-scope
+ * load counters, since there is no MFW to arbitrate the load sequence.
+ */
+static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
+                                   u32 *p_load_code)
+{
+       u32 load_phase;
+
+       if (!loaded)
+               load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+       else if (!loaded_port[p_hwfn->port_id])
+               load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
+       else
+               load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+
+       /* On CMT, always tell that it's engine */
+       if (p_hwfn->p_dev->num_hwfns > 1)
+               load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
+
+       *p_load_code = load_phase;
+       loaded++;
+       loaded_port[p_hwfn->port_id]++;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
+                  *p_load_code, loaded, p_hwfn->port_id,
+                  loaded_port[p_hwfn->port_id]);
+}
+#endif
+
+/* Send a LOAD_REQ to the MFW and return the granted load phase in
+ * p_load_code (engine / port / function).
+ *
+ * @return ECORE_SUCCESS; ECORE_BUSY when the MFW refuses the load;
+ *         the mailbox error when the MFW does not respond.
+ */
+enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       u32 *p_load_code)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       union drv_union_data union_data;
+       u32 param;
+       enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               ecore_mcp_mf_workaround(p_hwfn, p_load_code);
+               return ECORE_SUCCESS;
+       }
+#endif
+
+       /* The driver version string travels in the union_data payload */
+       OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
+
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
+                                    (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+                                     p_dev->drv_type),
+                                    &union_data, p_load_code, &param);
+
+       /* if mcp fails to respond we must abort */
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
+
+       /* If MFW refused (e.g. other port is in diagnostic mode) we
+        * must abort. This can happen in the following cases:
+        * - Other port is in diagnostic mode
+        * - Previously loaded function on the engine is not compliant with
+        *   the requester.
+        * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
+        *      -
+        */
+       if (!(*p_load_code) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
+           ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
+               DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+               return ECORE_BUSY;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Read the per-path disabled-VF bitmap published by the MFW, mark the
+ * FLR-ed VFs, and trigger the OS FLR handler when any were marked.
+ */
+static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_PATH);
+       u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+       u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+                                    ECORE_PATH_ID(p_hwfn));
+       u32 disabled_vfs[VF_MAX_STATIC / 32];
+       int i;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                  "Reading Disabled VF information from [offset %08x],"
+                  " path_addr %08x\n",
+                  mfw_path_offsize, path_addr);
+
+       /* One u32 of the bitmap covers 32 VFs */
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+               disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
+                                          path_addr +
+                                          OFFSETOF(struct public_path,
+                                                   mcp_vf_disabled) +
+                                          sizeof(u32) * i);
+               DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+                          "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+                          i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+       }
+
+       if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+               OSAL_VF_FLR_UPDATE(p_hwfn);
+}
+
+/* Acknowledge completion of VF FLR handling to the MFW for the VFs set
+ * in vfs_to_ack, then clear the per-function ack bits in shared memory.
+ *
+ * @return ECORE_SUCCESS, or ECORE_TIMEOUT when the mailbox command fails.
+ */
+enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 *vfs_to_ack)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_FUNC);
+       u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+       u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+                                    MCP_PF_ID(p_hwfn));
+       union drv_union_data union_data;
+       u32 resp, param;
+       enum _ecore_status_t rc;
+       int i;
+
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+               DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+                          "Acking VFs [%08x,...,%08x] - %08x\n",
+                          i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+       /* The bitmap travels in the union_data payload (one bit per VF) */
+       OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
+
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
+                                    DRV_MSG_CODE_VF_DISABLED_DONE, 0,
+                                    &union_data, &resp, &param);
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+                         "Failed to pass ACK for VF flr to MFW\n");
+               return ECORE_TIMEOUT;
+       }
+
+       /* TMP - clear the ACK bits; should be done by MFW */
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+               ecore_wr(p_hwfn, p_ptt,
+                        func_addr +
+                        OFFSETOF(struct public_func, drv_ack_vf_disabled) +
+                        i * sizeof(u32), 0);
+
+       return rc;
+}
+
+/* Handle a transceiver-change notification: read the transceiver state
+ * from this port's public section and log presence/absence.
+ */
+static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_ptt *p_ptt)
+{
+       u32 transceiver_state;
+
+       transceiver_state = ecore_rd(p_hwfn, p_ptt,
+                                    p_hwfn->mcp_info->port_addr +
+                                    OFFSETOF(struct public_port,
+                                             transceiver_data));
+
+       /* Bug fix: the adjacent string literals previously concatenated
+        * to "mfw[Addr" - a space was missing (cf. the link-change log).
+        */
+       DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
+                  "Received transceiver state update [0x%08x] from mfw"
+                  " [Addr 0x%x]\n",
+                  transceiver_state, (u32) (p_hwfn->mcp_info->port_addr +
+                                            OFFSETOF(struct public_port,
+                                                     transceiver_data)));
+
+       transceiver_state = GET_FIELD(transceiver_state, PMM_TRANSCEIVER_STATE);
+
+       if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+               DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
+       else
+               DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
+}
+
+/* Process a link-state notification from the MFW (or clear the cached
+ * state when b_reset is set): read link_status from the port section,
+ * derive speed/duplex/partner abilities, apply min/max bandwidth
+ * corrections and notify the upper layer.
+ */
+static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt, bool b_reset)
+{
+       struct ecore_mcp_link_state *p_link;
+       u32 status = 0;
+
+       p_link = &p_hwfn->mcp_info->link_output;
+       OSAL_MEMSET(p_link, 0, sizeof(*p_link));
+       if (!b_reset) {
+               status = ecore_rd(p_hwfn, p_ptt,
+                                 p_hwfn->mcp_info->port_addr +
+                                 OFFSETOF(struct public_port, link_status));
+               DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
+                          "Received link update [0x%08x] from mfw"
+                          " [Addr 0x%x]\n",
+                          status, (u32) (p_hwfn->mcp_info->port_addr +
+                                         OFFSETOF(struct public_port,
+                                                  link_status)));
+       } else {
+               /* Reset request: the memset above already cleared p_link */
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Resetting link indications\n");
+               return;
+       }
+
+       /* Report link-up only after the driver has initialized the link */
+       if (p_hwfn->b_drv_link_init)
+               p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+       else
+               p_link->link_up = false;
+
+       p_link->full_duplex = true;
+       switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+       case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+               p_link->speed = 100000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+               p_link->speed = 50000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+               p_link->speed = 40000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+               p_link->speed = 25000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+               p_link->speed = 20000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+               p_link->speed = 10000;
+               break;
+       case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+               p_link->full_duplex = false;
+               /* Fall-through */
+       case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+               p_link->speed = 1000;
+               break;
+       default:
+               p_link->speed = 0;
+       }
+
+       /* We never store total line speed as p_link->speed is
+        * again changes according to bandwidth allocation.
+        */
+       if (p_link->link_up && p_link->speed)
+               p_link->line_speed = p_link->speed;
+       else
+               p_link->line_speed = 0;
+
+       /* Correct speed according to bandwidth allocation */
+       if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
+               u8 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+
+               __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+                                                  p_link, max_bw);
+       }
+
+       if (p_hwfn->mcp_info->func_info.bandwidth_min && p_link->speed) {
+               u8 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+               __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+                                                  p_link, min_bw);
+
+               ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
+                                                     p_link->min_pf_rate);
+       }
+
+       /* Decode autoneg state and link-partner advertised abilities */
+       p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+       p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+       p_link->parallel_detection = !!(status &
+                                        LINK_STATUS_PARALLEL_DETECTION_USED);
+       p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_10G : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_20G : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_25G : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_40G : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_50G : 0;
+       p_link->partner_adv_speed |=
+           (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+           ECORE_LINK_PARTNER_SPEED_100G : 0;
+
+       p_link->partner_tx_flow_ctrl_en =
+           !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+       p_link->partner_rx_flow_ctrl_en =
+           !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+       switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+       case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+               p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
+               break;
+       case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+               p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
+               break;
+       case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+               p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
+               break;
+       default:
+               p_link->partner_adv_pause = 0;
+       }
+
+       p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+       if (p_link->link_up)
+               ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);
+
+       OSAL_LINK_UPDATE(p_hwfn);
+}
+
+/* Configure the PHY through the MFW: build a pmm_phy_cfg from the cached
+ * link_input parameters and send INIT_PHY (b_up) or LINK_RESET (!b_up).
+ *
+ * @return ECORE_SUCCESS, or the mailbox error when the MFW fails to
+ *         respond.
+ */
+enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt, bool b_up)
+{
+       struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+       union drv_union_data union_data;
+       struct pmm_phy_cfg *p_phy_cfg;
+       u32 param = 0, reply = 0, cmd;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+               return ECORE_SUCCESS;
+#endif
+
+       /* Set the shmem configuration according to params */
+       p_phy_cfg = &union_data.drv_phy_cfg;
+       OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
+       cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+       /* A forced speed is only meaningful when autoneg is off */
+       if (!params->speed.autoneg)
+               p_phy_cfg->speed = params->speed.forced_speed;
+       p_phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
+       p_phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
+       p_phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+       p_phy_cfg->adv_speed = params->speed.advertised_speeds;
+       p_phy_cfg->loopback_mode = params->loopback_mode;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               DP_INFO(p_hwfn,
+                       "Link on FPGA - Ask for loopback mode '5' at 10G\n");
+               p_phy_cfg->loopback_mode = 5;
+               p_phy_cfg->speed = 10000;
+       }
+#endif
+
+       p_hwfn->b_drv_link_init = b_up;
+
+       if (b_up)
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
+                          " adv_speed 0x%08x, loopback 0x%08x,"
+                          " features 0x%08x\n",
+                          p_phy_cfg->speed, p_phy_cfg->pause,
+                          p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
+                          p_phy_cfg->feature_config_flags);
+       else
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
+
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, 0, &union_data, &reply,
+                                    &param);
+
+       /* if mcp fails to respond we must abort */
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
+
+       /* Reset the link status if needed */
+       if (!b_up)
+               ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
+
+       return rc;
+}
+
+/* Return the process-kill counter from this path's public section.
+ *
+ * NOTE(review): on a VF this returns ECORE_INVAL through a u32 return
+ * type, which a caller cannot distinguish from a real counter value -
+ * confirm intended semantics.
+ */
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt)
+{
+       u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
+
+       /* TODO - Add support for VFs */
+       if (IS_VF(p_hwfn->p_dev))
+               return ECORE_INVAL;
+
+       path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                                PUBLIC_PATH);
+       path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
+       path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
+
+       proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
+                                path_addr +
+                                OFFSETOF(struct public_path, process_kill)) &
+           PROCESS_KILL_COUNTER_MASK;
+
+       return proc_kill_cnt;
+}
+
+/* Handle a process-kill notification: mask interrupts, mark recovery in
+ * progress (leading hwfn only, once) and schedule the recovery handler.
+ */
+static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       u32 proc_kill_cnt;
+
+       /* Prevent possible attentions/interrupts during the recovery handling
+        * and till its load phase, during which they will be re-enabled.
+        */
+       ecore_int_igu_disable_int(p_hwfn, p_ptt);
+
+       DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
+
+       /* The following operations should be done once, and thus in CMT mode
+        * are carried out by only the first HW function.
+        */
+       if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
+               return;
+
+       if (p_dev->recov_in_prog) {
+               DP_NOTICE(p_hwfn, false,
+                         "Ignoring the indication since a recovery"
+                         " process is already in progress\n");
+               return;
+       }
+
+       p_dev->recov_in_prog = true;
+
+       proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
+       DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
+
+       OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
+}
+
+/* Collect the protocol statistics requested by the MFW (via the OSAL
+ * layer) and return them through the GET_STATS mailbox command.
+ * NOTE(review): MFW_DRV_MSG_GET_FCOE_STATS is forwarded here by
+ * ecore_mcp_handle_events() but has no case below, so it hits the
+ * "Invalid protocol type" path - confirm whether FCOE stats are needed.
+ */
+static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         enum MFW_DRV_MSG_TYPE type)
+{
+       enum ecore_mcp_protocol_type stats_type;
+       union ecore_mcp_protocol_stats stats;
+       u32 hsi_param, param = 0, reply = 0;
+       union drv_union_data union_data;
+
+       /* Translate the MFW message into a stats type + HSI parameter */
+       switch (type) {
+       case MFW_DRV_MSG_GET_LAN_STATS:
+               stats_type = ECORE_MCP_LAN_STATS;
+               hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+               break;
+       case MFW_DRV_MSG_GET_ISCSI_STATS:
+               stats_type = ECORE_MCP_ISCSI_STATS;
+               hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
+               break;
+       case MFW_DRV_MSG_GET_RDMA_STATS:
+               stats_type = ECORE_MCP_RDMA_STATS;
+               hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
+               return;
+       }
+
+       OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
+
+       OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
+
+       ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_GET_STATS,
+                               hsi_param, &union_data, &reply, &param);
+}
+
+/* Copy this PF's public_func structure out of the MFW shmem, one dword
+ * at a time, into @p_data. Returns the number of bytes actually copied
+ * (bounded by both sizeof(*p_data) and the shmem section size).
+ */
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   struct public_func *p_data, int pfid)
+{
+       u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+                                       PUBLIC_FUNC);
+       u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+       u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+       u32 i, size;
+
+       /* Zero first so a short shmem section leaves the tail cleared */
+       OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+       size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize));
+       for (i = 0; i < size / sizeof(u32); i++)
+               ((u32 *) p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+                                              func_addr + (i << 2));
+
+       return size;
+}
+
+/* Extract the min/max bandwidth configuration for this PF from the
+ * shmem function info and store it in the hwfn's function-info struct,
+ * clamping out-of-range values into [1, 100].
+ */
+static void
+ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
+                       struct public_func *p_shmem_info)
+{
+       struct ecore_mcp_function_info *p_info;
+
+       p_info = &p_hwfn->mcp_info->func_info;
+
+       /* TODO - bandwidth min/max should have valid values of 1-100,
+        * as well as some indication that the feature is disabled.
+        * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
+        * limit and correct value to min `1' and max `100' if limit isn't in
+        * range.
+        */
+       p_info->bandwidth_min = (p_shmem_info->config &
+                                FUNC_MF_CFG_MIN_BW_MASK) >>
+           FUNC_MF_CFG_MIN_BW_SHIFT;
+       if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+               DP_INFO(p_hwfn,
+                       "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+                       p_info->bandwidth_min);
+               p_info->bandwidth_min = 1;
+       }
+
+       p_info->bandwidth_max = (p_shmem_info->config &
+                                FUNC_MF_CFG_MAX_BW_MASK) >>
+           FUNC_MF_CFG_MAX_BW_SHIFT;
+       if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+               DP_INFO(p_hwfn,
+                       "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+                       p_info->bandwidth_max);
+               p_info->bandwidth_max = 100;
+       }
+}
+
+/* Handle an MFW bandwidth-update notification: re-read the PF's
+ * bandwidth limits from shmem, apply them, and acknowledge the MFW.
+ */
+static void
+ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_function_info *p_info;
+       struct public_func shmem_info;
+       u32 resp = 0, param = 0;
+
+       ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+
+       ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+       p_info = &p_hwfn->mcp_info->func_info;
+
+       ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
+
+       ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
+
+       /* Acknowledge the MFW */
+       ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+                     &param);
+}
+
+/* Handle an MFW fan-failure indication by notifying the upper driver
+ * of a fatal HW error. In CMT mode only the leading hwfn reports it.
+ */
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt)
+{
+       /* A single notification should be sent to upper driver in CMT mode */
+       if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
+               return;
+
+       DP_NOTICE(p_hwfn, false,
+                 "Fan failure was detected on the network interface card"
+                 " and it's going to be shut down.\n");
+
+       ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
+}
+
+/* Process pending messages from the MFW mailbox: read the current
+ * message block, dispatch every command that differs from the shadow
+ * copy, acknowledge all messages back to the MFW (in big-endian, as it
+ * expects), and refresh the shadow. Returns ECORE_INVAL if an unknown
+ * message was seen or if no new message was actually present.
+ */
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_info *info = p_hwfn->mcp_info;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       bool found = false;
+       u16 i;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");
+
+       /* Read Messages from MFW */
+       ecore_mcp_read_mb(p_hwfn, p_ptt);
+
+       /* Compare current messages to old ones */
+       for (i = 0; i < info->mfw_mb_length; i++) {
+               if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+                       continue;
+
+               found = true;
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                          "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+                          i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
+               /* The byte index doubles as the MFW message type */
+               switch (i) {
+               case MFW_DRV_MSG_LINK_CHANGE:
+                       ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
+                       break;
+               case MFW_DRV_MSG_VF_DISABLED:
+                       ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
+                       break;
+               case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+                       ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+                                                   ECORE_DCBX_REMOTE_LLDP_MIB);
+                       break;
+               case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+                       ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+                                                   ECORE_DCBX_REMOTE_MIB);
+                       break;
+               case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+                       ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+                                                   ECORE_DCBX_OPERATIONAL_MIB);
+                       break;
+               case MFW_DRV_MSG_ERROR_RECOVERY:
+                       ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
+                       break;
+               case MFW_DRV_MSG_GET_LAN_STATS:
+               case MFW_DRV_MSG_GET_FCOE_STATS:
+               case MFW_DRV_MSG_GET_ISCSI_STATS:
+               case MFW_DRV_MSG_GET_RDMA_STATS:
+                       ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+                       break;
+               case MFW_DRV_MSG_BW_UPDATE:
+                       ecore_mcp_update_bw(p_hwfn, p_ptt);
+                       break;
+               case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+                       ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+                       break;
+               case MFW_DRV_MSG_FAILURE_DETECTED:
+                       ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
+                       break;
+               default:
+                       DP_NOTICE(p_hwfn, true,
+                                 "Unimplemented MFW message %d\n", i);
+                       rc = ECORE_INVAL;
+               }
+       }
+
+       /* ACK everything */
+       for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+               OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *) info->mfw_mb_cur)[i]);
+
+               /* MFW expect answer in BE, so we force write in that format */
+               ecore_wr(p_hwfn, p_ptt,
+                        info->mfw_mb_addr + sizeof(u32) +
+                        MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+                        sizeof(u32) + i * sizeof(u32), val);
+       }
+
+       if (!found) {
+               DP_NOTICE(p_hwfn, false,
+                         "Received an MFW message indication but no"
+                         " new message!\n");
+               rc = ECORE_INVAL;
+       }
+
+       /* Copy the new mfw messages into the shadow */
+       OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+       return rc;
+}
+
+/* Retrieve the management firmware version (and optionally the running
+ * bundle id) - from the ACQUIRE response for VFs, or from the public
+ * GLOBAL shmem section for PFs. Emulation platforms report success
+ * without filling anything in.
+ */
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
+                                          struct ecore_ptt *p_ptt,
+                                          u32 *p_mfw_ver,
+                                          u32 *p_running_bundle_id)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       u32 global_offsize;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_dev)) {
+               DP_NOTICE(p_dev, false, "Emulation - can't get MFW version\n");
+               return ECORE_SUCCESS;
+       }
+#endif
+
+       if (IS_VF(p_dev)) {
+               if (p_hwfn->vf_iov_info) {
+                       struct pfvf_acquire_resp_tlv *p_resp;
+
+                       /* VFs learn the MFW version during ACQUIRE */
+                       p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+                       *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+                       return ECORE_SUCCESS;
+               } else {
+                       DP_VERBOSE(p_dev, ECORE_MSG_IOV,
+                                  "VF requested MFW vers prior to ACQUIRE\n");
+                       return ECORE_INVAL;
+               }
+       }
+
+       global_offsize = ecore_rd(p_hwfn, p_ptt,
+                                 SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
+                                                      public_base,
+                                                      PUBLIC_GLOBAL));
+       *p_mfw_ver =
+           ecore_rd(p_hwfn, p_ptt,
+                    SECTION_ADDR(global_offsize,
+                                 0) + OFFSETOF(struct public_global, mfw_ver));
+
+       if (p_running_bundle_id != OSAL_NULL) {
+               *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
+                                               SECTION_ADDR(global_offsize,
+                                                            0) +
+                                               OFFSETOF(struct public_global,
+                                                        running_bundle_id));
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Read the transceiver/media type from the public PORT shmem section
+ * into @p_media_type. Returns ECORE_BUSY when the MFW is not ready or
+ * no PTT window can be acquired, ECORE_INVAL for VFs.
+ */
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+                                             u32 *p_media_type)
+{
+       struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
+       struct ecore_ptt *p_ptt;
+
+       /* TODO - Add support for VFs */
+       if (IS_VF(p_dev))
+               return ECORE_INVAL;
+
+       if (!ecore_mcp_is_init(p_hwfn)) {
+               DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+               return ECORE_BUSY;
+       }
+
+       /* Default to "unspecified" so the out-param is valid even if we
+        * fail to grab a PTT window below.
+        */
+       *p_media_type = MEDIA_UNSPECIFIED;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (p_ptt == OSAL_NULL)
+               return ECORE_BUSY;
+
+       *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+                                OFFSETOF(struct public_port, media_type));
+
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return ECORE_SUCCESS;
+}
+
+/* Translate the MFW protocol configuration bits into a PCI personality.
+ * Returns ECORE_INVAL for unknown or unsupported protocol values.
+ */
+static enum _ecore_status_t
+ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
+                         struct public_func *p_info,
+                         enum ecore_pci_personality *p_proto)
+{
+       switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+       case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+               /* Report an RDMA-capable personality when the device
+                * advertises RoCE support.
+                */
+               *p_proto = OSAL_TEST_BIT(ECORE_DEV_CAP_ROCE,
+                                        &p_hwfn->hw_info.device_capabilities) ?
+                   ECORE_PCI_ETH_ROCE : ECORE_PCI_ETH;
+               return ECORE_SUCCESS;
+       case FUNC_MF_CFG_PROTOCOL_ISCSI:
+               *p_proto = ECORE_PCI_ISCSI;
+               return ECORE_SUCCESS;
+       case FUNC_MF_CFG_PROTOCOL_ROCE:
+               DP_NOTICE(p_hwfn, true,
+                         "RoCE personality is not a valid value!\n");
+               return ECORE_INVAL;
+       default:
+               return ECORE_INVAL;
+       }
+}
+
+/* Populate the hwfn's function-info (pause setting, personality,
+ * bandwidth limits, MAC address, outer VLAN) from this PF's public
+ * shmem area. Returns ECORE_INVAL on an unknown personality.
+ * NOTE(review): the trace below prints wwn_port/wwn_node with %lx -
+ * verify this matches their actual type on all supported targets.
+ */
+enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt)
+{
+       struct ecore_mcp_function_info *info;
+       struct public_func shmem_info;
+
+       ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+       info = &p_hwfn->mcp_info->func_info;
+
+       info->pause_on_host = (shmem_info.config &
+                              FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+       if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
+               DP_ERR(p_hwfn, "Unknown personality %08x\n",
+                      (u32) (shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+               return ECORE_INVAL;
+       }
+
+       ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+       /* Unpack the big-endian-style MAC stored as two shmem words */
+       if (shmem_info.mac_upper || shmem_info.mac_lower) {
+               info->mac[0] = (u8) (shmem_info.mac_upper >> 8);
+               info->mac[1] = (u8) (shmem_info.mac_upper);
+               info->mac[2] = (u8) (shmem_info.mac_lower >> 24);
+               info->mac[3] = (u8) (shmem_info.mac_lower >> 16);
+               info->mac[4] = (u8) (shmem_info.mac_lower >> 8);
+               info->mac[5] = (u8) (shmem_info.mac_lower);
+       } else {
+               /* TODO - are there protocols for which there's no MAC? */
+               DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
+       }
+
+       info->ovlan = (u16) (shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+       DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
+                  "Read configuration from shmem: pause_on_host %02x"
+                   " protocol %02x BW [%02x - %02x]"
+                   " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
+                   " node %lx ovlan %04x\n",
+                  info->pause_on_host, info->protocol,
+                  info->bandwidth_min, info->bandwidth_max,
+                  info->mac[0], info->mac[1], info->mac[2],
+                  info->mac[3], info->mac[4], info->mac[5],
+                  info->wwn_port, info->wwn_node, info->ovlan);
+
+       return ECORE_SUCCESS;
+}
+
+/* Accessor for the hwfn's requested-link configuration; NULL when MCP
+ * info has not been allocated yet.
+ */
+struct ecore_mcp_link_params
+*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
+{
+       if (p_hwfn == OSAL_NULL || p_hwfn->mcp_info == OSAL_NULL)
+               return OSAL_NULL;
+
+       return &p_hwfn->mcp_info->link_input;
+}
+
+/* Accessor for the hwfn's current link state; NULL when MCP info has
+ * not been allocated yet.
+ */
+struct ecore_mcp_link_state
+*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
+{
+       if (p_hwfn == OSAL_NULL || p_hwfn->mcp_info == OSAL_NULL)
+               return OSAL_NULL;
+
+#ifndef ASIC_ONLY
+       /* Emulation/FPGA has no real link - force it to look up */
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+               DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
+               p_hwfn->mcp_info->link_output.link_up = true;
+       }
+#endif
+
+       return &p_hwfn->mcp_info->link_output;
+}
+
+/* Accessor for the hwfn's link capabilities; NULL when MCP info has
+ * not been allocated yet.
+ */
+struct ecore_mcp_link_capabilities
+*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
+{
+       if (p_hwfn == OSAL_NULL || p_hwfn->mcp_info == OSAL_NULL)
+               return OSAL_NULL;
+
+       return &p_hwfn->mcp_info->link_capabilities;
+}
+
+/* Ask the MFW to drain the NIG (100 is the drain parameter passed to
+ * the mailbox), then sleep 120ms to give the drain time to complete
+ * before returning the mailbox status to the caller.
+ */
+enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt)
+{
+       enum _ecore_status_t rc;
+       u32 resp = 0, param = 0;
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt,
+                          DRV_MSG_CODE_NIG_DRAIN, 100, &resp, &param);
+
+       /* Wait for the drain to complete before returning */
+       OSAL_MSLEEP(120);
+
+       return rc;
+}
+
+/* Accessor for the hwfn's function info read from shmem; NULL when MCP
+ * info has not been allocated yet.
+ */
+const struct ecore_mcp_function_info
+*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
+{
+       if (p_hwfn == OSAL_NULL || p_hwfn->mcp_info == OSAL_NULL)
+               return OSAL_NULL;
+
+       return &p_hwfn->mcp_info->func_info;
+}
+
+/* Dispatch an NVM request to the matching mailbox helper based on the
+ * request type carried in @params. Returns ECORE_NOTIMPL for unknown
+ * types, otherwise the helper's status.
+ */
+enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_ptt *p_ptt,
+                                          struct ecore_mcp_nvm_params *params)
+{
+       switch (params->type) {
+       case ECORE_MCP_NVM_RD:
+               return ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+                                           params->nvm_common.cmd,
+                                           params->nvm_common.offset,
+                                           &params->nvm_common.resp,
+                                           &params->nvm_common.param,
+                                           params->nvm_rd.buf_size,
+                                           params->nvm_rd.buf);
+       case ECORE_MCP_CMD:
+               return ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
+                                    params->nvm_common.offset,
+                                    &params->nvm_common.resp,
+                                    &params->nvm_common.param);
+       case ECORE_MCP_NVM_WR:
+               return ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+                                           params->nvm_common.cmd,
+                                           params->nvm_common.offset,
+                                           &params->nvm_common.resp,
+                                           &params->nvm_common.param,
+                                           params->nvm_wr.buf_size,
+                                           params->nvm_wr.buf);
+       default:
+               return ECORE_NOTIMPL;
+       }
+}
+
+/* Count how many non-hidden PFs on this engine have a personality
+ * matching the @personalities bitmask (bit index == personality value).
+ */
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_ptt *p_ptt, u32 personalities)
+{
+       enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
+       struct public_func shmem_info;
+       int i, count = 0, num_pfs;
+
+       num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+
+       for (i = 0; i < num_pfs; i++) {
+               ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+                                        MCP_PF_ID_BY_REL(p_hwfn, i));
+               /* Skip PFs that the MFW marked as hidden */
+               if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
+                       continue;
+
+               if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
+                                             &protocol) != ECORE_SUCCESS)
+                       continue;
+
+               if ((1 << ((u32) protocol)) & personalities)
+                       count++;
+       }
+
+       return count;
+}
+
+/* Derive the NVM flash size in bytes from the MCP_REG_NVM_CFG4 register
+ * (the register encodes a size exponent in megabits). Not supported on
+ * emulation or for VFs.
+ */
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u32 *p_flash_size)
+{
+       u32 flash_size;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
+               return ECORE_INVAL;
+       }
+#endif
+
+       if (IS_VF(p_hwfn->p_dev))
+               return ECORE_INVAL;
+
+       flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+       flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+           MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+       flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+       *p_flash_size = flash_size;
+
+       return ECORE_SUCCESS;
+}
+
+/* Trigger a device recovery process by asserting general attention 35,
+ * unless a recovery is already in progress (in which case ECORE_AGAIN
+ * is returned and nothing is written).
+ */
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt)
+{
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+       if (p_dev->recov_in_prog) {
+               DP_NOTICE(p_hwfn, false,
+                         "Avoid triggering a recovery since such a process"
+                         " is already in progress\n");
+               return ECORE_AGAIN;
+       }
+
+       DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
+       ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
+
+       return ECORE_SUCCESS;
+}
+
+/* Ask the MFW to configure @num MSI-X vectors (status blocks) for VF
+ * @vf_id via the CFG_VF_MSIX mailbox command.
+ * NOTE(review): if the mailbox call itself fails, resp stays 0 and rc
+ * is overwritten with ECORE_INVAL, masking the original error code -
+ * confirm that is acceptable to callers.
+ */
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u8 vf_id, u8 num)
+{
+       u32 resp = 0, param = 0, rc_param = 0;
+       enum _ecore_status_t rc;
+
+       /* Pack the VF id and SB count into the mailbox parameter */
+       param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+           DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+       param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+           DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+                          &resp, &rc_param);
+
+       if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+               DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
+                         vf_id);
+               rc = ECORE_INVAL;
+       }
+
+       return rc;
+}
+
+/* Report the driver version (numeric + name string, packed big-endian
+ * dword by dword) to the MFW via the SET_VERSION mailbox command.
+ * NOTE(review): union_data is not zeroed, so bytes beyond the
+ * drv_version member are sent uninitialized; also the u32 casts over
+ * the name buffer assume 4-byte-aligned, size-rounded storage - verify
+ * both against the shmem layout.
+ */
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                          struct ecore_mcp_drv_version *p_ver)
+{
+       u32 param = 0, reply = 0, num_words, i;
+       struct drv_version_stc *p_drv_version;
+       union drv_union_data union_data;
+       void *p_name;
+       OSAL_BE32 val;
+       enum _ecore_status_t rc;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+               return ECORE_SUCCESS;
+#endif
+
+       p_drv_version = &union_data.drv_version;
+       p_drv_version->version = p_ver->version;
+       /* The last dword of the name buffer is intentionally excluded */
+       num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
+       for (i = 0; i < num_words; i++) {
+               p_name = &p_ver->name[i * sizeof(u32)];
+               val = OSAL_CPU_TO_BE32(*(u32 *) p_name);
+               *(u32 *) &p_drv_version->name[i * sizeof(u32)] = val;
+       }
+
+       rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0,
+                                    &union_data, &reply, &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+/* Request that the management CPU halt itself via the MCP_HALT mailbox
+ * command. Counterpart of ecore_mcp_resume().
+ */
+enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt)
+{
+       enum _ecore_status_t rc;
+       u32 resp = 0, param = 0;
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+                          &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+/* Resume a previously halted management CPU: clear its state register,
+ * drop the soft-halt bit, and read back to confirm the CPU is running.
+ * NOTE(review): on failure this returns a raw -1 rather than a named
+ * _ecore_status_t constant - confirm callers only test for non-zero.
+ */
+enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt)
+{
+       u32 value, cpu_mode;
+
+       ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+       value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+       value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+       ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
+       /* Read back to verify the soft-halt bit actually cleared */
+       cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+
+       return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
+}
+
+/* Notify the MFW which client (OS driver vs. user utility) currently
+ * owns the device configuration.
+ *
+ * @param config - configuration method (not part of the mailbox
+ *                 parameter itself)
+ * @param client - the entity taking ownership, mapped onto the
+ *                 DRV_MB_PARAM_OV_CURR_CFG_* mailbox parameter
+ *
+ * @return ECORE_SUCCESS, ECORE_INVAL for an unknown client, or the
+ *         mailbox failure code.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  enum ecore_ov_config_method config,
+                                  enum ecore_ov_client client)
+{
+       enum _ecore_status_t rc;
+       u32 resp = 0, param = 0;
+       u32 drv_mb_param;
+
+       /* Bug fix: this switch used to inspect 'config' while its case
+        * labels are 'enum ecore_ov_client' constants; 'client' is the
+        * value those cases describe.
+        */
+       switch (client) {
+       case ECORE_OV_CLIENT_DRV:
+               drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
+               break;
+       case ECORE_OV_CLIENT_USER:
+               drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
+               return ECORE_INVAL;
+       }
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
+                          drv_mb_param, &resp, &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+/* Report the driver's run state (not loaded / disabled / active) to
+ * the MFW so management firmware can expose it out-of-band.
+ *
+ * @return ECORE_SUCCESS, ECORE_INVAL for an unknown state, or the
+ *         mailbox failure code.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt,
+                                enum ecore_ov_driver_state drv_state)
+{
+       enum _ecore_status_t rc;
+       u32 resp = 0, param = 0;
+       u32 drv_mb_param;
+
+       /* Map the abstract state onto the MFW mailbox parameter */
+       switch (drv_state) {
+       case ECORE_OV_DRIVER_STATE_NOT_LOADED:
+               drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
+               break;
+       case ECORE_OV_DRIVER_STATE_DISABLED:
+               drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
+               break;
+       case ECORE_OV_DRIVER_STATE_ACTIVE:
+               drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
+               return ECORE_INVAL;
+       }
+
+       /* Bug fix: send the translated mailbox parameter; previously the
+        * raw enum value 'drv_state' was sent and 'drv_mb_param' was
+        * computed but never used.
+        */
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
+                          drv_mb_param, &resp, &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+/* TODO stub - FC NPIV table retrieval is not implemented; always
+ * reports success without touching @p_table.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                        struct ecore_fc_npiv_tbl *p_table)
+{
+       return 0;
+}
+
+/* TODO stub - reporting the MTU to the MFW is not implemented; always
+ * reports success and ignores @mtu.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
+                       struct ecore_ptt *p_ptt, u16 mtu)
+{
+       return 0;
+}
+
+/* Set the port LED mode (on / off / restore hardware control) through
+ * the SET_LED_MODE mailbox command. Returns ECORE_INVAL for an unknown
+ * mode, otherwise the mailbox status.
+ */
+enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      enum ecore_led_mode mode)
+{
+       u32 resp = 0, param = 0, drv_mb_param;
+       enum _ecore_status_t rc;
+
+       /* Map the abstract LED mode onto the MFW mailbox parameter */
+       if (mode == ECORE_LED_MODE_ON) {
+               drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
+       } else if (mode == ECORE_LED_MODE_OFF) {
+               drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
+       } else if (mode == ECORE_LED_MODE_RESTORE) {
+               drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
+       } else {
+               DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
+               return ECORE_INVAL;
+       }
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
+                          drv_mb_param, &resp, &param);
+       if (rc != ECORE_SUCCESS)
+               DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+       return rc;
+}
+
+/* Ask the MFW to mask the parity sources given in @mask_parities.
+ * Returns ECORE_INVAL if the MFW does not acknowledge the request
+ * (e.g. an old MFW that lacks the command).
+ */
+enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 mask_parities)
+{
+       enum _ecore_status_t rc;
+       u32 resp = 0, param = 0;
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+                          mask_parities, &resp, &param);
+
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn,
+                      "MCP response failure for mask parities, aborting\n");
+       } else if (resp != FW_MSG_CODE_OK) {
+               DP_ERR(p_hwfn,
+                      "MCP did not ack mask parity request. Old MFW?\n");
+               rc = ECORE_INVAL;
+       }
+
+       return rc;
+}
+
+/* Read @len bytes from NVM offset @addr into @p_buf, issuing
+ * NVM_READ_NVRAM mailbox commands in chunks of at most
+ * MCP_DRV_NVM_BUF_LEN bytes. The last mailbox response is cached in
+ * p_dev->mcp_nvm_resp for later retrieval via ecore_mcp_nvm_resp().
+ */
+enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
+                                       u8 *p_buf, u32 len)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       u32 bytes_left, offset, bytes_to_copy, buf_size;
+       struct ecore_mcp_nvm_params params;
+       struct ecore_ptt *p_ptt;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       bytes_left = len;
+       offset = 0;
+       params.type = ECORE_MCP_NVM_RD;
+       params.nvm_rd.buf_size = &buf_size;
+       params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
+       while (bytes_left > 0) {
+               bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
+                                          MCP_DRV_NVM_BUF_LEN);
+               /* The chunk length is packed into the upper offset bits */
+               params.nvm_common.offset = (addr + offset) |
+                   (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
+               params.nvm_rd.buf = (u32 *) (p_buf + offset);
+               rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+               if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
+                                           FW_MSG_CODE_NVM_OK)) {
+                       DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+                       break;
+               }
+               /* Advance by what the MFW actually returned */
+               offset += *params.nvm_rd.buf_size;
+               bytes_left -= *params.nvm_rd.buf_size;
+       }
+
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
+/* Read @len bytes from the PHY at offset @addr into @p_buf via either
+ * the PHY_CORE_READ or PHY_RAW_READ mailbox command (selected by @cmd).
+ * The mailbox response is cached in p_dev->mcp_nvm_resp.
+ */
+enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
+                                       u32 addr, u8 *p_buf, u32 len)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params params;
+       struct ecore_ptt *p_ptt;
+       enum _ecore_status_t rc;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       params.type = ECORE_MCP_NVM_RD;
+       params.nvm_rd.buf_size = &len;
+       params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
+           DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
+       params.nvm_common.offset = addr;
+       params.nvm_rd.buf = (u32 *) p_buf;
+       rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+       if (rc != ECORE_SUCCESS)
+               DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
+/* Copy the response code cached by the last NVM/PHY mailbox operation
+ * into @p_buf.
+ * NOTE(review): a PTT window is acquired and released but never used,
+ * and 'params' is zeroed but never read - presumably leftovers; confirm
+ * whether the acquire is needed for serialization before removing.
+ */
+enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params params;
+       struct ecore_ptt *p_ptt;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return ECORE_SUCCESS;
+}
+
+/* Ask the MFW to delete the NVM file at the given address. */
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params params;
+       enum _ecore_status_t status;
+       struct ecore_ptt *p_ptt;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       params.type = ECORE_MCP_CMD;
+       params.nvm_common.offset = addr;
+       params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
+       status = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return status;
+}
+
+/* Tell the MFW a file transfer to the given NVM address is starting. */
+enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
+                                                 u32 addr)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params params;
+       enum _ecore_status_t status;
+       struct ecore_ptt *p_ptt;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       params.type = ECORE_MCP_CMD;
+       params.nvm_common.offset = addr;
+       params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
+       status = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return status;
+}
+
+/* rc receives ECORE_INVAL as default parameter because
+ * it might not enter the while loop if the len is 0
+ */
+/**
+ * @brief Write a buffer to NVM in MCP_DRV_NVM_BUF_LEN-sized chunks.
+ *
+ * @param p_dev - ecore dev pointer
+ * @param cmd   - ECORE_PUT_FILE_DATA for file transfers, else raw NVRAM write
+ * @param addr  - NVM offset to write to
+ * @param p_buf - input buffer
+ * @param len   - number of bytes to write; if 0, returns ECORE_INVAL
+ *
+ * @return status of the last mailbox command issued.
+ */
+enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
+                                        u32 addr, u8 *p_buf, u32 len)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       enum _ecore_status_t rc = ECORE_INVAL;
+       struct ecore_mcp_nvm_params params;
+       struct ecore_ptt *p_ptt;
+       u32 buf_idx, buf_size;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       params.type = ECORE_MCP_NVM_WR;
+       if (cmd == ECORE_PUT_FILE_DATA)
+               params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+       else
+               params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+       buf_idx = 0;
+       while (buf_idx < len) {
+               buf_size = OSAL_MIN_T(u32, (len - buf_idx),
+                                     MCP_DRV_NVM_BUF_LEN);
+               /* Pack the chunk size into the upper bits and the absolute
+                * NVM offset of this chunk into the lower bits.
+                */
+               params.nvm_common.offset = ((buf_size <<
+                                            DRV_MB_PARAM_NVM_LEN_SHIFT)
+                                           | addr) + buf_idx;
+               params.nvm_wr.buf_size = buf_size;
+               params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
+               rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+               if (rc != ECORE_SUCCESS ||
+                   ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
+                    (params.nvm_common.resp !=
+                     FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK))) {
+                       DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+                       /* Stop on the first failure instead of pushing the
+                        * remaining chunks; mirrors the read-side loop.
+                        */
+                       break;
+               }
+
+               buf_idx += buf_size;
+       }
+
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
+}
+
+/* Write PHY (core or raw) data from the caller's buffer through the MFW. */
+enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
+                                        u32 addr, u8 *p_buf, u32 len)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params params;
+       enum _ecore_status_t status;
+       struct ecore_ptt *p_ptt;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       params.type = ECORE_MCP_NVM_WR;
+       params.nvm_common.offset = addr;
+       params.nvm_wr.buf = (u32 *)p_buf;
+       params.nvm_wr.buf_size = len;
+       if (cmd == ECORE_PHY_CORE_WRITE)
+               params.nvm_common.cmd = DRV_MSG_CODE_PHY_CORE_WRITE;
+       else
+               params.nvm_common.cmd = DRV_MSG_CODE_PHY_RAW_WRITE;
+
+       status = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+       if (status != ECORE_SUCCESS)
+               DP_NOTICE(p_dev, false, "MCP command rc = %d\n", status);
+
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return status;
+}
+
+/* Request the MFW to set secure mode for the given NVM address. */
+enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
+                                                  u32 addr)
+{
+       struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+       struct ecore_mcp_nvm_params params;
+       enum _ecore_status_t status;
+       struct ecore_ptt *p_ptt;
+
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_BUSY;
+
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       params.type = ECORE_MCP_CMD;
+       params.nvm_common.offset = addr;
+       params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
+       status = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+       p_dev->mcp_nvm_resp = params.nvm_common.resp;
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return status;
+}
+
+/**
+ * @brief Read from a transceiver's I2C space via the MFW, in
+ *        MAX_I2C_TRANSACTION_SIZE chunks.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port   - physical port of the transceiver
+ * @param addr   - I2C slave address of the transceiver
+ * @param offset - byte offset within the transceiver's memory
+ * @param len    - number of bytes to read
+ * @param p_buf  - output buffer (len bytes)
+ *
+ * @return ECORE_SUCCESS, ECORE_NODEV if no transceiver is present,
+ *         ECORE_UNKNOWN_ERROR on an unexpected MFW response, or the
+ *         mailbox error code if the command itself failed.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u32 port, u32 addr, u32 offset,
+                                           u32 len, u8 *p_buf)
+{
+       struct ecore_mcp_nvm_params params;
+       enum _ecore_status_t rc;
+       u32 bytes_left, bytes_to_copy, buf_size;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       SET_FIELD(params.nvm_common.offset,
+                 DRV_MB_PARAM_TRANSCEIVER_PORT, port);
+       SET_FIELD(params.nvm_common.offset,
+                 DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
+       /* Reuse 'addr' as the base transceiver offset and 'offset' as the
+        * running cursor into p_buf.
+        */
+       addr = offset;
+       offset = 0;
+       bytes_left = len;
+       params.type = ECORE_MCP_NVM_RD;
+       params.nvm_rd.buf_size = &buf_size;
+       params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
+       while (bytes_left > 0) {
+               bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
+                                          MAX_I2C_TRANSACTION_SIZE);
+               params.nvm_rd.buf = (u32 *)(p_buf + offset);
+               SET_FIELD(params.nvm_common.offset,
+                         DRV_MB_PARAM_TRANSCEIVER_OFFSET, addr + offset);
+               SET_FIELD(params.nvm_common.offset,
+                         DRV_MB_PARAM_TRANSCEIVER_SIZE, bytes_to_copy);
+               rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+               /* Bail out before inspecting the response field - it is not
+                * valid when the mailbox command itself failed (the original
+                * assigned rc but never checked it).
+                */
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+               if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
+                   FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+                       return ECORE_NODEV;
+               } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+                          FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+                       return ECORE_UNKNOWN_ERROR;
+
+               offset += *params.nvm_rd.buf_size;
+               bytes_left -= *params.nvm_rd.buf_size;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/**
+ * @brief Write to a transceiver's I2C space via the MFW, in
+ *        MAX_I2C_TRANSACTION_SIZE chunks.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port   - physical port of the transceiver
+ * @param addr   - I2C slave address of the transceiver
+ * @param offset - byte offset within the transceiver's memory
+ * @param len    - number of bytes to write
+ * @param p_buf  - input buffer (len bytes)
+ *
+ * @return ECORE_SUCCESS, ECORE_NODEV if no transceiver is present,
+ *         ECORE_UNKNOWN_ERROR on an unexpected MFW response, or the
+ *         mailbox error code if the command itself failed.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 port, u32 addr, u32 offset,
+                                            u32 len, u8 *p_buf)
+{
+       struct ecore_mcp_nvm_params params;
+       enum _ecore_status_t rc;
+       u32 buf_idx, buf_size;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+       SET_FIELD(params.nvm_common.offset,
+                 DRV_MB_PARAM_TRANSCEIVER_PORT, port);
+       SET_FIELD(params.nvm_common.offset,
+                 DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
+       params.type = ECORE_MCP_NVM_WR;
+       params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
+       buf_idx = 0;
+       while (buf_idx < len) {
+               buf_size = OSAL_MIN_T(u32, (len - buf_idx),
+                                     MAX_I2C_TRANSACTION_SIZE);
+               SET_FIELD(params.nvm_common.offset,
+                         DRV_MB_PARAM_TRANSCEIVER_OFFSET, offset + buf_idx);
+               SET_FIELD(params.nvm_common.offset,
+                         DRV_MB_PARAM_TRANSCEIVER_SIZE, buf_size);
+               params.nvm_wr.buf_size = buf_size;
+               params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
+               rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+               /* Bail out before inspecting the response field - it is not
+                * valid when the mailbox command itself failed (the original
+                * assigned rc but never checked it).
+                */
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+               if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
+                   FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+                       return ECORE_NODEV;
+               } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+                          FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+                       return ECORE_UNKNOWN_ERROR;
+
+               buf_idx += buf_size;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/**
+ * @brief Read the value of a GPIO pin through the MFW mailbox.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param gpio     - GPIO number
+ * @param gpio_val - filled with the GPIO value on success
+ *
+ * @return ECORE_SUCCESS, the mailbox error code if the command failed,
+ *         or ECORE_UNKNOWN_ERROR on an unexpected MFW response.
+ */
+enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        u16 gpio, u32 *gpio_val)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u32 drv_mb_param = 0, rsp;
+
+       SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
+                          drv_mb_param, &rsp, gpio_val);
+       /* Must not inspect 'rsp' if the command failed - it would be
+        * uninitialized (the original ignored rc, which is UB on failure).
+        */
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+               return ECORE_UNKNOWN_ERROR;
+
+       return ECORE_SUCCESS;
+}
+
+/**
+ * @brief Write a value to a GPIO pin through the MFW mailbox.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param gpio     - GPIO number
+ * @param gpio_val - value to drive on the GPIO
+ *
+ * @return ECORE_SUCCESS, the mailbox error code if the command failed,
+ *         or ECORE_UNKNOWN_ERROR on an unexpected MFW response.
+ */
+enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u16 gpio, u16 gpio_val)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u32 drv_mb_param = 0, param, rsp;
+
+       SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
+       SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_VALUE, gpio_val);
+
+       rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
+                          drv_mb_param, &rsp, &param);
+       /* Must not inspect 'rsp' if the command failed - it would be
+        * uninitialized (the original ignored rc, which is UB on failure).
+        */
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+               return ECORE_UNKNOWN_ERROR;
+
+       return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/ecore/ecore_mcp.h 
b/drivers/net/qede/ecore/ecore_mcp.h
new file mode 100644
index 0000000..5f4ffb5
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_mcp.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_MCP_H__
+#define __ECORE_MCP_H__
+
+#include "bcm_osal.h"
+#include "mcp_public.h"
+#include "ecore_mcp_api.h"
+
+/* Using hwfn number (and not pf_num) is required since in CMT mode,
+ * same pf_num may be used by two different hwfn
+ * TODO - this shouldn't really be in .h file, but until all fields
+ * required during hw-init will be placed in their correct place in shmem
+ * we need it in ecore_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (ECORE_IS_BB((p_hwfn)->p_dev) ? \
+                                           ((rel_pfid) | \
+                                            ((p_hwfn)->abs_pf_id & 1) << 3) : \
+                                            rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+
+/* TODO - this is only correct as long as only BB is supported, and
+ * no port-swapping is implemented; Afterwards we'll need to fix it.
+ */
+#define MFW_PORT(_p_hwfn)      ((_p_hwfn)->abs_pf_id % \
+                                ((_p_hwfn)->p_dev->num_ports_in_engines * 2))
+/* Per-hwfn MCP (management firmware) interface state */
+struct ecore_mcp_info {
+       osal_spinlock_t lock;   /* Spinlock used for accessing MCP mailbox */
+       u32 public_base;        /* Address of the MCP public area */
+       u32 drv_mb_addr;        /* Address of the driver mailbox */
+       u32 mfw_mb_addr;        /* Address of the MFW mailbox */
+       u32 port_addr;          /* Address of the port configuration (link) */
+       u16 drv_mb_seq;         /* Current driver mailbox sequence */
+       u16 drv_pulse_seq;      /* Current driver pulse sequence */
+       struct ecore_mcp_link_params link_input;        /* Requested link cfg */
+       struct ecore_mcp_link_state link_output;        /* Reported link state */
+       struct ecore_mcp_link_capabilities link_capabilities;
+       struct ecore_mcp_function_info func_info;
+
+       u8 *mfw_mb_cur;         /* Current copy of the MFW mailbox */
+       u8 *mfw_mb_shadow;      /* Shadow copy - presumably for detecting
+                                * changed events; TODO confirm
+                                */
+       u16 mfw_mb_length;      /* MFW mailbox length in bytes */
+       u16 mcp_hist;           /* NOTE(review): looks like an MCP reset/
+                                * history counter snapshot - confirm
+                                */
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
+                            struct ecore_ptt *p_ptt);
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event
+ * detected, it will be handled here, otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt);
+
+/**
+ * @brief When MFW doesn't get driver pulse for couple of seconds, at some
+ * threshold before timeout expires, it will generate interrupt
+ * through a dedicated status block (DPSB - Driver Pulse Status
+ * Block), which the driver should respond immediately, by
+ * providing keepalive indication after setting the PTT to the
+ * driver-MFW mailbox. This function is called directly from the
+ * DPC upon receiving the DPSB attention.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW, and in case operation
+ *        succeed, returns whether this PF is the first on the
+ *        chip/engine/port or function. This function should be
+ *        called when driver is ready to accept MFW events after
+ *        Storms initializations are done.
+ *
+ * @param p_hwfn       - hw function
+ * @param p_ptt        - PTT required for register access
+ * @param p_load_code  - The MCP response param containing one
+ *      of the following:
+ *      FW_MSG_CODE_DRV_LOAD_ENGINE
+ *      FW_MSG_CODE_DRV_LOAD_PORT
+ *      FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - Operation was successful.
+ *      ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       u32 *p_load_code);
+
+/**
+ * @brief Read the MFW mailbox into Current buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @param return enum _ecore_status_t - ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 *vfs_to_ack);
+
+/**
+ * @brief - calls during init to read shmem of all function-related info.
+ *
+ * @param p_hwfn
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Reset the MCP using mailbox command.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Sets the union data in the MCP mailbox and sends a mailbox command.
+ *
+ * @param p_hwfn       - hw function
+ * @param p_ptt        - PTT required for register access
+ * @param cmd          - command to be sent to the MCP
+ * @param param        - optional param
+ * @param p_union_data - pointer to a drv_union_data
+ * @param o_mcp_resp   - the MCP response code (exclude sequence)
+ * @param o_mcp_param  - optional parameter provided by the MCP response
+ *
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - operation was successful
+ *      ECORE_BUSY    - operation failed
+ */
+enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 cmd, u32 param,
+                                            union drv_union_data *p_union_data,
+                                            u32 *o_mcp_resp,
+                                            u32 *o_mcp_param);
+
+/**
+ * @brief - Sends an NVM write command request to the MFW with
+ *          payload.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
+ *            DRV_MSG_CODE_NVM_PUT_FILE_DATA
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param i_txn_size -  Buffer size
+ * @param i_buf - Pointer to the buffer
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 cmd,
+                                         u32 param,
+                                         u32 *o_mcp_resp,
+                                         u32 *o_mcp_param,
+                                         u32 i_txn_size, u32 *i_buf);
+
+/**
+ * @brief - Sends an NVM read command request to the MFW to get
+ *        a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ *            DRV_MSG_CODE_NVM_READ_NVRAM commands
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size -  Buffer size output
+ * @param o_buf - Pointer to the buffer returned by the MFW.
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u32 cmd,
+                                         u32 param,
+                                         u32 *o_mcp_resp,
+                                         u32 *o_mcp_param,
+                                         u32 *o_txn_size, u32 *o_buf);
+
+/**
+ * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ *
+ * @param p_hwfn
+ *
+ * @return true iff MFW is running and mcp_info is initialized
+ */
+bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute inside engine
+ * @param num_sbs - number of entries to request
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u8 vf_id, u8 num);
+
+/**
+ * @brief - Halt the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Wake up the MCP.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @param return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt);
+int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_mcp_link_state *p_link,
+                                      u8 max_bw);
+int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_mcp_link_state *p_link,
+                                      u8 min_bw);
+enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 mask_parities);
+#endif /* __ECORE_MCP_H__ */
diff --git a/drivers/net/qede/ecore/ecore_mcp_api.h 
b/drivers/net/qede/ecore/ecore_mcp_api.h
new file mode 100644
index 0000000..5c33081
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_mcp_api.h
@@ -0,0 +1,629 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_MCP_API_H__
+#define __ECORE_MCP_API_H__
+
+#include "ecore_status.h"
+
+struct ecore_mcp_link_speed_params {
+       bool autoneg;
+       u32 advertised_speeds;  /* bitmask of DRV_SPEED_CAPABILITY */
+       u32 forced_speed;       /* In Mb/s */
+};
+
+struct ecore_mcp_link_pause_params {
+       bool autoneg;
+       bool forced_rx;
+       bool forced_tx;
+};
+
+struct ecore_mcp_link_params {
+       struct ecore_mcp_link_speed_params speed;
+       struct ecore_mcp_link_pause_params pause;
+       u32 loopback_mode;      /* in PMM_LOOPBACK values */
+};
+
+struct ecore_mcp_link_capabilities {
+       u32 speed_capabilities;
+};
+
+struct ecore_mcp_link_state {
+       bool link_up;
+
+       u32 line_speed;         /* In Mb/s */
+       u32 min_pf_rate;        /* In Mb/s */
+       u32 speed;              /* In Mb/s */
+       bool full_duplex;
+
+       bool an;
+       bool an_complete;
+       bool parallel_detection;
+       bool pfc_enabled;
+
+#define ECORE_LINK_PARTNER_SPEED_1G_HD (1 << 0)
+#define ECORE_LINK_PARTNER_SPEED_1G_FD (1 << 1)
+#define ECORE_LINK_PARTNER_SPEED_10G   (1 << 2)
+#define ECORE_LINK_PARTNER_SPEED_20G   (1 << 3)
+#define ECORE_LINK_PARTNER_SPEED_25G   (1 << 4)
+#define ECORE_LINK_PARTNER_SPEED_40G   (1 << 5)
+#define ECORE_LINK_PARTNER_SPEED_50G   (1 << 6)
+#define ECORE_LINK_PARTNER_SPEED_100G  (1 << 7)
+       u32 partner_adv_speed;
+
+       bool partner_tx_flow_ctrl_en;
+       bool partner_rx_flow_ctrl_en;
+
+#define ECORE_LINK_PARTNER_SYMMETRIC_PAUSE (1)
+#define ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
+#define ECORE_LINK_PARTNER_BOTH_PAUSE (3)
+       u8 partner_adv_pause;
+
+       bool sfp_tx_fault;
+};
+
+struct ecore_mcp_function_info {
+       u8 pause_on_host;
+
+       enum ecore_pci_personality protocol;
+
+       u8 bandwidth_min;
+       u8 bandwidth_max;
+
+       u8 mac[ETH_ALEN];
+
+       u64 wwn_port;
+       u64 wwn_node;
+
+#define ECORE_MCP_VLAN_UNSET           (0xffff)
+       u16 ovlan;
+};
+
+/* Fields shared by every NVM mailbox command */
+struct ecore_mcp_nvm_common {
+       u32 offset;     /* NVM offset, possibly with packed size/port fields */
+       u32 param;      /* Additional command parameter */
+       u32 resp;       /* MCP response code, filled after the command */
+       u32 cmd;        /* DRV_MSG_CODE_* command to send */
+};
+
+/* Read-direction buffer descriptor */
+struct ecore_mcp_nvm_rd {
+       u32 *buf_size;  /* In: buffer size; updated by the command with the
+                        * number of bytes actually read
+                        */
+       u32 *buf;       /* Output buffer */
+};
+
+/* Write-direction buffer descriptor */
+struct ecore_mcp_nvm_wr {
+       u32 buf_size;   /* Number of bytes to write */
+       u32 *buf;       /* Input buffer */
+};
+
+/* Parameter block passed to ecore_mcp_nvm_command() */
+struct ecore_mcp_nvm_params {
+#define ECORE_MCP_CMD          (1 << 0)
+#define ECORE_MCP_NVM_RD       (1 << 1)
+#define ECORE_MCP_NVM_WR       (1 << 2)
+       u8 type;        /* One of the ECORE_MCP_* values above */
+
+       struct ecore_mcp_nvm_common nvm_common;
+
+       union {
+               struct ecore_mcp_nvm_rd nvm_rd; /* Valid for ECORE_MCP_NVM_RD */
+               struct ecore_mcp_nvm_wr nvm_wr; /* Valid for ECORE_MCP_NVM_WR */
+       };
+};
+
+struct ecore_mcp_drv_version {
+       u32 version;
+       u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct ecore_mcp_lan_stats {
+       u64 ucast_rx_pkts;
+       u64 ucast_tx_pkts;
+       u32 fcs_err;
+};
+
+#ifndef ECORE_PROTO_STATS
+#define ECORE_PROTO_STATS
+
+struct ecore_mcp_iscsi_stats {
+       u64 rx_pdus;
+       u64 tx_pdus;
+       u64 rx_bytes;
+       u64 tx_bytes;
+};
+
+struct ecore_mcp_rdma_stats {
+       u64 rx_pkts;
+       u64 tx_pkts;
+       u64 rx_bytes;
+       u64 tx_byts;
+};
+
+enum ecore_mcp_protocol_type {
+       ECORE_MCP_LAN_STATS,
+       ECORE_MCP_ISCSI_STATS,
+       ECORE_MCP_RDMA_STATS
+};
+
+union ecore_mcp_protocol_stats {
+       struct ecore_mcp_lan_stats lan_stats;
+       struct ecore_mcp_iscsi_stats iscsi_stats;
+       struct ecore_mcp_rdma_stats rdma_stats;
+};
+#endif
+
+enum ecore_ov_config_method {
+       ECORE_OV_CONFIG_MTU,
+       ECORE_OV_CONFIG_MAC,
+       ECORE_OV_CONFIG_WOL
+};
+
+enum ecore_ov_client {
+       ECORE_OV_CLIENT_DRV,
+       ECORE_OV_CLIENT_USER
+};
+
+enum ecore_ov_driver_state {
+       ECORE_OV_DRIVER_STATE_NOT_LOADED,
+       ECORE_OV_DRIVER_STATE_DISABLED,
+       ECORE_OV_DRIVER_STATE_ACTIVE
+};
+
+#define ECORE_MAX_NPIV_ENTRIES 128
+#define ECORE_WWN_SIZE 8
+struct ecore_fc_npiv_tbl {
+       u32 count;
+       u8 wwpn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
+       u8 wwnn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE];
+};
+
+#ifndef __EXTRACT__LINUX__
+enum ecore_led_mode {
+       ECORE_LED_MODE_OFF,
+       ECORE_LED_MODE_ON,
+       ECORE_LED_MODE_RESTORE
+};
+#endif
+
+/**
+ * @brief - returns the link params of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link params
+ */
+struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn *);
+
+/**
+ * @brief - return the link state of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link state
+ */
+struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn *);
+
+/**
+ * @brief - return the link capabilities of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to link capabilities
+ */
+struct ecore_mcp_link_capabilities
+*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief Request the MFW to set the link according to 'link_input'.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_up - raise link if `true'. Reset link if `false'.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt, bool b_up);
+
+/**
+ * @brief Get the management firmware version value
+ *
+ * @param p_dev       - ecore dev pointer
+ * @param p_ptt
+ * @param p_mfw_ver    - mfw version value
+ * @param p_running_bundle_id  - image id in nvram; Optional.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
+                                          struct ecore_ptt *p_ptt,
+                                          u32 *p_mfw_ver,
+                                          u32 *p_running_bundle_id);
+
+/**
+ * @brief Get media type value of the port.
+ *
+ * @param p_dev      - ecore dev pointer
+ * @param media_type - media type value
+ *
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - Operation was successful.
+ *      ECORE_BUSY - Operation failed
+ */
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+                                             u32 *media_type);
+
+/**
+ * @brief - Sends a command to the MCP mailbox.
+ *
+ * @param p_hwfn      - hw function
+ * @param p_ptt       - PTT required for register access
+ * @param cmd         - command to be sent to the MCP
+ * @param param       - optional param
+ * @param o_mcp_resp  - the MCP response code (exclude sequence)
+ * @param o_mcp_param - optional parameter provided by the MCP response
+ *
+ * @return enum _ecore_status_t -
+ *      ECORE_SUCCESS - operation was successful
+ *      ECORE_BUSY    - operation failed
+ */
+enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt, u32 cmd, u32 param,
+                                  u32 *o_mcp_resp, u32 *o_mcp_param);
+
+/**
+ * @brief - drains the nig, allowing completion to pass in case of pauses.
+ *          (Should be called only from sleepable context)
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - return the mcp function info of the hw function
+ *
+ * @param p_hwfn
+ *
+ * @returns pointer to mcp function info
+ */
+const struct ecore_mcp_function_info
+*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief - Function for reading/manipulating the nvram. Following are
+ *          supported functionalities.
+ *          1. Read: Read the specified nvram offset.
+ *             input values:
+ *               type   - ECORE_MCP_NVM_RD
+ *               cmd    - command code (e.g. DRV_MSG_CODE_NVM_READ_NVRAM)
+ *               offset - nvm offset
+ *
+ *             output values:
+ *               buf      - buffer
+ *               buf_size - buffer size
+ *
+ *          2. Write: Write the data at the specified nvram offset
+ *             input values:
+ *               type     - ECORE_MCP_NVM_WR
+ *               cmd      - command code (e.g. DRV_MSG_CODE_NVM_WRITE_NVRAM)
+ *               offset   - nvm offset
+ *               buf      - buffer
+ *               buf_size - buffer size
+ *
+ *          3. Command: Send the NVM command to MCP.
+ *             input values:
+ *               type   - ECORE_MCP_CMD
+ *               cmd    - command code (e.g. DRV_MSG_CODE_NVM_DEL_FILE)
+ *               offset - nvm offset
+ *
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param params
+ *
+ * @return ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_ptt *p_ptt,
+                                          struct ecore_mcp_nvm_params *params);
+
+/**
+ * @brief - count number of function with a matching personality on engine.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param personalities - a bitmask of ecore_pci_personality values
+ *
+ * @returns the count of all devices on engine whose personality match one of
+ *          the bitmasks.
+ */
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_ptt *p_ptt, u32 personalities);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size  - flash size in bytes to be filled.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_ver - pointer to a struct holding the driver version value and
+ *                the protocol driver name
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                          struct ecore_mcp_drv_version *p_ver);
+
+/**
+ * @brief Read the MFW process kill counter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Trigger a recovery process
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Notify MFW about the change in base device properties
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param config - Configuration that has been updated
+ *  @param client - ecore client type
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  enum ecore_ov_config_method config,
+                                  enum ecore_ov_client client);
+
+/**
+ * @brief Notify MFW about the driver state
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param drv_state - Driver state
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt,
+                                enum ecore_ov_driver_state drv_state);
+
+/**
+ * @brief Read NPIV settings from the MFW
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param p_table - Array to hold the FC NPIV data. Client need allocate the
+ *                   required buffer. The field 'count' specifies number of NPIV
+ *                   entries. A value of 0 means the table was not populated.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                        struct ecore_fc_npiv_tbl *p_table);
+
+/**
+ * @brief Send MTU size to MFW
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param mtu - MTU size
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt, u16 mtu);
+
+/**
+ * @brief Set LED status
+ *
+ *  @param p_hwfn
+ *  @param p_ptt
+ *  @param mode - LED mode
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      enum ecore_led_mode mode);
+
+/**
+ * @brief Set secure mode
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
+                                                  u32 addr);
+
+/**
+ * @brief Write to phy
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *  @param cmd - nvm command
+ *  @param p_buf - nvm write buffer
+ *  @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
+                                        u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Write to nvm
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *  @param cmd - nvm command
+ *  @param p_buf - nvm write buffer
+ *  @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
+                                        u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Put file begin
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
+                                                 u32 addr);
+
+/**
+ * @brief Delete file
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr);
+
+/**
+ * @brief Check latest response
+ *
+ *  @param p_dev
+ *  @param p_buf - buffer to hold the NVM response
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
+
+/**
+ * @brief Read from phy
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *  @param cmd - nvm command
+ *  @param p_buf - nvm read buffer
+ *  @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
+                                       u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * @brief Read from nvm
+ *
+ *  @param p_dev
+ *  @param addr - nvm offset
+ *  @param p_buf - nvm read buffer
+ *  @param len - buffer len
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
+                                       u8 *p_buf, u32 len);
+
+/**
+ * @brief Read from sfp
+ *
+ *  @param p_hwfn - hw function
+ *  @param p_ptt  - PTT required for register access
+ *  @param port   - transceiver port
+ *  @param addr   - I2C address
+ *  @param offset - offset in sfp
+ *  @param len    - buffer length
+ *  @param p_buf  - buffer to read into
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           u32 port, u32 addr, u32 offset,
+                                           u32 len, u8 *p_buf);
+
+/**
+ * @brief Write to sfp
+ *
+ *  @param p_hwfn - hw function
+ *  @param p_ptt  - PTT required for register access
+ *  @param port   - transceiver port
+ *  @param addr   - I2C address
+ *  @param offset - offset in sfp
+ *  @param len    - buffer length
+ *  @param p_buf  - buffer to write from
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            u32 port, u32 addr, u32 offset,
+                                            u32 len, u8 *p_buf);
+
+/**
+ * @brief Gpio read
+ *
+ *  @param p_hwfn    - hw function
+ *  @param p_ptt     - PTT required for register access
+ *  @param gpio      - gpio number
+ *  @param gpio_val  - value read from gpio
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        u16 gpio, u32 *gpio_val);
+
+/**
+ * @brief Gpio write
+ *
+ *  @param p_hwfn    - hw function
+ *  @param p_ptt     - PTT required for register access
+ *  @param gpio      - gpio number
+ *  @param gpio_val  - value to write to gpio
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         u16 gpio, u16 gpio_val);
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_proto_if.h b/drivers/net/qede/ecore/ecore_proto_if.h
new file mode 100644
index 0000000..0b638e4
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_proto_if.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_PROTO_IF_H__
+#define __ECORE_PROTO_IF_H__
+
+/*
+ * PF parameters (according to personality/protocol)
+ */
+
+struct ecore_eth_pf_params {
+       /* The following parameters are used during HW-init
+        * and these parameters need to be passed as arguments
+        * to update_pf_params routine invoked before slowpath start
+        */
+       u16 num_cons;   /* requested number of connections; TODO confirm exact semantics vs. FW HSI */
+};
+
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
+struct ecore_iscsi_pf_params {
+
+       u64 glbl_q_params_addr; /* address of global queue params; see FW iSCSI HSI */
+       u64 bdq_pbl_base_addr[2];
+       u32 max_cwnd;           /* presumably TCP max congestion window; see FW TCP HSI */
+       u16 rq_num_entries;
+       u16 cq_num_entries;
+       u16 cmdq_num_entries;
+       u16 dup_ack_threshold;  /* TCP duplicate-ACK threshold; see FW TCP HSI */
+       u16 tx_sws_timer;
+       u16 min_rto;            /* retransmission timeout bounds; see FW TCP HSI */
+       u16 min_rto_rt;
+       u16 max_rto;
+       /* The following parameters are used during HW-init
+        * and these parameters need to be passed as arguments
+        * to update_pf_params routine invoked before slowpath start
+        */
+       u16 num_cons;
+       u16 num_tasks;
+
+       /* The following parameters are used during protocol-init */
+       u16 half_way_close_timeout;
+       u16 bdq_xoff_threshold[2];      /* per-BDQ flow-control thresholds */
+       u16 bdq_xon_threshold[2];
+       u16 cmdq_xoff_threshold;
+       u16 cmdq_xon_threshold;
+
+       u8 num_sq_pages_in_ring;
+       u8 num_r2tq_pages_in_ring;
+       u8 num_uhq_pages_in_ring;
+       u8 num_queues;
+       u8 log_page_size;
+       u8 rqe_log_size;
+       u8 max_fin_rt;
+       u8 gl_rq_pi;
+       u8 gl_cmd_pi;
+       u8 debug_mode;
+       u8 ll2_ooo_queue_id;
+       u8 ooo_enable;          /* enable out-of-order processing; TODO confirm */
+
+       u8 is_target;           /* non-zero for target mode; TODO confirm */
+       u8 bdq_pbl_num_entries[2];
+};
+
+struct ecore_roce_pf_params {
+       /* The following must be supplied to ECORE during resource allocation
+        * since they affect the ILT and the doorbell BAR.
+        */
+       u32 min_dpis;           /* the number of requested DPIs */
+       u32 min_qps;            /* the number of requested QPs */
+       u8 roce_edpm_mode;      /* see QED_ROCE_EDPM_MODE_ENABLE */
+
+       u32 num_cons;           /* requested RoCE connections; TODO confirm */
+       u32 num_tasks;          /* requested RoCE tasks; TODO confirm */
+       u8 gl_pi;               /* NOTE(review): presumably protocol producer index; confirm */
+};
+
+struct ecore_pf_params {       /* per-personality PF parameters, one sub-struct per protocol */
+       struct ecore_eth_pf_params eth_pf_params;
+       struct ecore_iscsi_pf_params iscsi_pf_params;
+       struct ecore_roce_pf_params roce_pf_params;
+};
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_rt_defs.h b/drivers/net/qede/ecore/ecore_rt_defs.h
new file mode 100644
index 0000000..ad98428
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_rt_defs.h
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __RT_DEFS_H__
+#define __RT_DEFS_H__
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET       0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET       1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET       2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET       3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET       4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET       5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET       6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET       7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET       8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET       9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET       10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET       11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET       12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET       13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET       14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET       15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET         16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET              17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET             18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET             19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET              20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET              21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET           22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET          23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET            24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET                761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE          736
+/* NOTE(review): duplicate CAU_REG_SB_VAR_MEMORY_RT_OFFSET definition removed */
+/* NOTE(review): duplicate CAU_REG_SB_VAR_MEMORY_RT_SIZE definition removed */
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET       1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE         736
+#define CAU_REG_PI_MEMORY_RT_OFFSET            2233
+#define CAU_REG_PI_MEMORY_RT_SIZE              4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET           6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET             6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET             6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET        6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET        6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET           6654
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET          6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET          6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET          6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET              6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET            6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET          6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET             6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET              6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET        6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET            6665
+#define SRC_REG_FIRSTFREE_RT_SIZE              2
+#define SRC_REG_LASTFREE_RT_OFFSET             6667
+#define SRC_REG_LASTFREE_RT_SIZE               2
+#define SRC_REG_COUNTFREE_RT_OFFSET            6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET             6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET       6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET       6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET         6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET         6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET                6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET       6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET              6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET       6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET              6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET       6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET             6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET              6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET            6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET             6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET            6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET             6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET            6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET             6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET            6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET  6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET  6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET              6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET            6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET            6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET          6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET        6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET        6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET           6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET       6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET           6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET           6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET             6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET             6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET                6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE          22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET          28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET          28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET             28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET             28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET             28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET                28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET                28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET                28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET            28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET            28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET       28714
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE         416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET       29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE         512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET           29642
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET           29643
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET           29644
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET              29645
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET              29646
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET              29647
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET              29648
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET              29649
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET              29650
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET              29651
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET              29652
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET              29653
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET              29654
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET             29655
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET             29656
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET             29657
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET             29658
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET             29659
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET             29660
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET             29661
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET             29662
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET             29663
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET             29664
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET             29665
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET             29666
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET             29667
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET             29668
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET             29669
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET             29670
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET             29671
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET             29672
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET             29673
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET             29674
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET             29675
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET             29676
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET             29677
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET             29678
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET             29679
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET             29680
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET             29681
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET             29682
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET             29683
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET             29684
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET             29685
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET             29686
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET             29687
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET             29688
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET             29689
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET             29690
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET             29691
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET             29692
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET             29693
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET             29694
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET             29695
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET             29696
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET             29697
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET             29698
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET             29699
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET             29700
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET             29701
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET             29702
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET             29703
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET             29704
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET             29705
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET             29706
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET             29707
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET             29708
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET       29709
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE         128
+#define QM_REG_VOQCRDLINE_RT_OFFSET            29837
+#define QM_REG_VOQCRDLINE_RT_SIZE              20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET                29857
+#define QM_REG_VOQINITCRDLINE_RT_SIZE          20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET            29877
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET            29878
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET             29879
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET           29880
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET          29881
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET       29882
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET       29883
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET       29884
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET       29885
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET       29886
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET       29887
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET       29888
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET       29889
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET       29890
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET       29891
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET              29892
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET              29893
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET              29894
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET              29895
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET              29896
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET              29897
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET           29898
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET           29899
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET           29900
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET           29901
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET              29902
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET              29903
+#define QM_REG_PQTX2PF_0_RT_OFFSET             29904
+#define QM_REG_PQTX2PF_1_RT_OFFSET             29905
+#define QM_REG_PQTX2PF_2_RT_OFFSET             29906
+#define QM_REG_PQTX2PF_3_RT_OFFSET             29907
+#define QM_REG_PQTX2PF_4_RT_OFFSET             29908
+#define QM_REG_PQTX2PF_5_RT_OFFSET             29909
+#define QM_REG_PQTX2PF_6_RT_OFFSET             29910
+#define QM_REG_PQTX2PF_7_RT_OFFSET             29911
+#define QM_REG_PQTX2PF_8_RT_OFFSET             29912
+#define QM_REG_PQTX2PF_9_RT_OFFSET             29913
+#define QM_REG_PQTX2PF_10_RT_OFFSET            29914
+#define QM_REG_PQTX2PF_11_RT_OFFSET            29915
+#define QM_REG_PQTX2PF_12_RT_OFFSET            29916
+#define QM_REG_PQTX2PF_13_RT_OFFSET            29917
+#define QM_REG_PQTX2PF_14_RT_OFFSET            29918
+#define QM_REG_PQTX2PF_15_RT_OFFSET            29919
+#define QM_REG_PQTX2PF_16_RT_OFFSET            29920
+#define QM_REG_PQTX2PF_17_RT_OFFSET            29921
+#define QM_REG_PQTX2PF_18_RT_OFFSET            29922
+#define QM_REG_PQTX2PF_19_RT_OFFSET            29923
+#define QM_REG_PQTX2PF_20_RT_OFFSET            29924
+#define QM_REG_PQTX2PF_21_RT_OFFSET            29925
+#define QM_REG_PQTX2PF_22_RT_OFFSET            29926
+#define QM_REG_PQTX2PF_23_RT_OFFSET            29927
+#define QM_REG_PQTX2PF_24_RT_OFFSET            29928
+#define QM_REG_PQTX2PF_25_RT_OFFSET            29929
+#define QM_REG_PQTX2PF_26_RT_OFFSET            29930
+#define QM_REG_PQTX2PF_27_RT_OFFSET            29931
+#define QM_REG_PQTX2PF_28_RT_OFFSET            29932
+#define QM_REG_PQTX2PF_29_RT_OFFSET            29933
+#define QM_REG_PQTX2PF_30_RT_OFFSET            29934
+#define QM_REG_PQTX2PF_31_RT_OFFSET            29935
+#define QM_REG_PQTX2PF_32_RT_OFFSET            29936
+#define QM_REG_PQTX2PF_33_RT_OFFSET            29937
+#define QM_REG_PQTX2PF_34_RT_OFFSET            29938
+#define QM_REG_PQTX2PF_35_RT_OFFSET            29939
+#define QM_REG_PQTX2PF_36_RT_OFFSET            29940
+#define QM_REG_PQTX2PF_37_RT_OFFSET            29941
+#define QM_REG_PQTX2PF_38_RT_OFFSET            29942
+#define QM_REG_PQTX2PF_39_RT_OFFSET            29943
+#define QM_REG_PQTX2PF_40_RT_OFFSET            29944
+#define QM_REG_PQTX2PF_41_RT_OFFSET            29945
+#define QM_REG_PQTX2PF_42_RT_OFFSET            29946
+#define QM_REG_PQTX2PF_43_RT_OFFSET            29947
+#define QM_REG_PQTX2PF_44_RT_OFFSET            29948
+#define QM_REG_PQTX2PF_45_RT_OFFSET            29949
+#define QM_REG_PQTX2PF_46_RT_OFFSET            29950
+#define QM_REG_PQTX2PF_47_RT_OFFSET            29951
+#define QM_REG_PQTX2PF_48_RT_OFFSET            29952
+#define QM_REG_PQTX2PF_49_RT_OFFSET            29953
+#define QM_REG_PQTX2PF_50_RT_OFFSET            29954
+#define QM_REG_PQTX2PF_51_RT_OFFSET            29955
+#define QM_REG_PQTX2PF_52_RT_OFFSET            29956
+#define QM_REG_PQTX2PF_53_RT_OFFSET            29957
+#define QM_REG_PQTX2PF_54_RT_OFFSET            29958
+#define QM_REG_PQTX2PF_55_RT_OFFSET            29959
+#define QM_REG_PQTX2PF_56_RT_OFFSET            29960
+#define QM_REG_PQTX2PF_57_RT_OFFSET            29961
+#define QM_REG_PQTX2PF_58_RT_OFFSET            29962
+#define QM_REG_PQTX2PF_59_RT_OFFSET            29963
+#define QM_REG_PQTX2PF_60_RT_OFFSET            29964
+#define QM_REG_PQTX2PF_61_RT_OFFSET            29965
+#define QM_REG_PQTX2PF_62_RT_OFFSET            29966
+#define QM_REG_PQTX2PF_63_RT_OFFSET            29967
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET          29968
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET          29969
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET          29970
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET          29971
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET          29972
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET          29973
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET          29974
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET          29975
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET          29976
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET          29977
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET         29978
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET         29979
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET         29980
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET         29981
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET         29982
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET         29983
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET                29984
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET                29985
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET           29986
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET           29987
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET             29988
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET             29989
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET             29990
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET             29991
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET             29992
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET             29993
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET             29994
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET             29995
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET          29996
+#define QM_REG_RLGLBLINCVAL_RT_SIZE            256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET              30252
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE                256
+#define QM_REG_RLGLBLCRD_RT_OFFSET             30508
+#define QM_REG_RLGLBLCRD_RT_SIZE               256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET          30764
+#define QM_REG_RLPFPERIOD_RT_OFFSET            30765
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET       30766
+#define QM_REG_RLPFINCVAL_RT_OFFSET            30767
+#define QM_REG_RLPFINCVAL_RT_SIZE              16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET                30783
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE          16
+#define QM_REG_RLPFCRD_RT_OFFSET               30799
+#define QM_REG_RLPFCRD_RT_SIZE                 16
+#define QM_REG_RLPFENABLE_RT_OFFSET            30815
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET         30816
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET           30817
+#define QM_REG_WFQPFWEIGHT_RT_SIZE             16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET       30833
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE         16
+#define QM_REG_WFQPFCRD_RT_OFFSET              30849
+#define QM_REG_WFQPFCRD_RT_SIZE                        160
+#define QM_REG_WFQPFENABLE_RT_OFFSET           31009
+#define QM_REG_WFQVPENABLE_RT_OFFSET           31010
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET          31011
+#define QM_REG_BASEADDRTXPQ_RT_SIZE            512
+#define QM_REG_TXPQMAP_RT_OFFSET               31523
+#define QM_REG_TXPQMAP_RT_SIZE                 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET           32035
+#define QM_REG_WFQVPWEIGHT_RT_SIZE             512
+#define QM_REG_WFQVPCRD_RT_OFFSET              32547
+#define QM_REG_WFQVPCRD_RT_SIZE                        512
+#define QM_REG_WFQVPMAP_RT_OFFSET              33059
+#define QM_REG_WFQVPMAP_RT_SIZE                        512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET          33571
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE            160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET              33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET        33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET        33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET        33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET        33735
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET         33736
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET             33737
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET              33738
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE                4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET         33742
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE           4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET           33746
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE             4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET              33750
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET        33751
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE          32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET           33783
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE             16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET         33799
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE           16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET                33815
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE  16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET              33831
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE        16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET         33847
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET              33848
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET              33849
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET              33850
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET          33851
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET          33852
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET          33853
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET          33854
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET               33855
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET               33856
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET               33857
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET               33858
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET           33859
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET        33860
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET              33861
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET         33862
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET               33863
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET          33864
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET           33865
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET               33866
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET          33867
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET           33868
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET               33869
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET          33870
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET           33871
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET               33872
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET          33873
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET           33874
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET               33875
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET          33876
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET           33877
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET               33878
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET          33879
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET           33880
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET               33881
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET          33882
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET           33883
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET               33884
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET          33885
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET           33886
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET               33887
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET          33888
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET           33889
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET               33890
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET          33891
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET           33892
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET              33893
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET         33894
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET  33895
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET              33896
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET         33897
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET  33898
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET              33899
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET         33900
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET  33901
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET              33902
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET         33903
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET  33904
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET              33905
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET         33906
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET  33907
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET              33908
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET         33909
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET  33910
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET              33911
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET         33912
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET  33913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET              33914
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET         33915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET  33916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET              33917
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET         33918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET  33919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET              33920
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET         33921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET  33922
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET           33923
+
+#define RUNTIME_ARRAY_SIZE 33924
+
+#endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/ecore/ecore_sp_api.h 
b/drivers/net/qede/ecore/ecore_sp_api.h
new file mode 100644
index 0000000..4efa334
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_sp_api.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SP_API_H__
+#define __ECORE_SP_API_H__
+
+#include "ecore_status.h"
+
+/* Completion modes for slow-path queue (SPQ) ramrod requests */
+enum spq_mode {
+       ECORE_SPQ_MODE_BLOCK,   /* Client will poll a designated mem. address */
+       ECORE_SPQ_MODE_CB,      /* Client supplies a callback */
+       ECORE_SPQ_MODE_EBLOCK,  /* ECORE should block until completion */
+};
+
+/* Forward declarations - only pointers to these are used below */
+struct ecore_hwfn;
+union event_ring_data;
+struct eth_slow_path_rx_cqe;
+
+/* Completion-callback descriptor attached to an SPQ entry */
+struct ecore_spq_comp_cb {
+       /* Invoked on ramrod completion; presumably 'cookie' below is
+        * passed back as the second argument - confirm in the SPQ
+        * completion path.
+        */
+       void (*function) (struct ecore_hwfn *,
+                         void *, union event_ring_data *, u8 fw_return_code);
+       void *cookie;   /* opaque client context */
+};
+
+/**
+ * @brief ecore_eth_cqe_completion - handles the completion of a
+ *        ramrod on the cqe ring
+ *
+ * @param p_hwfn
+ * @param cqe
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
+                                             struct eth_slow_path_rx_cqe *cqe);
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_sp_commands.c 
b/drivers/net/qede/ecore/ecore_sp_commands.c
new file mode 100644
index 0000000..12dd2de
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_sp_commands.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_chain.h"
+#include "ecore_spq.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_sp_commands.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_hw.h"
+#include "ecore_dcbx.h"
+
+/* ecore_sp_init_request(): acquire an SPQ entry and initialize it for a
+ * ramrod - CID/FID, command, protocol and the requested completion mode.
+ * Returns ECORE_INVAL for BLOCK mode without completion data or for an
+ * unknown completion mode; otherwise the entry is returned via pp_ent
+ * with its ramrod payload zeroed.
+ */
+enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_spq_entry **pp_ent,
+                                          u8 cmd,
+                                          u8 protocol,
+                                          struct ecore_sp_init_data *p_data)
+{
+       u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+       /* Get an SPQ entry */
+       rc = ecore_spq_get_entry(p_hwfn, pp_ent);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Fill the SPQ entry */
+       p_ent = *pp_ent;
+       p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
+       p_ent->elem.hdr.cmd_id = cmd;
+       p_ent->elem.hdr.protocol_id = protocol;
+       p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
+       p_ent->comp_mode = p_data->comp_mode;
+       p_ent->comp_done.done = 0;
+
+       switch (p_ent->comp_mode) {
+       case ECORE_SPQ_MODE_EBLOCK:
+               /* ecore blocks internally; completion is signalled via
+                * the entry's own comp_done structure.
+                */
+               p_ent->comp_cb.cookie = &p_ent->comp_done;
+               break;
+
+       case ECORE_SPQ_MODE_BLOCK:
+               /* Caller polls its own memory - completion data is mandatory */
+               if (!p_data->p_comp_data)
+                       return ECORE_INVAL;
+
+               p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
+               break;
+
+       case ECORE_SPQ_MODE_CB:
+               if (!p_data->p_comp_data)
+                       p_ent->comp_cb.function = OSAL_NULL;
+               else
+                       p_ent->comp_cb = *p_data->p_comp_data;
+               break;
+
+       default:
+               DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
+                         p_ent->comp_mode);
+               return ECORE_INVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                  "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
+                  opaque_cid, cmd, protocol,
+                  (unsigned long)&p_ent->ramrod,
+                  D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
+                          ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+                          "MODE_CB"));
+
+       OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
+
+       return ECORE_SUCCESS;
+}
+
+/* Translate an ecore tunnel-classification value into the firmware's
+ * tunnel_clss encoding. Any unrecognized value falls back to the
+ * MAC+VLAN classification, identical to ECORE_TUNN_CLSS_MAC_VLAN.
+ */
+static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
+{
+       switch (type) {
+       case ECORE_TUNN_CLSS_MAC_VNI:
+               return TUNNEL_CLSS_MAC_VNI;
+       case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
+               return TUNNEL_CLSS_INNER_MAC_VLAN;
+       case ECORE_TUNN_CLSS_INNER_MAC_VNI:
+               return TUNNEL_CLSS_INNER_MAC_VNI;
+       case ECORE_TUNN_CLSS_MAC_VLAN:
+       default:
+               return TUNNEL_CLSS_MAC_VLAN;
+       }
+}
+
+/* Compute the effective tunnel-mode bitmap for a PF-update request.
+ * For each tunnel type: if its bit is set in tunn_mode_update_mask,
+ * take the requested state from p_src->tunn_mode; otherwise keep the
+ * currently cached device setting. The merged bitmap is written back
+ * into p_src->tunn_mode. Geneve handling is skipped on BB A0 (the
+ * chip has no Geneve support).
+ */
+static void
+ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
+                               struct ecore_tunn_update_params *p_src,
+                               struct pf_update_tunnel_config *p_tunn_cfg)
+{
+       unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
+       unsigned long update_mask = p_src->tunn_mode_update_mask;
+       unsigned long tunn_mode = p_src->tunn_mode;
+       unsigned long new_tunn_mode = 0;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
+               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
+       } else {
+               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
+               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
+       } else {
+               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
+               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
+       } else {
+               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
+       }
+
+       /* BB A0: no Geneve - drop any Geneve port update and return early */
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               if (p_src->update_geneve_udp_port)
+                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
+               p_src->update_geneve_udp_port = 0;
+               p_src->tunn_mode = new_tunn_mode;
+               return;
+       }
+
+       if (p_src->update_geneve_udp_port) {
+               p_tunn_cfg->set_geneve_udp_port_flg = 1;
+               p_tunn_cfg->geneve_udp_port =
+                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
+               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+       } else {
+               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
+               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+       } else {
+               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+       }
+
+       p_src->tunn_mode = new_tunn_mode;
+}
+
+/* Fill the pf_update ramrod tunnel configuration from the caller's
+ * update parameters: merge the tunnel-mode bitmap, set per-type RX
+ * classification, optional UDP ports and TX enable flags.
+ *
+ * NOTE(review): 'tunn_mode' is captured BEFORE
+ * ecore_tunn_set_pf_fix_tunn_mode() rewrites p_src->tunn_mode, so the
+ * tx_enable_* flags below reflect the caller-requested bitmap rather
+ * than the merged one - confirm this is intended.
+ */
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
+                               struct ecore_tunn_update_params *p_src,
+                               struct pf_update_tunnel_config *p_tunn_cfg)
+{
+       unsigned long tunn_mode = p_src->tunn_mode;
+       enum tunnel_clss type;
+
+       ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
+       p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
+       p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
+
+       /* RX classification per tunnel type */
+       type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+       p_tunn_cfg->tunnel_clss_vxlan = type;
+       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+       p_tunn_cfg->tunnel_clss_l2gre = type;
+       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+       p_tunn_cfg->tunnel_clss_ipgre = type;
+
+       if (p_src->update_vxlan_udp_port) {
+               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+               p_tunn_cfg->vxlan_udp_port =
+                   OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2gre = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgre = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_vxlan = 1;
+
+       /* BB A0: no Geneve - drop any Geneve port update and stop here */
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               if (p_src->update_geneve_udp_port)
+                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
+               p_src->update_geneve_udp_port = 0;
+               return;
+       }
+
+       if (p_src->update_geneve_udp_port) {
+               p_tunn_cfg->set_geneve_udp_port_flg = 1;
+               p_tunn_cfg->geneve_udp_port =
+                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2geneve = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+       p_tunn_cfg->tunnel_clss_l2geneve = type;
+       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+/* Program the HW (via the init-fw helpers) with the enable state of
+ * each tunnel type present in 'tunn_mode'. Geneve registers are not
+ * touched on BB A0, which lacks Geneve support.
+ */
+static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  unsigned long tunn_mode)
+{
+       u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+       u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
+               l2gre_enable = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
+               ipgre_enable = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
+               vxlan_enable = 1;
+
+       ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+       ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+       /* No Geneve support on BB A0 */
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+               return;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
+               l2geneve_enable = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
+               ipgeneve_enable = 1;
+
+       ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+                               ipgeneve_enable);
+}
+
+/* Fill the pf_start ramrod tunnel configuration from the caller's
+ * start-time parameters: RX classification per tunnel type, optional
+ * VXLAN/Geneve UDP ports and TX enable flags. No-op when p_src is
+ * NULL. Geneve fields are skipped on BB A0 (no Geneve support).
+ */
+static void
+ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
+                              struct ecore_tunn_start_params *p_src,
+                              struct pf_start_tunnel_config *p_tunn_cfg)
+{
+       unsigned long tunn_mode;
+       enum tunnel_clss type;
+
+       if (!p_src)
+               return;
+
+       tunn_mode = p_src->tunn_mode;
+       type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+       p_tunn_cfg->tunnel_clss_vxlan = type;
+       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+       p_tunn_cfg->tunnel_clss_l2gre = type;
+       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+       p_tunn_cfg->tunnel_clss_ipgre = type;
+
+       if (p_src->update_vxlan_udp_port) {
+               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+               p_tunn_cfg->vxlan_udp_port =
+                   OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2gre = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgre = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_vxlan = 1;
+
+       /* BB A0: no Geneve - drop any Geneve port update and stop here */
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               if (p_src->update_geneve_udp_port)
+                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
+               p_src->update_geneve_udp_port = 0;
+               return;
+       }
+
+       if (p_src->update_geneve_udp_port) {
+               p_tunn_cfg->set_geneve_udp_port_flg = 1;
+               p_tunn_cfg->geneve_udp_port =
+                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
+       }
+
+       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_l2geneve = 1;
+
+       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
+               p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+       p_tunn_cfg->tunnel_clss_l2geneve = type;
+       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+/* ecore_sp_pf_start(): send the COMMON_RAMROD_PF_START ramrod that
+ * initializes the PF in firmware - event ring location, MF mode,
+ * tunnel configuration, personality and VF range. Blocks until the
+ * ramrod completes (EBLOCK); on return, if a tunnel configuration was
+ * supplied, the HW tunnel mode is programmed and cached on the device.
+ */
+enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_tunn_start_params *p_tunn,
+                                      enum ecore_mf_mode mode,
+                                      bool allow_npar_tx_switch)
+{
+       struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
+       u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+       u8 page_cnt;
+
+       /* update initial eq producer */
+       ecore_eq_prod_update(p_hwfn,
+                            ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+       /* Initialize the SPQ entry for the ramrod */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_PF_START,
+                                  PROTOCOLID_COMMON, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Fill the ramrod data */
+       p_ramrod = &p_ent->ramrod.pf_start;
+       p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
+       p_ramrod->event_ring_sb_index = sb_index;
+       p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
+       p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+       /* For easier debugging */
+       p_ramrod->dont_log_ramrods = 0;
+       p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);
+
+       switch (mode) {
+       case ECORE_MF_DEFAULT:
+       case ECORE_MF_NPAR:
+               p_ramrod->mf_mode = MF_NPAR;
+               break;
+       case ECORE_MF_OVLAN:
+               p_ramrod->mf_mode = MF_OVLAN;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true,
+                         "Unsupported MF mode, init as DEFAULT\n");
+               p_ramrod->mf_mode = MF_NPAR;
+       }
+
+       /* Place EQ address in RAMROD */
+       DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
+                      p_hwfn->p_eq->chain.pbl.p_phys_table);
+       page_cnt = (u8) ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
+       p_ramrod->event_ring_num_pages = page_cnt;
+       DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+                      p_hwfn->p_consq->chain.pbl.p_phys_table);
+
+       ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
+                                      &p_ramrod->tunnel_config);
+
+       if (IS_MF_SI(p_hwfn))
+               p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+       switch (p_hwfn->hw_info.personality) {
+       case ECORE_PCI_ETH:
+               p_ramrod->personality = PERSONALITY_ETH;
+               break;
+       case ECORE_PCI_ISCSI:
+               p_ramrod->personality = PERSONALITY_ISCSI;
+               break;
+       case ECORE_PCI_ETH_ROCE:
+               p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
+                         p_hwfn->hw_info.personality);
+               p_ramrod->personality = PERSONALITY_ETH;
+       }
+
+       p_ramrod->base_vf_id = (u8) p_hwfn->hw_info.first_vf_in_pf;
+       p_ramrod->num_vfs = (u8) p_hwfn->p_dev->sriov_info.total_vfs;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                  "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
+                  sb, sb_index, p_ramrod->outer_tag);
+
+       rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+
+       /* Mirror the tunnel mode into HW and cache it on the device */
+       if (p_tunn) {
+               ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+                                      p_tunn->tunn_mode);
+               p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
+       }
+
+       return rc;
+}
+
+/* Send a COMMON_RAMROD_PF_UPDATE ramrod (callback completion mode)
+ * carrying the current DCBx negotiation results to the firmware.
+ */
+enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+                                  &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       /* Copy the DCBx results into the pf_update ramrod payload */
+       ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+                                       &p_ent->ramrod.pf_update);
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+/* Send a COMMON_RAMROD_PF_UPDATE ramrod that updates only the tunnel
+ * configuration. On success (and when p_tunn is given), the new UDP
+ * destination ports and tunnel modes are also programmed into HW and
+ * the mode is cached on the device.
+ */
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+                           struct ecore_tunn_update_params *p_tunn,
+                           enum spq_mode comp_mode,
+                           struct ecore_spq_comp_cb *p_comp_data)
+{
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_sp_init_data init_data;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = comp_mode;
+       init_data.p_comp_data = p_comp_data;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+                                  &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
+                                       &p_ent->ramrod.pf_update.tunnel_config);
+
+       rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+
+       /* Ramrod accepted - mirror the new configuration into HW */
+       if ((rc == ECORE_SUCCESS) && p_tunn) {
+               if (p_tunn->update_vxlan_udp_port)
+                       ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                                 p_tunn->vxlan_udp_port);
+               if (p_tunn->update_geneve_udp_port)
+                       ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                                  p_tunn->geneve_udp_port);
+
+               ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+                                      p_tunn->tunn_mode);
+               p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
+       }
+
+       return rc;
+}
+
+/* Send the COMMON_RAMROD_PF_STOP ramrod, blocking until it completes.
+ * Issued when closing the PF.
+ */
+enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_sp_init_data sp_data;
+       struct ecore_spq_entry *spq_ent = OSAL_NULL;
+       enum _ecore_status_t status;
+
+       /* Prepare the SPQ entry parameters */
+       OSAL_MEMSET(&sp_data, 0, sizeof(sp_data));
+       sp_data.cid = ecore_spq_get_cid(p_hwfn);
+       sp_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       sp_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       status = ecore_sp_init_request(p_hwfn, &spq_ent, COMMON_RAMROD_PF_STOP,
+                                      PROTOCOLID_COMMON, &sp_data);
+       if (status != ECORE_SUCCESS)
+               return status;
+
+       return ecore_spq_post(p_hwfn, spq_ent, OSAL_NULL);
+}
+
+/* Send an empty (COMMON_RAMROD_EMPTY) ramrod, blocking until it
+ * completes; used by callers as a heartbeat.
+ */
+enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_sp_init_data sp_data;
+       struct ecore_spq_entry *spq_ent = OSAL_NULL;
+       enum _ecore_status_t status;
+
+       /* Prepare the SPQ entry parameters */
+       OSAL_MEMSET(&sp_data, 0, sizeof(sp_data));
+       sp_data.cid = ecore_spq_get_cid(p_hwfn);
+       sp_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       sp_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       status = ecore_sp_init_request(p_hwfn, &spq_ent, COMMON_RAMROD_EMPTY,
+                                      PROTOCOLID_COMMON, &sp_data);
+       if (status != ECORE_SUCCESS)
+               return status;
+
+       return ecore_spq_post(p_hwfn, spq_ent, OSAL_NULL);
+}
diff --git a/drivers/net/qede/ecore/ecore_sp_commands.h 
b/drivers/net/qede/ecore/ecore_sp_commands.h
new file mode 100644
index 0000000..e281ab0
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_sp_commands.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SP_COMMANDS_H__
+#define __ECORE_SP_COMMANDS_H__
+
+#include "ecore.h"
+#include "ecore_spq.h"
+#include "ecore_sp_api.h"
+
+#define ECORE_SP_EQ_COMPLETION  0x01
+#define ECORE_SP_CQE_COMPLETION 0x02
+
+/* Parameters used by ecore_sp_init_request() to acquire and initialize
+ * an SPQ entry for a ramrod.
+ */
+struct ecore_sp_init_data {
+       /* The CID and FID aren't necessarily derived from hwfn,
+        * e.g., in IOV scenarios. CID might differ between SPQ and
+        * other elements.
+        */
+       u32 cid;
+       u16 opaque_fid; /* combined with cid into the opaque CID -
+                        * see ecore_sp_init_request()
+                        */
+
+       /* Information regarding operation upon sending & completion */
+       enum spq_mode comp_mode;
+       struct ecore_spq_comp_cb *p_comp_data;  /* used by BLOCK/CB modes */
+
+};
+
+/**
+ * @brief Acquire and initialize an SPQ entry for a given ramrod.
+ *
+ * @param p_hwfn
+ * @param pp_ent - will be filled with a pointer to an entry upon success
+ * @param cmd - dependent upon protocol
+ * @param protocol
+ * @param p_data - various configuration required for ramrod
+ *
+ * @return ECORE_SUCCESS upon success, otherwise failure.
+ */
+enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_spq_entry **pp_ent,
+                                          u8 cmd,
+                                          u8 protocol,
+                                          struct ecore_sp_init_data *p_data);
+
+/**
+ * @brief ecore_sp_pf_start - PF Function Start Ramrod
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf start tunneling configuration
+ * @param mode
+ * @param allow_npar_tx_switch - npar tx switching to be used
+ *       for vports configured for tx-switching.
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_tunn_start_params *p_tunn,
+                                      enum ecore_mf_mode mode,
+                                      bool allow_npar_tx_switch);
+
+/**
+ * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
+ *                                     update  Ramrod
+ *
+ * This ramrod is sent to update a tunneling configuration
+ * for a physical function (PF).
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf update tunneling parameters
+ * @param comp_mode - completion mode
+ * @param p_comp_data - callback function
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+                           struct ecore_tunn_update_params *p_tunn,
+                           enum spq_mode comp_mode,
+                           struct ecore_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief ecore_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @note Final phase API.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_pf_stop - PF Function Stop Ramrod
+ *
+ * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
+ * sent and the last completion written to the PFs Event Ring. This ramrod also
+ * deletes the context for the Slowhwfn connection on this PF.
+ *
+ * @note Not required for first packet.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn);
+
+#endif /*__ECORE_SP_COMMANDS_H__*/
diff --git a/drivers/net/qede/ecore/ecore_spq.c 
b/drivers/net/qede/ecore/ecore_spq.c
new file mode 100644
index 0000000..a9b0700
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_spq.c
@@ -0,0 +1,989 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "reg_addr.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_hsi_common.h"
+#include "ecore.h"
+#include "ecore_sp_api.h"
+#include "ecore_spq.h"
+#include "ecore_iro.h"
+#include "ecore_init_fw_funcs.h"
+#include "ecore_cxt.h"
+#include "ecore_int.h"
+#include "ecore_dev_api.h"
+#include "ecore_mcp.h"
+#ifdef CONFIG_ECORE_ROCE
+#include "ecore_roce.h"
+#endif
+#include "ecore_hw.h"
+#include "ecore_sriov.h"
+#ifdef CONFIG_ECORE_ISCSI
+#include "ecore_iscsi.h"
+#include "ecore_ooo.h"
+#endif
+
+/***************************************************************************
+ * Structures & Definitions
+ ***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT   (1)
+#define SPQ_BLOCK_SLEEP_LENGTH         (1000)
+
+/***************************************************************************
+ * Blocking Imp. (BLOCK/EBLOCK mode)
+ ***************************************************************************/
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
+                                 void *cookie,
+                                 union event_ring_data *data,
+                                 u8 fw_return_code)
+{
+       struct ecore_spq_comp_done *comp_done;
+
+       comp_done = (struct ecore_spq_comp_done *)cookie;
+
+       comp_done->done = 0x1;  /* signal the poller in ecore_spq_block() */
+       comp_done->fw_return_code = fw_return_code;
+
+       /* make update visible to waiting thread */
+       OSAL_SMP_WMB(p_hwfn->p_dev);
+}
+
+static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_spq_entry *p_ent,
+                                           u8 *p_fw_ret)
+{
+       int sleep_count = SPQ_BLOCK_SLEEP_LENGTH; /* 1000 polls x 5ms sleep */
+       struct ecore_spq_comp_done *comp_done;
+       enum _ecore_status_t rc;
+
+       comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
+       while (sleep_count) {
+               OSAL_POLL_MODE_DPC(p_hwfn);
+               /* validate we receive completion update */
+               OSAL_SMP_RMB(p_hwfn->p_dev);
+               if (comp_done->done == 1) {
+                       if (p_fw_ret)
+                               *p_fw_ret = comp_done->fw_return_code;
+                       return ECORE_SUCCESS;
+               }
+               OSAL_MSLEEP(5);
+               sleep_count--;
+       }
+
+       DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+       rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+       if (rc != ECORE_SUCCESS)
+               DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
+
+       /* Retry after drain */
+       sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
+       while (sleep_count) {
+               /* validate we receive completion update */
+               OSAL_SMP_RMB(p_hwfn->p_dev);
+               if (comp_done->done == 1) {
+                       if (p_fw_ret)
+                               *p_fw_ret = comp_done->fw_return_code;
+                       return ECORE_SUCCESS;
+               }
+               OSAL_MSLEEP(5);
+               sleep_count--;
+       }
+
+       if (comp_done->done == 1) { /* last-chance check after the retry loop */
+               if (p_fw_ret)
+                       *p_fw_ret = comp_done->fw_return_code;
+               return ECORE_SUCCESS;
+       }
+
+       DP_NOTICE(p_hwfn, true,
+                 "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
+                 OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
+                 p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
+                 OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
+
+       ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
+
+       return ECORE_BUSY;
+}
+
+/***************************************************************************
+ * SPQ entries inner API
+ ***************************************************************************/
+static enum _ecore_status_t
+ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
+{
+       p_ent->flags = 0;
+
+       switch (p_ent->comp_mode) {
+       case ECORE_SPQ_MODE_EBLOCK:
+       case ECORE_SPQ_MODE_BLOCK:
+               p_ent->comp_cb.function = ecore_spq_blocking_cb; /* both block modes poll */
+               break;
+       case ECORE_SPQ_MODE_CB:
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
+                         p_ent->comp_mode);
+               return ECORE_INVAL;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                  "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
+                  " Data pointer: [%08x:%08x] Completion Mode: %s\n",
+                  p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
+                  p_ent->elem.hdr.protocol_id,
+                  p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
+                  D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
+                          ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+                          "MODE_CB"));
+
+       return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * HSI access
+ ***************************************************************************/
+static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_spq *p_spq)
+{
+       u16 pq;
+       struct ecore_cxt_info cxt_info;
+       struct core_conn_context *p_cxt;
+       union ecore_qm_pq_params pq_params;
+       enum _ecore_status_t rc;
+
+       cxt_info.iid = p_spq->cid;
+
+       rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+       if (rc < 0) { /* NOTE(review): other call sites test rc != ECORE_SUCCESS; confirm error codes are negative */
+               DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
+                         p_spq->cid);
+               return;
+       }
+
+       p_cxt = cxt_info.p_cxt;
+
+       SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+       SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+                 XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+       /*SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+          XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1); */
+       SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+                 XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+       /* CDU validation - FIXME currently disabled */
+
+       /* QM physical queue */
+       OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+       pq_params.core.tc = LB_TC;
+       pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
+       p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);
+
+       p_cxt->xstorm_st_context.spq_base_lo =
+           DMA_LO_LE(p_spq->chain.p_phys_addr);
+       p_cxt->xstorm_st_context.spq_base_hi =
+           DMA_HI_LE(p_spq->chain.p_phys_addr);
+
+       p_cxt->xstorm_st_context.consolid_base_addr.lo =
+           DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
+       p_cxt->xstorm_st_context.consolid_base_addr.hi =
+           DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+}
+
+static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_spq *p_spq,
+                                             struct ecore_spq_entry *p_ent)
+{
+       struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
+       u16 echo = ecore_chain_get_prod_idx(p_chain);
+       struct slow_path_element *elem;
+       struct core_db_data db;
+
+       p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
+       elem = ecore_chain_produce(p_chain);
+       if (!elem) {
+               DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
+               return ECORE_INVAL;
+       }
+
+       *elem = p_ent->elem;    /* struct assignment */
+
+       /* send a doorbell on the slow hwfn session */
+       OSAL_MEMSET(&db, 0, sizeof(db));
+       SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+       SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+       SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
+                 DQ_XCM_CORE_SPQ_PROD_CMD);
+       db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+       /* validate producer is up to-date */
+       OSAL_RMB(p_hwfn->p_dev);
+
+       db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
+
+       /* do not reorder */
+       OSAL_BARRIER(p_hwfn->p_dev);
+
+       DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *) &db);
+
+       /* make sure the doorbell was rung */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                  "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
+                  " agg_params: %02x, prod: %04x\n",
+                  DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
+                  db.agg_flags, ecore_chain_get_prod_idx(p_chain));
+
+       return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * Asynchronous events
+ ***************************************************************************/
+
+static enum _ecore_status_t
+ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
+                            struct event_ring_entry *p_eqe)
+{
+       switch (p_eqe->protocol_id) {
+#ifdef CONFIG_ECORE_ROCE
+       case PROTOCOLID_ROCE:
+               {
+                       p_hwfn->p_roce_info->events.affiliated_event(p_hwfn->
+                                    p_roce_info->events.context,
+                                    p_eqe->opcode,
+                                    &p_eqe->data);
+
+                       return ECORE_SUCCESS;
+               }
+#endif
+       case PROTOCOLID_COMMON: /* SR-IOV PF<->VF channel events */
+               return ecore_sriov_eqe_event(p_hwfn,
+                                            p_eqe->opcode,
+                                            p_eqe->echo, &p_eqe->data);
+#ifdef CONFIG_ECORE_ISCSI
+       case PROTOCOLID_ISCSI:
+               if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
+                       u32 cid = OSAL_LE32_TO_CPU(p_eqe->data.iscsi_info.cid);
+
+                       ecore_ooo_release_connection_isles(p_hwfn,
+                                                          p_hwfn->p_ooo_info,
+                                                          cid);
+                       return ECORE_SUCCESS;
+               }
+               if (p_hwfn->p_iscsi_info->event_cb != OSAL_NULL) {
+                       return p_hwfn->p_iscsi_info->event_cb(p_hwfn->
+                                                             p_iscsi_info->
+                                                             event_context,
+                                                             p_eqe->opcode,
+                                                             &p_eqe->data);
+               } else {
+                       DP_NOTICE(p_hwfn,
+                                 false, "iSCSI async completion is not set\n");
+                       return ECORE_NOTIMPL;
+               }
+#endif
+       default:
+               DP_NOTICE(p_hwfn,
+                         true, "Unknown Async completion for protocol: %d\n",
+                         p_eqe->protocol_id);
+               return ECORE_INVAL;
+       }
+}
+
+/***************************************************************************
+ * EQ API
+ ***************************************************************************/
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
+{
+       u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
+           USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+
+       REG_WR16(p_hwfn, addr, prod); /* 16-bit write into per-PF USTORM RAM slot */
+
+       /* keep prod updates ordered */
+       OSAL_MMIOWB(p_hwfn->p_dev);
+}
+
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+                                        void *cookie)
+{
+       struct ecore_eq *p_eq = cookie;
+       struct ecore_chain *p_chain = &p_eq->chain;
+       enum _ecore_status_t rc = 0;
+
+       /* take a snapshot of the FW consumer */
+       u16 fw_cons_idx = OSAL_LE16_TO_CPU(*(p_eq->p_fw_cons));
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+       /* Need to guarantee the fw_cons index we use points to a usable
+        * element (to comply with our chain), so our macros would comply
+        */
+       if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
+           ecore_chain_get_usable_per_page(p_chain)) {
+               fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
+       }
+
+       /* Complete current segment of eq entries */
+       while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
+               struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
+               if (!p_eqe) {
+                       rc = ECORE_INVAL;
+                       break;
+               }
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                               "op %x prot %x res0 %x echo %x"
+                               "fwret %x flags %x\n", p_eqe->opcode,
+                          p_eqe->protocol_id,  /* Event Protocol ID */
+                          p_eqe->reserved0,    /* Reserved */
+                          OSAL_LE16_TO_CPU(p_eqe->echo),
+                          p_eqe->fw_return_code,       /* FW return code for SP
+                                                          ramrods
+                                                        */
+                          p_eqe->flags);
+
+               if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+                       if (ecore_async_event_completion(p_hwfn, p_eqe))
+                               rc = ECORE_INVAL;
+               } else if (ecore_spq_completion(p_hwfn,
+                                               p_eqe->echo,
+                                               p_eqe->fw_return_code,
+                                               &p_eqe->data)) {
+                       rc = ECORE_INVAL;
+               }
+
+               ecore_chain_recycle_consumed(p_chain);
+       }
+
+       ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
+
+       return rc;
+}
+
+struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
+{
+       struct ecore_eq *p_eq;
+
+       /* Allocate EQ struct */
+       p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
+       if (!p_eq) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_eq'\n");
+               return OSAL_NULL;
+       }
+
+       /* Allocate and initialize EQ chain */
+       if (ecore_chain_alloc(p_hwfn->p_dev,
+                             ECORE_CHAIN_USE_TO_PRODUCE,
+                             ECORE_CHAIN_MODE_PBL,
+                             ECORE_CHAIN_CNT_TYPE_U16,
+                             num_elem,
+                             sizeof(union event_ring_element), &p_eq->chain)) {
+
+               DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
+               goto eq_allocate_fail;
+       }
+
+       /* register EQ completion on the SP SB */
+       ecore_int_register_cb(p_hwfn,
+                             ecore_eq_completion,
+                             p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons); /* p_fw_cons tracks the FW consumer */
+
+       return p_eq;
+
+eq_allocate_fail:
+       ecore_eq_free(p_hwfn, p_eq);
+       return OSAL_NULL;
+}
+
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+{
+       ecore_chain_reset(&p_eq->chain); /* reset chain indices for (re)start */
+}
+
+void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+{
+       if (!p_eq)
+               return; /* nothing to free */
+       ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
+       OSAL_FREE(p_hwfn->p_dev, p_eq);
+}
+
+/***************************************************************************
+* CQE API - manipulate EQ functionality
+***************************************************************************/
+static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
+                                                struct eth_slow_path_rx_cqe
+                                                *cqe,
+                                                enum protocol_type protocol)
+{
+       if (IS_VF(p_hwfn->p_dev))
+               return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol); /* VF path handled via OSAL hook */
+
+       /* @@@tmp - it's possible we'll eventually want to handle some
+        * actual commands that can arrive here, but for now this is only
+        * used to complete the ramrod using the echo value on the cqe
+        */
+       return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
+                                             struct eth_slow_path_rx_cqe *cqe)
+{
+       enum _ecore_status_t rc;
+
+       rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); /* completes the ramrod via cqe->echo */
+       if (rc) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+                         cqe->ramrod_cmd_id);
+       }
+
+       return rc;
+}
+
+/***************************************************************************
+ * Slow hwfn Queue (spq)
+ ***************************************************************************/
+void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq_entry *p_virt = OSAL_NULL;
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+       dma_addr_t p_phys = 0;
+       u32 i, capacity;
+
+       OSAL_LIST_INIT(&p_spq->pending);
+       OSAL_LIST_INIT(&p_spq->completion_pending);
+       OSAL_LIST_INIT(&p_spq->free_pool);
+       OSAL_LIST_INIT(&p_spq->unlimited_pending);
+       OSAL_SPIN_LOCK_INIT(&p_spq->lock);
+
+       /* SPQ empty pool */
+       p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod); /* point each data_ptr at its entry's ramrod */
+       p_virt = p_spq->p_virt;
+
+       capacity = ecore_chain_get_capacity(&p_spq->chain);
+       for (i = 0; i < capacity; i++) {
+
+               p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
+               p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+
+               OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
+
+               p_virt++;
+               p_phys += sizeof(struct ecore_spq_entry);
+       }
+
+       /* Statistics */
+       p_spq->normal_count = 0;
+       p_spq->comp_count = 0;
+       p_spq->comp_sent_count = 0;
+       p_spq->unlimited_pending_count = 0;
+
+       OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
+                     SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
+       p_spq->comp_bitmap_idx = 0;
+
+       /* SPQ cid, cannot fail */
+       ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+       ecore_spq_hw_initialize(p_hwfn, p_spq);
+
+       /* reset the chain itself */
+       ecore_chain_reset(&p_spq->chain);
+}
+
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq_entry *p_virt = OSAL_NULL;
+       struct ecore_spq *p_spq = OSAL_NULL;
+       dma_addr_t p_phys = 0;
+       u32 capacity;
+
+       /* SPQ struct */
+       p_spq =
+           OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
+       if (!p_spq) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_spq'");
+               return ECORE_NOMEM;
+       }
+
+       /* SPQ ring  */
+       if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
+                       ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
+                       /* N/A when the mode is SINGLE */
+                       sizeof(struct slow_path_element), &p_spq->chain)) {
+               DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
+               goto spq_allocate_fail;
+       }
+
+       /* allocate and fill the SPQ elements (incl. ramrod data list) */
+       capacity = ecore_chain_get_capacity(&p_spq->chain);
+       p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
+                                        capacity *
+                                        sizeof(struct ecore_spq_entry));
+       if (!p_virt)
+               goto spq_allocate_fail;
+
+       p_spq->p_virt = p_virt;
+       p_spq->p_phys = p_phys;
+
+       OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock); /* guards the SPQ lists */
+
+       p_hwfn->p_spq = p_spq;
+       return ECORE_SUCCESS;
+
+spq_allocate_fail:
+       ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+       OSAL_FREE(p_hwfn->p_dev, p_spq);
+       return ECORE_NOMEM;
+}
+
+void ecore_spq_free(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+       u32 capacity;
+
+       if (!p_spq)
+               return; /* not allocated */
+
+       if (p_spq->p_virt) {
+               capacity = ecore_chain_get_capacity(&p_spq->chain);
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_spq->p_virt,
+                                      p_spq->p_phys,
+                                      capacity *
+                                      sizeof(struct ecore_spq_entry));
+       }
+
+       ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+       OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+       OSAL_FREE(p_hwfn->p_dev, p_spq);
+}
+
+enum _ecore_status_t
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
+{
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+
+       OSAL_SPIN_LOCK(&p_spq->lock);
+
+       if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+
+               p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+                                   sizeof(struct ecore_spq_entry));
+               if (!p_ent) {
+                       OSAL_SPIN_UNLOCK(&p_spq->lock);
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to allocate an SPQ entry"
+                                 " for a pending ramrod\n");
+                       return ECORE_NOMEM;
+               }
+               p_ent->queue = &p_spq->unlimited_pending; /* overflow entry */
+       } else {
+               p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+                                             struct ecore_spq_entry, list);
+               OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
+               p_ent->queue = &p_spq->pending; /* pooled entry */
+       }
+
+       *pp_ent = p_ent;
+
+       OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+       return ECORE_SUCCESS;
+}
+
+/* Locked variant; must be called while the SPQ lock is held */
+static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_spq_entry *p_ent)
+{
+       OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+                           struct ecore_spq_entry *p_ent)
+{
+       OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
+       __ecore_spq_return_entry(p_hwfn, p_ent); /* push back to free_pool */
+       OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * @brief ecore_spq_add_entry - adds a new entry to the pending
+ *        list. Should be used while lock is being held.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool), or else places the
+ * entry in the unlimited_pending pool.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ * @param priority
+ *
+ * @return enum _ecore_status_t
+ */
+static enum _ecore_status_t
+ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
+                   struct ecore_spq_entry *p_ent, enum spq_priority priority)
+{
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+
+       if (p_ent->queue == &p_spq->unlimited_pending) {
+               if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+
+                       OSAL_LIST_PUSH_TAIL(&p_ent->list,
+                                           &p_spq->unlimited_pending);
+                       p_spq->unlimited_pending_count++;
+
+                       return ECORE_SUCCESS;
+
+               } else {
+                       struct ecore_spq_entry *p_en2;
+
+                       p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+                                                     struct ecore_spq_entry,
+                                                     list);
+                       OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
+
+                       /* Copy the ring element physical pointer to the new
+                        * entry, since we are about to override the entire ring
+                        * entry and don't want to lose the pointer.
+                        */
+                       p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+
+                       /* Setting the cookie to the comp_done of the
+                        * new element.
+                        */
+                       if (p_ent->comp_cb.cookie == &p_ent->comp_done)
+                               p_ent->comp_cb.cookie = &p_en2->comp_done;
+
+                       *p_en2 = *p_ent;
+
+                       OSAL_FREE(p_hwfn->p_dev, p_ent);
+
+                       p_ent = p_en2;
+               }
+       }
+
+       /* entry is to be placed in 'pending' queue */
+       switch (priority) {
+       case ECORE_SPQ_PRIORITY_NORMAL:
+               OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
+               p_spq->normal_count++;
+               break;
+       case ECORE_SPQ_PRIORITY_HIGH:
+               OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
+               p_spq->high_count++;
+               break;
+       default:
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/***************************************************************************
+ * Accessor
+ ***************************************************************************/
+
+u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
+{
+       if (!p_hwfn->p_spq)
+               return 0xffffffff;      /* illegal CID: SPQ not allocated */
+       return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+ * Posting new Ramrods
+ ***************************************************************************/
+
+static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
+                                               osal_list_t *head,
+                                               u32 keep_reserve)
+{
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+       enum _ecore_status_t rc;
+
+       /* TODO - implementation might be wasteful; will always keep room
+        * for an additional high priority ramrod (even if one is already
+        * pending FW)
+        */
+       while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+              !OSAL_LIST_IS_EMPTY(head)) {
+               struct ecore_spq_entry *p_ent =
+                   OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
+               OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
+               OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
+               p_spq->comp_sent_count++; /* stats: entries handed to FW */
+
+               rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
+               if (rc) {
+                       OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+                                              &p_spq->completion_pending);
+                       __ecore_spq_return_entry(p_hwfn, p_ent);
+                       return rc;
+               }
+       }
+
+       return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
+{
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       struct ecore_spq *p_spq = p_hwfn->p_spq;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+
+       while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
+               if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
+                       break;
+
+               p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
+                                             struct ecore_spq_entry, list);
+               if (!p_ent)
+                       return ECORE_INVAL;
+
+               OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
+
+               ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority); /* NOTE(review): rc ignored - confirm intentional */
+       }
+
+       rc = ecore_spq_post_list(p_hwfn,
+                                &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
+       if (rc)
+               return rc;
+
+       return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_spq_entry *p_ent,
+                                   u8 *fw_return_code)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
+       bool b_ret_ent = true;
+
+       if (!p_hwfn)
+               return ECORE_INVAL;
+
+       if (!p_ent) {
+               DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
+               return ECORE_INVAL;
+       }
+
+       if (p_hwfn->p_dev->recov_in_prog) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                          "Recovery is in progress -> skip spq post"
+                          " [cmd %02x protocol %02x]",
+                          p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
+               /* Return success to let the flows be completed successfully
+                * w/o any error handling.
+                */
+               return ECORE_SUCCESS;
+       }
+
+       OSAL_SPIN_LOCK(&p_spq->lock);
+
+       /* Complete the entry */
+       rc = ecore_spq_fill_entry(p_hwfn, p_ent);
+
+       /* Check return value after LOCK is taken for cleaner error flow */
+       if (rc)
+               goto spq_post_fail;
+
+       /* Add the request to the pending queue */
+       rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+       if (rc)
+               goto spq_post_fail;
+
+       rc = ecore_spq_pend_post(p_hwfn);
+       if (rc) {
+               /* Since it's possible that pending failed for a different
+                * entry [although unlikely], the failed entry was already
+                * dealt with; No need to return it here.
+                */
+               b_ret_ent = false;
+               goto spq_post_fail;
+       }
+
+       OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+       if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
+               /* For entries in ECORE BLOCK mode, the completion code cannot
+                * perform the necessary cleanup - if it did, we couldn't
+                * access p_ent here to see whether it's successful or not.
+                * Thus, after gaining the answer perform the cleanup here.
+                */
+               rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
+               if (rc)
+                       goto spq_post_fail2;
+
+               /* return to pool */
+               ecore_spq_return_entry(p_hwfn, p_ent);
+       }
+       return rc;
+
+spq_post_fail2:
+       OSAL_SPIN_LOCK(&p_spq->lock);
+       OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
+       ecore_chain_return_produced(&p_spq->chain);
+
+spq_post_fail:
+       /* return to the free pool */
+       if (b_ret_ent)
+               __ecore_spq_return_entry(p_hwfn, p_ent);
+       OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+       return rc;
+}
+
+/* Handle a slowpath EQ completion: look up the pending SPQ entry whose
+ * 'echo' matches the EQE, advance the chain consumer (coping with
+ * out-of-order completions via the completion bitmap), invoke the entry's
+ * completion callback outside the lock, and then try to post any pending
+ * requests. Returns ECORE_INVAL on bad arguments, ECORE_EXISTS when no
+ * pending entry matches 'echo', otherwise the ecore_spq_pend_post() status.
+ */
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+                                         __le16 echo,
+                                         u8 fw_return_code,
+                                         union event_ring_data *p_data)
+{
+       struct ecore_spq *p_spq;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       struct ecore_spq_entry *tmp;
+       struct ecore_spq_entry *found = OSAL_NULL;
+       enum _ecore_status_t rc;
+
+       if (!p_hwfn)
+               return ECORE_INVAL;
+
+       p_spq = p_hwfn->p_spq;
+       if (!p_spq)
+               return ECORE_INVAL;
+
+       OSAL_SPIN_LOCK(&p_spq->lock);
+       OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
+                                     tmp,
+                                     &p_spq->completion_pending,
+                                     list, struct ecore_spq_entry) {
+
+               if (p_ent->elem.hdr.echo == echo) {
+                       OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+                                              &p_spq->completion_pending);
+
+                       /* Avoid overriding of SPQ entries when getting
+                        * out-of-order completions, by marking the completions
+                        * in a bitmap and increasing the chain consumer only
+                        * for the first successive completed entries.
+                        */
+                       SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
+                       while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
+                                                     p_spq->comp_bitmap_idx)) {
+                               SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
+                                                       p_spq->comp_bitmap_idx);
+                               p_spq->comp_bitmap_idx++;
+                               ecore_chain_return_produced(&p_spq->chain);
+                       }
+
+                       p_spq->comp_count++;
+                       found = p_ent;
+                       break;
+               }
+
+               /* This is debug and should be relatively uncommon - depends
+                * on scenarios which have multiple per-PF sent ramrods.
+                */
+               DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                          "Got completion for echo %04x - doesn't match"
+                          " echo %04x in completion pending list\n",
+                          OSAL_LE16_TO_CPU(echo),
+                          OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
+       }
+
+       /* Release lock before callback, as callback may post
+        * an additional ramrod.
+        */
+       OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+       if (!found) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to find an entry this"
+                         " EQE [echo %04x] completes\n",
+                         OSAL_LE16_TO_CPU(echo));
+               return ECORE_EXISTS;
+       }
+
+       /* NOTE(review): p_ent == found here (loop broke on the match), but
+        * using 'found' below would be clearer and safer - consider fixing.
+        */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+                  "Complete EQE [echo %04x]: func %p cookie %p)\n",
+                  OSAL_LE16_TO_CPU(echo),
+                  p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+       if (found->comp_cb.function)
+               found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+                                       fw_return_code);
+
+       if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
+               /* EBLOCK is responsible for freeing its own entry */
+               ecore_spq_return_entry(p_hwfn, found);
+       }
+
+       /* Attempt to post pending requests */
+       OSAL_SPIN_LOCK(&p_spq->lock);
+       rc = ecore_spq_pend_post(p_hwfn);
+       OSAL_SPIN_UNLOCK(&p_spq->lock);
+
+       return rc;
+}
+
+/* Allocate a ConsQ structure and its PBL-mode chain.
+ * Returns the new ConsQ, or OSAL_NULL on allocation failure (in which case
+ * any partially-allocated state is released via ecore_consq_free()).
+ */
+struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_consq *p_consq;
+
+       /* Allocate ConsQ struct */
+       p_consq =
+           OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
+       if (!p_consq) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_consq'\n");
+               return OSAL_NULL;
+       }
+
+       /* Allocate and initialize EQ chain */
+       /* NOTE(review): 0x80 is the element size in bytes (128B per ConsQ
+        * element) - consider a named constant. Also the DP_NOTICE string
+        * below lacks a trailing '\n'.
+        */
+       if (ecore_chain_alloc(p_hwfn->p_dev,
+                             ECORE_CHAIN_USE_TO_PRODUCE,
+                             ECORE_CHAIN_MODE_PBL,
+                             ECORE_CHAIN_CNT_TYPE_U16,
+                             ECORE_CHAIN_PAGE_SIZE / 0x80,
+                             0x80, &p_consq->chain)) {
+
+               DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
+               goto consq_allocate_fail;
+       }
+
+       return p_consq;
+
+consq_allocate_fail:
+       ecore_consq_free(p_hwfn, p_consq);
+       return OSAL_NULL;
+}
+
+/* Reset the ConsQ chain to its initial (empty) state. */
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+{
+       ecore_chain_reset(&p_consq->chain);
+}
+
+/* Release a ConsQ: free its chain memory and the structure itself.
+ * Safe to call with p_consq == OSAL_NULL.
+ */
+void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+{
+       if (!p_consq)
+               return;
+       ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
+       OSAL_FREE(p_hwfn->p_dev, p_consq);
+}
diff --git a/drivers/net/qede/ecore/ecore_spq.h 
b/drivers/net/qede/ecore/ecore_spq.h
new file mode 100644
index 0000000..aec57c1
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_spq.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SPQ_H__
+#define __ECORE_SPQ_H__
+
+#include "ecore_hsi_common.h"
+#include "ecore_status.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_chain.h"
+#include "ecore_sp_api.h"
+
+/* Union of every ramrod payload that can be carried inside an SPQ entry;
+ * its size therefore bounds the per-entry DMA allocation.
+ */
+union ramrod_data {
+       struct pf_start_ramrod_data pf_start;
+       struct pf_update_ramrod_data pf_update;
+       struct rx_queue_start_ramrod_data rx_queue_start;
+       struct rx_queue_update_ramrod_data rx_queue_update;
+       struct rx_queue_stop_ramrod_data rx_queue_stop;
+       struct tx_queue_start_ramrod_data tx_queue_start;
+       struct tx_queue_stop_ramrod_data tx_queue_stop;
+       struct vport_start_ramrod_data vport_start;
+       struct vport_stop_ramrod_data vport_stop;
+       struct vport_update_ramrod_data vport_update;
+       struct core_rx_start_ramrod_data core_rx_queue_start;
+       struct core_rx_stop_ramrod_data core_rx_queue_stop;
+       struct core_tx_start_ramrod_data core_tx_queue_start;
+       struct core_tx_stop_ramrod_data core_tx_queue_stop;
+       struct vport_filter_update_ramrod_data vport_filter_update;
+
+#ifdef CONFIG_ECORE_ROCE
+       struct rdma_init_func_ramrod_data rdma_init_func;
+       struct rdma_close_func_ramrod_data rdma_close_func;
+       struct rdma_register_tid_ramrod_data rdma_register_tid;
+       struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
+       struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
+       struct roce_create_qp_req_ramrod_data roce_create_qp_req;
+       struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
+       struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
+       struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
+       struct roce_query_qp_req_ramrod_data roce_query_qp_req;
+       struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
+       struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
+       struct rdma_create_cq_ramrod_data rdma_create_cq;
+       struct rdma_resize_cq_ramrod_data rdma_resize_cq;
+       struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
+#endif
+
+       struct vf_start_ramrod_data vf_start;
+       struct vf_stop_ramrod_data vf_stop;
+};
+
+#define EQ_MAX_CREDIT  0xffffffff
+
+/* Queueing priority of a pending SPQ entry. */
+enum spq_priority {
+       ECORE_SPQ_PRIORITY_NORMAL,
+       ECORE_SPQ_PRIORITY_HIGH,
+};
+
+/* Completion descriptor supplied by the requester: either a callback
+ * or an address polled for completion.
+ */
+union ecore_spq_req_comp {
+       struct ecore_spq_comp_cb cb;
+       u64 *done_addr;
+};
+
+/* SPQ_MODE_EBLOCK */
+struct ecore_spq_comp_done {
+       u64 done;
+       u8 fw_return_code;
+};
+
+/* A single slowpath queue entry: list linkage, the HSI element posted to
+ * FW, its ramrod payload and completion bookkeeping.
+ */
+struct ecore_spq_entry {
+       osal_list_entry_t list;
+
+       u8 flags;
+
+       /* HSI slow path element */
+       struct slow_path_element elem;
+
+       /* payload referenced by 'elem' */
+       union ramrod_data ramrod;
+
+       enum spq_priority priority;
+
+       /* pending queue for this entry */
+       osal_list_t *queue;
+
+       enum spq_mode comp_mode;
+       struct ecore_spq_comp_cb comp_cb;
+       struct ecore_spq_comp_done comp_done;   /* SPQ_MODE_EBLOCK */
+};
+
+/* Event queue - chain of FW-produced event ring entries. */
+struct ecore_eq {
+       struct ecore_chain chain;
+       u8 eq_sb_index;         /* index within the SB */
+       __le16 *p_fw_cons;      /* ptr to index value */
+};
+
+/* Consolidation queue - wraps its producer chain only. */
+struct ecore_consq {
+       struct ecore_chain chain;
+};
+
+/* Slowpath queue state: pending/free entry lists, the HW chain, the DMA
+ * backing for entries, and the out-of-order completion bitmap.
+ * All list/chain manipulation is serialized by 'lock'.
+ */
+struct ecore_spq {
+       osal_spinlock_t lock;
+
+       osal_list_t unlimited_pending;
+       osal_list_t pending;
+       osal_list_t completion_pending;
+       osal_list_t free_pool;
+
+       struct ecore_chain chain;
+
+       /* allocated dma-able memory for spq entries (+ramrod data) */
+       dma_addr_t p_phys;
+       struct ecore_spq_entry *p_virt;
+
+       /* Bitmap for handling out-of-order completions */
+#define SPQ_RING_SIZE                                          \
+       (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+/* NOTE(review): integer division - if SPQ_RING_SIZE were ever not a
+ * multiple of BITS_PER_LONG the bitmap would be one word short; and
+ * comp_bitmap_idx being u8 assumes SPQ_RING_SIZE <= 256 - TODO confirm.
+ */
+#define SPQ_COMP_BMAP_SIZE                                     \
+(SPQ_RING_SIZE / (sizeof(unsigned long) * 8 /* BITS_PER_LONG */))
+       unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE];
+       u8 comp_bitmap_idx;
+#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx)                      \
+(OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx)                    \
+(OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx)                     \
+(OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+
+       /* Statistics */
+       u32 unlimited_pending_count;
+       u32 normal_count;
+       u32 high_count;
+       u32 comp_sent_count;
+       u32 comp_count;
+
+       u32 cid;
+};
+
+struct ecore_port;
+struct ecore_hwfn;
+
+/**
+ * @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that
+ *        Pends it to the future list.
+ *
+ * @param p_hwfn
+ * @param p_req
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_spq_entry *p_ent,
+                                   u8 *fw_return_code);
+
+/**
+ * @brief ecore_spq_alloc - Allocates & initializes the SPQ and EQ.
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_setup - Reset the SPQ to its start state.
+ *
+ * @param p_hwfn
+ */
+void ecore_spq_setup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_deallocate - Deallocates the given SPQ struct.
+ *
+ * @param p_hwfn
+ */
+void ecore_spq_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_spq_get_entry - Obtain an entry from the SPQ
+ *        free pool list.
+ *
+ *
+ *
+ * @param p_hwfn
+ * @param pp_ent
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent);
+
+/**
+ * @brief ecore_spq_return_entry - Return an entry to spq free
+ *                                 pool list
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+                           struct ecore_spq_entry *p_ent);
+/**
+ * @brief ecore_eq_allocate - Allocates & initializes an EQ struct
+ *
+ * @param p_hwfn
+ * @param num_elem number of elements in the eq
+ *
+ * @return struct ecore_eq* - a newly allocated structure; NULL upon error.
+ */
+struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
+
+/**
+ * @brief ecore_eq_setup - Reset the EQ to its start state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
+
+/**
+ * @brief ecore_eq_deallocate - deallocates the given EQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
+
+/**
+ * @brief ecore_eq_prod_update - update the FW with default EQ producer
+ *
+ * @param p_hwfn
+ * @param prod
+ */
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod);
+
+/**
+ * @brief ecore_eq_completion - Completes currently pending EQ elements
+ *
+ * @param p_hwfn
+ * @param cookie
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+                                        void *cookie);
+
+/**
+ * @brief ecore_spq_completion - Completes a single event
+ *
+ * @param p_hwfn
+ * @param echo - echo value from cookie (used for determining completion)
+ * @param p_data - data from cookie (used in callback function if applicable)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+                                         __le16 echo,
+                                         u8 fw_return_code,
+                                         union event_ring_data *p_data);
+
+/**
+ * @brief ecore_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ *
+ * @param p_hwfn
+ *
+ * @return u32 - SPQ CID
+ */
+u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_alloc - Allocates & initializes an ConsQ
+ *        struct
+ *
+ * @param p_hwfn
+ *
+ * @return struct ecore_consq* - a newly allocated structure; NULL upon error.
+ */
+struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_consq_setup - Reset the ConsQ to its start
+ *        state.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
+
+/**
+ * @brief ecore_consq_free - deallocates the given ConsQ struct.
+ *
+ * @param p_hwfn
+ * @param p_eq
+ */
+void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
+
+#endif /* __ECORE_SPQ_H__ */
diff --git a/drivers/net/qede/ecore/ecore_sriov.c 
b/drivers/net/qede/ecore/ecore_sriov.c
new file mode 100644
index 0000000..5b13b16
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_sriov.c
@@ -0,0 +1,3422 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "reg_addr.h"
+#include "ecore_sriov.h"
+#include "ecore_status.h"
+#include "ecore_hw.h"
+#include "ecore_hw_defs.h"
+#include "ecore_int.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_l2.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_rt_defs.h"
+#include "ecore_init_ops.h"
+#include "ecore_gtt_reg_addr.h"
+#include "ecore_iro.h"
+#include "ecore_mcp.h"
+#include "ecore_cxt.h"
+#include "ecore_vf.h"
+#include "ecore_init_fw_funcs.h"
+
+/* TEMPORARY until we implement print_enums... */
+const char *ecore_channel_tlvs_string[] = {
+       "CHANNEL_TLV_NONE",     /* ends tlv sequence */
+       "CHANNEL_TLV_ACQUIRE",
+       "CHANNEL_TLV_VPORT_START",
+       "CHANNEL_TLV_VPORT_UPDATE",
+       "CHANNEL_TLV_VPORT_TEARDOWN",
+       "CHANNEL_TLV_START_RXQ",
+       "CHANNEL_TLV_START_TXQ",
+       "CHANNEL_TLV_STOP_RXQ",
+       "CHANNEL_TLV_STOP_TXQ",
+       "CHANNEL_TLV_UPDATE_RXQ",
+       "CHANNEL_TLV_INT_CLEANUP",
+       "CHANNEL_TLV_CLOSE",
+       "CHANNEL_TLV_RELEASE",
+       "CHANNEL_TLV_LIST_END",
+       "CHANNEL_TLV_UCAST_FILTER",
+       "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
+       "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
+       "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
+       "CHANNEL_TLV_VPORT_UPDATE_MCAST",
+       "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
+       "CHANNEL_TLV_VPORT_UPDATE_RSS",
+       "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
+       "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+       "CHANNEL_TLV_MAX"
+};
+
+/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
+u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
+{
+       int i;
+
+       while (length--) {
+               crc ^= *ptr++;
+               for (i = 0; i < 8; i++)
+                       crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+       }
+       return crc;
+}
+
+/* Publish the PF's bulletin board to a VF: bump the version, compute the
+ * CRC over everything after the crc field itself, and DMA the board into
+ * the VF-provided buffer. Returns ECORE_INVAL if the VF is unknown or has
+ * not registered a bulletin address, otherwise the DMAE status.
+ */
+enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
+                                               int vfid,
+                                               struct ecore_ptt *p_ptt)
+{
+       struct ecore_bulletin_content *p_bulletin;
+       struct ecore_dmae_params params;
+       struct ecore_vf_info *p_vf;
+       int crc_size = sizeof(p_bulletin->crc);
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!p_vf)
+               return ECORE_INVAL;
+
+       /* TODO - check VF is in a state where it can accept message */
+       if (!p_vf->vf_bulletin)
+               return ECORE_INVAL;
+
+       p_bulletin = p_vf->bulletin.p_virt;
+
+       /* Increment bulletin board version and compute crc */
+       p_bulletin->version++;
+       /* CRC covers the board starting just past the crc field; assumes
+        * crc is the first field of ecore_bulletin_content - TODO confirm.
+        */
+       p_bulletin->crc = ecore_crc32(0, (u8 *) p_bulletin + crc_size,
+                                     p_vf->bulletin.size - crc_size);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+                  p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+       /* propagate bulletin board via dmae to vm memory;
+        * size is in dwords, hence the /4
+        */
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       params.flags = ECORE_DMAE_FLAG_VF_DST;
+       params.dst_vfid = p_vf->abs_vf_id;
+       return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+                                   p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+                                   &params);
+}
+
+/* Read the SR-IOV extended capability from PCI config space into
+ * p_dev->sriov_info and sanity-check the VF counts against the device
+ * limit (NUM_OF_VFS). Always returns ECORE_SUCCESS; bogus counts are
+ * clamped to zero rather than failing.
+ */
+static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
+{
+       struct ecore_hw_sriov_info *iov = &p_dev->sriov_info;
+       int pos = iov->pos;     /* offset of the SR-IOV capability */
+
+       DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
+       OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+       OSAL_PCI_READ_CONFIG_WORD(p_dev,
+                                 pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+       OSAL_PCI_READ_CONFIG_WORD(p_dev,
+                                 pos + PCI_SRIOV_INITIAL_VF,
+                                 &iov->initial_vfs);
+
+       OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+       if (iov->num_vfs) {
+               /* @@@TODO - in future we might want to add an OSAL here to
+                * allow each OS to decide on its own how to act.
+                */
+               DP_VERBOSE(p_dev, ECORE_MSG_IOV,
+                          "Number of VFs are already set to non-zero value."
+                          " Ignoring PCI configuration value\n");
+               iov->num_vfs = 0;
+       }
+
+       OSAL_PCI_READ_CONFIG_WORD(p_dev,
+                                 pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+       OSAL_PCI_READ_CONFIG_WORD(p_dev,
+                                 pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+       OSAL_PCI_READ_CONFIG_WORD(p_dev,
+                                 pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+       OSAL_PCI_READ_CONFIG_DWORD(p_dev,
+                                  pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+       OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+       OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+       /* NOTE(review): iov->nres and iov->nr_virtfn are printed here but
+        * never read from config space in this function - presumably set
+        * elsewhere; their values may be stale at this point. TODO confirm.
+        */
+       DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x,"
+                  "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
+                  " stride %d, page size 0x%x\n", 0,
+                  iov->nres, iov->cap, iov->ctrl,
+                  iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
+                  iov->offset, iov->stride, iov->pgsz);
+
+       /* Some sanity checks */
+       if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
+           iov->total_vfs > NUM_OF_VFS(p_dev)) {
+               /* This can happen only due to a bug. In this case we set
+                * num_vfs to zero to avoid memory corruption in the code that
+                * assumes max number of vfs
+                */
+               DP_NOTICE(p_dev, false,
+                         "IOV: Unexpected number of vfs set: %d"
+                         " setting num_vf to zero\n",
+                         iov->num_vfs);
+
+               iov->num_vfs = 0;
+               iov->total_vfs = 0;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Invalidate every IGU mapping-memory line that is free and not owned by
+ * the PF, so stale VF status-block mappings cannot fire. No-op (with an
+ * error print) if IGU info was never initialized.
+ */
+static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt)
+{
+       struct ecore_igu_block *p_sb;
+       u16 sb_id;
+       u32 val;
+
+       if (!p_hwfn->hw_info.p_igu_info) {
+               DP_ERR(p_hwfn,
+                      "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
+               return;
+       }
+
+       for (sb_id = 0;
+            sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
+               p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+               if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
+                   !(p_sb->status & ECORE_IGU_STATUS_PF)) {
+                       /* read-modify-write: clear only the VALID field */
+                       val = ecore_rd(p_hwfn, p_ptt,
+                                      IGU_REG_MAPPING_MEMORY + sb_id * 4);
+                       SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+                       ecore_wr(p_hwfn, p_ptt,
+                                IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
+               }
+       }
+}
+
+/* Initialize the per-VF database: carve the previously-allocated mailbox
+ * request/reply and bulletin DMA regions into per-VF slices and derive
+ * each VF's ids (relative/absolute/concrete/opaque) and vport.
+ * Requires ecore_iov_allocate_vfdb() to have succeeded first.
+ */
+static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
+{
+       u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
+       union pfvf_tlvs *p_reply_virt_addr;
+       union vfpf_tlvs *p_req_virt_addr;
+       struct ecore_bulletin_content *p_bulletin_virt;
+       struct ecore_pf_iov *p_iov_info;
+       dma_addr_t req_p, rply_p, bulletin_p;
+       u8 idx = 0;
+
+       p_iov_info = p_hwfn->pf_iov_info;
+
+       OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+       p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+       req_p = p_iov_info->mbx_msg_phys_addr;
+       p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+       rply_p = p_iov_info->mbx_reply_phys_addr;
+       p_bulletin_virt = p_iov_info->p_bulletins;
+       bulletin_p = p_iov_info->bulletins_phys;
+       if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+               DP_ERR(p_hwfn,
+                      "ecore_iov_setup_vfdb called without alloc mem first\n");
+               return;
+       }
+
+       p_iov_info->base_vport_id = 1;  /* @@@TBD resource allocation */
+
+       for (idx = 0; idx < num_vfs; idx++) {
+               struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
+               u32 concrete;
+
+               /* per-VF slice of the shared mailbox regions */
+               vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+               vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+               vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+               vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+               vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
+               vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
+#endif
+               vf->state = VF_STOPPED;
+
+               vf->bulletin.phys = idx *
+                   sizeof(struct ecore_bulletin_content) + bulletin_p;
+               vf->bulletin.p_virt = p_bulletin_virt + idx;
+               vf->bulletin.size = sizeof(struct ecore_bulletin_content);
+
+               vf->relative_vf_id = idx;
+               vf->abs_vf_id = idx + p_hwfn->hw_info.first_vf_in_pf;
+               concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+               vf->concrete_fid = concrete;
+               /* TODO - need to devise a better way of getting opaque */
+               vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+                   (vf->abs_vf_id << 8);
+               /* @@TBD MichalK - add base vport_id of VFs to equation */
+               vf->vport_id = p_iov_info->base_vport_id + idx;
+       }
+}
+
+/* Allocate the DMA-coherent regions backing the per-VF database: mailbox
+ * request buffers, mailbox reply buffers and bulletin boards (one slot per
+ * VF each). Returns ECORE_NOMEM on any failure; partial allocations are
+ * presumably released by the caller via ecore_iov_free() - TODO confirm.
+ */
+static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+       void **p_v_addr;
+       u16 num_vfs = 0;
+
+       num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
+
+       /* Allocate PF Mailbox buffer (per-VF) */
+       p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+       p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+       *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+                                           &p_iov_info->mbx_msg_phys_addr,
+                                           p_iov_info->mbx_msg_size);
+       if (!*p_v_addr)
+               return ECORE_NOMEM;
+
+       /* Allocate PF Mailbox Reply buffer (per-VF) */
+       p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+       p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+       *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+                                           &p_iov_info->mbx_reply_phys_addr,
+                                           p_iov_info->mbx_reply_size);
+       if (!*p_v_addr)
+               return ECORE_NOMEM;
+
+       /* Allocate bulletin boards (per-VF) */
+       p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
+           num_vfs;
+       p_v_addr = &p_iov_info->p_bulletins;
+       *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+                                           &p_iov_info->bulletins_phys,
+                                           p_iov_info->bulletins_size);
+       if (!*p_v_addr)
+               return ECORE_NOMEM;
+
+       /* NOTE(review): "%lx" paired with a (u64) cast is a format mismatch
+        * on ILP32 targets - consider PRIx64 or "%llx". TODO confirm the
+        * supported targets before changing.
+        */
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "PF's Requests mailbox [%p virt 0x%lx phys],  Response"
+                  " mailbox [%p virt 0x%lx phys] Bulletins"
+                  " [%p virt 0x%lx phys]\n",
+                  p_iov_info->mbx_msg_virt_addr,
+                  (u64) p_iov_info->mbx_msg_phys_addr,
+                  p_iov_info->mbx_reply_virt_addr,
+                  (u64) p_iov_info->mbx_reply_phys_addr,
+                  p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+
+       /* @@@TBD MichalK - statistics / RSS */
+
+       return ECORE_SUCCESS;
+}
+
+/* Release the per-VF database DMA regions allocated by
+ * ecore_iov_allocate_vfdb(); each region is freed only if it was
+ * successfully allocated, so this is safe after a partial allocation.
+ */
+static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+       if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_iov_info->mbx_msg_virt_addr,
+                                      p_iov_info->mbx_msg_phys_addr,
+                                      p_iov_info->mbx_msg_size);
+
+       if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_iov_info->mbx_reply_virt_addr,
+                                      p_iov_info->mbx_reply_phys_addr,
+                                      p_iov_info->mbx_reply_size)
+
+       if (p_iov_info->p_bulletins)
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_iov_info->p_bulletins,
+                                      p_iov_info->bulletins_phys,
+                                      p_iov_info->bulletins_size);
+
+       /* @@@TBD MichalK - statistics / RSS */
+}
+
+/* Allocate the PF's IOV bookkeeping (struct ecore_pf_iov) and the per-VF
+ * DMA database. Succeeds trivially when the function has no SR-IOV
+ * capability. Returns ECORE_NOMEM on allocation failure.
+ */
+enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_pf_iov *p_sriov;
+
+       if (!IS_PF_SRIOV(p_hwfn)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "No SR-IOV - no need for IOV db\n");
+               return rc;
+       }
+
+       p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
+       if (!p_sriov) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_sriov'");
+               return ECORE_NOMEM;
+       }
+
+       p_hwfn->pf_iov_info = p_sriov;
+
+       rc = ecore_iov_allocate_vfdb(p_hwfn);
+
+       return rc;
+}
+
+/* Runtime IOV setup: populate the per-VF database and clear stale VF IGU
+ * mappings. No-op if SR-IOV is absent or ecore_iov_alloc() did not run.
+ */
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+       if (!IS_PF_SRIOV(p_hwfn) || !p_hwfn->pf_iov_info)
+               return;
+
+       ecore_iov_setup_vfdb(p_hwfn);
+       ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
+}
+
+/* Tear down the PF's IOV state: free the VF database and the pf_iov_info
+ * struct itself.
+ * NOTE(review): pf_iov_info is not set to OSAL_NULL after the free -
+ * a later IS_PF_SRIOV path touching it would use-after-free. Consider
+ * nulling it here.
+ */
+void ecore_iov_free(struct ecore_hwfn *p_hwfn)
+{
+       if (p_hwfn->pf_iov_info) {
+               ecore_iov_free_vfdb(p_hwfn);
+               OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
+       }
+}
+
+/* Learn SR-IOV HW info for this hwfn: the lead hwfn reads the PCI SR-IOV
+ * capability (others reuse it), then the index of the first VF belonging
+ * to this PF is computed. Returns ECORE_SUCCESS when IOV is simply absent.
+ */
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt)
+{
+       enum _ecore_status_t rc;
+
+       /* @@@ TBD get this information from shmem / pci cfg */
+       if (IS_VF(p_hwfn->p_dev))
+               return ECORE_SUCCESS;
+
+       /* First hwfn should learn the PCI configuration */
+       if (IS_LEAD_HWFN(p_hwfn)) {
+               struct ecore_dev *p_dev = p_hwfn->p_dev;
+               int *pos = &p_hwfn->p_dev->sriov_info.pos;
+
+               *pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
+                                                   PCI_EXT_CAP_ID_SRIOV);
+               if (!*pos) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "No PCIe IOV support\n");
+                       return ECORE_SUCCESS;
+               }
+
+               rc = ecore_iov_pci_cfg_info(p_dev);
+               if (rc)
+                       return rc;
+       } else if (!p_hwfn->p_dev->sriov_info.pos) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
+               return ECORE_SUCCESS;
+       }
+
+       /* Calculate the first VF index - this is a bit tricky; Basically,
+        * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
+        * after the first engine's VFs.
+        */
+       p_hwfn->hw_info.first_vf_in_pf = p_hwfn->p_dev->sriov_info.offset +
+           p_hwfn->abs_pf_id - 16;
+       if (ECORE_PATH_ID(p_hwfn))
+               p_hwfn->hw_info.first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "First VF in hwfn 0x%08x\n", p_hwfn->hw_info.first_vf_in_pf);
+
+       return ECORE_SUCCESS;
+}
+
+/* Look up a VF by its relative id. When b_enabled_only is set, only an
+ * enabled VF is returned. Returns OSAL_NULL (with an error print) when IOV
+ * info is missing or the id fails validation.
+ */
+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
+                                           u16 relative_vf_id,
+                                           bool b_enabled_only)
+{
+       struct ecore_vf_info *vf = OSAL_NULL;
+
+       if (!p_hwfn->pf_iov_info) {
+               DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+               return OSAL_NULL;
+       }
+
+       if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+               vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+       else
+               DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
+                      relative_vf_id);
+
+       return vf;
+}
+
+/* Mark a single VF (by relative id) for disabling; silently ignores an
+ * unknown VF id.
+ */
+void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
+                                u16 rel_vf_id, u8 to_disable)
+{
+       struct ecore_vf_info *vf;
+
+       vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+       if (!vf)
+               return;
+
+       vf->to_disable = to_disable;
+}
+
+/* Apply the to_disable flag to every VF of this PF. */
+void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable)
+{
+       u16 i;
+
+       for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
+               ecore_iov_set_vf_to_disable(p_hwfn, i, to_disable);
+}
+
+#ifndef LINUX_REMOVE
+/* @@@TBD Consider taking outside of ecore... */
+/* Attach an OS-private context pointer to an enabled VF (and reset its SW
+ * mailbox state when the SW channel is compiled in). Returns
+ * ECORE_UNKNOWN_ERROR if the VF does not exist or is not enabled.
+ */
+enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
+                                         u16 vf_id, void *ctx)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
+
+       if (vf != OSAL_NULL) {
+               vf->ctx = ctx;
+#ifdef CONFIG_ECORE_SW_CHANNEL
+               vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
+#endif
+       } else {
+               rc = ECORE_UNKNOWN_ERROR;
+       }
+       return rc;
+}
+#endif
+
+/**
+ * VF enable primitives
+ *
+ * When pretend is required, the caller is responsible
+ * for calling pretend prior to calling these routines
+ */
+
+/* clears vf error in all semi blocks
+ * Assumption: called under VF pretend...
+ */
+static OSAL_INLINE void ecore_iov_vf_semi_clear_err(struct ecore_hwfn *p_hwfn,
+                                                   struct ecore_ptt *p_ptt)
+{
+       /* Clear the VF error indication in each of the six SEMI blocks */
+       static const u32 sem_err_regs[] = {
+               TSEM_REG_VF_ERROR, USEM_REG_VF_ERROR, MSEM_REG_VF_ERROR,
+               XSEM_REG_VF_ERROR, YSEM_REG_VF_ERROR, PSEM_REG_VF_ERROR,
+       };
+       u8 i;
+
+       for (i = 0; i < sizeof(sem_err_regs) / sizeof(sem_err_regs[0]); i++)
+               ecore_wr(p_hwfn, p_ptt, sem_err_regs[i], 1);
+}
+
+static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt, u8 abs_vfid)
+{
+       /* Each WAS_ERROR clear register covers 32 VFs; select the register
+        * for this VF and write its bit to clear the sticky error.
+        */
+       u32 addr = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4;
+       u32 bit = 1 << (abs_vfid & 0x1f);
+
+       ecore_wr(p_hwfn, p_ptt, addr, bit);
+}
+
+/* Reset a VF's IGU state - zero its sent-message statistic and clean all
+ * of its status blocks. Parts of this run under VF pretend.
+ */
+static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  struct ecore_vf_info *vf)
+{
+       int i;
+       u16 igu_sb_id;
+
+       /* Set VF masks and configuration - pretend */
+       ecore_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+       /* Zero the count of messages this VF has sent */
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "value in VF_CONFIGURATION of vf %d after write %x\n",
+                  vf->abs_vf_id,
+                  ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));
+
+       /* unpretend */
+       ecore_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+       /* iterate over all queues, clear sb consumer */
+       for (i = 0; i < vf->num_sbs; i++) {
+               igu_sb_id = vf->igu_sbs[i];
+               /* Set then clear... */
+               ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
+                                        vf->opaque_fid);
+               ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,
+                                        vf->opaque_fid);
+       }
+}
+
+static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt,
+                                    struct ecore_vf_info *vf, bool enable)
+{
+       /* Toggle the MSI/MSIX enable bit in the VF's IGU configuration.
+        * The register is in the VF's view, so access it under pretend.
+        */
+       u32 msix_bit = IGU_VF_CONF_MSI_MSIX_EN;
+       u32 conf;
+
+       ecore_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+       conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+       if (enable)
+               conf |= msix_bit;
+       else
+               conf &= ~msix_bit;
+       ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, conf);
+
+       /* Restore the PF's own fid */
+       ecore_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+}
+
+/* Enable a VF's access to the device: clear stale errors, allocate its
+ * MSIX vectors via the management FW, run the per-VF init phase and send
+ * the VF-start ramrod. No-op for VFs flagged for disable.
+ */
+static enum _ecore_status_t
+ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
+{
+       u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+       enum _ecore_status_t rc;
+
+       /* A VF flagged for disable is deliberately left untouched */
+       if (vf->to_disable)
+               return ECORE_SUCCESS;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
+                  ECORE_VF_ABS_ID(p_hwfn, vf));
+
+       /* Clear any stale PGLUE error indication for this VF */
+       ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
+                                    ECORE_VF_ABS_ID(p_hwfn, vf));
+
+       /* Have the management FW configure MSIX vectors for the VF's SBs */
+       rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
+                                     vf->abs_vf_id, vf->num_sbs);
+       if (rc)
+               return rc;
+
+       /* Run the VF init-tool phase under the VF's fid */
+       ecore_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+       SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+       STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+       ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+                      p_hwfn->hw_info.hw_mode);
+
+       /* unpretend */
+       ecore_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+       /* NOTE(review): this state check happens after the HW setup above
+        * already ran - confirm the ordering is intentional.
+        */
+       if (vf->state != VF_STOPPED) {
+               DP_NOTICE(p_hwfn, true, "VF[%02x] is already started\n",
+                         vf->abs_vf_id);
+               return ECORE_INVAL;
+       }
+
+       /* Start VF */
+       rc = ecore_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+       if (rc != ECORE_SUCCESS)
+               DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
+                         vf->abs_vf_id);
+
+       vf->state = VF_FREE;
+
+       return rc;
+}
+
+/**
+ *
+ * @brief ecore_iov_config_perm_table - configure the permission
+ *      zone table.
+ *      In E4, queue zone permission table size is 320x9. There
+ *      are 320 VF queues for single engine device (256 for dual
+ *      engine device), and each entry has the following format:
+ *      {Valid, VF[7:0]}
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ * @param enable
+ */
+static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       struct ecore_vf_info *vf, u8 enable)
+{
+       u16 qzone_id = 0;
+       u32 reg_addr, val;
+       int qid;
+
+       for (qid = 0; qid < vf->num_rxqs; qid++) {
+               /* Translate the VF's rx queue into its queue-zone index */
+               ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+                                 &qzone_id);
+
+               /* Entry format is {valid-bit, VF[7:0]}; zero revokes it */
+               if (enable)
+                       val = vf->abs_vf_id | (1 << 8);
+               else
+                       val = 0;
+
+               reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
+               ecore_wr(p_hwfn, p_ptt, reg_addr, val);
+       }
+}
+
+static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       struct ecore_vf_info *vf)
+{
+       /* Clean the VF's IGU state while its interrupts are still masked */
+       ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+       /* Unmask MSI/MSIX for the VF */
+       ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, true);
+
+       /* Grant the VF access to its queue zones */
+       ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 1);
+}
+
+/* Allocate up to @num_rx_queues free IGU status blocks to the VF,
+ * programming each chosen IGU CAM line with the VF's function number and
+ * the matching CAU SB entry. Returns the number of SBs actually
+ * allocated (capped by the free-block pool).
+ */
+static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt,
+                                    struct ecore_vf_info *vf,
+                                    u16 num_rx_queues)
+{
+       int igu_id = 0;
+       int qid = 0;
+       u32 val = 0;
+       struct ecore_igu_block *igu_blocks =
+           p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
+
+       /* Never hand out more SBs than remain in the free pool */
+       if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
+               num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
+
+       p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+
+       /* Common part of every CAM line: this VF, valid, not-PF */
+       SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+       SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+       SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
+       /* Scan the CAM for free blocks until enough queues are covered */
+       while ((qid < num_rx_queues) &&
+              (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
+               if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
+                       struct cau_sb_entry sb_entry;
+
+                       vf->igu_sbs[qid] = (u16) igu_id;
+                       igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
+
+                       SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+                       ecore_wr(p_hwfn, p_ptt,
+                                IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
+                                val);
+
+                       /* Configure igu sb in CAU which were marked valid */
+                       ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+                                               p_hwfn->rel_pf_id,
+                                               vf->abs_vf_id, 1);
+                       ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                           (u64) (osal_uintptr_t) &sb_entry,
+                                           CAU_REG_SB_VAR_MEMORY +
+                                           igu_id * sizeof(u64), 2, 0);
+                       qid++;
+               }
+               igu_id++;
+       }
+
+       vf->num_sbs = (u8) num_rx_queues;
+
+       return vf->num_sbs;
+}
+
+/**
+ *
+ * @brief The function invalidates all the VF entries,
+ *        technically this isn't required, but added for
+ *        cleanliness and ease of debugging in case a VF attempts to
+ *        produce an interrupt after it has been taken down.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ */
+static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
+                                     struct ecore_ptt *p_ptt,
+                                     struct ecore_vf_info *vf)
+{
+       /* Return every IGU status block held by the VF to the free pool,
+        * clearing the valid bit of its CAM line so a stale VF cannot
+        * raise interrupts through it.
+        */
+       struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+       int idx;
+
+       for (idx = 0; idx < vf->num_sbs; idx++) {
+               int igu_id = vf->igu_sbs[idx];
+               u32 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+               u32 val = ecore_rd(p_hwfn, p_ptt, addr);
+
+               SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+               ecore_wr(p_hwfn, p_ptt, addr, val);
+
+               p_info->igu_map.igu_blocks[igu_id].status |=
+                   ECORE_IGU_STATUS_FREE;
+               p_info->free_blks++;
+       }
+
+       vf->num_sbs = 0;
+}
+
+/* Initialize HW resources for a single VF: allocate IGU SBs, derive the
+ * VF's queue/CID assignment from them, enable device access and mark the
+ * VF active. @num_rx_queues is a request; the grant may be smaller
+ * (limited by available CIDs and free IGU blocks).
+ */
+enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt,
+                                             u16 rel_vf_id, u16 num_rx_queues)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_vf_info *vf = OSAL_NULL;
+       u8 num_of_vf_avaiable_chains = 0;
+       u32 cids;
+       u8 i;
+
+       /* Refuse to re-initialize a VF that is already active */
+       if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {
+               DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
+                         rel_vf_id);
+               return ECORE_INVAL;
+       }
+
+       vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+       if (!vf) {
+               DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
+               return ECORE_UNKNOWN_ERROR;
+       }
+
+       /* Limit number of queues according to number of CIDs */
+       ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[%d] - requesting to initialize for 0x%04x queues"
+                  " [0x%04x CIDs available]\n",
+                  vf->relative_vf_id, num_rx_queues, (u16) cids);
+       num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16) cids));
+
+       num_of_vf_avaiable_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
+                                                              p_ptt,
+                                                              vf,
+                                                              num_rx_queues);
+       if (num_of_vf_avaiable_chains == 0) {
+               DP_ERR(p_hwfn, "no available igu sbs\n");
+               return ECORE_NOMEM;
+       }
+
+       /* Choose queue number and index ranges */
+       vf->num_rxqs = num_of_vf_avaiable_chains;
+       vf->num_txqs = num_of_vf_avaiable_chains;
+
+       /* Map each allocated SB to an L2 queue id; the same id serves
+        * both the rx and tx side of the pair.
+        */
+       for (i = 0; i < vf->num_rxqs; i++) {
+               u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
+                                                            vf->igu_sbs[i]);
+
+               if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "VF[%d] will require utilizing of"
+                                 " out-of-bounds queues - %04x\n",
+                                 vf->relative_vf_id, queue_id);
+                       /* TODO - cleanup the already allocate SBs */
+                       return ECORE_INVAL;
+               }
+
+               /* CIDs are per-VF, so no problem having them 0-based. */
+               vf->vf_queues[i].fw_rx_qid = queue_id;
+               vf->vf_queues[i].fw_tx_qid = queue_id;
+               vf->vf_queues[i].fw_cid = i;
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
+                          vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+       }
+
+       rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+
+       /* On success, record the VF in the active bitmap (64 VFs/word) */
+       if (rc == ECORE_SUCCESS) {
+               struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;
+               u16 vf_id = vf->relative_vf_id;
+
+               p_iov->num_vfs++;
+               p_iov->active_vfs[vf_id / 64] |= (1ULL << (vf_id % 64));
+       }
+
+       return rc;
+}
+
+/* Tear down HW resources of a VF: stop it (if running), mask its
+ * interrupts, revoke its permission-table zones, free its IGU SBs and
+ * clear it from the active bitmap.
+ */
+enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt,
+                                                u16 rel_vf_id)
+{
+       struct ecore_vf_info *vf = OSAL_NULL;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!vf) {
+               DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
+               return ECORE_UNKNOWN_ERROR;
+       }
+
+       if (vf->state != VF_STOPPED) {
+               /* Stopping the VF */
+               rc = ecore_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
+                              rc);
+                       return rc;
+               }
+
+               vf->state = VF_STOPPED;
+       }
+
+       /* disabling interrupts and resetting permission table was done during
+        * vf-close, however, we could get here without going through vf_close
+        */
+       /* Disable Interrupts for VF */
+       ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);
+
+       /* Reset Permission table */
+       ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);
+
+       vf->num_rxqs = 0;
+       vf->num_txqs = 0;
+       ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+       /* Remove the VF from the active bitmap (64 VFs per word) */
+       if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {
+               struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;
+               u16 vf_id = vf->relative_vf_id;
+
+               p_iov->num_vfs--;
+               p_iov->active_vfs[vf_id / 64] &= ~(1ULL << (vf_id % 64));
+       }
+
+       return ECORE_SUCCESS;
+}
+
+static bool ecore_iov_tlv_supported(u16 tlvtype)
+{
+       /* Valid channel TLV types lie strictly between NONE and MAX */
+       return (tlvtype > CHANNEL_TLV_NONE) && (tlvtype < CHANNEL_TLV_MAX);
+}
+
+/* Take the VF<->PF request channel for processing TLV @tlv. Actual
+ * locking is still TBD; today this only logs the (would-be) lock.
+ */
+static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_vf_info *vf, u16 tlv)
+{
+       /* we don't lock the channel for unsupported tlvs */
+       if (!ecore_iov_tlv_supported(tlv))
+               return;
+
+       /* lock the channel */
+       /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
+
+       /* record the locking op */
+       /* vf->op_current = tlv; @@@TBD MichalK */
+
+       /* log the lock */
+       DP_VERBOSE(p_hwfn,
+                  ECORE_MSG_IOV,
+                  "VF[%d]: vf pf channel locked by     %s\n",
+                  vf->abs_vf_id, ecore_channel_tlvs_string[tlv]);
+}
+
+/* Release the VF<->PF request channel after handling @expected_tlv.
+ * Counterpart of ecore_iov_lock_vf_pf_channel; locking is still TBD,
+ * so today this only logs the (would-be) unlock.
+ */
+static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_vf_info *vf,
+                                          u16 expected_tlv)
+{
+       /* we don't unlock the channel for unsupported tlvs */
+       if (!ecore_iov_tlv_supported(expected_tlv))
+               return;
+
+       /*WARN(expected_tlv != vf->op_current,
+          "lock mismatch: expected %s found %s",
+          channel_tlvs_string[expected_tlv],
+          channel_tlvs_string[vf->op_current]);
+          @@@TBD MichalK
+        */
+
+       /* lock the channel */
+       /* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */
+
+       /* log the unlock */
+       DP_VERBOSE(p_hwfn,
+                  ECORE_MSG_IOV,
+                  "VF[%d]: vf pf channel unlocked by %s\n",
+                  vf->abs_vf_id, ecore_channel_tlvs_string[expected_tlv]);
+
+       /* record the locking op */
+       /* vf->op_current = CHANNEL_TLV_NONE; */
+}
+
+/* Place a TLV header at *offset, continuing the current TLV list, and
+ * advance the cursor past the whole TLV. Returns a pointer to the TLV
+ * just placed so the caller can fill in its payload.
+ */
+void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
+                   u8 **offset, u16 type, u16 length)
+{
+       u8 *start = *offset;
+       struct channel_tlv *tl = (struct channel_tlv *)start;
+
+       tl->type = type;
+       tl->length = length;
+
+       /* Leave *offset pointing at the first byte after this TLV */
+       *offset = start + length;
+
+       return start;
+}
+
+/* Log the type and length of every TLV in @tlvs_list until LIST_END,
+ * bailing out on zero-length entries or buffer overrun.
+ */
+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
+{
+       u16 total_length = 0;
+       u16 i;
+
+       for (i = 1; ; i++) {
+               struct channel_tlv *tlv =
+                   (struct channel_tlv *)((u8 *) tlvs_list + total_length);
+
+               if (ecore_iov_tlv_supported(tlv->type))
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "TLV number %d: type %s, length %d\n",
+                                  i, ecore_channel_tlvs_string[tlv->type],
+                                  tlv->length);
+               else
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "TLV number %d: type %d, length %d\n",
+                                  i, tlv->type, tlv->length);
+
+               if (tlv->type == CHANNEL_TLV_LIST_END)
+                       return;
+
+               /* A zero-length TLV would loop forever - treat as malicious */
+               if (!tlv->length) {
+                       DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
+                       return;
+               }
+
+               total_length += tlv->length;
+               if (total_length >= sizeof(struct tlv_buffer_size)) {
+                       DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
+                       return;
+               }
+       }
+}
+
+/* Finalize a reply in the VF mailbox and, when the HW channel is in use,
+ * DMA it to the VF-supplied reply address and signal channel-ready.
+ */
+static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt,
+                                   struct ecore_vf_info *p_vf,
+                                   u16 length, u8 status)
+{
+       struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+       struct ecore_dmae_params params;
+       u8 eng_vf_id;
+
+       /* Status lives in the reply header, set before transmitting */
+       mbx->reply_virt->default_resp.hdr.status = status;
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+       mbx->sw_mbx.response_size =
+           length + sizeof(struct channel_list_end_tlv);
+#endif
+
+       ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+       /* Nothing more to do when only the SW channel is active */
+       if (!p_hwfn->p_dev->sriov_info.b_hw_channel)
+               return;
+
+       eng_vf_id = p_vf->abs_vf_id;
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+       params.flags = ECORE_DMAE_FLAG_VF_DST;
+       params.dst_vfid = eng_vf_id;
+
+       /* Copy everything after the first quadword first, then the first
+        * quadword - presumably so the header (with status) becomes
+        * visible to the VF only once the body is in place; confirm
+        * against the VF-side polling logic.
+        */
+       ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+                            mbx->req_virt->first_tlv.reply_address +
+                            sizeof(u64),
+                            (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+                            &params);
+
+       ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+                            mbx->req_virt->first_tlv.reply_address,
+                            sizeof(u64) / 4, &params);
+
+       /* Ring the 'channel ready' indication in USDM RAM for this VF */
+       REG_WR(p_hwfn,
+              GTT_BAR0_MAP_REG_USDM_RAM +
+              USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+}
+
+/* Map a vport-update flag to its VF<->PF channel TLV type; returns 0
+ * for flags that have no corresponding TLV.
+ */
+static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
+                                 enum ecore_iov_vport_update_flag flag)
+{
+       switch (flag) {
+       case ECORE_IOV_VP_UPDATE_ACTIVATE:
+               return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+       case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
+               return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+       case ECORE_IOV_VP_UPDATE_TX_SWITCH:
+               return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+       case ECORE_IOV_VP_UPDATE_MCAST:
+               return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+       case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
+               return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+       case ECORE_IOV_VP_UPDATE_RSS:
+               return CHANNEL_TLV_VPORT_UPDATE_RSS;
+       case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+               return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+       case ECORE_IOV_VP_UPDATE_SGE_TPA:
+               return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+       default:
+               return 0;
+       }
+}
+
+/* Build the reply for a vport-update request: a leading VPORT_UPDATE
+ * TLV, one status TLV per extended TLV the PF found (@tlvs_mask),
+ * marked accepted or not-supported per @tlvs_accepted, and a LIST_END.
+ * Returns the total reply length in bytes.
+ */
+static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_vf_info *p_vf,
+                                             struct ecore_iov_vf_mbx *p_mbx,
+                                             u8 status, u16 tlvs_mask,
+                                             u16 tlvs_accepted)
+{
+       struct pfvf_def_resp_tlv *resp;
+       u16 size, total_len, i;
+
+       OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+       p_mbx->offset = (u8 *) (p_mbx->reply_virt);
+       size = sizeof(struct pfvf_def_resp_tlv);
+       total_len = size;
+
+       ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+       /* Prepare response for all extended tlvs if they are found by PF */
+       for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
+               if (!(tlvs_mask & (1 << i)))
+                       continue;
+
+               resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
+                                    ecore_iov_vport_to_tlv(p_hwfn, i), size);
+
+               if (tlvs_accepted & (1 << i))
+                       resp->hdr.status = status;
+               else
+                       resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] - vport_update resp: TLV %d, status %02x\n",
+                          p_vf->relative_vf_id,
+                          ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+
+               total_len += size;
+       }
+
+       ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       return total_len;
+}
+
+static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  struct ecore_vf_info *vf_info,
+                                  u16 type, u16 length, u8 status)
+{
+       /* Build a minimal reply (one TLV plus list-end) in the VF's reply
+        * buffer and push it to the VF.
+        */
+       struct ecore_iov_vf_mbx *p_mbx = &vf_info->vf_mbx;
+
+       p_mbx->offset = (u8 *) (p_mbx->reply_virt);
+       ecore_add_tlv(p_hwfn, &p_mbx->offset, type, length);
+       ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+}
+
+static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
+                                struct ecore_vf_info *p_vf)
+{
+       /* Return the VF bookkeeping to its post-allocation defaults */
+       p_vf->vf_bulletin = 0;
+       p_vf->vport_instance = 0;
+       p_vf->configured_features = 0;
+
+       /* Drop all filter accounting */
+       p_vf->num_mac_filters = 0;
+       p_vf->num_vlan_filters = 0;
+       p_vf->num_mc_filters = 0;
+
+       /* If VF previously requested less resources, go back to default */
+       p_vf->num_rxqs = p_vf->num_sbs;
+       p_vf->num_txqs = p_vf->num_sbs;
+       p_vf->num_active_rxqs = 0;
+
+       OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+       OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
+}
+
+/* Handle a VF ACQUIRE request: validate FW compatibility, record the
+ * VF's requested settings, fill in the PF/device/resource info and send
+ * the acquire response (with failure status on any validation error).
+ *
+ * Fix: 'length' used to be assigned only on the success path, so the
+ * early 'goto out' failure paths passed an uninitialized value to
+ * ecore_iov_prepare_resp() (undefined behavior). It is now assigned
+ * after the 'out' label, covering every path.
+ */
+static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt,
+                                    struct ecore_vf_info *vf)
+{
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+       struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+       struct pf_vf_resc *resc = &resp->resc;
+       struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+       u16 length;
+       u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+
+       /* Validate FW compatibility */
+       if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
+           req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
+           req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
+           req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+               DP_INFO(p_hwfn,
+                       "VF[%d] is running an incompatible driver [VF needs"
+                       " FW %02x:%02x:%02x:%02x but Hypervisor is"
+                       " using %02x:%02x:%02x:%02x]\n",
+                       vf->abs_vf_id, req->vfdev_info.fw_major,
+                       req->vfdev_info.fw_minor, req->vfdev_info.fw_revision,
+                       req->vfdev_info.fw_engineering, FW_MAJOR_VERSION,
+                       FW_MINOR_VERSION, FW_REVISION_VERSION,
+                       FW_ENGINEERING_VERSION);
+               vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+#ifndef __EXTRACT__LINUX__
+       /* Let the OS-specific layer veto the acquisition */
+       if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
+               vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+#endif
+
+       OSAL_MEMSET(resp, 0, sizeof(*resp));
+
+       /* Fill in vf info stuff : @@@TBD MichalK Hard Coded for now... */
+       vf->opaque_fid = req->vfdev_info.opaque_fid;
+       vf->num_mac_filters = 1;
+       vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+       vf->num_mc_filters = ECORE_MAX_MC_ADDRS;
+
+       /* Agree on the smaller of the two bulletin sizes */
+       vf->vf_bulletin = req->bulletin_addr;
+       vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+           vf->bulletin.size : req->bulletin_size;
+
+       /* fill in pfdev info */
+       pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
+       pfdev_info->db_size = 0;        /* @@@ TBD MichalK Vf Doorbells */
+       pfdev_info->indices_per_sb = PIS_PER_SB;
+       pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED;
+
+       /* Per-storm statistics windows inside the VF's BAR0 view */
+       pfdev_info->stats_info.mstats.address =
+           PXP_VF_BAR0_START_MSDM_ZONE_B +
+           OFFSETOF(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
+       pfdev_info->stats_info.mstats.len =
+           sizeof(struct eth_mstorm_per_queue_stat);
+
+       pfdev_info->stats_info.ustats.address =
+           PXP_VF_BAR0_START_USDM_ZONE_B +
+           OFFSETOF(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
+       pfdev_info->stats_info.ustats.len =
+           sizeof(struct eth_ustorm_per_queue_stat);
+
+       pfdev_info->stats_info.pstats.address =
+           PXP_VF_BAR0_START_PSDM_ZONE_B +
+           OFFSETOF(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
+       pfdev_info->stats_info.pstats.len =
+           sizeof(struct eth_pstorm_per_queue_stat);
+
+       pfdev_info->stats_info.tstats.address = 0;
+       pfdev_info->stats_info.tstats.len = 0;
+
+       OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
+                   ETH_ALEN);
+
+       pfdev_info->fw_major = FW_MAJOR_VERSION;
+       pfdev_info->fw_minor = FW_MINOR_VERSION;
+       pfdev_info->fw_rev = FW_REVISION_VERSION;
+       pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+       pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
+       ecore_mcp_get_mfw_ver(p_hwfn->p_dev, p_ptt, &pfdev_info->mfw_ver,
+                             OSAL_NULL);
+
+       pfdev_info->dev_type = p_hwfn->p_dev->type;
+       pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
+
+       /* Fill in resc : @@@TBD MichalK Hard Coded for now... */
+       resc->num_rxqs = vf->num_rxqs;
+       resc->num_txqs = vf->num_txqs;
+       resc->num_sbs = vf->num_sbs;
+       for (i = 0; i < resc->num_sbs; i++) {
+               resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
+               resc->hw_sbs[i].sb_qid = 0;
+       }
+
+       for (i = 0; i < resc->num_rxqs; i++) {
+               ecore_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
+                                 (u16 *) &resc->hw_qid[i]);
+               resc->cid[i] = vf->vf_queues[i].fw_cid;
+       }
+
+       /* Grant the smaller of what the VF asked for and what we allow */
+       resc->num_mac_filters = OSAL_MIN_T(u8, vf->num_mac_filters,
+                                          req->resc_request.num_mac_filters);
+       resc->num_vlan_filters = OSAL_MIN_T(u8, vf->num_vlan_filters,
+                                           req->resc_request.num_vlan_filters);
+       resc->num_mc_filters = OSAL_MIN_T(u8, vf->num_mc_filters,
+                                         req->resc_request.num_mc_filters);
+
+       /* Fill agreed size of bulletin board in response, and post
+        * an initial image to the bulletin board.
+        */
+       resp->bulletin_size = vf->bulletin.size;
+       ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
+                  " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
+                  "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
+                  " n_vlans-%d, n_mcs-%d\n",
+                  vf->abs_vf_id, resp->pfdev_info.chip_num,
+                  resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
+                  resp->pfdev_info.capabilities, resc->num_rxqs,
+                  resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
+                  resc->num_vlan_filters, resc->num_mc_filters);
+
+       vf->state = VF_ACQUIRED;
+
+out:
+       /* Prepare Response - set 'length' here so the failure paths that
+        * jump to 'out' never read it uninitialized.
+        */
+       length = sizeof(struct pfvf_acquire_resp_tlv);
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+                              length, vfpf_status);
+
+       /* @@@TBD Bulletin */
+}
+
+static enum _ecore_status_t
+__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+                        struct ecore_vf_info *p_vf, bool val)
+{
+       /* Push an anti-spoofing update to the VF's vport; no-op when the
+        * requested value is already in effect.
+        */
+       struct ecore_sp_vport_update_params params;
+       enum _ecore_status_t rc;
+
+       if (p_vf->spoof_chk == val) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Spoofchk value[%d] is already configured\n", val);
+               return ECORE_SUCCESS;
+       }
+
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+       params.opaque_fid = p_vf->opaque_fid;
+       params.vport_id = p_vf->vport_id;
+       params.update_anti_spoofing_en_flg = 1;
+       params.anti_spoofing_en = val;
+
+       rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+                                  OSAL_NULL);
+       if (rc != ECORE_SUCCESS) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+                          val, p_vf->relative_vf_id);
+               return rc;
+       }
+
+       p_vf->spoof_chk = val;
+       p_vf->req_spoofchk_val = val;
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "Spoofchk val[%d] configured\n", val);
+
+       return rc;
+}
+
+static enum _ecore_status_t
+ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_vf_info *p_vf)
+{
+       /* Re-install every VLAN filter recorded in the VF's shadow
+        * configuration, stopping at the first failure.
+        */
+       struct ecore_filter_ucast filter;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       int i;
+
+       OSAL_MEMSET(&filter, 0, sizeof(filter));
+       filter.is_rx_filter = 1;
+       filter.is_tx_filter = 1;
+       filter.vport_to_add_to = p_vf->vport_id;
+       filter.opcode = ECORE_FILTER_ADD;
+
+       /* Reconfigure vlans */
+       for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+               if (!p_vf->shadow_config.vlans[i].used)
+                       continue;
+
+               filter.type = ECORE_FILTER_VLAN;
+               filter.vlan = p_vf->shadow_config.vlans[i].vid;
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Reconfig VLAN [0x%04x] for VF [%04x]\n",
+                          filter.vlan, p_vf->relative_vf_id);
+               rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+                                              &filter, ECORE_SPQ_MODE_CB,
+                                              OSAL_NULL);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to configure VLAN [%04x]"
+                                 " to VF [%04x]\n",
+                                 filter.vlan, p_vf->relative_vf_id);
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+/* Restore the VF's shadow unicast configuration after forced features
+ * change. Currently only vlans are replayed, and only when a forced-vlan
+ * event arrives while no forced vlan is configured on the vport.
+ */
+static enum _ecore_status_t
+ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_vf_info *p_vf, u64 events)
+{
+       /*TODO - what about MACs? */
+
+       if (!(events & (1 << VLAN_ADDR_FORCED)))
+               return ECORE_SUCCESS;
+       if (p_vf->configured_features & (1 << VLAN_ADDR_FORCED))
+               return ECORE_SUCCESS;
+
+       return ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+}
+
+/* Apply hypervisor-forced features (forced MAC and/or forced VLAN/pvid,
+ * taken from the VF bulletin board) onto the VF's already-started vport.
+ * @events - bitmask of MAC_ADDR_FORCED / VLAN_ADDR_FORCED to process.
+ * Returns ECORE_INVAL if the VF has no active vport instance, otherwise
+ * the status of the last mandatory ramrod.
+ */
+static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_vf_info *p_vf,
+                                           u64 events)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_filter_ucast filter;
+
+       /* Forced features can only be applied to a live vport */
+       if (!p_vf->vport_instance)
+               return ECORE_INVAL;
+
+       if (events & (1 << MAC_ADDR_FORCED)) {
+               /* Since there's no way [currently] of removing the MAC,
+                * we can always assume this means we need to force it.
+                */
+               OSAL_MEMSET(&filter, 0, sizeof(filter));
+               filter.type = ECORE_FILTER_MAC;
+               filter.opcode = ECORE_FILTER_REPLACE;
+               filter.is_rx_filter = 1;
+               filter.is_tx_filter = 1;
+               filter.vport_to_add_to = p_vf->vport_id;
+               OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
+
+               rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+                                              &filter,
+                                              ECORE_SPQ_MODE_CB, OSAL_NULL);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "PF failed to configure MAC for VF\n");
+                       return rc;
+               }
+
+               p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+       }
+
+       if (events & (1 << VLAN_ADDR_FORCED)) {
+               struct ecore_sp_vport_update_params vport_update;
+               u8 removal;
+               int i;
+
+               /* A non-zero pvid installs that vlan as the forced filter;
+                * pvid 0 means "remove the forced vlan" and flushes instead.
+                */
+               OSAL_MEMSET(&filter, 0, sizeof(filter));
+               filter.type = ECORE_FILTER_VLAN;
+               filter.is_rx_filter = 1;
+               filter.is_tx_filter = 1;
+               filter.vport_to_add_to = p_vf->vport_id;
+               filter.vlan = p_vf->bulletin.p_virt->pvid;
+               filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
+                   ECORE_FILTER_FLUSH;
+
+               /* Send the ramrod */
+               rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+                                              &filter,
+                                              ECORE_SPQ_MODE_CB, OSAL_NULL);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "PF failed to configure VLAN for VF\n");
+                       return rc;
+               }
+
+               /* Update the default-vlan & silent vlan stripping */
+               OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
+               vport_update.opaque_fid = p_vf->opaque_fid;
+               vport_update.vport_id = p_vf->vport_id;
+               vport_update.update_default_vlan_enable_flg = 1;
+               vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+               vport_update.update_default_vlan_flg = 1;
+               vport_update.default_vlan = filter.vlan;
+
+               /* With a forced vlan, always strip; otherwise fall back to
+                * the VF's own shadow-configured inner-vlan-removal choice.
+                */
+               vport_update.update_inner_vlan_removal_flg = 1;
+               removal = filter.vlan ?
+                   1 : p_vf->shadow_config.inner_vlan_removal;
+               vport_update.inner_vlan_removal_flg = removal;
+               vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+               rc = ecore_sp_vport_update(p_hwfn, &vport_update,
+                                          ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "PF failed to configure VF vport for vlan\n");
+                       return rc;
+               }
+
+               /* Update all the Rx queues */
+               for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+                       u16 qid;
+
+                       if (!p_vf->vf_queues[i].rxq_active)
+                               continue;
+
+                       qid = p_vf->vf_queues[i].fw_rx_qid;
+
+                       rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
+                                                  1, 0, 1,
+                                                  ECORE_SPQ_MODE_EBLOCK,
+                                                  OSAL_NULL);
+                       if (rc) {
+                               DP_NOTICE(p_hwfn, true,
+                                         "Failed to send Rx update"
+                                         " queue[0x%04x]\n",
+                                         qid);
+                               return rc;
+                       }
+               }
+
+               if (filter.vlan)
+                       p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+               else
+                       p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+       }
+
+       /* If forced features are terminated, we need to configure the shadow
+        * configuration back again.
+        */
+       /* NOTE(review): the return code of the shadow reconfiguration is
+        * deliberately not propagated - confirm best-effort is intended.
+        */
+       if (events)
+               ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+       return rc;
+}
+
+/* VF mailbox handler for CHANNEL_TLV_VPORT_START: configures the VF's
+ * status blocks in CAU, enables VF traffic, starts the vport with the
+ * parameters requested by the VF (subject to forced-feature overrides
+ * from the bulletin board) and replies with success/failure.
+ */
+static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        struct ecore_vf_info *vf)
+{
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_vport_start_tlv *start = &mbx->req_virt->start_vport;
+       struct ecore_sp_vport_start_params params = { 0 };
+       u8 status = PFVF_STATUS_SUCCESS;
+       struct ecore_vf_info *vf_info;
+       enum _ecore_status_t rc;
+       u64 *p_bitmap;
+       int sb_id;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+       if (!vf_info) {
+               DP_NOTICE(p_hwfn->p_dev, true,
+                         "Failed to get VF info, invalid vfid [%d]\n",
+                         vf->relative_vf_id);
+               return;
+       }
+
+       vf->state = VF_ENABLED;
+
+       /* Initialize Status block in CAU; stop at the first SB for which the
+        * VF did not supply an address.
+        */
+       for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+               if (!start->sb_addr[sb_id]) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d] did not fill the address of SB %d\n",
+                                  vf->relative_vf_id, sb_id);
+                       break;
+               }
+
+               ecore_int_cau_conf_sb(p_hwfn, p_ptt,
+                                     start->sb_addr[sb_id],
+                                     vf->igu_sbs[sb_id],
+                                     vf->abs_vf_id, 1 /* VF Valid */);
+       }
+       ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+       vf->mtu = start->mtu;
+       vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+       /* Take into consideration configuration forced by hypervisor;
+        * If none is configured, use the supplied VF values [for old
+        * vfs that would still be fine, since they passed '0' as padding].
+        */
+       p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+       if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+               u8 vf_req = start->only_untagged;
+
+               vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+               *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+       }
+
+       params.tpa_mode = start->tpa_mode;
+       params.remove_inner_vlan = start->inner_vlan_removal;
+       params.tx_switching = true;
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, false,
+                         "FPGA: Don't configure VF for Tx-switching [no pVFC]\n");
+               params.tx_switching = false;
+       }
+#endif
+
+       params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+       params.drop_ttl0 = false;
+       params.concrete_fid = vf->concrete_fid;
+       params.opaque_fid = vf->opaque_fid;
+       params.vport_id = vf->vport_id;
+       params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+       params.mtu = vf->mtu;
+
+       rc = ecore_sp_eth_vport_start(p_hwfn, &params);
+       if (rc != ECORE_SUCCESS) {
+               DP_ERR(p_hwfn,
+                      "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
+               status = PFVF_STATUS_FAILURE;
+       } else {
+               vf->vport_instance++;
+
+               /* Force configuration if needed on the newly opened vport */
+               ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+               OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
+                                         vf->vport_id, vf->opaque_fid);
+               __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+       }
+
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+                              sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+/* VF mailbox handler for CHANNEL_TLV_VPORT_TEARDOWN: stop the VF's vport,
+ * drop the per-VF configuration state and send back a default response.
+ */
+static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       struct ecore_vf_info *vf)
+{
+       enum _ecore_status_t rc;
+       u8 status = PFVF_STATUS_SUCCESS;
+
+       /* The vport is going down - drop the instance count and spoofchk */
+       vf->vport_instance--;
+       vf->spoof_chk = false;
+
+       rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+       if (rc != ECORE_SUCCESS) {
+               status = PFVF_STATUS_FAILURE;
+               DP_ERR(p_hwfn,
+                      "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
+       }
+
+       /* Forget the configuration on the vport */
+       vf->configured_features = 0;
+       OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+                              sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+/* VF mailbox handler for CHANNEL_TLV_START_RXQ: issue the Rx-queue start
+ * ramrod with the parameters from the vfpf_start_rxq TLV, mark the queue
+ * active on success, and reply with a default response carrying status.
+ */
+static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_vf_info *vf)
+{
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_start_rxq_tlv *req = &mbx->req_virt->start_rxq;
+       u16 length = sizeof(struct pfvf_def_resp_tlv);
+       u8 status = PFVF_STATUS_SUCCESS;
+       enum _ecore_status_t rc;
+
+       /* NOTE(review): req->rx_qid originates from the VF and indexes
+        * vf->vf_queues without a range check here - confirm it is
+        * validated earlier in the channel flow.
+        */
+       /* vf->abs_vf_id + 0x10 - presumably the VF's absolute HW-function
+        * id; TODO confirm the offset against the HSI definitions.
+        */
+       rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
+                                          vf->vf_queues[req->rx_qid].fw_cid,
+                                          vf->vf_queues[req->rx_qid].fw_rx_qid,
+                                          vf->vport_id,
+                                          vf->abs_vf_id + 0x10,
+                                          req->hw_sb,
+                                          req->sb_index,
+                                          req->bd_max_bytes,
+                                          req->rxq_addr,
+                                          req->cqe_pbl_addr,
+                                          req->cqe_pbl_size);
+
+       if (rc) {
+               status = PFVF_STATUS_FAILURE;
+       } else {
+               /* Track the queue so stop/update flows can find it */
+               vf->vf_queues[req->rx_qid].rxq_active = true;
+               vf->num_active_rxqs++;
+       }
+
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_RXQ,
+                              length, status);
+}
+
+/* VF mailbox handler for CHANNEL_TLV_START_TXQ: pick the VF's PQ, issue
+ * the Tx-queue start ramrod with the parameters from the vfpf_start_txq
+ * TLV, mark the queue active on success, and reply with the status.
+ */
+static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_vf_info *vf)
+{
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_start_txq_tlv *req = &mbx->req_virt->start_txq;
+       u16 length = sizeof(struct pfvf_def_resp_tlv);
+       union ecore_qm_pq_params pq_params;
+       u8 status = PFVF_STATUS_SUCCESS;
+       enum _ecore_status_t rc;
+
+       /* Prepare the parameters which would choose the right PQ */
+       OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+       pq_params.eth.is_vf = 1;
+       pq_params.eth.vf_id = vf->relative_vf_id;
+
+       /* NOTE(review): req->tx_qid originates from the VF and indexes
+        * vf->vf_queues without a range check here - confirm it is
+        * validated earlier in the channel flow.
+        */
+       rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
+                                          vf->opaque_fid,
+                                          vf->vf_queues[req->tx_qid].fw_tx_qid,
+                                          vf->vf_queues[req->tx_qid].fw_cid,
+                                          vf->vport_id,
+                                          vf->abs_vf_id + 0x10,
+                                          req->hw_sb,
+                                          req->sb_index,
+                                          req->pbl_addr,
+                                          req->pbl_size, &pq_params);
+
+       if (rc)
+               status = PFVF_STATUS_FAILURE;
+       else
+               vf->vf_queues[req->tx_qid].txq_active = true;
+
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
+                              length, status);
+}
+
+/* Stop @num_rxqs VF Rx queues starting at @rxq_id, sending a stop ramrod
+ * for each queue that is currently active. Returns ECORE_INVAL when the
+ * requested range exceeds the per-VF queue array, or the first ramrod
+ * failure.
+ */
+static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
+                                                  struct ecore_vf_info *vf,
+                                                  u16 rxq_id,
+                                                  u8 num_rxqs,
+                                                  bool cqe_completion)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       int qid;
+
+       if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+               return ECORE_INVAL;
+
+       for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
+               /* Only queues that were actually started need a ramrod, and
+                * only those may be subtracted from num_active_rxqs -
+                * decrementing for inactive queues would skew the counter.
+                */
+               if (!vf->vf_queues[qid].rxq_active)
+                       continue;
+
+               rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
+                                               vf->vf_queues[qid].fw_rx_qid,
+                                               false, cqe_completion);
+               if (rc)
+                       return rc;
+
+               vf->vf_queues[qid].rxq_active = false;
+               vf->num_active_rxqs--;
+       }
+
+       return rc;
+}
+
+/* Stop @num_txqs VF Tx queues starting at @txq_id, sending a stop ramrod
+ * only for queues currently active. Returns ECORE_INVAL when the range
+ * exceeds the per-VF queue array, or the first ramrod failure.
+ */
+static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
+                                                  struct ecore_vf_info *vf,
+                                                  u16 txq_id, u8 num_txqs)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       int idx;
+
+       if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+               return ECORE_INVAL;
+
+       for (idx = txq_id; idx < txq_id + num_txqs; idx++) {
+               if (vf->vf_queues[idx].txq_active) {
+                       rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
+                                                       vf->vf_queues[idx].fw_tx_qid);
+                       if (rc)
+                               return rc;
+               }
+
+               vf->vf_queues[idx].txq_active = false;
+       }
+
+       return rc;
+}
+
+/* VF mailbox handler for CHANNEL_TLV_STOP_RXQS: stop the requested range
+ * of Rx queues and reply with a default response carrying the status.
+ */
+static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_vf_info *vf)
+{
+       u16 length = sizeof(struct pfvf_def_resp_tlv);
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_stop_rxqs_tlv *req;
+       u8 status = PFVF_STATUS_SUCCESS;
+
+       /* We give the option of starting from qid != 0, in this case we
+        * need to make sure that qid + num_qs doesn't exceed the actual
+        * amount of queues that exist.
+        */
+       req = &mbx->req_virt->stop_rxqs;
+       if (ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+                                  req->num_rxqs,
+                                  req->cqe_completion) != ECORE_SUCCESS)
+               status = PFVF_STATUS_FAILURE;
+
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+                              length, status);
+}
+
+/* VF mailbox handler for CHANNEL_TLV_STOP_TXQS: stop the requested range
+ * of Tx queues and reply with a default response carrying the status.
+ */
+static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_vf_info *vf)
+{
+       u16 length = sizeof(struct pfvf_def_resp_tlv);
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_stop_txqs_tlv *req;
+       u8 status = PFVF_STATUS_SUCCESS;
+
+       /* We give the option of starting from qid != 0, in this case we
+        * need to make sure that qid + num_qs doesn't exceed the actual
+        * amount of queues that exist.
+        */
+       req = &mbx->req_virt->stop_txqs;
+       if (ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
+                                  req->num_txqs) != ECORE_SUCCESS)
+               status = PFVF_STATUS_FAILURE;
+
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+                              length, status);
+}
+
+/* VF mailbox handler for CHANNEL_TLV_UPDATE_RXQ: send an Rx-queues-update
+ * ramrod for each requested queue, propagating the VF's CQE/event
+ * completion flags. Stops at the first inactive queue or ramrod failure
+ * and replies with the resulting status.
+ */
+static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        struct ecore_vf_info *vf)
+{
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_update_rxq_tlv *req = &mbx->req_virt->update_rxq;
+       u16 length = sizeof(struct pfvf_def_resp_tlv);
+       u8 status = PFVF_STATUS_SUCCESS;
+       u8 complete_event_flg;
+       u8 complete_cqe_flg;
+       enum _ecore_status_t rc;
+       u16 qid;
+       u8 i;
+
+       /* Translate the VF's flag bits into the ramrod's completion flags */
+       complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+       complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+       /* NOTE(review): req->rx_qid/num_rxqs come from the VF and index
+        * vf->vf_queues without a range check here - confirm they are
+        * validated earlier in the channel flow.
+        */
+       for (i = 0; i < req->num_rxqs; i++) {
+               qid = req->rx_qid + i;
+
+               if (!vf->vf_queues[qid].rxq_active) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "VF rx_qid = %d isn`t active!\n", qid);
+                       status = PFVF_STATUS_FAILURE;
+                       break;
+               }
+
+               rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+                                                  vf->vf_queues[qid].fw_rx_qid,
+                                                  1,
+                                                  complete_cqe_flg,
+                                                  complete_event_flg,
+                                                  ECORE_SPQ_MODE_EBLOCK,
+                                                  OSAL_NULL);
+
+               if (rc) {
+                       status = PFVF_STATUS_FAILURE;
+                       break;
+               }
+       }
+
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+                              length, status);
+}
+
+/* Walk the TLV chain in @p_tlvs_list and return a pointer to the first TLV
+ * whose type equals @req_type. Returns OSAL_NULL when the list terminator
+ * (CHANNEL_TLV_LIST_END) is reached, a zero-length TLV is met, or the
+ * accumulated length would overrun TLV_BUFFER_SIZE.
+ */
+void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
+                                void *p_tlvs_list, u16 req_type)
+{
+       struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+       int len = 0;
+
+       do {
+               /* A zero-length TLV would make this walk loop forever */
+               if (!p_tlv->length) {
+                       DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
+                       return OSAL_NULL;
+               }
+
+               if (p_tlv->type == req_type) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "Extended tlv type %s, length %d found\n",
+                                  ecore_channel_tlvs_string[p_tlv->type],
+                                  p_tlv->length);
+                       return p_tlv;
+               }
+
+               /* Advance to the next TLV header */
+               len += p_tlv->length;
+               p_tlv = (struct channel_tlv *)((u8 *) p_tlv + p_tlv->length);
+
+               /* NOTE(review): the bound check below reads the next TLV's
+                * header before verifying it lies inside the buffer -
+                * confirm the mailbox buffer is always TLV_BUFFER_SIZE.
+                */
+               if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "TLVs has overrun the buffer size\n");
+                       return OSAL_NULL;
+               }
+       } while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+       return OSAL_NULL;
+}
+
+/* Extract the vport-activate TLV (if present) from the VF request and copy
+ * its rx/tx activation fields into @p_data, marking the handled TLV bit.
+ */
+static void
+ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
+                             struct ecore_sp_vport_update_params *p_data,
+                             struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
+       p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+           ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+                                      CHANNEL_TLV_VPORT_UPDATE_ACTIVATE);
+       if (!p_act_tlv)
+               return;
+
+       p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+       p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+       p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+       p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
+}
+
+/* Extract the vlan-strip TLV (if present), record the VF preference in its
+ * shadow configuration, and apply it to @p_data unless a vlan is forced.
+ */
+static void
+ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
+                              struct ecore_sp_vport_update_params *p_data,
+                              struct ecore_vf_info *p_vf,
+                              struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+
+       p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+           ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+                                      CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP);
+       if (p_vlan_tlv == OSAL_NULL)
+               return;
+
+       /* Always remember the VF's preference in the shadow config */
+       p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+       /* Ignore the VF request if we're forcing a vlan */
+       if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+               p_data->update_inner_vlan_removal_flg = 1;
+               p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+       }
+
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+/* Extract the tx-switching TLV (if present) from the VF request and copy
+ * it into @p_data. On FPGA the request is ignored entirely (no pVFC).
+ */
+static void
+ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
+                             struct ecore_sp_vport_update_params *p_data,
+                             struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+       u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+       p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+           ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+#ifndef ASIC_ONLY
+       if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+               /* String was previously split across two physical lines,
+                * leaving an unterminated literal; keep it on one line.
+                */
+               DP_NOTICE(p_hwfn, false,
+                         "FPGA: Ignore tx-switching configuration originating from VFs\n");
+               return;
+       }
+#endif
+
+       if (p_tx_switch_tlv) {
+               p_data->update_tx_switching_flg = 1;
+               p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+               *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
+       }
+}
+
+static void
+ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_sp_vport_update_params *p_data,
+                                   struct ecore_iov_vf_mbx *p_mbx,
+                                   u16 *tlvs_mask)
+{
+       u16 tlv_type = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+       struct vfpf_vport_update_mcast_bin_tlv *p_tlv;
+
+       /* The approximate-mcast bins change only if the VF sent the TLV */
+       p_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+           ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv_type);
+       if (p_tlv == OSAL_NULL)
+               return;
+
+       p_data->update_approx_mcast_flg = 1;
+       OSAL_MEMCPY(p_data->bins, p_tlv->bins,
+                   sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
+}
+
+static void
+ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
+                               struct ecore_sp_vport_update_params *p_data,
+                               struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       u16 tlv_type = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+       struct vfpf_vport_update_accept_param_tlv *p_tlv;
+
+       p_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+           ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv_type);
+       if (p_tlv == OSAL_NULL)
+               return;
+
+       /* Copy the VF's Rx/Tx accept-mode request into the ramrod params */
+       p_data->accept_flags.update_rx_mode_config = p_tlv->update_rx_mode;
+       p_data->accept_flags.rx_accept_filter = p_tlv->rx_accept_filter;
+       p_data->accept_flags.update_tx_mode_config = p_tlv->update_tx_mode;
+       p_data->accept_flags.tx_accept_filter = p_tlv->tx_accept_filter;
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
+}
+
+static void
+ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_sp_vport_update_params *p_data,
+                                   struct ecore_iov_vf_mbx *p_mbx,
+                                   u16 *tlvs_mask)
+{
+       u16 tlv_type = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+       struct vfpf_vport_update_accept_any_vlan_tlv *p_tlv;
+
+       p_tlv = (struct vfpf_vport_update_accept_any_vlan_tlv *)
+           ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv_type);
+       if (p_tlv == OSAL_NULL)
+               return;
+
+       /* Pass the accept-any-vlan request through to the ramrod params */
+       p_data->accept_any_vlan = p_tlv->accept_any_vlan;
+       p_data->update_accept_any_vlan_flg = p_tlv->update_accept_any_vlan_flg;
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+}
+
+/* Translate the VF's RSS extended TLV (if present) into ecore_rss_params,
+ * converting VF-relative queue indices to FW rx queue-ids and sanitizing
+ * invalid indirection-table entries to the VF's first queue.
+ */
+static void
+ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
+                             struct ecore_vf_info *vf,
+                             struct ecore_sp_vport_update_params *p_data,
+                             struct ecore_rss_params *p_rss,
+                             struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+       struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+       u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+       u16 table_size;
+       u16 i, q_idx, max_q_idx;
+
+       p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+           ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+       if (!p_rss_tlv) {
+               p_data->rss_params = OSAL_NULL;
+               return;
+       }
+
+       OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
+
+       /* Translate the VF's update-request flags into ramrod update bits */
+       p_rss->update_rss_config =
+           !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_CONFIG_FLAG);
+       p_rss->update_rss_capabilities =
+           !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_CAPS_FLAG);
+       p_rss->update_rss_ind_table =
+           !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+       p_rss->update_rss_key =
+           !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_KEY_FLAG);
+
+       p_rss->rss_enable = p_rss_tlv->rss_enable;
+       /* Engine-id derived from the VF index; 0 presumably belongs to the
+        * PF - TODO confirm.
+        */
+       p_rss->rss_eng_id = vf->relative_vf_id + 1;
+       p_rss->rss_caps = p_rss_tlv->rss_caps;
+       p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+       OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
+                   sizeof(p_rss->rss_ind_table));
+       OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
+                   sizeof(p_rss->rss_key));
+
+       /* Never iterate beyond the PF-side indirection table capacity */
+       table_size = OSAL_MIN_T(u16,
+                               OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
+                               (1 << p_rss_tlv->rss_table_size_log));
+
+       max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
+
+       for (i = 0; i < table_size; i++) {
+               q_idx = p_rss->rss_ind_table[i];
+               if (q_idx >= max_q_idx) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "rss_ind_table[%d] = %d, rxq is out of range\n",
+                                 i, q_idx);
+                       /* TBD: fail the request mark VF as malicious */
+                       p_rss->rss_ind_table[i] =
+                           vf->vf_queues[0].fw_rx_qid;
+               } else if (!vf->vf_queues[q_idx].rxq_active) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "rss_ind_table[%d] = %d, rxq is not active\n",
+                                 i, q_idx);
+                       /* TBD: fail the request mark VF as malicious */
+                       p_rss->rss_ind_table[i] =
+                           vf->vf_queues[0].fw_rx_qid;
+               } else {
+                       p_rss->rss_ind_table[i] =
+                           vf->vf_queues[q_idx].fw_rx_qid;
+               }
+       }
+
+       p_data->rss_params = p_rss;
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+}
+
+static void
+ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_vf_info *vf,
+                                 struct ecore_sp_vport_update_params *p_data,
+                                 struct ecore_sge_tpa_params *p_sge_tpa,
+                                 struct ecore_iov_vf_mbx *p_mbx,
+                                 u16 *tlvs_mask)
+{
+       u16 tlv_type = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+       struct vfpf_vport_update_sge_tpa_tlv *p_tlv;
+
+       p_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+           ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv_type);
+       if (p_tlv == OSAL_NULL) {
+               p_data->sge_tpa_params = OSAL_NULL;
+               return;
+       }
+
+       OSAL_MEMSET(p_sge_tpa, 0, sizeof(*p_sge_tpa));
+
+       /* Update-control bits */
+       p_sge_tpa->update_tpa_en_flg =
+           !!(p_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+       p_sge_tpa->update_tpa_param_flg =
+           !!(p_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_PARAM_FLAG);
+
+       /* TPA mode flags */
+       p_sge_tpa->tpa_ipv4_en_flg =
+           !!(p_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+       p_sge_tpa->tpa_ipv6_en_flg =
+           !!(p_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+       p_sge_tpa->tpa_pkt_split_flg =
+           !!(p_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+       p_sge_tpa->tpa_hdr_data_split_flg =
+           !!(p_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+       p_sge_tpa->tpa_gro_consistent_flg =
+           !!(p_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+       /* Numeric TPA parameters are passed through unmodified */
+       p_sge_tpa->tpa_max_aggs_num = p_tlv->tpa_max_aggs_num;
+       p_sge_tpa->tpa_max_size = p_tlv->tpa_max_size;
+       p_sge_tpa->tpa_min_size_to_start = p_tlv->tpa_min_size_to_start;
+       p_sge_tpa->tpa_min_size_to_cont = p_tlv->tpa_min_size_to_cont;
+       p_sge_tpa->max_buffers_per_cqe = p_tlv->max_buffers_per_cqe;
+
+       p_data->sge_tpa_params = p_sge_tpa;
+
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
+}
+
+/* Handle a VF's VPORT_UPDATE mailbox request: gather all extended TLVs the
+ * VF sent, let the upper layer veto them, then issue the vport-update
+ * ramrod and send the per-TLV response back to the VF.
+ */
+static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         struct ecore_vf_info *vf)
+{
+       struct ecore_sp_vport_update_params params;
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct ecore_sge_tpa_params sge_tpa_params;
+       struct ecore_rss_params rss_params;
+       u8 status = PFVF_STATUS_SUCCESS;
+       enum _ecore_status_t rc;
+       u16 tlvs_mask = 0, tlvs_accepted;
+       u16 length;
+
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       params.opaque_fid = vf->opaque_fid;
+       params.vport_id = vf->vport_id;
+       params.rss_params = OSAL_NULL;
+
+       /* Search for extended tlvs list and update values
+        * from VF in struct ecore_sp_vport_update_params.
+        */
+       ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+       ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+       ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+       ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+       ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+       ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
+                                     mbx, &tlvs_mask);
+       ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+       ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+                                         &sge_tpa_params, mbx, &tlvs_mask);
+
+       /* Just log a message if there is no single extended tlv in buffer.
+        * When all features of vport update ramrod would be requested by VF
+        * as extended TLVs in buffer then an error can be returned in response
+        * if there is no extended TLV present in buffer.
+        */
+       tlvs_accepted = tlvs_mask;
+
+#ifndef __EXTRACT__LINUX__
+       /* Give the OS-abstraction layer a chance to veto / filter TLVs */
+       if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
+                                    &params, &tlvs_accepted) !=
+           ECORE_SUCCESS) {
+               tlvs_accepted = 0;
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+#endif
+
+       if (!tlvs_accepted) {
+               if (tlvs_mask)
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "Upper-layer prevents said VF configuration\n");
+               else
+                       DP_NOTICE(p_hwfn, true,
+                                 "No feature tlvs found for vport update\n");
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+
+       rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+                                  OSAL_NULL);
+
+       if (rc)
+               status = PFVF_STATUS_FAILURE;
+
+out:
+       /* Response carries an answer per requested TLV, not only a status */
+       length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+                                                   tlvs_mask, tlvs_accepted);
+       ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+/* Keep the PF-side shadow of the VF's vlan filter configuration in sync
+ * with a unicast filter request. Returns ECORE_INVAL when the VF tries to
+ * remove a non-existing vlan or exceeds its vlan-filter quota.
+ */
+static enum _ecore_status_t
+ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_vf_info *p_vf,
+                                  struct ecore_filter_ucast *p_params)
+{
+       int i;
+
+       /* TODO - do we need a MAC shadow registry? */
+       if (p_params->type == ECORE_FILTER_MAC)
+               return ECORE_SUCCESS;
+
+       /* First remove entries and then add new ones */
+       if (p_params->opcode == ECORE_FILTER_REMOVE) {
+               for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+                       if (p_vf->shadow_config.vlans[i].used &&
+                           p_vf->shadow_config.vlans[i].vid ==
+                           p_params->vlan) {
+                               p_vf->shadow_config.vlans[i].used = false;
+                               break;
+                       }
+               if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF [%d] - Tries to remove a non-existing vlan\n",
+                                  p_vf->relative_vf_id);
+                       return ECORE_INVAL;
+               }
+       } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+                  p_params->opcode == ECORE_FILTER_FLUSH) {
+               /* REPLACE/FLUSH drop the entire shadow vlan configuration */
+               for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+                       p_vf->shadow_config.vlans[i].used = false;
+       }
+
+       /* In forced mode, we're willing to remove entries - but we don't add
+        * new ones.
+        */
+       if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+               return ECORE_SUCCESS;
+
+       if (p_params->opcode == ECORE_FILTER_ADD ||
+           p_params->opcode == ECORE_FILTER_REPLACE) {
+               /* Occupy the first free shadow slot */
+               for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+                       if (!p_vf->shadow_config.vlans[i].used) {
+                               p_vf->shadow_config.vlans[i].used = true;
+                               p_vf->shadow_config.vlans[i].vid =
+                                   p_params->vlan;
+                               break;
+                       }
+               if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF [%d] - Tries to configure more than %d vlan filters\n",
+                                  p_vf->relative_vf_id,
+                                  ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
+                       return ECORE_INVAL;
+               }
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Handle a VF's UCAST_FILTER mailbox request: validate it against the
+ * shadow configuration and any PF-forced MAC/vlan, then configure the
+ * unicast filter via ramrod and respond to the VF.
+ */
+static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
+                                         struct ecore_ptt *p_ptt,
+                                         struct ecore_vf_info *vf)
+{
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_ucast_filter_tlv *req = &mbx->req_virt->ucast_filter;
+       struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+       struct ecore_filter_ucast params;
+       u8 status = PFVF_STATUS_SUCCESS;
+       enum _ecore_status_t rc;
+
+       /* Prepare the unicast filter params */
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
+       params.opcode = (enum ecore_filter_opcode)req->opcode;
+       params.type = (enum ecore_filter_ucast_type)req->type;
+
+       /* @@@TBD - We might need logic on HV side in determining this */
+       params.is_rx_filter = 1;
+       params.is_tx_filter = 1;
+       params.vport_to_remove_from = vf->vport_id;
+       params.vport_to_add_to = vf->vport_id;
+       OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
+       params.vlan = req->vlan;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+                  vf->abs_vf_id, params.opcode, params.type,
+                  params.is_rx_filter ? "RX" : "",
+                  params.is_tx_filter ? "TX" : "",
+                  params.vport_to_add_to,
+                  params.mac[0], params.mac[1], params.mac[2],
+                  params.mac[3], params.mac[4], params.mac[5], params.vlan);
+
+       if (!vf->vport_instance) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+                          vf->abs_vf_id);
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
+
+       /* Update shadow copy of the VF configuration */
+       if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
+           ECORE_SUCCESS) {
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
+
+       /* Determine if the unicast filtering is acceptable by PF */
+       if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+           (params.type == ECORE_FILTER_VLAN ||
+            params.type == ECORE_FILTER_MAC_VLAN)) {
+               /* Once VLAN is forced or PVID is set, do not allow
+                * to add/replace any further VLANs.
+                */
+               if (params.opcode == ECORE_FILTER_ADD ||
+                   params.opcode == ECORE_FILTER_REPLACE)
+                       status = PFVF_STATUS_FORCED;
+               goto out;
+       }
+
+       if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+           (params.type == ECORE_FILTER_MAC ||
+            params.type == ECORE_FILTER_MAC_VLAN)) {
+               /* With a forced MAC, only add/replace of that exact MAC
+                * is permitted.
+                */
+               if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
+                   (params.opcode != ECORE_FILTER_ADD &&
+                    params.opcode != ECORE_FILTER_REPLACE))
+                       status = PFVF_STATUS_FORCED;
+               goto out;
+       }
+
+       rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
+       if (rc == ECORE_EXISTS) {
+               /* Filter already present - success, nothing to configure */
+               goto out;
+       } else if (rc == ECORE_INVAL) {
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
+
+       rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+                                      ECORE_SPQ_MODE_CB, OSAL_NULL);
+       if (rc)
+               status = PFVF_STATUS_FAILURE;
+
+out:
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+                              sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        struct ecore_vf_info *vf)
+{
+       int sb_idx;
+
+       /* Re-initialize every status block owned by this VF */
+       for (sb_idx = 0; sb_idx < vf->num_sbs; sb_idx++)
+               ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+                                                 vf->igu_sbs[sb_idx],
+                                                 vf->opaque_fid, false);
+
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+                              sizeof(struct pfvf_def_resp_tlv),
+                              PFVF_STATUS_SUCCESS);
+}
+
+static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  struct ecore_vf_info *vf)
+{
+       /* Disable Interrupts for VF */
+       ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);
+
+       /* Reset Permission table */
+       ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);
+
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+                              sizeof(struct pfvf_def_resp_tlv),
+                              PFVF_STATUS_SUCCESS);
+}
+
+static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt,
+                                    struct ecore_vf_info *p_vf)
+{
+       /* Tear down everything the VF holds, then answer with success */
+       ecore_iov_vf_cleanup(p_hwfn, p_vf);
+
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+                              sizeof(struct pfvf_def_resp_tlv),
+                              PFVF_STATUS_SUCCESS);
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
+                          struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
+{
+       u32 usage = 0;
+       int attempt;
+
+       /* Pretend to be the VF so the usage-count read is scoped to it */
+       ecore_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+
+       /* Poll for the doorbell queue to drain; up to 50 * 20ms */
+       for (attempt = 0; attempt < 50; attempt++) {
+               usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+               if (usage == 0)
+                       break;
+               OSAL_MSLEEP(20);
+       }
+
+       /* Restore the PF's own FID */
+       ecore_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+       if (attempt == 50) {
+               DP_ERR(p_hwfn,
+                      "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+                      p_vf->abs_vf_id, usage);
+               return ECORE_TIMEOUT;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Poll the PBF per-VOQ block counters until every consumer has advanced by
+ * at least the producer-consumer distance sampled at entry, i.e. all blocks
+ * that were in-flight when FLR handling started have drained. Polls up to
+ * 50 times with 20ms sleeps; returns ECORE_TIMEOUT on failure.
+ */
+static enum _ecore_status_t
+ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
+                         struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
+{
+       u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+       int i, cnt;
+
+       /* Read initial consumers & producers */
+       for (i = 0; i < MAX_NUM_VOQS; i++) {
+               u32 prod;
+
+               cons[i] = ecore_rd(p_hwfn, p_ptt,
+                                  PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+                                  i * 0x40);
+               prod = ecore_rd(p_hwfn, p_ptt,
+                               PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+                               i * 0x40);
+               /* u32 wrap-around subtraction keeps the distance correct */
+               distance[i] = prod - cons[i];
+       }
+
+       /* Wait for consumers to pass the producers */
+       i = 0;
+       for (cnt = 0; cnt < 50; cnt++) {
+               /* i persists across outer iterations so VOQs that already
+                * drained are not re-polled.
+                */
+               for (; i < MAX_NUM_VOQS; i++) {
+                       u32 tmp;
+
+                       tmp = ecore_rd(p_hwfn, p_ptt,
+                                      PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+                                      i * 0x40);
+                       /* Consumer hasn't yet covered the initial distance */
+                       if (distance[i] > tmp - cons[i])
+                               break;
+               }
+
+               if (i == MAX_NUM_VOQS)
+                       break;
+
+               OSAL_MSLEEP(20);
+       }
+
+       if (cnt == 50) {
+               DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+                      p_vf->abs_vf_id, i);
+               return ECORE_TIMEOUT;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Best-effort poll of the PRS/BRB per-TC message counters: wait until both
+ * the main and loopback consumers have advanced past the producer values
+ * sampled at entry. The counters are only 16 bits wide, so a timeout is
+ * logged but never treated as fatal; always returns ECORE_SUCCESS.
+ */
+static enum _ecore_status_t
+ecore_iov_vf_flr_poll_prs(struct ecore_hwfn *p_hwfn,
+                         struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
+{
+       u16 tc_cons[NUM_OF_TCS], tc_lb_cons[NUM_OF_TCS];
+       u16 prod[NUM_OF_TCS];
+       int i, cnt;
+
+       /* Read initial consumers & producers */
+       for (i = 0; i < NUM_OF_TCS; i++) {
+               tc_cons[i] = (u16) ecore_rd(p_hwfn, p_ptt,
+                                           PRS_REG_MSG_CT_MAIN_0 + i * 0x4);
+               tc_lb_cons[i] = (u16) ecore_rd(p_hwfn, p_ptt,
+                                              PRS_REG_MSG_CT_LB_0 + i * 0x4);
+               prod[i] = (u16) ecore_rd(p_hwfn, p_ptt,
+                                        BRB_REG_PER_TC_COUNTERS +
+                                        p_hwfn->port_id * 0x20 + i * 0x4);
+       }
+
+       /* Wait for consumers to pass the producers */
+       i = 0;
+       for (cnt = 0; cnt < 50; cnt++) {
+               /* i persists across outer iterations so TCs that already
+                * passed are not re-polled.
+                */
+               for (; i < NUM_OF_TCS; i++) {
+                       u16 cons;
+
+                       /* u16 wrap-around subtraction: stop waiting on this
+                        * TC once the consumer has covered the initial
+                        * producer-consumer distance.
+                        */
+                       cons = (u16) ecore_rd(p_hwfn, p_ptt,
+                                             PRS_REG_MSG_CT_MAIN_0 + i * 0x4);
+                       if (prod[i] - tc_cons[i] > cons - tc_cons[i])
+                               break;
+
+                       cons = (u16) ecore_rd(p_hwfn, p_ptt,
+                                             PRS_REG_MSG_CT_LB_0 + i * 0x4);
+                       if (prod[i] - tc_lb_cons[i] > cons - tc_lb_cons[i])
+                               break;
+               }
+
+               if (i == NUM_OF_TCS)
+                       break;
+
+               /* 16-bit counters; Delay instead of sleep... */
+               OSAL_UDELAY(10);
+       }
+
+       /* This is only optional polling for BB, since registers are only
+        * 16-bit wide and guarantee is not good enough. Don't fail things
+        * if polling didn't return the expected results.
+        */
+       if (cnt == 50)
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] - prs polling failed on TC %d\n",
+                          p_vf->abs_vf_id, i);
+
+       return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_vf_info *p_vf,
+                                                 struct ecore_ptt *p_ptt)
+{
+       enum _ecore_status_t rc;
+
+       /* TODO - add SRC and TM polling once we add storage IOV */
+
+       /* Drain the doorbell queue, then PBF, then the parser, in order */
+       rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       return ecore_iov_vf_flr_poll_prs(p_hwfn, p_vf, p_ptt);
+}
+
+/* Run the full FLR cleanup flow for a single VF if it is pending FLR:
+ * SW cleanup, HW drain polling, final cleanup ramrod, and re-enabling of
+ * VF access. On success the VF's bit is set in ack_vfs and its pending
+ * flr/event bits are cleared.
+ */
+static enum _ecore_status_t
+ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+                                struct ecore_ptt *p_ptt,
+                                u16 rel_vf_id, u32 *ack_vfs)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+       if (!p_vf)
+               return ECORE_SUCCESS;
+
+       if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+           (1ULL << (rel_vf_id % 64))) {
+               u16 vfid = p_vf->abs_vf_id;
+
+               /* TODO - should we lock channel? */
+
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] - Handling FLR\n", vfid);
+
+               ecore_iov_vf_cleanup(p_hwfn, p_vf);
+
+               /* If VF isn't active, no need for anything but SW */
+               if (!ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, p_vf->relative_vf_id))
+                       goto cleanup;
+
+               /* TODO - what to do in case of failure? */
+               rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+               if (rc != ECORE_SUCCESS)
+                       goto cleanup;
+
+               rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
+               if (rc) {
+                       /* TODO - what's now? What a mess.... */
+                       DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
+                       return rc;
+               }
+
+               /* VF_STOPPED has to be set only after final cleanup
+                * but prior to re-enabling the VF.
+                */
+               p_vf->state = VF_STOPPED;
+
+               rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+               if (rc) {
+                       /* TODO - again, a mess... */
+                       DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
+                              vfid);
+                       return rc;
+               }
+cleanup:
+               /* Mark VF for ack and clean pending state */
+               if (p_vf->state == VF_RESET)
+                       p_vf->state = VF_STOPPED;
+               ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+               p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+                   ~(1ULL << (rel_vf_id % 64));
+               p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
+                   ~(1ULL << (rel_vf_id % 64));
+       }
+
+       return rc;
+}
+
+enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_ptt *p_ptt)
+{
+       u32 ack_vfs[VF_MAX_STATIC / 32];
+       u16 rel_vf_id;
+
+       OSAL_MEMSET(ack_vfs, 0, sizeof(ack_vfs));
+
+       /* Run FLR cleanup for every VF; handled VFs set their ack bit */
+       for (rel_vf_id = 0;
+            rel_vf_id < p_hwfn->p_dev->sriov_info.total_vfs; rel_vf_id++)
+               ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id,
+                                                ack_vfs);
+
+       /* Acknowledge the handled VFs towards the management FW */
+       return ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+}
+
+enum _ecore_status_t
+ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt, u16 rel_vf_id)
+{
+       u32 ack_vfs[VF_MAX_STATIC / 32];
+
+       OSAL_MEMSET(ack_vfs, 0, sizeof(ack_vfs));
+
+       /* Clean a single VF and ack only its bit towards the MFW */
+       ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
+
+       return ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+}
+
+/* Scan p_disabled_vfs (bitmap indexed by absolute VF-id, as reported by the
+ * management FW) and mark each matching VF as pending FLR handling; moves
+ * those VFs to VF_RESET. Returns 1 if any VF was marked, 0 otherwise.
+ */
+int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+       u16 i, found = 0;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
+       for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "[%08x,...,%08x]: %08x\n",
+                          i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+       /* Mark VFs */
+       for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++) {
+               struct ecore_vf_info *p_vf;
+               u8 vfid;
+
+               p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
+               if (!p_vf)
+                       continue;
+
+               /* The MFW bitmap uses absolute ids; pending_flr below uses
+                * relative ids.
+                */
+               vfid = p_vf->abs_vf_id;
+               if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+                       u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+                       u16 rel_vf_id = p_vf->relative_vf_id;
+
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d] [rel %d] got FLR-ed\n",
+                                  vfid, rel_vf_id);
+
+                       p_vf->state = VF_RESET;
+
+                       /* No need to lock here, since pending_flr should
+                        * only change here and before ACKing MFw. Since
+                        * MFW will not trigger an additional attention for
+                        * VF flr until ACKs, we're safe.
+                        */
+                       p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+                       found = 1;
+               }
+       }
+
+       return found;
+}
+
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+                       u16 vfid,
+                       struct ecore_mcp_link_params *params,
+                       struct ecore_mcp_link_state *link,
+                       struct ecore_mcp_link_capabilities *p_caps)
+{
+       struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+       struct ecore_bulletin_content *p_bull;
+
+       if (p_vf == OSAL_NULL)
+               return;
+
+       /* Publish link information to the VF via its bulletin board */
+       p_bull = p_vf->bulletin.p_virt;
+
+       /* Requested link settings */
+       p_bull->req_autoneg = params->speed.autoneg;
+       p_bull->req_adv_speed = params->speed.advertised_speeds;
+       p_bull->req_forced_speed = params->speed.forced_speed;
+       p_bull->req_autoneg_pause = params->pause.autoneg;
+       p_bull->req_forced_rx = params->pause.forced_rx;
+       p_bull->req_forced_tx = params->pause.forced_tx;
+       p_bull->req_loopback = params->loopback_mode;
+
+       /* Current link state */
+       p_bull->link_up = link->link_up;
+       p_bull->speed = link->speed;
+       p_bull->full_duplex = link->full_duplex;
+       p_bull->autoneg = link->an;
+       p_bull->autoneg_complete = link->an_complete;
+       p_bull->parallel_detection = link->parallel_detection;
+       p_bull->pfc_enabled = link->pfc_enabled;
+       p_bull->partner_adv_speed = link->partner_adv_speed;
+       p_bull->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+       p_bull->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+       p_bull->partner_adv_pause = link->partner_adv_pause;
+       p_bull->sfp_tx_fault = link->sfp_tx_fault;
+
+       /* Link capabilities */
+       p_bull->capability_speed = p_caps->speed_capabilities;
+}
+
+/* Read back link params/state/caps of VF @vfid from its bulletin board.
+ * Any of the output pointers may be OSAL_NULL to skip that portion.
+ */
+void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
+                       u16 vfid,
+                       struct ecore_mcp_link_params *p_params,
+                       struct ecore_mcp_link_state *p_link,
+                       struct ecore_mcp_link_capabilities *p_caps)
+{
+       struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+       struct ecore_bulletin_content *p_bulletin;
+
+       if (!p_vf)
+               return;
+
+       p_bulletin = p_vf->bulletin.p_virt;
+
+       if (p_params)
+               __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+       if (p_link)
+               __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+       if (p_caps)
+               __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+}
+
+/* Process a pending mailbox request from VF @vfid: snapshot the request
+ * header, dispatch known TLV opcodes to their handlers while holding the
+ * per-VF channel lock, and log/dump unknown TLV types.
+ */
+void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
+                              struct ecore_ptt *p_ptt, int vfid)
+{
+       struct ecore_iov_vf_mbx *mbx;
+       struct ecore_vf_info *p_vf;
+       int i;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!p_vf)
+               return;
+
+       mbx = &p_vf->vf_mbx;
+
+       /* ecore_iov_process_mbx_request */
+       DP_VERBOSE(p_hwfn,
+                  ECORE_MSG_IOV,
+                  "ecore_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+
+       mbx->first_tlv = mbx->req_virt->first_tlv;
+
+       /* check if tlv type is known */
+       if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+               /* Lock the per vf op mutex and note the locker's identity.
+                * The unlock will take place in mbx response.
+                */
+               ecore_iov_lock_vf_pf_channel(p_hwfn,
+                                            p_vf, mbx->first_tlv.tl.type);
+
+               /* switch on the opcode */
+               switch (mbx->first_tlv.tl.type) {
+               case CHANNEL_TLV_ACQUIRE:
+                       ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_VPORT_START:
+                       ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_VPORT_TEARDOWN:
+                       ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_START_RXQ:
+                       ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_START_TXQ:
+                       ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_STOP_RXQS:
+                       ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_STOP_TXQS:
+                       ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_UPDATE_RXQ:
+                       ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_VPORT_UPDATE:
+                       ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_UCAST_FILTER:
+                       ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_CLOSE:
+                       ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_INT_CLEANUP:
+                       ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_RELEASE:
+                       ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+                       break;
+               }
+
+               ecore_iov_unlock_vf_pf_channel(p_hwfn,
+                                              p_vf, mbx->first_tlv.tl.type);
+
+       } else {
+               /* unknown TLV - this may belong to a VF driver from the future
+                * - a version written after this PF driver was written, which
+                * supports features unknown as of yet. Too bad since we don't
+                * support them. Or this may be because someone wrote a crappy
+                * VF driver and is sending garbage over the channel.
+                */
+               DP_ERR(p_hwfn,
+                      "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
+                      mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+
+               /* Dump the start of the request for post-mortem analysis */
+               for (i = 0; i < 20; i++) {
+                       DP_VERBOSE(p_hwfn,
+                                  ECORE_MSG_IOV,
+                                  "%x ",
+                                  mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
+               }
+
+               /* test whether we can respond to the VF (do we have an address
+                * for it?)
+                */
+               if (p_vf->state == VF_ACQUIRED)
+                       DP_ERR(p_hwfn, "UNKNOWN TLV Not supported yet\n");
+       }
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+       mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
+       mbx->sw_mbx.response_offset = 0;
+#endif
+}
+
+/* Handle a VF->PF channel EQE. Validates the [engine-relative] vfid
+ * against this PF's VF range, records the GPA of the VF's request so the
+ * handler can later copy it, and notifies the upper layer.
+ *
+ * @return ECORE_SUCCESS (including when IOV isn't initialized),
+ *         ECORE_INVAL when vfid is outside this PF's range, or the
+ *         OSAL notification result.
+ */
+static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
+                                                __le16 vfid,
+                                                struct regpair *vf_msg)
+{
+       struct ecore_vf_info *p_vf;
+       u8 min, max;
+
+       if (!p_hwfn->pf_iov_info || !p_hwfn->pf_iov_info->vfs_array) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Got a message from VF while PF is not initialized for IOV support\n");
+               return ECORE_SUCCESS;
+       }
+
+       /* Find the VF record - message comes with relative [engine] vfid */
+       min = (u8) p_hwfn->hw_info.first_vf_in_pf;
+       max = min + p_hwfn->p_dev->sriov_info.total_vfs;
+       /* @@@TBD - for BE machines, should echo field be reversed? */
+       if ((u8) vfid < min || (u8) vfid >= max) {
+               DP_INFO(p_hwfn,
+                       "Got a message from VF with relative id 0x%08x, but PF's range is [0x%02x,...,0x%02x)\n",
+                       (u8) vfid, min, max);
+               return ECORE_INVAL;
+       }
+       p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8) vfid - min];
+
+       /* List the physical address of the request so that handler
+        * could later on copy the message from it.
+        */
+       p_vf->vf_mbx.pending_req = (((u64) vf_msg->hi) << 32) | vf_msg->lo;
+
+       return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
+}
+
+/* Dispatch an async SRIOV event received on the event ring.
+ * VF->PF channel messages are forwarded to the mailbox path;
+ * VF-FLR indications are acknowledged but not yet handled.
+ */
+enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+                                          u8 opcode,
+                                          __le16 echo,
+                                          union event_ring_data *data)
+{
+       if (opcode == COMMON_EVENT_VF_PF_CHANNEL)
+               return ecore_sriov_vfpf_msg(p_hwfn, echo,
+                                           &data->vf_pf_channel.msg_addr);
+
+       if (opcode == COMMON_EVENT_VF_FLR) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF-FLR is still not supported\n");
+               return ECORE_SUCCESS;
+       }
+
+       DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
+               opcode);
+       return ECORE_INVAL;
+}
+
+/* @return true iff an FLR indication is pending for VF @rel_vf_id */
+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+       u64 mask = 1ULL << (rel_vf_id % 64);
+
+       return (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & mask) != 0;
+}
+
+/* @return true iff @rel_vf_id is a legal VF index for this PF;
+ * when @b_enabled_only is set, also require the VF to be active.
+ */
+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
+                            bool b_enabled_only)
+{
+       if (!p_hwfn->pf_iov_info) {
+               DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+               return false;
+       }
+
+       if (b_enabled_only)
+               return ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id);
+
+       return rel_vf_id < p_hwfn->p_dev->sriov_info.total_vfs;
+}
+
+/* @return the public-info portion of VF @relative_vf_id's database
+ * entry, or OSAL_NULL if the lookup fails.
+ */
+struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn
+                                                         *p_hwfn,
+                                                         u16 relative_vf_id,
+                                                         bool b_enabled_only)
+{
+       struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn,
+                                                          relative_vf_id,
+                                                          b_enabled_only);
+
+       return p_vf ? &p_vf->p_vf_info : OSAL_NULL;
+}
+
+/* Mark VF @vfid as having a pending event in the PF's bitmap. */
+void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
+{
+       u64 add_bit = 1ULL << (vfid % 64);
+
+       /* TODO - add locking mechanisms [no atomics in ecore, so we can't
+        * add the lock inside the ecore_pf_iov struct].
+        */
+       p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+/* Copy the pending-events bitmap into @events and clear it.
+ * NOTE(review): read+clear is not atomic - see TODO below.
+ */
+void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
+                                              u64 *events)
+{
+       u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+       /* TODO - Take a lock */
+       OSAL_MEMCPY(events, p_pending_events,
+                   sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+       OSAL_MEMSET(p_pending_events, 0, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+}
+
+/* DMA-copy the pending request of VF @vfid from the VF's memory
+ * (vf_mbx.pending_req) into the PF-side request buffer.
+ *
+ * @return ECORE_SUCCESS, ECORE_INVAL on bad vfid, ECORE_IO on DMA error.
+ */
+enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_ptt *ptt, int vfid)
+{
+       struct ecore_dmae_params params;
+       struct ecore_vf_info *vf_info;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info)
+               return ECORE_INVAL;
+
+       /* Source is the VF's address space */
+       OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+       params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
+       params.src_vfid = vf_info->abs_vf_id;
+
+       if (ecore_dmae_host2host(p_hwfn, ptt,
+                                vf_info->vf_mbx.pending_req,
+                                vf_info->vf_mbx.req_phys,
+                                sizeof(union vfpf_tlvs) / 4, &params)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Failed to copy message from VF 0x%02x\n", vfid);
+
+               return ECORE_IO;
+       }
+
+       return ECORE_SUCCESS;
+}
+
+/* Force a MAC address on VF @vfid via its bulletin board and apply the
+ * forced-feature configuration on the vport. Setting a forced MAC
+ * invalidates any VF-requested MAC in the bulletin.
+ */
+void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
+                                      u8 *mac, int vfid)
+{
+       struct ecore_vf_info *vf_info;
+       u64 feature;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info) {
+               DP_NOTICE(p_hwfn->p_dev, true,
+                         "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+               return;
+       }
+
+       /* Use 64-bit shifts - valid_bitmap is u64 */
+       feature = 1ULL << MAC_ADDR_FORCED;
+       OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+       vf_info->bulletin.p_virt->valid_bitmap |= feature;
+       /* Forced MAC will disable MAC_ADDR */
+       vf_info->bulletin.p_virt->valid_bitmap &=
+           ~(1ULL << VFPF_BULLETIN_MAC_ADDR);
+
+       ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+/* Set a (non-forced) MAC address for VF @vfid via its bulletin board.
+ * Refused while a forced MAC is configured.
+ *
+ * @return ECORE_SUCCESS or ECORE_INVAL.
+ */
+enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
+                                               u8 *mac, int vfid)
+{
+       struct ecore_vf_info *vf_info;
+       u64 feature;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info) {
+               DP_NOTICE(p_hwfn->p_dev, true,
+                         "Can not set MAC, invalid vfid [%d]\n", vfid);
+               return ECORE_INVAL;
+       }
+
+       /* Use 64-bit shifts - valid_bitmap is u64 */
+       if (vf_info->bulletin.p_virt->valid_bitmap &
+           (1ULL << MAC_ADDR_FORCED)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Can not set MAC, Forced MAC is configured\n");
+               return ECORE_INVAL;
+       }
+
+       feature = 1ULL << VFPF_BULLETIN_MAC_ADDR;
+       OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+       vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+       return ECORE_SUCCESS;
+}
+
+/* Force (pvid != 0) or clear (pvid == 0) a VLAN pvid on VF @vfid via
+ * its bulletin board, and apply the forced-feature configuration.
+ */
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+                                       u16 pvid, int vfid)
+{
+       struct ecore_vf_info *vf_info;
+       u64 feature;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info) {
+               /* Fixed copy-paste: this is the VLAN path, not MAC */
+               DP_NOTICE(p_hwfn->p_dev, true,
+                         "Can not set forced VLAN, invalid vfid [%d]\n", vfid);
+               return;
+       }
+
+       /* Use 64-bit shift - valid_bitmap is u64 */
+       feature = 1ULL << VLAN_ADDR_FORCED;
+       vf_info->bulletin.p_virt->pvid = pvid;
+       if (pvid)
+               vf_info->bulletin.p_virt->valid_bitmap |= feature;
+       else
+               vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+       ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+/* Configure the untagged-default behavior for VF @vfid via its bulletin
+ * board. Only allowed before the VF's vport is started (configuration is
+ * consumed during VF initialization).
+ *
+ * @return ECORE_SUCCESS or ECORE_INVAL.
+ */
+enum _ecore_status_t
+ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
+                                              bool b_untagged_only, int vfid)
+{
+       struct ecore_vf_info *vf_info;
+       u64 feature;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info) {
+               /* Fixed copy-paste: this is the untagged-default path */
+               DP_NOTICE(p_hwfn->p_dev, true,
+                         "Can not set untagged default, invalid vfid [%d]\n",
+                         vfid);
+               return ECORE_INVAL;
+       }
+
+       /* Since this is configurable only during vport-start, don't take it
+        * if we're past that point.
+        */
+       if (vf_info->state == VF_ENABLED) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Can't support untagged change for vfid[%d] - VF is already active\n",
+                          vfid);
+               return ECORE_INVAL;
+       }
+
+       /* Set configuration; This will later be taken into account during the
+        * VF initialization. Use 64-bit shifts - valid_bitmap is u64.
+        */
+       feature = (1ULL << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
+           (1ULL << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
+       vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+       vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
+           : 0;
+
+       return ECORE_SUCCESS;
+}
+
+/* Write VF @vfid's opaque FID into @opaque_fid; untouched on bad vfid. */
+void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
+                                 u16 *opaque_fid)
+{
+       struct ecore_vf_info *vf_info;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info)
+               return;
+
+       *opaque_fid = vf_info->opaque_fid;
+}
+
+/* Write VF @vfid's vport-id into @p_vort_id; untouched on bad vfid. */
+void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
+                               u8 *p_vort_id)
+{
+       struct ecore_vf_info *vf_info;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info)
+               return;
+
+       *p_vort_id = vf_info->vport_id;
+}
+
+/* @return true iff VF @vfid exists and has at least one active vport */
+bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
+{
+       struct ecore_vf_info *p_vf_info;
+
+       p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!p_vf_info)
+               return false;
+
+       return !!p_vf_info->vport_instance;
+}
+
+/* @return true iff VF @vfid exists and is in the STOPPED state.
+ * Fixed: the lookup result was dereferenced without a NULL check,
+ * unlike every sibling query helper.
+ */
+bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
+{
+       struct ecore_vf_info *p_vf_info;
+
+       p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!p_vf_info)
+               return false;
+
+       return p_vf_info->state == VF_STOPPED;
+}
+
+/* @return the spoof-check setting currently configured on HW for
+ * VF @vfid, or false if the vfid is invalid.
+ */
+bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
+{
+       struct ecore_vf_info *vf_info;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info)
+               return false;
+
+       return vf_info->spoof_chk;
+}
+
+/* @return true iff this function is a PF with SRIOV support allocated
+ * and VF @vfid is active.
+ */
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+{
+       if (IS_VF(p_hwfn->p_dev))
+               return false;
+       if (!IS_ECORE_SRIOV(p_hwfn->p_dev) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+               return false;
+
+       return ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, vfid);
+}
+
+/* Request spoof-check @val for VF @vfid. If the VF has no vport yet the
+ * value is only recorded and applied at vport start; otherwise it is
+ * configured immediately.
+ *
+ * @return ECORE_SUCCESS, ECORE_INVAL, or the configure result.
+ */
+enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+                                           int vfid, bool val)
+{
+       enum _ecore_status_t rc = ECORE_INVAL;
+       struct ecore_vf_info *vf;
+
+       if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
+               DP_NOTICE(p_hwfn, true,
+                         "SR-IOV sanity check failed, can't set spoofchk\n");
+               goto out;
+       }
+
+       vf = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf)
+               goto out;
+
+       if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+               /* After VF VPORT start PF will configure spoof check */
+               vf->req_spoofchk_val = val;
+               rc = ECORE_SUCCESS;
+               goto out;
+       }
+
+       rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+       return rc;
+}
+
+/* @return the per-VF chain limit from hw_info, falling back to the
+ * ECORE_MAX_VF_CHAINS_PER_PF default when unset.
+ */
+u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
+{
+       u8 chains = p_hwfn->hw_info.max_chains_per_vf;
+
+       if (!chains)
+               chains = ECORE_MAX_VF_CHAINS_PER_PF;
+
+       return chains;
+}
+
+/* Return the virtual address and size of VF @rel_vf_id's request
+ * mailbox. Either output pointer may be NULL; both untouched on bad id.
+ */
+void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+                                         u16 rel_vf_id,
+                                         void **pp_req_virt_addr,
+                                         u16 *p_req_virt_size)
+{
+       struct ecore_vf_info *vf_info =
+           ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+       if (!vf_info)
+               return;
+
+       if (pp_req_virt_addr)
+               *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
+
+       if (p_req_virt_size)
+               *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
+}
+
+/* Return the virtual address and size of VF @rel_vf_id's reply
+ * mailbox. Either output pointer may be NULL; both untouched on bad id.
+ */
+void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
+                                           u16 rel_vf_id,
+                                           void **pp_reply_virt_addr,
+                                           u16 *p_reply_virt_size)
+{
+       struct ecore_vf_info *vf_info =
+           ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+       if (!vf_info)
+               return;
+
+       if (pp_reply_virt_addr)
+               *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
+
+       if (p_reply_virt_size)
+               *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
+}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* @return the SW-channel mailbox of VF @rel_vf_id, or OSAL_NULL */
+struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
+                                                u16 rel_vf_id)
+{
+       struct ecore_vf_info *vf_info =
+           ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+
+       if (!vf_info)
+               return OSAL_NULL;
+
+       return &vf_info->vf_mbx.sw_mbx;
+}
+#endif
+
+/* @return true iff @length can hold at least a first-TLV and does not
+ * exceed the full VF->PF message union.
+ */
+bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
+{
+       if (length < sizeof(struct vfpf_first_tlv))
+               return false;
+
+       return length <= sizeof(union vfpf_tlvs);
+}
+
+/* @return the size of a full PF->VF message union */
+u32 ecore_iov_pfvf_msg_length(void)
+{
+       return sizeof(union pfvf_tlvs);
+}
+
+/* @return pointer to VF @rel_vf_id's forced MAC in the bulletin board,
+ * or OSAL_NULL if the VF is invalid or no forced MAC is configured.
+ */
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf || !p_vf->bulletin.p_virt)
+               return OSAL_NULL;
+
+       if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+               return OSAL_NULL;
+
+       return p_vf->bulletin.p_virt->mac;
+}
+
+/* @return VF @rel_vf_id's forced pvid from the bulletin board, or 0 if
+ * the VF is invalid or no forced VLAN is configured.
+ */
+u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
+                                      u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf || !p_vf->bulletin.p_virt)
+               return 0;
+
+       if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+               return 0;
+
+       return p_vf->bulletin.p_virt->pvid;
+}
+
+/* Configure the TX rate limit of VF @vfid's vport to @val.
+ *
+ * @return ECORE_INVAL on bad vfid, otherwise the result of the
+ *         vport-id translation or rate-limit configuration.
+ */
+enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_ptt *p_ptt,
+                                                int vfid, int val)
+{
+       struct ecore_vf_info *vf;
+       enum _ecore_status_t rc;
+       u8 abs_vp_id = 0;
+
+       vf = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+
+       if (!vf)
+               return ECORE_INVAL;
+
+       /* Translate the VF's relative vport-id to an absolute one */
+       rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       rc = ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32) val);
+
+       return rc;
+}
+
+/* Configure a minimum TX rate (WFQ) for VF @vfid, validating SR-IOV
+ * state on every hwfn first.
+ *
+ * Fixed: the VF lookup result was dereferenced without a NULL check.
+ *
+ * @return ECORE_SUCCESS, ECORE_INVAL, or the WFQ configure result.
+ */
+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
+                                                    int vfid, u32 rate)
+{
+       struct ecore_vf_info *vf;
+       u8 vport_id;
+       int i;
+
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "SR-IOV sanity check failed, can't set min rate\n");
+                       return ECORE_INVAL;
+               }
+       }
+
+       vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16) vfid, true);
+       if (!vf)
+               return ECORE_INVAL;
+       vport_id = vf->vport_id;
+
+       return ecore_configure_vport_wfq(p_dev, vport_id, rate);
+}
+
+/* Read the vport statistics of VF @vfid into @p_stats.
+ *
+ * @return ECORE_SUCCESS, or ECORE_INVAL if the VF is missing or not
+ *         in the ENABLED state.
+ */
+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           int vfid,
+                                           struct ecore_eth_stats *p_stats)
+{
+       struct ecore_vf_info *vf;
+
+       vf = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf)
+               return ECORE_INVAL;
+
+       if (vf->state != VF_ENABLED)
+               return ECORE_INVAL;
+
+       /* NOTE(review): the +0x10 statistics-id offset is presumably the
+        * VF stats base - confirm against the stats id mapping.
+        */
+       __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
+                               vf->abs_vf_id + 0x10, false);
+
+       return ECORE_SUCCESS;
+}
+
+/* @return number of rx-queues of VF @rel_vf_id, or 0 on bad id */
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf)
+               return 0;
+
+       return p_vf->num_rxqs;
+}
+
+/* @return number of active rx-queues of VF @rel_vf_id, or 0 on bad id */
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf)
+               return 0;
+
+       return p_vf->num_active_rxqs;
+}
+
+/* @return the opaque context of VF @rel_vf_id, or OSAL_NULL on bad id */
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf)
+               return OSAL_NULL;
+
+       return p_vf->ctx;
+}
+
+/* @return number of status blocks of VF @rel_vf_id, or 0 on bad id */
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf)
+               return 0;
+
+       return p_vf->num_sbs;
+}
+
+/* @return true iff VF @rel_vf_id exists and is in the FREE state */
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf)
+               return false;
+
+       return (p_vf->state == VF_FREE);
+}
+
+/* @return true iff VF @rel_vf_id exists and is ACQUIRED (not ENABLED) */
+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+                                             u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf)
+               return false;
+
+       return (p_vf->state == VF_ACQUIRED);
+}
+
+/* @return true iff VF @rel_vf_id exists and is in the ENABLED state */
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf)
+               return false;
+
+       return (p_vf->state == VF_ENABLED);
+}
+
+/* @return the configured minimum (WFQ) rate of VF @vfid's vport, or 0
+ * when the vfid is invalid or no WFQ rate has been configured.
+ */
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
+{
+       struct ecore_wfq_data *vf_vp_wfq;
+       struct ecore_vf_info *vf_info;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       if (!vf_info)
+               return 0;
+
+       vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+       if (vf_vp_wfq->configured)
+               return vf_vp_wfq->min_speed;
+       else
+               return 0;
+}
diff --git a/drivers/net/qede/ecore/ecore_sriov.h 
b/drivers/net/qede/ecore/ecore_sriov.h
new file mode 100644
index 0000000..9ddc9aa
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_sriov.h
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_SRIOV_H__
+#define __ECORE_SRIOV_H__
+
+#include "ecore_status.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_iov_api.h"
+#include "ecore_hsi_common.h"
+
+#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
+
+#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
+       (MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
+
+/* Represents a full message. Both the request filled by VF
+ * and the response filled by the PF. The VF needs one copy
+ * of this message, it fills the request part and sends it to
+ * the PF. The PF will copy the response to the response part for
+ * the VF to later read it. The PF needs to hold a message like this
+ * per VF, the request that is copied to the PF is placed in the
+ * request size, and the response is filled by the PF before sending
+ * it to the VF.
+ */
+struct ecore_vf_mbx_msg {
+       union vfpf_tlvs req;    /* filled by the VF */
+       union pfvf_tlvs resp;   /* filled by the PF */
+};
+
+/* This data is held in the ecore_hwfn structure for VFs only. */
+struct ecore_vf_iov {
+       /* VF->PF request buffer and its DMA address */
+       union vfpf_tlvs *vf2pf_request;
+       dma_addr_t vf2pf_request_phys;
+       /* PF->VF reply buffer and its DMA address */
+       union pfvf_tlvs *pf2vf_reply;
+       dma_addr_t pf2vf_reply_phys;
+
+       /* Should be taken whenever the mailbox buffers are accessed */
+       osal_mutex_t mutex;
+       u8 *offset;     /* next TLV placement offset within the request */
+
+       /* Bulletin Board */
+       struct ecore_bulletin bulletin;
+       struct ecore_bulletin_content bulletin_shadow;
+
+       /* we set aside a copy of the acquire response */
+       struct pfvf_acquire_resp_tlv acquire_resp;
+};
+
+/* This mailbox is maintained per VF in its PF
+ * contains all information required for sending / receiving
+ * a message
+ */
+struct ecore_iov_vf_mbx {
+       union vfpf_tlvs *req_virt;      /* PF-side copy of the VF request */
+       dma_addr_t req_phys;
+       union pfvf_tlvs *reply_virt;    /* PF-side reply buffer */
+       dma_addr_t reply_phys;
+
+       /* Address in VF where a pending message is located */
+       dma_addr_t pending_req;
+
+       u8 *offset;     /* next TLV placement offset within the reply */
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+       struct ecore_iov_sw_mbx sw_mbx;
+#endif
+
+       /* VF GPA address */
+       u32 vf_addr_lo;
+       u32 vf_addr_hi;
+
+       struct vfpf_first_tlv first_tlv;        /* saved VF request header */
+
+       u8 flags;
+#define VF_MSG_INPROCESS       0x1     /* failsafe - the FW should prevent
+                                        * more then one pending msg
+                                        */
+};
+
+/* Per-queue bookkeeping for a VF's rx/tx queue pair */
+struct ecore_vf_q_info {
+       u16 fw_rx_qid;
+       u16 fw_tx_qid;
+       u8 fw_cid;
+       u8 rxq_active;
+       u8 txq_active;
+};
+
+/* Interrupt moderation levels for a vport */
+enum int_mod {
+       VPORT_INT_MOD_UNDEFINED = 0,
+       VPORT_INT_MOD_ADAPTIVE = 1,
+       VPORT_INT_MOD_OFF = 2,
+       VPORT_INT_MOD_LOW = 100,
+       VPORT_INT_MOD_MEDIUM = 200,
+       VPORT_INT_MOD_HIGH = 300
+};
+
+/* Lifecycle states of a VF as tracked by the PF */
+enum vf_state {
+       VF_FREE = 0,            /* VF ready to be acquired holds no resc */
+       VF_ACQUIRED = 1,        /* VF, acquired, but not initialized */
+       VF_ENABLED = 2,         /* VF, Enabled */
+       VF_RESET = 3,           /* VF, FLR'd, pending cleanup */
+       VF_STOPPED = 4          /* VF, Stopped */
+};
+
+/* One shadow entry for a guest-configured vlan filter */
+struct ecore_vf_vlan_shadow {
+       bool used;
+       u16 vid;
+};
+
+struct ecore_vf_shadow_config {
+       /* Shadow copy of all guest vlans */
+       struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+       u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+/* PFs maintain an array of this structure, per VF */
+struct ecore_vf_info {
+       struct ecore_iov_vf_mbx vf_mbx;
+       enum vf_state state;
+       u8 to_disable;
+
+       /* VF's bulletin board and its DMA address */
+       struct ecore_bulletin bulletin;
+       dma_addr_t vf_bulletin;
+
+       u32 concrete_fid;
+       u16 opaque_fid;
+       u16 mtu;
+
+       u8 vport_id;
+       u8 relative_vf_id;      /* index within this PF */
+       u8 abs_vf_id;           /* index within the engine/path */
+#define ECORE_VF_ABS_ID(p_hwfn, p_vf)  (ECORE_PATH_ID(p_hwfn) ? \
+                                        (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+                                        (p_vf)->abs_vf_id)
+
+       u8 vport_instance;      /* Number of active vports */
+       u8 num_rxqs;
+       u8 num_txqs;
+
+       u8 num_sbs;
+
+       /* Filter resources granted to this VF */
+       u8 num_mac_filters;
+       u8 num_vlan_filters;
+       u8 num_mc_filters;
+
+       struct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
+       u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
+
+       /* TODO - Only windows is using it - should be removed */
+       u8 was_malicious;
+       u8 num_active_rxqs;
+       void *ctx;
+       struct ecore_public_vf_info p_vf_info;
+       bool spoof_chk;         /* Current configured on HW */
+       bool req_spoofchk_val;  /* Requested value */
+
+       /* Stores the configuration requested by VF */
+       struct ecore_vf_shadow_config shadow_config;
+
+       /* A bitfield using bulletin's valid-map bits, used to indicate
+        * which of the bulletin board features have been configured.
+        */
+       u64 configured_features;
+#define ECORE_IOV_CONFIGURED_FEATURES_MASK     ((1 << MAC_ADDR_FORCED) | \
+                                                (1 << VLAN_ADDR_FORCED))
+};
+
+/* This structure is part of ecore_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct ecore_pf_iov {
+       struct ecore_vf_info vfs_array[MAX_NUM_VFS];
+       u64 pending_events[ECORE_VF_ARRAY_LENGTH];      /* per-VF event bits */
+       u64 pending_flr[ECORE_VF_ARRAY_LENGTH];         /* per-VF FLR bits */
+       u16 base_vport_id;
+
+       /* Allocate message address continuously and split to each VF */
+       void *mbx_msg_virt_addr;
+       dma_addr_t mbx_msg_phys_addr;
+       u32 mbx_msg_size;
+       void *mbx_reply_virt_addr;
+       dma_addr_t mbx_reply_phys_addr;
+       u32 mbx_reply_size;
+       void *p_bulletins;
+       dma_addr_t bulletins_phys;
+       u32 bulletins_size;
+};
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief Read sriov related information and allocated resources
+ *  reads from configuration space, shmem, and allocates the VF
+ *  database in the PF.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
+ *
+ * @param p_hwfn
+ * @param p_iov
+ * @param type
+ * @param length
+ *
+ * @return pointer to the newly placed tlv
+ */
+void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
+                   u8 **offset, u16 type, u16 length);
+
+/**
+ * @brief list the types and lengths of the tlvs on the buffer
+ *
+ * @param p_hwfn
+ * @param tlvs_list
+ */
+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list);
+
+/**
+ * @brief ecore_iov_alloc - allocate sriov related resources
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_iov_setup - setup sriov related resources
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_iov_free - free sriov related resources
+ *
+ * @param p_hwfn
+ */
+void ecore_iov_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+                                          u8 opcode,
+                                          __le16 echo,
+                                          union event_ring_data *data);
+
+/**
+ * @brief calculate CRC for bulletin board validation
+ *
+ * @param crc - basic crc seed
+ * @param ptr - pointer to beginning of buffer
+ * @param length - length in bytes of buffer
+ *
+ * @return calculated crc over buffer [with respect to seed].
+ */
+u32 ecore_crc32(u32 crc, u8 *ptr, u32 length);
+
+/**
+ * @brief Mark structs of vfs that have been FLR-ed.
+ *
+ * @param p_hwfn
+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ *
+ * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ */
+int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *disabled_vfs);
+
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
+                                void *p_tlvs_list, u16 req_type);
+
+/**
+ * @brief ecore_iov_get_vf_info - return the database of a
+ *        specific VF
+ *
+ * @param p_hwfn
+ * @param relative_vf_id - relative id of the VF for which info
+ *                      is requested
+ * @param b_enabled_only - if false, return the VF info even when the VF is disabled
+ *
+ * @return struct ecore_vf_info*
+ */
+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
+                                           u16 relative_vf_id,
+                                           bool b_enabled_only);
+#else
+static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn
+                                                         *p_hwfn,
+                                                         struct ecore_ptt
+                                                         *p_ptt)
+{
+       return ECORE_SUCCESS;
+}
+
+static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn *p_hwfn, u8 **offset,
+                                      u16 type, u16 length)
+{
+       return OSAL_NULL;
+}
+
+static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
+                                         void *tlvs_list)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn
+                                                       *p_hwfn)
+{
+       return ECORE_SUCCESS;
+}
+
+static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt)
+{
+}
+
+static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn *p_hwfn)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn
+                                                             *p_hwfn,
+                                                             u8 opcode,
+                                                             __le16 echo,
+                                                             union
+                                                             event_ring_data
+                                                             *data)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
+{
+       return 0;
+}
+
+static OSAL_INLINE int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
+                                            u32 *disabled_vfs)
+{
+       return 0;
+}
+
+static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
+                                                   void *p_tlvs_list,
+                                                   u16 req_type)
+{
+       return OSAL_NULL;
+}
+
+static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct 
ecore_hwfn
+                                                              *p_hwfn,
+                                                              u16
+                                                              relative_vf_id,
+                                                              bool
+                                                              b_enabled_only)
+{
+       return OSAL_NULL;
+}
+
+#endif
+#endif /* __ECORE_SRIOV_H__ */
diff --git a/drivers/net/qede/ecore/ecore_status.h 
b/drivers/net/qede/ecore/ecore_status.h
new file mode 100644
index 0000000..98d40bb
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_status.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_STATUS_H__
+#define __ECORE_STATUS_H__
+
+enum _ecore_status_t {
+       ECORE_UNKNOWN_ERROR = -12,
+       ECORE_NORESOURCES = -11,
+       ECORE_NODEV = -10,
+       ECORE_ABORTED = -9,
+       ECORE_AGAIN = -8,
+       ECORE_NOTIMPL = -7,
+       ECORE_EXISTS = -6,
+       ECORE_IO = -5,
+       ECORE_TIMEOUT = -4,
+       ECORE_INVAL = -3,
+       ECORE_BUSY = -2,
+       ECORE_NOMEM = -1,
+       ECORE_SUCCESS = 0,
+       /* PENDING is not an error and should be positive */
+       ECORE_PENDING = 1,
+};
+
+#endif /* __ECORE_STATUS_H__ */
diff --git a/drivers/net/qede/ecore/ecore_utils.h 
b/drivers/net/qede/ecore/ecore_utils.h
new file mode 100644
index 0000000..616b44c
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_utils.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_UTILS_H__
+#define __ECORE_UTILS_H__
+
+/* dma_addr_t manip */
+#define DMA_LO(x)              ((u32)(((dma_addr_t)(x)) & 0xffffffff))
+#define DMA_HI(x)              ((u32)(((dma_addr_t)(x)) >> 32))
+
+#define DMA_LO_LE(x)           OSAL_CPU_TO_LE32(DMA_LO(x))
+#define DMA_HI_LE(x)           OSAL_CPU_TO_LE32(DMA_HI(x))
+
+/* It's assumed that whoever includes this has previously included an hsi
+ * file defining the regpair.
+ */
+#define DMA_REGPAIR_LE(x, val) (x).hi = DMA_HI_LE((val)); \
+                               (x).lo = DMA_LO_LE((val))
+
+#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
+#define HILO_DMA(hi, lo)       HILO_GEN(hi, lo, dma_addr_t)
+#define HILO_64(hi, lo)                HILO_GEN(hi, lo, u64)
+#define HILO_DMA_REGPAIR(regpair)      (HILO_DMA(regpair.hi, regpair.lo))
+#define HILO_64_REGPAIR(regpair)       (HILO_64(regpair.hi, regpair.lo))
+
+#endif
diff --git a/drivers/net/qede/ecore/ecore_vf.c 
b/drivers/net/qede/ecore/ecore_vf.c
new file mode 100644
index 0000000..2ede55c
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_vf.c
@@ -0,0 +1,1319 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_sriov.h"
+#include "ecore_l2_api.h"
+#include "ecore_vf.h"
+#include "ecore_vfpf_if.h"
+#include "ecore_status.h"
+#include "reg_addr.h"
+#include "ecore_int.h"
+#include "ecore_l2.h"
+#include "ecore_mcp_api.h"
+#include "ecore_vf_api.h"
+
+static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       void *p_tlv;
+
+       /* This lock is released when we receive PF's response
+        * in ecore_send_msg2pf().
+        * So, ecore_vf_pf_prep() and ecore_send_msg2pf()
+        * must come in sequence.
+        */
+       OSAL_MUTEX_ACQUIRE(&(p_iov->mutex));
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "preparing to send %s tlv over vf pf channel\n",
+                  ecore_channel_tlvs_string[type]);
+
+       /* Reset Request offset */
+       p_iov->offset = (u8 *) (p_iov->vf2pf_request);
+
+       /* Clear mailbox - both request and reply */
+       OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+       OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+       /* Init type and length */
+       p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);
+
+       /* Init first tlv header */
+       ((struct vfpf_first_tlv *)p_tlv)->reply_address =
+           (u64) p_iov->pf2vf_reply_phys;
+
+       return p_tlv;
+}
+
+static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
+                            u8 *done, u32 resp_size)
+{
+       struct ustorm_vf_zone *zone_data = (struct ustorm_vf_zone *)
+           ((u8 *) PXP_VF_BAR0_START_USDM_ZONE_B);
+       union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+       struct ustorm_trigger_vf_zone trigger;
+       int rc = ECORE_SUCCESS, time = 100;
+       u8 pf_id;
+
+       /* output tlvs list */
+       ecore_dp_tlv_list(p_hwfn, p_req);
+
+       /* need to add the END TLV to the message size */
+       resp_size += sizeof(struct channel_list_end_tlv);
+
+       if (!p_hwfn->p_dev->sriov_info.b_hw_channel) {
+               rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
+                                        done,
+                                        p_req,
+                                        p_hwfn->vf_iov_info->pf2vf_reply,
+                                        sizeof(union vfpf_tlvs), resp_size);
+               /* TODO - no prints about message ? */
+               goto exit;
+       }
+
+       /* Send TLVs over HW channel */
+       OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+       trigger.vf_pf_msg_valid = 1;
+       /* TODO - FW should remove this requirement */
+       pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, PXP_CONCRETE_FID_PFID);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> 
%p\n",
+                  pf_id,
+                  U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
+                  U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
+                  &zone_data->non_trigger.vf_pf_msg_addr,
+                  *((u32 *) &trigger), &zone_data->trigger);
+
+       REG_WR(p_hwfn,
+              (osal_uintptr_t) &zone_data->non_trigger.vf_pf_msg_addr.lo,
+              U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+       REG_WR(p_hwfn,
+              (osal_uintptr_t) &zone_data->non_trigger.vf_pf_msg_addr.hi,
+              U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+       /* The message data must be written first, to prevent trigger before
+        * data is written.
+        */
+       OSAL_WMB(p_hwfn->p_dev);
+
+       REG_WR(p_hwfn, (osal_uintptr_t) &zone_data->trigger,
+              *((u32 *) &trigger));
+
+       while ((!*done) && time) {
+               OSAL_MSLEEP(25);
+               time--;
+       }
+
+       if (!*done) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF <-- PF Timeout [Type %d]\n",
+                          p_req->first_tlv.tl.type);
+               rc = ECORE_TIMEOUT;
+               goto exit;
+       } else {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "PF response: %d [Type %d]\n",
+                          *done, p_req->first_tlv.tl.type);
+       }
+
+exit:
+       OSAL_MUTEX_RELEASE(&(p_hwfn->vf_iov_info->mutex));
+
+       return rc;
+}
+
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+#define VF_ACQUIRE_MC_FILTERS 10
+
+static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+       struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+       struct ecore_vf_acquire_sw_info vf_sw_info;
+       struct vfpf_acquire_tlv *req;
+       int rc = 0, attempts = 0;
+       bool resources_acquired = false;
+
+       /* @@@ TBD: MichalK take this from somewhere else... */
+       u8 rx_count = 1, tx_count = 1, num_sbs = 1;
+       u8 num_mac = VF_ACQUIRE_MAC_FILTERS, num_mc = VF_ACQUIRE_MC_FILTERS;
+
+       /* clear mailbox and prep first tlv */
+       req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+       /* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
+       req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+       req->resc_request.num_rxqs = rx_count;
+       req->resc_request.num_txqs = tx_count;
+       req->resc_request.num_sbs = num_sbs;
+       req->resc_request.num_mac_filters = num_mac;
+       req->resc_request.num_mc_filters = num_mc;
+       req->resc_request.num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+
+       OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
+       OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);
+
+       req->vfdev_info.os_type = vf_sw_info.os_type;
+       req->vfdev_info.driver_version = vf_sw_info.driver_version;
+       req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+       req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+       req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+       req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+
+       /* pf 2 vf bulletin board address */
+       req->bulletin_addr = p_iov->bulletin.phys;
+       req->bulletin_size = p_iov->bulletin.size;
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       while (!resources_acquired) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "attempting to acquire resources\n");
+
+               /* send acquire request */
+               rc = ecore_send_msg2pf(p_hwfn,
+                                      &resp->hdr.status, sizeof(*resp));
+
+               /* PF timeout */
+               if (rc)
+                       return rc;
+
+               /* copy acquire response from buffer to p_hwfn */
+               OSAL_MEMCPY(&p_iov->acquire_resp,
+                           resp, sizeof(p_iov->acquire_resp));
+
+               attempts++;
+
+               /* PF agrees to allocate our resources */
+               if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "resources acquired\n");
+                       resources_acquired = true;
+               } /* PF refuses to allocate our resources */
+               else if (resp->hdr.status ==
+                        PFVF_STATUS_NO_RESOURCE &&
+                        attempts < VF_ACQUIRE_THRESH) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "PF unwilling to fullfill resource request. 
Try PF recommended amount\n");
+
+                       /* humble our request */
+                       req->resc_request.num_txqs = resp->resc.num_txqs;
+                       req->resc_request.num_rxqs = resp->resc.num_rxqs;
+                       req->resc_request.num_sbs = resp->resc.num_sbs;
+                       req->resc_request.num_mac_filters =
+                           resp->resc.num_mac_filters;
+                       req->resc_request.num_vlan_filters =
+                           resp->resc.num_vlan_filters;
+                       req->resc_request.num_mc_filters =
+                           resp->resc.num_mc_filters;
+
+                       /* Clear response buffer */
+                       OSAL_MEMSET(p_iov->pf2vf_reply, 0,
+                                   sizeof(union pfvf_tlvs));
+               } else {
+                       DP_ERR(p_hwfn,
+                              "PF returned error %d to VF acquisition 
request\n",
+                              resp->hdr.status);
+                       return ECORE_AGAIN;
+               }
+       }
+
+       rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
+       if (rc) {
+               DP_NOTICE(p_hwfn, true,
+                         "VF_UPDATE_ACQUIRE_RESC_RESP Failed: status = 
0x%x.\n",
+                         rc);
+               return ECORE_AGAIN;
+       }
+
+       /* Update bulletin board size with response from PF */
+       p_iov->bulletin.size = resp->bulletin_size;
+
+       /* get HW info */
+       p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
+       p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;
+
+       DP_INFO(p_hwfn, "Chip details - %s%d\n",
+               ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
+               CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);
+
+       /* @@@TBD MichalK: Fw ver... */
+       /* strlcpy(p_hwfn->fw_ver, p_hwfn->acquire_resp.pfdev_info.fw_ver,
+        *  sizeof(p_hwfn->fw_ver));
+        */
+
+       p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;
+
+       return 0;
+}
+
+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev)
+{
+       enum _ecore_status_t rc = ECORE_NOMEM;
+       struct ecore_vf_iov *p_sriov;
+       struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];   /* @@@TBD CMT */
+
+       p_dev->num_hwfns = 1;   /* @@@TBD CMT must be fixed... */
+
+       p_hwfn->regview = p_dev->regview;
+       if (p_hwfn->regview == OSAL_NULL) {
+               DP_ERR(p_hwfn,
+                      "regview should be initialized before"
+                       " ecore_vf_hw_prepare is called\n");
+               return ECORE_INVAL;
+       }
+
+       /* Set the doorbell bar. Assumption: regview is set */
+       p_hwfn->doorbells = (u8 OSAL_IOMEM *) p_hwfn->regview +
+           PXP_VF_BAR0_START_DQ;
+
+       p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
+                                         PXP_VF_BAR0_ME_OPAQUE_ADDRESS);
+
+       p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn,
+                                     PXP_VF_BAR0_ME_CONCRETE_ADDRESS);
+
+       /* Allocate vf sriov info */
+       p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
+       if (!p_sriov) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `struct ecore_sriov'\n");
+               return ECORE_NOMEM;
+       }
+
+       OSAL_MEMSET(p_sriov, 0, sizeof(*p_sriov));
+
+       /* Allocate vf2pf msg */
+       p_sriov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+                                                        &p_sriov->
+                                                        vf2pf_request_phys,
+                                                        sizeof(union
+                                                               vfpf_tlvs));
+       if (!p_sriov->vf2pf_request) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `vf2pf_request' DMA memory\n");
+               goto free_p_sriov;
+       }
+
+       p_sriov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+                                                      &p_sriov->
+                                                      pf2vf_reply_phys,
+                                                      sizeof(union pfvf_tlvs));
+       if (!p_sriov->pf2vf_reply) {
+               DP_NOTICE(p_hwfn, true,
+                         "Failed to allocate `pf2vf_reply' DMA memory\n");
+               goto free_vf2pf_request;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF's Request mailbox [%p virt 0x%lx phys], Response"
+                  " mailbox [%p virt 0x%lx phys]\n",
+                  p_sriov->vf2pf_request,
+                  (u64) p_sriov->vf2pf_request_phys,
+                  p_sriov->pf2vf_reply, (u64) p_sriov->pf2vf_reply_phys);
+
+       /* Allocate Bulletin board */
+       p_sriov->bulletin.size = sizeof(struct ecore_bulletin_content);
+       p_sriov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+                                                          &p_sriov->bulletin.
+                                                          phys,
+                                                          p_sriov->bulletin.
+                                                          size);
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
+                  p_sriov->bulletin.p_virt, (u64) p_sriov->bulletin.phys,
+                  p_sriov->bulletin.size);
+
+       OSAL_MUTEX_ALLOC(p_hwfn, &p_sriov->mutex);
+       OSAL_MUTEX_INIT(&p_sriov->mutex);
+
+       p_hwfn->vf_iov_info = p_sriov;
+
+       p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+
+       /* First VF needs to query for information from PF */
+       if (!p_hwfn->my_id)
+               rc = ecore_vf_pf_acquire(p_hwfn);
+
+       return rc;
+
+free_vf2pf_request:
+       OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sriov->vf2pf_request,
+                              p_sriov->vf2pf_request_phys,
+                              sizeof(union vfpf_tlvs));
+free_p_sriov:
+       OSAL_FREE(p_hwfn->p_dev, p_sriov);
+
+       return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn)
+{
+       p_hwfn->b_int_enabled = 1;
+
+       return 0;
+}
+
+/* TEMP TEMP until in HSI */
+#define TSTORM_QZONE_START   PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START + \
+                                  (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+#define USTORM_QZONE_START(dev)   (MSTORM_QZONE_START + \
+                                  (MSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+                                          u8 rx_qid,
+                                          u16 sb,
+                                          u8 sb_index,
+                                          u16 bd_max_bytes,
+                                          dma_addr_t bd_chain_phys_addr,
+                                          dma_addr_t cqe_pbl_addr,
+                                          u16 cqe_pbl_size,
+                                          void OSAL_IOMEM **pp_prod)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_start_rxq_tlv *req;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       int rc;
+       u8 hw_qid;
+       u64 init_prod_val = 0;
+
+       /* clear mailbox and prep first tlv */
+       req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+       /* @@@TBD MichalK TPA */
+
+       req->rx_qid = rx_qid;
+       req->cqe_pbl_addr = cqe_pbl_addr;
+       req->cqe_pbl_size = cqe_pbl_size;
+       req->rxq_addr = bd_chain_phys_addr;
+       req->hw_sb = sb;
+       req->sb_index = sb_index;
+       req->hc_rate = 0;       /* @@@TBD MichalK -> host coalescing! */
+       req->bd_max_bytes = bd_max_bytes;
+       req->stat_id = -1;      /* No stats at the moment */
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       if (pp_prod) {
+               hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+
+               *pp_prod = (u8 OSAL_IOMEM *) p_hwfn->regview +
+                   MSTORM_QZONE_START(p_hwfn->p_dev) +
+                   (hw_qid) * MSTORM_QZONE_SIZE +
+                   OFFSETOF(struct mstorm_eth_queue_zone, rx_producers);
+
+               /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+               __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+                                 (u32 *) (&init_prod_val));
+       }
+
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_INVAL;
+
+       return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+                                         u16 rx_qid, bool cqe_completion)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_stop_rxqs_tlv *req;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+       /* @@@TBD MichalK TPA */
+
+       /* @@@TBD MichalK - relevant ???
+        * flags  VFPF_QUEUE_FLG_OV VFPF_QUEUE_FLG_VLAN
+        */
+       req->rx_qid = rx_qid;
+       req->num_rxqs = 1;
+       req->cqe_completion = cqe_completion;
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_INVAL;
+
+       return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+                                          u16 tx_queue_id,
+                                          u16 sb,
+                                          u8 sb_index,
+                                          dma_addr_t pbl_addr,
+                                          u16 pbl_size,
+                                          void OSAL_IOMEM **pp_doorbell)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_start_txq_tlv *req;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+       /* @@@TBD MichalK TPA */
+
+       req->tx_qid = tx_queue_id;
+
+       /* Tx */
+       req->pbl_addr = pbl_addr;
+       req->pbl_size = pbl_size;
+       req->hw_sb = sb;
+       req->sb_index = sb_index;
+       req->hc_rate = 0;       /* @@@TBD MichalK -> host coalescing! */
+       req->flags = 0;         /* @@@TBD MichalK -> flags... */
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_INVAL;
+
+       if (pp_doorbell) {
+               u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+
+               *pp_doorbell = (u8 OSAL_IOMEM *) p_hwfn->doorbells +
+                   DB_ADDR(cid, DQ_DEMS_LEGACY);
+       }
+
+       return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 
tx_qid)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_stop_txqs_tlv *req;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+       /* @@@TBD MichalK TPA */
+
+       /* @@@TBD MichalK - relevant ??? flags
+        * VFPF_QUEUE_FLG_OV VFPF_QUEUE_FLG_VLAN
+        */
+       req->tx_qid = tx_qid;
+       req->num_txqs = 1;
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_INVAL;
+
+       return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+                                            u16 rx_queue_id,
+                                            u8 num_rxqs,
+                                            u8 comp_cqe_flg, u8 comp_event_flg)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       struct vfpf_update_rxq_tlv *req;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
+
+       req->rx_qid = rx_queue_id;
+       req->num_rxqs = num_rxqs;
+
+       if (comp_cqe_flg)
+               req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
+       if (comp_event_flg)
+               req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_INVAL;
+
+       return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
+                       u16 mtu, u8 inner_vlan_removal,
+                       enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe,
+                       u8 only_untagged)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_vport_start_tlv *req;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       int rc, i;
+
+       /* clear mailbox and prep first tlv */
+       req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+       req->mtu = mtu;
+       req->vport_id = vport_id;
+       req->inner_vlan_removal = inner_vlan_removal;
+       req->tpa_mode = tpa_mode;
+       req->max_buffers_per_cqe = max_buffers_per_cqe;
+       req->only_untagged = only_untagged;
+
+       /* status blocks */
+       for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
+               if (p_hwfn->sbs_info[i])
+                       req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_INVAL;
+
+       return rc;
+}
+
+enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       int rc;
+
+       /* clear mailbox and prep first tlv */
+       ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+                        sizeof(struct vfpf_first_tlv));
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_INVAL;
+
+       return rc;
+}
+
+/* Walk the PF's aggregated reply to a vport-update request and report, for
+ * each aspect that was requested in p_data, whether the PF returned a
+ * success-status response TLV for it.  Purely diagnostic - nothing is
+ * modified and no status is returned to the caller.
+ */
+static void
+ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_sp_vport_update_params *p_data)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *p_resp;
+       u16 tlv;
+
+       /* RX/TX activation TLV */
+       if (p_data->update_vport_active_rx_flg ||
+           p_data->update_vport_active_tx_flg) {
+               tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+               p_resp = (struct pfvf_def_resp_tlv *)
+                   ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+               if (p_resp && p_resp->hdr.status)
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VP update activate tlv configured\n");
+               else
+                       DP_NOTICE(p_hwfn, true,
+                                 "VP update activate tlv config failed\n");
+       }
+
+       /* Tx-switching TLV */
+       if (p_data->update_tx_switching_flg) {
+               tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+               p_resp = (struct pfvf_def_resp_tlv *)
+                   ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+               if (p_resp && p_resp->hdr.status)
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VP update tx switch tlv configured\n");
+#ifndef ASIC_ONLY
+               /* On FPGA emulation a missing Tx-switching reply is demoted
+                * to an informational message instead of an error.
+                */
+               else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+                       DP_NOTICE(p_hwfn, false,
+                                 "FPGA: Skip checking whether PF"
+                                 " replied to Tx-switching request\n");
+#endif
+               else
+                       DP_NOTICE(p_hwfn, true,
+                                 "VP update tx switch tlv config failed\n");
+       }
+
+       /* Inner-VLAN stripping TLV */
+       if (p_data->update_inner_vlan_removal_flg) {
+               tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+               p_resp = (struct pfvf_def_resp_tlv *)
+                   ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+               if (p_resp && p_resp->hdr.status)
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VP update vlan strip tlv configured\n");
+               else
+                       DP_NOTICE(p_hwfn, true,
+                                 "VP update vlan strip tlv config failed\n");
+       }
+
+       /* Approximate-multicast TLV */
+       if (p_data->update_approx_mcast_flg) {
+               tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+               p_resp = (struct pfvf_def_resp_tlv *)
+                   ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+               if (p_resp && p_resp->hdr.status)
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VP update mcast tlv configured\n");
+               else
+                       DP_NOTICE(p_hwfn, true,
+                                 "VP update mcast tlv config failed\n");
+       }
+
+       /* Accept-mode (rx/tx filtering) TLV */
+       if (p_data->accept_flags.update_rx_mode_config ||
+           p_data->accept_flags.update_tx_mode_config) {
+               tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+               p_resp = (struct pfvf_def_resp_tlv *)
+                   ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+               if (p_resp && p_resp->hdr.status)
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VP update accept_mode tlv configured\n");
+               else
+                       DP_NOTICE(p_hwfn, true,
+                                 "VP update accept_mode tlv config failed\n");
+       }
+
+       /* RSS configuration TLV */
+       if (p_data->rss_params) {
+               tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+               p_resp = (struct pfvf_def_resp_tlv *)
+                   ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+               if (p_resp && p_resp->hdr.status)
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VP update rss tlv configured\n");
+               else
+                       DP_NOTICE(p_hwfn, true,
+                                 "VP update rss tlv config failed\n");
+       }
+
+       /* SGE/TPA (aggregation) TLV */
+       if (p_data->sge_tpa_params) {
+               tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+               p_resp = (struct pfvf_def_resp_tlv *)
+                   ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
+               if (p_resp && p_resp->hdr.status)
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VP update sge tpa tlv configured\n");
+               else
+                       DP_NOTICE(p_hwfn, true,
+                                 "VP update sge tpa tlv config failed\n");
+       }
+}
+
+/* Build and send a CHANNEL_TLV_VPORT_UPDATE request to the PF.  Each aspect
+ * flagged in p_params is appended as its own extended TLV after the header
+ * TLV; the PF answers with one default-response TLV per extended TLV, which
+ * is why resp_size grows by sizeof(struct pfvf_def_resp_tlv) for every TLV
+ * added.  After a successful exchange the per-TLV statuses are logged by
+ * ecore_vf_handle_vp_update_tlvs_resp().
+ *
+ * Returns ECORE_INVAL if the PF's overall status is not SUCCESS, otherwise
+ * the mailbox send result.
+ */
+enum _ecore_status_t
+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
+                        struct ecore_sp_vport_update_params *p_params)
+{
+       struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+       struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+       struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+       struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+       struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+       struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+       struct vfpf_vport_update_activate_tlv *p_act_tlv;
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+       struct vfpf_vport_update_tlv *req;
+       struct pfvf_def_resp_tlv *resp;
+       u8 update_rx, update_tx;
+       u32 resp_size = 0;
+       u16 size, tlv;
+       int rc;
+
+       /* Reply always starts with one default response for the header TLV */
+       resp = &p_iov->pf2vf_reply->default_resp;
+       resp_size = sizeof(*resp);
+
+       update_rx = p_params->update_vport_active_rx_flg;
+       update_tx = p_params->update_vport_active_tx_flg;
+
+       /* clear mailbox and prep header tlv */
+       ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
+       /* Prepare extended tlvs */
+       if (update_rx || update_tx) {
+               size = sizeof(struct vfpf_vport_update_activate_tlv);
+               p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+                                         CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+                                         size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               if (update_rx) {
+                       p_act_tlv->update_rx = update_rx;
+                       p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+               }
+
+               if (update_tx) {
+                       p_act_tlv->update_tx = update_tx;
+                       p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+               }
+       }
+
+       /* Inner-VLAN stripping */
+       if (p_params->update_inner_vlan_removal_flg) {
+               size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
+               p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+                                          CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+                                          size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;
+       }
+
+       /* Tx-switching */
+       if (p_params->update_tx_switching_flg) {
+               size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+               tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+               p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+                                               tlv, size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+       }
+
+       /* Approximate-multicast bin bitmap */
+       if (p_params->update_approx_mcast_flg) {
+               size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+               p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+                                           CHANNEL_TLV_VPORT_UPDATE_MCAST,
+                                           size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
+                           sizeof(unsigned long) *
+                           ETH_MULTICAST_MAC_BINS_IN_REGS);
+       }
+
+       /* NOTE: update_rx/update_tx are reused from here on for the
+        * accept-mode flags rather than vport activation.
+        */
+       update_rx = p_params->accept_flags.update_rx_mode_config;
+       update_tx = p_params->accept_flags.update_tx_mode_config;
+
+       if (update_rx || update_tx) {
+               tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+               size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+               p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               if (update_rx) {
+                       p_accept_tlv->update_rx_mode = update_rx;
+                       p_accept_tlv->rx_accept_filter =
+                           p_params->accept_flags.rx_accept_filter;
+               }
+
+               if (update_tx) {
+                       p_accept_tlv->update_tx_mode = update_tx;
+                       p_accept_tlv->tx_accept_filter =
+                           p_params->accept_flags.tx_accept_filter;
+               }
+       }
+
+       /* RSS configuration */
+       if (p_params->rss_params) {
+               struct ecore_rss_params *rss_params = p_params->rss_params;
+
+               size = sizeof(struct vfpf_vport_update_rss_tlv);
+               p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+                                         CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               /* Mailbox was zeroed by ecore_vf_pf_prep(), so |= starts
+                * from a clean flags field.
+                */
+               if (rss_params->update_rss_config)
+                       p_rss_tlv->update_rss_flags |=
+                           VFPF_UPDATE_RSS_CONFIG_FLAG;
+               if (rss_params->update_rss_capabilities)
+                       p_rss_tlv->update_rss_flags |=
+                           VFPF_UPDATE_RSS_CAPS_FLAG;
+               if (rss_params->update_rss_ind_table)
+                       p_rss_tlv->update_rss_flags |=
+                           VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+               if (rss_params->update_rss_key)
+                       p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+               p_rss_tlv->rss_enable = rss_params->rss_enable;
+               p_rss_tlv->rss_caps = rss_params->rss_caps;
+               p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+               OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
+                           sizeof(rss_params->rss_ind_table));
+               OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
+                           sizeof(rss_params->rss_key));
+       }
+
+       /* Accept-any-VLAN policy */
+       if (p_params->update_accept_any_vlan_flg) {
+               size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+               tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+               p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+                                              tlv, size);
+
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+               p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+               p_any_vlan_tlv->update_accept_any_vlan_flg =
+                   p_params->update_accept_any_vlan_flg;
+       }
+
+       /* SGE/TPA (LRO-style aggregation) configuration */
+       if (p_params->sge_tpa_params) {
+               struct ecore_sge_tpa_params *sge_tpa_params =
+                   p_params->sge_tpa_params;
+
+               size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
+               p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+                                             CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+                                             size);
+               resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+               if (sge_tpa_params->update_tpa_en_flg)
+                       p_sge_tpa_tlv->update_sge_tpa_flags |=
+                           VFPF_UPDATE_TPA_EN_FLAG;
+               if (sge_tpa_params->update_tpa_param_flg)
+                       p_sge_tpa_tlv->update_sge_tpa_flags |=
+                           VFPF_UPDATE_TPA_PARAM_FLAG;
+
+               if (sge_tpa_params->tpa_ipv4_en_flg)
+                       p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG;
+               if (sge_tpa_params->tpa_ipv6_en_flg)
+                       p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG;
+               if (sge_tpa_params->tpa_pkt_split_flg)
+                       p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG;
+               if (sge_tpa_params->tpa_hdr_data_split_flg)
+                       p_sge_tpa_tlv->sge_tpa_flags |=
+                           VFPF_TPA_HDR_DATA_SPLIT_FLAG;
+               if (sge_tpa_params->tpa_gro_consistent_flg)
+                       p_sge_tpa_tlv->sge_tpa_flags |=
+                           VFPF_TPA_GRO_CONSIST_FLAG;
+
+               p_sge_tpa_tlv->tpa_max_aggs_num =
+                   sge_tpa_params->tpa_max_aggs_num;
+               p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
+               p_sge_tpa_tlv->tpa_min_size_to_start =
+                   sge_tpa_params->tpa_min_size_to_start;
+               p_sge_tpa_tlv->tpa_min_size_to_cont =
+                   sge_tpa_params->tpa_min_size_to_cont;
+
+               p_sge_tpa_tlv->max_buffers_per_cqe =
+                   sge_tpa_params->max_buffers_per_cqe;
+       }
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_INVAL;
+
+       /* Log the per-TLV statuses from the aggregated reply */
+       ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+       return rc;
+}
+
+/* Send CHANNEL_TLV_CLOSE to the PF, asking it to close this VF while the
+ * channel itself stays up.  On success interrupts are marked disabled on
+ * the hwfn.
+ *
+ * Returns ECORE_SUCCESS, ECORE_AGAIN if the PF rejected the request, or
+ * the mailbox error code.
+ */
+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       int rc;
+
+       /* clear mailbox and prep first tlv; the request carries no payload
+        * beyond the header, so the returned request pointer is not kept
+        * (previously a dead store into an unused local).
+        */
+       ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE,
+                        sizeof(struct vfpf_first_tlv));
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+       if (rc)
+               return rc;
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_AGAIN;
+
+       p_hwfn->b_int_enabled = 0;
+
+       return ECORE_SUCCESS;
+}
+
+/* Send CHANNEL_TLV_RELEASE to the PF and then free every VF->PF channel
+ * resource: the request/reply DMA buffers, the bulletin board and the
+ * vf_iov_info structure itself.  Interrupts are marked disabled and the
+ * resources are freed regardless of the PF's answer.
+ *
+ * Returns ECORE_SUCCESS, ECORE_AGAIN if the PF did not acknowledge, or the
+ * mailbox error code.
+ */
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+       u32 size;
+       int rc;
+
+       /* clear mailbox and prep first tlv; the request is header-only so
+        * the returned request pointer is not kept (previously a dead store
+        * into an unused local).
+        */
+       ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE,
+                        sizeof(struct vfpf_first_tlv));
+
+       /* add list termination tlv */
+       ecore_add_tlv(p_hwfn, &p_iov->offset,
+                     CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+       if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
+               rc = ECORE_AGAIN;
+
+       p_hwfn->b_int_enabled = 0;
+
+       /* TODO - might need to revise this for 100g */
+       if (IS_LEAD_HWFN(p_hwfn))
+               OSAL_MUTEX_DEALLOC(&p_iov->mutex);
+
+       if (p_iov->vf2pf_request)
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_iov->vf2pf_request,
+                                      p_iov->vf2pf_request_phys,
+                                      sizeof(union vfpf_tlvs));
+       if (p_iov->pf2vf_reply)
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_iov->pf2vf_reply,
+                                      p_iov->pf2vf_reply_phys,
+                                      sizeof(union pfvf_tlvs));
+
+       if (p_iov->bulletin.p_virt) {
+               size = sizeof(struct ecore_bulletin_content);
+               OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+                                      p_iov->bulletin.p_virt,
+                                      p_iov->bulletin.phys, size);
+       }
+
+       OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
+       p_hwfn->vf_iov_info = OSAL_NULL;
+
+       return rc;
+}
+
+/* Program an approximate-multicast filter by translating the MAC list into
+ * hash bins and issuing a vport-update with the mcast aspect set.  Any
+ * opcode other than ECORE_FILTER_ADD results in an empty bin set, i.e. the
+ * multicast configuration is cleared.
+ */
+void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
+                             struct ecore_filter_mcast *p_filter_cmd)
+{
+       struct ecore_sp_vport_update_params sp_params;
+
+       OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
+       sp_params.update_approx_mcast_flg = 1;
+
+       if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
+               int idx;
+
+               for (idx = 0; idx < p_filter_cmd->num_mc_addrs; idx++) {
+                       u32 bin;
+
+                       bin = ecore_mcast_bin_from_mac(p_filter_cmd->mac[idx]);
+                       OSAL_SET_BIT(bin, sp_params.bins);
+               }
+       }
+
+       ecore_vf_pf_vport_update(p_hwfn, &sp_params);
+}
+
+/* Forward a unicast MAC/VLAN filter command to the PF over the mailbox.
+ * ECORE_FILTER_MOVE is rejected up front - VFs cannot move filters.
+ */
+enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_filter_ucast
+                                             *p_ucast)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *p_resp = &p_iov->pf2vf_reply->default_resp;
+       struct vfpf_ucast_filter_tlv *p_req;
+       int rc;
+
+       /* Sanitize */
+       if (p_ucast->opcode == ECORE_FILTER_MOVE) {
+               DP_NOTICE(p_hwfn, true,
+                         "VFs don't support Moving of filters\n");
+               return ECORE_INVAL;
+       }
+
+       /* Reset the mailbox and fill in the filter request */
+       p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER,
+                                sizeof(*p_req));
+       p_req->opcode = (u8)p_ucast->opcode;
+       p_req->type = (u8)p_ucast->type;
+       p_req->vlan = p_ucast->vlan;
+       OSAL_MEMCPY(p_req->mac, p_ucast->mac, ETH_ALEN);
+
+       /* Terminate the TLV list */
+       ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+       if (rc != 0)
+               return rc;
+
+       if (p_resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_AGAIN;
+
+       return ECORE_SUCCESS;
+}
+
+/* Ask the PF to perform interrupt/status-block cleanup for this VF via
+ * CHANNEL_TLV_INT_CLEANUP (header-only request).
+ */
+enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct pfvf_def_resp_tlv *p_resp = &p_iov->pf2vf_reply->default_resp;
+       int rc;
+
+       /* Reset the mailbox and place the request header */
+       ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+                        sizeof(struct vfpf_first_tlv));
+
+       /* Terminate the TLV list */
+       ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+       if (rc != 0)
+               return rc;
+
+       if (p_resp->hdr.status != PFVF_STATUS_SUCCESS)
+               return ECORE_INVAL;
+
+       return ECORE_SUCCESS;
+}
+
+/* Snapshot the PF's bulletin board and, if its version advanced and its CRC
+ * validates, commit it as the local shadow copy.  *p_change is set to 1
+ * only when a new valid bulletin was accepted.
+ */
+enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
+                                           u8 *p_change)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+       struct ecore_bulletin_content shadow;
+       u32 crc, crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+
+       *p_change = 0;
+
+       /* Copy first so the PF cannot modify the board mid-read */
+       OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+       /* Same version as last time - nothing new */
+       if (shadow.version == p_iov->bulletin_shadow.version)
+               return ECORE_SUCCESS;
+
+       /* Validate the snapshot; the CRC covers everything past the crc
+        * field itself.
+        */
+       crc = ecore_crc32(0, (u8 *)&shadow + crc_size,
+                         p_iov->bulletin.size - crc_size);
+       if (crc != shadow.crc)
+               return ECORE_AGAIN;
+
+       /* Commit the validated snapshot as the new shadow */
+       OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "Read a bulletin update %08x\n", shadow.version);
+
+       *p_change = 1;
+
+       return ECORE_SUCCESS;
+}
+
+/* Translate a VF-relative status-block index into the HW IGU SB id granted
+ * at ACQUIRE time; returns 0 if the VF IOV info was never set up.
+ */
+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+       if (p_iov == OSAL_NULL) {
+               DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
+               return 0;
+       }
+
+       return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+/* Fill p_params with the requested-link settings published by the PF in the
+ * given bulletin-board snapshot.
+ */
+void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+                               struct ecore_mcp_link_params *p_params,
+                               struct ecore_bulletin_content *p_bulletin)
+{
+       OSAL_MEMSET(p_params, 0, sizeof(*p_params));
+
+       p_params->loopback_mode = p_bulletin->req_loopback;
+
+       /* Pause request */
+       p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+       p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+       p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+
+       /* Speed request */
+       p_params->speed.autoneg = p_bulletin->req_autoneg;
+       p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+       p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+}
+
+/* Convenience wrapper reading link params from the current shadow bulletin */
+void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+                             struct ecore_mcp_link_params *params)
+{
+       struct ecore_bulletin_content *p_shadow;
+
+       p_shadow = &p_hwfn->vf_iov_info->bulletin_shadow;
+       __ecore_vf_get_link_params(p_hwfn, params, p_shadow);
+}
+
+/* Fill p_link with the current link state the PF published in the given
+ * bulletin-board snapshot.
+ */
+void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+                              struct ecore_mcp_link_state *p_link,
+                              struct ecore_bulletin_content *p_bulletin)
+{
+       OSAL_MEMSET(p_link, 0, sizeof(*p_link));
+
+       /* Local side */
+       p_link->link_up = p_bulletin->link_up;
+       p_link->speed = p_bulletin->speed;
+       p_link->full_duplex = p_bulletin->full_duplex;
+       p_link->an = p_bulletin->autoneg;
+       p_link->an_complete = p_bulletin->autoneg_complete;
+       p_link->parallel_detection = p_bulletin->parallel_detection;
+       p_link->pfc_enabled = p_bulletin->pfc_enabled;
+       p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+
+       /* Link-partner side */
+       p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+       p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+       p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+       p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+}
+
+/* Convenience wrapper reading link state from the current shadow bulletin */
+void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+                            struct ecore_mcp_link_state *link)
+{
+       struct ecore_bulletin_content *p_shadow;
+
+       p_shadow = &p_hwfn->vf_iov_info->bulletin_shadow;
+       __ecore_vf_get_link_state(p_hwfn, link, p_shadow);
+}
+
+/* Fill p_link_caps with the speed capabilities the PF published in the
+ * given bulletin-board snapshot.
+ */
+void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+                             struct ecore_mcp_link_capabilities *p_link_caps,
+                             struct ecore_bulletin_content *p_bulletin)
+{
+       OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
+
+       p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+/* Convenience wrapper reading link caps from the current shadow bulletin */
+void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+                           struct ecore_mcp_link_capabilities *p_link_caps)
+{
+       struct ecore_bulletin_content *p_shadow;
+
+       p_shadow = &p_hwfn->vf_iov_info->bulletin_shadow;
+       __ecore_vf_get_link_caps(p_hwfn, p_link_caps, p_shadow);
+}
+
+/* Report the number of Rx queues the PF granted in the ACQUIRE response */
+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
+{
+       struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+       *num_rxqs = p_iov->acquire_resp.resc.num_rxqs;
+}
+
+/* Copy the physical-port MAC the PF reported in the ACQUIRE response */
+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
+{
+       struct pf_vf_pfdev_info *p_info;
+
+       p_info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+       OSAL_MEMCPY(port_mac, p_info->port_mac, ETH_ALEN);
+}
+
+/* Report the number of VLAN filters granted in the ACQUIRE response */
+void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
+                                  u8 *num_vlan_filters)
+{
+       *num_vlan_filters =
+           p_hwfn->vf_iov_info->acquire_resp.resc.num_vlan_filters;
+}
+
+/* Check whether the VF may configure the given MAC address.  If the PF has
+ * not forced a MAC via the bulletin board, any MAC is acceptable.
+ *
+ * NOTE(review): when a MAC *is* forced, both the mismatch branch and the
+ * final fall-through return false, so the OSAL_MEMCMP() result is currently
+ * irrelevant - a forced MAC always rejects the request.  Presumably the
+ * final statement was meant to be 'return true' for a MAC that matches the
+ * forced one; confirm against the PF-side policy before changing.
+ */
+bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
+{
+       struct ecore_bulletin_content *bulletin;
+
+       bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+       if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+               return true;
+
+       /* Forbid VF from changing a MAC enforced by PF */
+       if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
+               return false;
+
+       return false;
+}
+
+/* Fetch the MAC currently published in the shadow bulletin, reporting via
+ * p_is_forced (if non-NULL) whether the PF enforced it.  Returns false when
+ * the bulletin carries no MAC at all, leaving dst_mac untouched.
+ */
+bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
+                                     u8 *p_is_forced)
+{
+       struct ecore_bulletin_content *bulletin;
+       u8 forced;
+
+       bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+       if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))
+               forced = 1;
+       else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR))
+               forced = 0;
+       else
+               return false;
+
+       if (p_is_forced)
+               *p_is_forced = forced;
+
+       OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);
+
+       return true;
+}
+
+/* Fetch the PF-forced PVID from the shadow bulletin into dst_pvid (if
+ * non-NULL).  Returns false when no VLAN is being forced.
+ */
+bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
+{
+       struct ecore_bulletin_content *p_bulletin;
+
+       p_bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+       if (!(p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+               return false;
+
+       if (dst_pvid != OSAL_NULL)
+               *dst_pvid = p_bulletin->pvid;
+
+       return true;
+}
+
+/* Return the four firmware version components the PF reported in the
+ * ACQUIRE response.
+ */
+void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
+                            u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
+                            u16 *fw_eng)
+{
+       struct pf_vf_pfdev_info *p_info =
+           &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+       *fw_major = p_info->fw_major;
+       *fw_minor = p_info->fw_minor;
+       *fw_rev = p_info->fw_rev;
+       *fw_eng = p_info->fw_eng;
+}
diff --git a/drivers/net/qede/ecore/ecore_vf.h 
b/drivers/net/qede/ecore/ecore_vf.h
new file mode 100644
index 0000000..a006dac
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_vf.h
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_VF_H__
+#define __ECORE_VF_H__
+
+#include "ecore_status.h"
+#include "ecore_vf_api.h"
+#include "ecore_l2_api.h"
+#include "ecore_vfpf_if.h"
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ *
+ * @brief hw preparation for VF
+ *     sends ACQUIRE message
+ *
+ * @param p_dev
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev);
+
+/**
+ *
+ * @brief VF init in hw (equivalent to hw_init in PF)
+ *      mark interrupts as enabled
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn);
+
+/**
+ *
+ * @brief VF - start the RX Queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param cid                  - zero based within the VF
+ * @param rx_queue_id          - zero based within the VF
+ * @param sb                   - VF status block for this queue
+ * @param sb_index             - Index within the status block
+ * @param bd_max_bytes         - maximum number of bytes per bd
+ * @param bd_chain_phys_addr   - physical address of bd chain
+ * @param cqe_pbl_addr         - physical address of pbl
+ * @param cqe_pbl_size         - pbl size
+ * @param pp_prod              - pointer to the producer to be
+ *         used in fastpath
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+                                          u8 rx_queue_id,
+                                          u16 sb,
+                                          u8 sb_index,
+                                          u16 bd_max_bytes,
+                                          dma_addr_t bd_chain_phys_addr,
+                                          dma_addr_t cqe_pbl_addr,
+                                          u16 cqe_pbl_size,
+                                          void OSAL_IOMEM **pp_prod);
+
+/**
+ *
+ * @brief VF - start the TX queue by sending a message to the
+ *        PF.
+ *
+ * @param p_hwfn
+ * @param tx_queue_id          - zero based within the VF
+ * @param sb                   - status block for this queue
+ * @param sb_index             - index within the status block
+ * @param pbl_addr             - physical address of pbl
+ * @param pbl_size             - pbl size
+ * @param pp_doorbell          - pointer to address to which to
+ *             write the doorbell to
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+                                          u16 tx_queue_id,
+                                          u16 sb,
+                                          u8 sb_index,
+                                          dma_addr_t pbl_addr,
+                                          u16 pbl_size,
+                                          void OSAL_IOMEM **pp_doorbell);
+
+/**
+ *
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param rx_qid
+ * @param cqe_completion
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+                                         u16 rx_qid, bool cqe_completion);
+
+/**
+ *
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param tx_qid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+                                         u16 tx_qid);
+
+/**
+ * @brief VF - update the RX queue by sending a message to the
+ *        PF
+ *
+ * @param p_hwfn
+ * @param rx_queue_id
+ * @param num_rxqs
+ * @param init_sge_ring
+ * @param comp_cqe_flg
+ * @param comp_event_flg
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+                                            u16 rx_queue_id,
+                                            u8 num_rxqs,
+                                            u8 comp_cqe_flg,
+                                            u8 comp_event_flg);
+
+/**
+ *
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
+                        struct ecore_sp_vport_update_params *p_params);
+
+/**
+ *
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn);
+
+/**
+ *
+ * @brief VF - free vf`s memories
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
+
+/**
+ *
+ * @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ *        sb_id. For VFs igu sbs don't have to be contiguous
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return INLINE u16
+ */
+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief ecore_vf_pf_vport_start - perform vport start for VF.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param mtu
+ * @param inner_vlan_removal
+ * @param tpa_mode
+ * @param max_buffers_per_cqe
+ * @param only_untagged - default behavior regarding vlan acceptance
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn,
+                                            u8 vport_id,
+                                            u16 mtu,
+                                            u8 inner_vlan_removal,
+                                            enum ecore_tpa_mode tpa_mode,
+                                            u8 max_buffers_per_cqe,
+                                            u8 only_untagged);
+
+/**
+ * @brief ecore_vf_pf_vport_stop - stop the VF's vport
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn);
+
+enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_filter_ucast
+                                             *p_param);
+
+void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
+                             struct ecore_filter_mcast *p_filter_cmd);
+
+/**
+ * @brief ecore_vf_pf_int_cleanup - clean the SB of the VF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link params in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_params - pointer to a struct to fill with link params
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+                               struct ecore_mcp_link_params *p_params,
+                               struct ecore_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link state in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link state
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+                              struct ecore_mcp_link_state *p_link,
+                              struct ecore_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link capabilities in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link capabilities
+ * @param p_bulletin
+ */
+void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+                             struct ecore_mcp_link_capabilities *p_link_caps,
+                             struct ecore_bulletin_content *p_bulletin);
+
+#else
+static OSAL_INLINE enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev
+                                                           *p_dev)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn
+                                                        *p_hwfn)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn
+                                                             *p_hwfn,
+                                                             u8 rx_queue_id,
+                                                             u16 sb,
+                                                             u8 sb_index,
+                                                             u16 bd_max_bytes,
+                                                             dma_addr_t
+                                                             bd_chain_phys_adr,
+                                                             dma_addr_t
+                                                             cqe_pbl_addr,
+                                                             u16 cqe_pbl_size,
+                                                             void OSAL_IOMEM **
+                                                             pp_prod)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn
+                                                             *p_hwfn,
+                                                             u16 tx_queue_id,
+                                                             u16 sb,
+                                                             u8 sb_index,
+                                                             dma_addr_t
+                                                             pbl_addr,
+                                                             u16 pbl_size,
+                                                             void OSAL_IOMEM **
+                                                             pp_doorbell)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn
+                                                            *p_hwfn,
+                                                            u16 rx_qid,
+                                                            bool
+                                                            cqe_completion)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn
+                                                            *p_hwfn,
+                                                            u16 tx_qid)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxqs_update(struct
+                                                               ecore_hwfn
+                                                               *p_hwfn,
+                                                               u16 rx_queue_id,
+                                                               u8 num_rxqs,
+                                                               u8 comp_cqe_flg,
+                                                               u8
+                                                               comp_event_flg)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_update(
+       struct ecore_hwfn *p_hwfn,
+       struct ecore_sp_vport_update_params *p_params)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn
+                                                         *p_hwfn)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn
+                                                           *p_hwfn)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
+                                             u16 sb_id)
+{
+       return 0;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(
+       struct ecore_hwfn *p_hwfn, u8 vport_id, u16 mtu,
+       u8 inner_vlan_removal, enum ecore_tpa_mode tpa_mode,
+       u8 max_buffers_per_cqe, u8 only_untagged)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop(
+       struct ecore_hwfn *p_hwfn)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast(
+        struct ecore_hwfn *p_hwfn, struct ecore_filter_ucast *p_param)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_filter_mcast
+                                                *p_filter_cmd)
+{
+}
+
+static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_int_cleanup(struct
+                                                               ecore_hwfn
+                                                               *p_hwfn)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+                                                  struct ecore_mcp_link_params
+                                                  *p_params,
+                                                  struct ecore_bulletin_content
+                                                  *p_bulletin)
+{
+}
+
+static OSAL_INLINE void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+                                                 struct ecore_mcp_link_state
+                                                 *p_link,
+                                                 struct ecore_bulletin_content
+                                                 *p_bulletin)
+{
+}
+
+static OSAL_INLINE void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+                                                struct
+                                                ecore_mcp_link_capabilities
+                                                *p_link_caps,
+                                                struct ecore_bulletin_content
+                                                *p_bulletin)
+{
+}
+#endif
+
+#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/ecore/ecore_vf_api.h b/drivers/net/qede/ecore/ecore_vf_api.h
new file mode 100644
index 0000000..47a58a1
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_vf_api.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_VF_API_H__
+#define __ECORE_VF_API_H__
+
+#include "ecore_sp_api.h"
+#include "ecore_mcp_api.h"
+
+#ifdef CONFIG_ECORE_SRIOV
+/**
+ * @brief Read the VF bulletin and act on it if needed
+ *
+ * @param p_hwfn
+ * @param p_change - ecore fills 1 iff bulletin board has changed, 0 otherwise.
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
+                                           u8 *p_change);
+
+/**
+ * @brief Get link parameters for VF from ecore
+ *
+ * @param p_hwfn
+ * @param params - the link params structure to be filled for the VF
+ */
+void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+                             struct ecore_mcp_link_params *params);
+
+/**
+ * @brief Get link state for VF from ecore
+ *
+ * @param p_hwfn
+ * @param link - the link state structure to be filled for the VF
+ */
+void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+                            struct ecore_mcp_link_state *link);
+
+/**
+ * @brief Get link capabilities for VF from ecore
+ *
+ * @param p_hwfn
+ * @param p_link_caps - the link capabilities structure to be filled for the VF
+ */
+void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+                           struct ecore_mcp_link_capabilities *p_link_caps);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by ecore
+ *
+ *  @param p_hwfn
+ *  @param num_rxqs - allocated RX queues
+ */
+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs);
+
+/**
+ * @brief Get port mac address for VF
+ *
+ * @param p_hwfn
+ * @param port_mac - destination location for port mac
+ */
+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac);
+
+/**
+ * @brief Get number of VLAN filters allocated for VF by ecore
+ *
+ *  @param p_hwfn
+ *  @param num_vlan_filters - allocated VLAN filters
+ */
+void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
+                                  u8 *num_vlan_filters);
+
+/**
+ * @brief Check if VF can set a MAC address
+ *
+ * @param p_hwfn
+ * @param mac
+ *
+ * @return bool
+ */
+bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac);
+
+/**
+ * @brief Copy forced MAC address from bulletin board
+ *
+ * @param hwfn
+ * @param dst_mac
+ * @param p_is_forced - out param which indicate in case mac
+ *             exist if it forced or not.
+ *
+ * @return bool       - return true if mac exist and false if
+ *                      not.
+ */
+bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
+                                     u8 *p_is_forced);
+
+/**
+ * @brief Check if force vlan is set and copy the forced vlan
+ *        from bulletin board
+ *
+ * @param hwfn
+ * @param dst_pvid
+ * @return bool
+ */
+bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid);
+
+/**
+ * @brief Set firmware version information in dev_info from VFs acquire response tlv
+ *
+ * @param p_hwfn
+ * @param fw_major
+ * @param fw_minor
+ * @param fw_rev
+ * @param fw_eng
+ */
+void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
+                            u16 *fw_major,
+                            u16 *fw_minor, u16 *fw_rev, u16 *fw_eng);
+#else
+static OSAL_INLINE enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn
+                                                              *p_hwfn,
+                                                              u8 *p_change)
+{
+       return ECORE_INVAL;
+}
+
+static OSAL_INLINE void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
+                                                struct ecore_mcp_link_params
+                                                *params)
+{
+}
+
+static OSAL_INLINE void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
+                                               struct ecore_mcp_link_state
+                                               *link)
+{
+}
+
+static OSAL_INLINE void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
+                                              struct
+                                              ecore_mcp_link_capabilities
+                                              *p_link_caps)
+{
+}
+
+static OSAL_INLINE void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
+                                             u8 *num_rxqs)
+{
+}
+
+static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
+                                             u8 *port_mac)
+{
+}
+
+static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
+                                                     u8 *num_vlan_filters)
+{
+}
+
+static OSAL_INLINE bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
+{
+       return false;
+}
+
+static OSAL_INLINE bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn
+                                                        *hwfn, u8 *dst_mac,
+                                                        u8 *p_is_forced)
+{
+       return false;
+}
+
+static OSAL_INLINE void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
+                                               u16 *fw_major, u16 *fw_minor,
+                                               u16 *fw_rev, u16 *fw_eng)
+{
+}
+#endif
+#endif
diff --git a/drivers/net/qede/ecore/ecore_vfpf_if.h b/drivers/net/qede/ecore/ecore_vfpf_if.h
new file mode 100644
index 0000000..d5b671d
--- /dev/null
+++ b/drivers/net/qede/ecore/ecore_vfpf_if.h
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_VF_PF_IF_H__
+#define __ECORE_VF_PF_IF_H__
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10
+#ifndef aligned_u64
+#define aligned_u64 u64
+#endif
+
+/***********************************************
+ *
+ * Common definitions for all HVs
+ *
+ **/
+struct vf_pf_resc_request {
+       u8 num_rxqs;
+       u8 num_txqs;
+       u8 num_sbs;
+       u8 num_mac_filters;
+       u8 num_vlan_filters;
+       u8 num_mc_filters;      /* No limit  so superfluous */
+       u16 padding;
+};
+
+struct hw_sb_info {
+       u16 hw_sb_id;           /* aka absolute igu id, used to ack the sb */
+       u8 sb_qid;              /* used to update DHC for sb */
+       u8 padding[5];
+};
+
+/***********************************************
+ *
+ * HW VF-PF channel definitions
+ *
+ * A.K.A VF-PF mailbox
+ *
+ **/
+#define TLV_BUFFER_SIZE                1024
+#define TLV_ALIGN              sizeof(u64)
+#define PF_VF_BULLETIN_SIZE    512
+
+#define VFPF_RX_MASK_ACCEPT_NONE               0x00000000
+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST     0x00000001
+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST   0x00000002
+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST        0x00000004
+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST       0x00000008
+#define VFPF_RX_MASK_ACCEPT_BROADCAST  0x00000010
+/* TODO: #define VFPF_RX_MASK_ACCEPT_ANY_VLAN   0x00000020 */
+
+#define BULLETIN_CONTENT_SIZE  (sizeof(struct pf_vf_bulletin_content))
+#define BULLETIN_ATTEMPTS       5      /* crc failures before throwing towel */
+#define BULLETIN_CRC_SEED       0
+
+enum {
+       PFVF_STATUS_WAITING = 0,
+       PFVF_STATUS_SUCCESS,
+       PFVF_STATUS_FAILURE,
+       PFVF_STATUS_NOT_SUPPORTED,
+       PFVF_STATUS_NO_RESOURCE,
+       PFVF_STATUS_FORCED,
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+       u16 type;
+       u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+       struct channel_tlv tl;
+       u32 padding;
+       aligned_u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+       struct channel_tlv tl;
+       u8 status;
+       u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+       struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+       struct channel_tlv tl;
+       u8 padding[4];
+};
+
+/* Acquire */
+struct vfpf_acquire_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       struct vf_pf_vfdev_info {
+               aligned_u64 capabilties;
+               u8 fw_major;
+               u8 fw_minor;
+               u8 fw_revision;
+               u8 fw_engineering;
+               u32 driver_version;
+               u16 opaque_fid; /* ME register value */
+               u8 os_type;     /* VFPF_ACQUIRE_OS_* value */
+               u8 padding[5];
+       } vfdev_info;
+
+       struct vf_pf_resc_request resc_request;
+
+       aligned_u64 bulletin_addr;
+       u32 bulletin_size;
+       u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+       struct channel_tlv tl;
+
+       u8 update_rss_flags;
+#define VFPF_UPDATE_RSS_CONFIG_FLAG      (1 << 0)
+#define VFPF_UPDATE_RSS_CAPS_FLAG        (1 << 1)
+#define VFPF_UPDATE_RSS_IND_TABLE_FLAG   (1 << 2)
+#define VFPF_UPDATE_RSS_KEY_FLAG         (1 << 3)
+
+       u8 rss_enable;
+       u8 rss_caps;
+       u8 rss_table_size_log;  /* The table size is 2 ^ rss_table_size_log */
+       u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+       u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
+
+struct pfvf_storm_stats {
+       u32 address;
+       u32 len;
+};
+
+struct pfvf_stats_info {
+       struct pfvf_storm_stats mstats;
+       struct pfvf_storm_stats pstats;
+       struct pfvf_storm_stats tstats;
+       struct pfvf_storm_stats ustats;
+};
+
+/* acquire response tlv - carries the allocated resources */
+struct pfvf_acquire_resp_tlv {
+       struct pfvf_tlv hdr;
+
+       struct pf_vf_pfdev_info {
+               u32 chip_num;
+               u32 mfw_ver;
+
+               u16 fw_major;
+               u16 fw_minor;
+               u16 fw_rev;
+               u16 fw_eng;
+
+               aligned_u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED      (1 << 0)
+
+               u16 db_size;
+               u8 indices_per_sb;
+               u8 os_type;
+
+               /* These should match the PF's ecore_dev values */
+               u16 chip_rev;
+               u8 dev_type;
+
+               u8 padding;
+
+               struct pfvf_stats_info stats_info;
+
+               u8 port_mac[ETH_ALEN];
+               u8 padding2[2];
+       } pfdev_info;
+
+       struct pf_vf_resc {
+               /* in case of status NO_RESOURCE in message hdr, pf will fill
+                * this struct with suggested amount of resources for next
+                * acquire request
+                */
+#define PFVF_MAX_QUEUES_PER_VF         16
+#define PFVF_MAX_SBS_PER_VF            16
+               struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+               u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+               u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+               u8 num_rxqs;
+               u8 num_txqs;
+               u8 num_sbs;
+               u8 num_mac_filters;
+               u8 num_vlan_filters;
+               u8 num_mc_filters;
+               u8 padding[2];
+       } resc;
+
+       u32 bulletin_size;
+       u32 padding;
+};
+
+/* Init VF */
+struct vfpf_init_tlv {
+       struct vfpf_first_tlv first_tlv;
+       aligned_u64 stats_addr;
+
+       u16 rx_mask;
+       u16 tx_mask;
+       u8 drop_ttl0_flg;
+       u8 padding[3];
+
+};
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       /* physical addresses */
+       aligned_u64 rxq_addr;
+       aligned_u64 deprecated_sge_addr;
+       aligned_u64 cqe_pbl_addr;
+
+       u16 cqe_pbl_size;
+       u16 hw_sb;
+       u16 rx_qid;
+       u16 hc_rate;            /* desired interrupts per sec. */
+
+       u16 bd_max_bytes;
+       u16 stat_id;
+       u8 sb_index;
+       u8 padding[3];
+
+};
+
+struct vfpf_start_txq_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       /* physical addresses */
+       aligned_u64 pbl_addr;
+       u16 pbl_size;
+       u16 stat_id;
+       u16 tx_qid;
+       u16 hw_sb;
+
+       u32 flags;              /* VFPF_QUEUE_FLG_X flags */
+       u16 hc_rate;            /* desired interrupts per sec. */
+       u8 sb_index;
+       u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       u16 rx_qid;
+       u8 num_rxqs;
+       u8 cqe_completion;
+       u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       u16 tx_qid;
+       u8 num_txqs;
+       u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       aligned_u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+       u16 rx_qid;
+       u8 num_rxqs;
+       u8 flags;
+#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG   (1 << 0)
+#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG         (1 << 1)
+#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG       (1 << 2)
+
+       u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+       u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID    0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID    0x02
+#define VFPF_Q_FILTER_SET_MAC  0x100   /* set/clear */
+
+       u8 mac[ETH_ALEN];
+       u16 vlan_tag;
+
+       u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+       u32 tpa_mode;
+       u16 dep1;
+       u16 mtu;
+
+       u8 vport_id;
+       u8 inner_vlan_removal;
+
+       u8 only_untagged;
+       u8 max_buffers_per_cqe;
+
+       u8 padding[4];
+};
+
+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
+struct vfpf_vport_update_activate_tlv {
+       struct channel_tlv tl;
+       u8 update_rx;
+       u8 update_tx;
+       u8 active_rx;
+       u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+       struct channel_tlv tl;
+       u8 tx_switching;
+       u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+       struct channel_tlv tl;
+       u8 remove_vlan;
+       u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+       struct channel_tlv tl;
+       u8 padding[4];
+
+       aligned_u64 bins[8];
+};
+
+struct vfpf_vport_update_accept_param_tlv {
+       struct channel_tlv tl;
+       u8 update_rx_mode;
+       u8 update_tx_mode;
+       u8 rx_accept_filter;
+       u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+       struct channel_tlv tl;
+       u8 update_accept_any_vlan_flg;
+       u8 accept_any_vlan;
+
+       u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+       struct channel_tlv tl;
+
+       u16 sge_tpa_flags;
+#define VFPF_TPA_IPV4_EN_FLAG       (1 << 0)
+#define VFPF_TPA_IPV6_EN_FLAG        (1 << 1)
+#define VFPF_TPA_PKT_SPLIT_FLAG      (1 << 2)
+#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
+#define VFPF_TPA_GRO_CONSIST_FLAG    (1 << 4)
+
+       u8 update_sge_tpa_flags;
+#define VFPF_UPDATE_SGE_DEPRECATED_FLAG           (1 << 0)
+#define VFPF_UPDATE_TPA_EN_FLAG    (1 << 1)
+#define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)
+
+       u8 max_buffers_per_cqe;
+
+       u16 deprecated_sge_buff_size;
+       u16 tpa_max_size;
+       u16 tpa_min_size_to_start;
+       u16 tpa_min_size_to_cont;
+
+       u8 tpa_max_aggs_num;
+       u8 padding[7];
+
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+       struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+       struct vfpf_first_tlv first_tlv;
+
+       u8 opcode;
+       u8 type;
+
+       u8 mac[ETH_ALEN];
+
+       u16 vlan;
+       u16 padding[3];
+};
+
+struct tlv_buffer_size {
+       u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+       struct vfpf_first_tlv first_tlv;
+       struct vfpf_acquire_tlv acquire;
+       struct vfpf_init_tlv init;
+       struct vfpf_start_rxq_tlv start_rxq;
+       struct vfpf_start_txq_tlv start_txq;
+       struct vfpf_stop_rxqs_tlv stop_rxqs;
+       struct vfpf_stop_txqs_tlv stop_txqs;
+       struct vfpf_update_rxq_tlv update_rxq;
+       struct vfpf_vport_start_tlv start_vport;
+       struct vfpf_vport_update_tlv vport_update;
+       struct vfpf_ucast_filter_tlv ucast_filter;
+       struct channel_list_end_tlv list_end;
+       struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+       struct pfvf_def_resp_tlv default_resp;
+       struct pfvf_acquire_resp_tlv acquire_resp;
+       struct channel_list_end_tlv list_end;
+       struct tlv_buffer_size tlv_buf_size;
+};
+
+/* This is a structure which is allocated in the VF, which the PF may update
+ * when it deems it necessary to do so. The bulletin board is sampled
+ * periodically by the VF. A copy per VF is maintained in the PF (to prevent
+ * loss of data upon multiple updates (or the need for read modify write)).
+ */
+enum ecore_bulletin_bit {
+       /* Alert the VF that a forced MAC was set by the PF */
+       MAC_ADDR_FORCED = 0,
+
+       /* The VF should not access the vfpf channel */
+       VFPF_CHANNEL_INVALID = 1,
+
+       /* Alert the VF that a forced VLAN was set by the PF */
+       VLAN_ADDR_FORCED = 2,
+
+       /* Indicate that `default_only_untagged' contains actual data */
+       VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+       VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+       /* Alert the VF that suggested mac was sent by the PF.
+        * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set */
+       VFPF_BULLETIN_MAC_ADDR = 5
+};
+
+struct ecore_bulletin_content {
+       u32 crc;                /* crc of structure to ensure is not in
+                                * mid-update
+                                */
+       u32 version;
+
+       aligned_u64 valid_bitmap;       /* bitmap indicating which fields
+                                        * hold valid values
+                                        */
+
+       u8 mac[ETH_ALEN];       /* used for MAC_ADDR or MAC_ADDR_FORCED */
+
+       u8 default_only_untagged;       /* If valid, 1 => only untagged Rx
+                                        * if no vlan filter is configured.
+                                        */
+       u8 padding;
+
+       /* The following is a 'copy' of ecore_mcp_link_state,
+        * ecore_mcp_link_params and ecore_mcp_link_capabilities. Since it's
+        * possible the structs will increase further along the road we cannot
+        * have it here; Instead we need to have all of its fields.
+        */
+       u8 req_autoneg;
+       u8 req_autoneg_pause;
+       u8 req_forced_rx;
+       u8 req_forced_tx;
+       u8 padding2[4];
+
+       u32 req_adv_speed;
+       u32 req_forced_speed;
+       u32 req_loopback;
+       u32 padding3;
+
+       u8 link_up;
+       u8 full_duplex;
+       u8 autoneg;
+       u8 autoneg_complete;
+       u8 parallel_detection;
+       u8 pfc_enabled;
+       u8 partner_tx_flow_ctrl_en;
+       u8 partner_rx_flow_ctrl_en;
+       u8 partner_adv_pause;
+       u8 sfp_tx_fault;
+       u8 padding4[6];
+
+       u32 speed;
+       u32 partner_adv_speed;
+
+       u32 capability_speed;
+
+       /* Forced vlan */
+       u16 pvid;
+       u16 padding5;
+};
+
+struct ecore_bulletin {
+       dma_addr_t phys;
+       struct ecore_bulletin_content *p_virt;
+       u32 size;
+};
+
+#ifndef print_enum
+enum {
+/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
+
+       CHANNEL_TLV_NONE,       /* ends tlv sequence */
+       CHANNEL_TLV_ACQUIRE,
+       CHANNEL_TLV_VPORT_START,
+       CHANNEL_TLV_VPORT_UPDATE,
+       CHANNEL_TLV_VPORT_TEARDOWN,
+       CHANNEL_TLV_START_RXQ,
+       CHANNEL_TLV_START_TXQ,
+       CHANNEL_TLV_STOP_RXQS,
+       CHANNEL_TLV_STOP_TXQS,
+       CHANNEL_TLV_UPDATE_RXQ,
+       CHANNEL_TLV_INT_CLEANUP,
+       CHANNEL_TLV_CLOSE,
+       CHANNEL_TLV_RELEASE,
+       CHANNEL_TLV_LIST_END,
+       CHANNEL_TLV_UCAST_FILTER,
+       CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+       CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+       CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+       CHANNEL_TLV_VPORT_UPDATE_MCAST,
+       CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+       CHANNEL_TLV_VPORT_UPDATE_RSS,
+       CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+       CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+       CHANNEL_TLV_MAX
+/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
+};
+extern const char *ecore_channel_tlvs_string[];
+
+#else
+print_enum(channel_tlvs, CHANNEL_TLV_NONE,     /* ends tlv sequence */
+          CHANNEL_TLV_ACQUIRE,
+          CHANNEL_TLV_VPORT_START,
+          CHANNEL_TLV_VPORT_UPDATE,
+          CHANNEL_TLV_VPORT_TEARDOWN,
+          CHANNEL_TLV_SETUP_RXQ,
+          CHANNEL_TLV_SETUP_TXQ,
+          CHANNEL_TLV_STOP_RXQS,
+          CHANNEL_TLV_STOP_TXQS,
+          CHANNEL_TLV_UPDATE_RXQ,
+          CHANNEL_TLV_INT_CLEANUP,
+          CHANNEL_TLV_CLOSE,
+          CHANNEL_TLV_RELEASE,
+          CHANNEL_TLV_LIST_END,
+          CHANNEL_TLV_UCAST_FILTER,
+          CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+          CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+          CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+          CHANNEL_TLV_VPORT_UPDATE_MCAST,
+          CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+          CHANNEL_TLV_VPORT_UPDATE_RSS,
+          CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+          CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_MAX);
+#endif
+
+#endif /* __ECORE_VF_PF_IF_H__ */
diff --git a/drivers/net/qede/ecore/eth_common.h b/drivers/net/qede/ecore/eth_common.h
new file mode 100644
index 0000000..046bbb2
--- /dev/null
+++ b/drivers/net/qede/ecore/eth_common.h
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ETH_COMMON__
+#define __ETH_COMMON__
+/********************/
+/* ETH FW CONSTANTS */
+/********************/
+#define ETH_CACHE_LINE_SIZE                 64
+#define ETH_RX_CQE_GAP                                         32
+#define ETH_MAX_RAMROD_PER_CON                         8
+#define ETH_TX_BD_PAGE_SIZE_BYTES                      4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES                      4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES                     4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS                       2
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                         1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                      18
+#define ETH_TX_MAX_LSO_HDR_NBD                                         4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT                                     3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT      3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT           2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE         2
+#define ETH_TX_MAX_NON_LSO_PKT_LEN                  (9700 - (4 + 12 + 8))
+#define ETH_TX_MAX_LSO_HDR_BYTES                    510
+#define ETH_TX_LSO_WINDOW_BDS_NUM                   18
+#define ETH_TX_LSO_WINDOW_MIN_LEN                   9700
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN                  0xFFFF
+
+#define ETH_NUM_STATISTIC_COUNTERS                     MAX_NUM_VPORTS
+
+#define ETH_RX_MAX_BUFF_PER_PKT             5
+
+/* num of MAC/VLAN filters */
+#define ETH_NUM_MAC_FILTERS                                    512
+#define ETH_NUM_VLAN_FILTERS                           512
+
+/* approx. multicast constants */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED            0
+#define ETH_MULTICAST_MAC_BINS                         256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS         (ETH_MULTICAST_MAC_BINS / 32)
+
+/*  ethernet vport update constants */
+#define ETH_FILTER_RULES_COUNT                         10
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM          128
+#define ETH_RSS_KEY_SIZE_REGS                      10
+#define ETH_RSS_ENGINE_NUM_K2               207
+#define ETH_RSS_ENGINE_NUM_BB               127
+
+/* TPA constants */
+#define ETH_TPA_MAX_AGGS_NUM              64
+#define ETH_TPA_CQE_START_LEN_LIST_SIZE   ETH_RX_MAX_BUFF_PER_PKT
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE    6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE     4
+
+/*
+ * Interrupt coalescing TimeSet
+ */
+struct coalescing_timeset {
+       u8 timeset;
+       u8 valid /* Only if this flag is set, timeset will take effect */;
+};
+
+/*
+ * Destination port mode
+ */
+enum dest_port_mode {
+       DEST_PORT_PHY /* Send to physical port. */,
+       DEST_PORT_LOOPBACK /* Send to loopback port. */,
+       DEST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */,
+       DEST_PORT_DROP /* Drop the packet in PBF. */,
+       MAX_DEST_PORT_MODE
+};
+
+/*
+ * Ethernet address type
+ */
+enum eth_addr_type {
+       BROADCAST_ADDRESS,
+       MULTICAST_ADDRESS,
+       UNICAST_ADDRESS,
+       UNKNOWN_ADDRESS,
+       MAX_ETH_ADDR_TYPE
+};
+
+/* Per-packet offload flags carried in the first Tx BD (one bit each) */
+struct eth_tx_1st_bd_flags {
+       u8 bitfields;
+/* Bit 0: marks the first BD of a packet */
+#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK         0x1
+#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT        0
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK  0x1
+#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
+/* Bits 2-3: request IP / L4 checksum offload */
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT         2
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK          0x1
+#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT         3
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK   0x1
+#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT  4
+#define ETH_TX_1ST_BD_FLAGS_LSO_MASK              0x1
+#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT             5
+/* Bits 6-7: tunnel (outer) header checksum offload requests */
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK     0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT    6
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK     0x1
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT    7
+};
+
+/*
+ * The parsing information data for the first tx bd of a given packet.
+ */
+struct eth_tx_data_1st_bd {
+       __le16 vlan /* VLAN to insert to packet (if needed). */;
+               /* Number of BDs in packet. Should be at least 2 in non-LSO
+               * packet and at least 3 in LSO (or Tunnel with IPv6+ext) packet.
+               */
+       u8 nbds;
+       struct eth_tx_1st_bd_flags bd_flags;
+       __le16 bitfields;
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK  0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK          0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT         1
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK        0x3FFF
+#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT       2
+};
+
+/*
+ * The parsing information data for the second tx bd of a given packet.
+ */
+struct eth_tx_data_2nd_bd {
+       __le16 tunn_ip_size;
+       __le16 bitfields1;
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK  0xF
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK       0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT      4
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK            0x3
+#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT           6
+#define ETH_TX_DATA_2ND_BD_START_BD_MASK                  0x1
+#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT                 8
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK                 0x3
+#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT                9
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK           0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT          11
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK                  0x1
+#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT                 12
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK             0x1
+#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT            13
+#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK                    0x1
+#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT                   14
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK       0x1
+#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT      15
+       __le16 bitfields2;
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK     0x1FFF
+#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT    0
+#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK                 0x7
+#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT                13
+};
+
+/*
+ * Firmware data for L2-EDPM packet.
+ */
+struct eth_edpm_fw_data {
+       struct eth_tx_data_1st_bd data_1st_bd
+           /* Parsing information data from the 1st BD. */;
+       struct eth_tx_data_2nd_bd data_2nd_bd
+           /* Parsing information data from the 2nd BD. */;
+       __le32 reserved;
+};
+
+/*
+ * FW debug.
+ */
+struct eth_fast_path_cqe_fw_debug {
+       u8 reserved0 /* FW reserved. */;
+       u8 reserved1 /* FW reserved. */;
+       __le16 reserved2 /* FW reserved. */;
+};
+
+/* Outer-tunnel parsing result reported by the parser in Rx CQEs */
+struct tunnel_parsing_flags {
+       u8 flags;
+/* Bits 0-1: tunnel type (values presumably match enum eth_tunn_type —
+ * confirm against FW spec) */
+#define TUNNEL_PARSING_FLAGS_TYPE_MASK              0x3
+#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT             0
+/* NOTE: "TENNANT" is a historical misspelling of "tenant" kept for ABI */
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK  0x1
+#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK     0x3
+#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT    3
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK   0x1
+#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT  5
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK     0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT    6
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK      0x1
+#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT     7
+};
+
+/*
+ * Regular ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_reg_cqe {
+       u8 type /* CQE type */;
+       u8 bitfields;
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK  0x7
+#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK             0xF
+#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT            3
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK      0x1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT     7
+       __le16 pkt_len /* Total packet length (from the parser) */;
+       struct parsing_and_err_flags pars_flags
+           /* Parsing and error flags from the parser */;
+       __le16 vlan_tag /* 802.1q VLAN tag */;
+       __le32 rss_hash /* RSS hash result */;
+       __le16 len_on_first_bd /* Number of bytes placed on first BD */;
+       u8 placement_offset /* Offset of placement from BD start */;
+       struct tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */
+         ;
+       u8 bd_num /* Number of BDs, used for packet */;
+       u8 reserved[7];
+       struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+       u8 reserved1[3];
+       u8 flags;
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK          0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT         0
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK   0x1
+#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT  1
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK      0x3F
+#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT     2
+};
+
+/*
+ * TPA-continue ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_cont_cqe {
+       u8 type /* CQE type */;
+       u8 tpa_agg_index /* TPA aggregation index */;
+       __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]
+           /* List of the segment sizes */;
+       u8 reserved[5];
+       u8 reserved1 /* FW reserved. */;
+       __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* FW reserved. */;
+};
+
+/*
+ * TPA-end ETH Rx FP CQE .
+ */
+struct eth_fast_path_rx_tpa_end_cqe {
+       u8 type /* CQE type */;
+       u8 tpa_agg_index /* TPA aggregation index */;
+       __le16 total_packet_len /* Total aggregated packet length */;
+       u8 num_of_bds /* Total number of BDs comprising the packet */;
+       u8 end_reason /* Aggregation end reason. Use enum eth_tpa_end_reason */
+         ;
+       __le16 num_of_coalesced_segs /* Number of coalesced TCP segments */;
+       __le32 ts_delta /* TCP timestamp delta */;
+       __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]
+           /* List of the segment sizes */;
+       u8 reserved1[3];
+       u8 reserved2 /* FW reserved. */;
+       __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* FW reserved. */;
+};
+
+/*
+ * TPA-start ETH Rx FP CQE.
+ */
+struct eth_fast_path_rx_tpa_start_cqe {
+       u8 type /* CQE type */;
+       u8 bitfields;
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK  0x7
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK             0xF
+#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT            3
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK      0x1
+#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT     7
+       __le16 seg_len /* Segment length (packetLen from the parser) */;
+       struct parsing_and_err_flags pars_flags
+           /* Parsing and error flags from the parser */;
+       __le16 vlan_tag /* 802.1q VLAN tag */;
+       __le32 rss_hash /* RSS hash result */;
+       __le16 len_on_first_bd /* Number of bytes placed on first BD */;
+       u8 placement_offset /* Offset of placement from BD start */;
+       struct tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */
+         ;
+       u8 tpa_agg_index /* TPA aggregation index */;
+       u8 header_len /* Packet L2+L3+L4 header length */;
+       __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]
+           /* Additional BDs length list. */;
+       struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+};
+
+/*
+ * The L4 pseudo checksum mode for Ethernet
+ */
+enum eth_l4_pseudo_checksum_mode {
+       ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH
+               /* Pseudo Header checksum on packet is calculated
+                * with the correct packet length field.
+               */
+          ,
+       ETH_L4_PSEUDO_CSUM_ZERO_LENGTH
+           /* Pseudo Hdr checksum on packet is calc with zero len field. */
+          ,
+       MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
+};
+
+/* Rx buffer descriptor: physical address of one receive buffer */
+struct eth_rx_bd {
+       struct regpair addr /* Single continuous buffer */;
+};
+
+/*
+ * regular ETH Rx SP CQE
+ */
+struct eth_slow_path_rx_cqe {
+       u8 type /* CQE type */;
+       u8 ramrod_cmd_id;
+       u8 error_flag;
+       u8 reserved[25];
+       __le16 echo;
+       u8 reserved1;
+       u8 flags;
+#define ETH_SLOW_PATH_RX_CQE_VALID_MASK         0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT        0
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK  0x1
+#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK     0x3F
+#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT    2
+};
+
+/*
+ * union for all ETH Rx CQE types
+ */
+union eth_rx_cqe {
+       struct eth_fast_path_rx_reg_cqe fast_path_regular /* Regular FP CQE */;
+       struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start
+           /* TPA-start CQE */;
+       struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont
+           /* TPA-continue CQE */;
+       struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end /* TPA-end CQE */
+         ;
+       struct eth_slow_path_rx_cqe slow_path /* SP CQE */;
+};
+
+/*
+ * ETH Rx CQE type
+ */
+enum eth_rx_cqe_type {
+       ETH_RX_CQE_TYPE_UNUSED,
+       ETH_RX_CQE_TYPE_REGULAR /* Regular FP ETH Rx CQE */,
+       ETH_RX_CQE_TYPE_SLOW_PATH /* Slow path ETH Rx CQE */,
+       ETH_RX_CQE_TYPE_TPA_START /* TPA start ETH Rx CQE */,
+       ETH_RX_CQE_TYPE_TPA_CONT /* TPA Continue ETH Rx CQE */,
+       ETH_RX_CQE_TYPE_TPA_END /* TPA end ETH Rx CQE */,
+       MAX_ETH_RX_CQE_TYPE
+};
+
+/*
+ * Wrapp for PD RX CQE used in order to cover full cache line when writing CQE
+ */
+struct eth_rx_pmd_cqe {
+       union eth_rx_cqe cqe /* CQE data itself */;
+       u8 reserved[ETH_RX_CQE_GAP];
+};
+
+/*
+ * ETH Rx producers data
+ */
+struct eth_rx_prod_data {
+       __le16 bd_prod /* BD producer */;
+       __le16 cqe_prod /* CQE producer */;
+       __le16 reserved;
+       __le16 reserved1 /* FW reserved. */;
+};
+
+/*
+ * Aggregation end reason.
+ */
+enum eth_tpa_end_reason {
+       ETH_AGG_END_UNUSED,
+       ETH_AGG_END_SP_UPDATE /* SP configuration update */,
+       ETH_AGG_END_MAX_LEN
+           /* Maximum aggregation length or maximum buffer number used. */,
+       ETH_AGG_END_LAST_SEG
+           /* TCP PSH flag or TCP payload length below continue threshold. */,
+       ETH_AGG_END_TIMEOUT /* Timeout expiration. */,
+       ETH_AGG_END_NOT_CONSISTENT,
+       ETH_AGG_END_OUT_OF_ORDER,
+       ETH_AGG_END_NON_TPA_SEG,
+       MAX_ETH_TPA_END_REASON
+};
+
+/*
+ * Eth Tunnel Type
+ */
+enum eth_tunn_type {
+       ETH_TUNN_GENEVE /* GENEVE Tunnel. */,
+       ETH_TUNN_TTAG /* T-Tag Tunnel. */,
+       ETH_TUNN_GRE /* GRE Tunnel. */,
+       ETH_TUNN_VXLAN /* VXLAN Tunnel. */,
+       MAX_ETH_TUNN_TYPE
+};
+
+/*
+ * The first tx bd of a given packet
+ */
+struct eth_tx_1st_bd {
+       struct regpair addr /* Single continuous buffer */;
+       __le16 nbytes /* Number of bytes in this BD. */;
+       struct eth_tx_data_1st_bd data /* Parsing information data. */;
+};
+
+/*
+ * The second tx bd of a given packet
+ */
+struct eth_tx_2nd_bd {
+       struct regpair addr /* Single continuous buffer */;
+       __le16 nbytes /* Number of bytes in this BD. */;
+       struct eth_tx_data_2nd_bd data /* Parsing information data. */;
+};
+
+/*
+ * The parsing information data for the third tx bd of a given packet.
+ */
+struct eth_tx_data_3rd_bd {
+       __le16 lso_mss /* For LSO packet - the MSS in bytes. */;
+       __le16 bitfields;
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK  0xF
+#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK         0xF
+#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT        4
+#define ETH_TX_DATA_3RD_BD_START_BD_MASK        0x1
+#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT       8
+#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK       0x7F
+#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT      9
+       u8 tunn_l4_hdr_start_offset_w;
+       u8 tunn_hdr_size_w;
+};
+
+/*
+ * The third tx bd of a given packet
+ */
+struct eth_tx_3rd_bd {
+       struct regpair addr /* Single continuous buffer */;
+       __le16 nbytes /* Number of bytes in this BD. */;
+       struct eth_tx_data_3rd_bd data /* Parsing information data. */;
+};
+
+/*
+ * Complementary information for the regular tx bd of a given packet.
+ */
+struct eth_tx_data_bd {
+       __le16 reserved0;
+       __le16 bitfields;
+#define ETH_TX_DATA_BD_RESERVED1_MASK  0xFF
+#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+#define ETH_TX_DATA_BD_START_BD_MASK   0x1
+#define ETH_TX_DATA_BD_START_BD_SHIFT  8
+#define ETH_TX_DATA_BD_RESERVED2_MASK  0x7F
+#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
+       __le16 reserved3;
+};
+
+/*
+ * The common regular TX BD ring element
+ */
+struct eth_tx_bd {
+       struct regpair addr /* Single continuous buffer */;
+       __le16 nbytes /* Number of bytes in this BD. */;
+       struct eth_tx_data_bd data /* Complementary information. */;
+};
+
+/* Any element of the Tx BD ring; the role of a BD is positional
+ * (first/second/third/regular within a packet), not tagged in the BD. */
+union eth_tx_bd_types {
+       struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */;
+       struct eth_tx_2nd_bd second_bd /* The second tx bd of a given packet */
+         ;
+       struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */;
+       struct eth_tx_bd reg_bd /* The common non-special bd */;
+};
+
+/*
+ * Mstorm Queue Zone
+ */
+struct mstorm_eth_queue_zone {
+       struct eth_rx_prod_data rx_producers;
+       __le32 reserved[2];
+};
+
+/*
+ * Ustorm Queue Zone
+ */
+struct ustorm_eth_queue_zone {
+       struct coalescing_timeset int_coalescing_timeset
+           /* Rx interrupt coalescing TimeSet */;
+       __le16 reserved[3];
+};
+
+/*
+ * Ystorm Queue Zone
+ */
+struct ystorm_eth_queue_zone {
+       struct coalescing_timeset int_coalescing_timeset
+           /* Tx interrupt coalescing TimeSet */;
+       __le16 reserved[3];
+};
+
+/*
+ * ETH doorbell data
+ */
+struct eth_db_data {
+       u8 params;
+#define ETH_DB_DATA_DEST_MASK         0x3
+#define ETH_DB_DATA_DEST_SHIFT        0
+#define ETH_DB_DATA_AGG_CMD_MASK      0x3
+#define ETH_DB_DATA_AGG_CMD_SHIFT     2
+#define ETH_DB_DATA_BYPASS_EN_MASK    0x1
+#define ETH_DB_DATA_BYPASS_EN_SHIFT   4
+#define ETH_DB_DATA_RESERVED_MASK     0x1
+#define ETH_DB_DATA_RESERVED_SHIFT    5
+#define ETH_DB_DATA_AGG_VAL_SEL_MASK  0x3
+#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+       u8 agg_flags;
+       __le16 bd_prod;
+};
+
+#endif /* __ETH_COMMON__ */
diff --git a/drivers/net/qede/ecore/mcp_public.h 
b/drivers/net/qede/ecore/mcp_public.h
new file mode 100644
index 0000000..948847b
--- /dev/null
+++ b/drivers/net/qede/ecore/mcp_public.h
@@ -0,0 +1,1243 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/****************************************************************************
+ *
+ * Name:        mcp_public.h
+ *
+ * Description: MCP public data
+ *
+ * Created:     13/01/2013 yanivr
+ *
+ ****************************************************************************/
+
+#ifndef MCP_PUBLIC_H
+#define MCP_PUBLIC_H
+
+#define VF_MAX_STATIC 192      /* In case of AH */
+
+#define MCP_GLOB_PATH_MAX      2
+#define MCP_PORT_MAX           2       /* Global */
+#define MCP_GLOB_PORT_MAX      4       /* Global */
+#define MCP_GLOB_FUNC_MAX      16      /* Global */
+
+/* Packed {offset, size} descriptor of a section in the MCP scratchpad */
+typedef u32 offsize_t;         /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT   0
+#define OFFSIZE_OFFSET_MASK    0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT     16
+#define OFFSIZE_SIZE_MASK      0xffff0000
+
+/* NOTE(review): the macros below do not parenthesize their arguments —
+ * pass only simple expressions (no operators) as _offsize / idx. */
+
+/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
+#define SECTION_OFFSET(_offsize) \
+((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
+
+/* SECTION_SIZE is calculating the size in bytes out of offsize */
+#define SECTION_SIZE(_offsize) \
+(((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
+
+/* Absolute scratchpad address of element idx of a section */
+#define SECTION_ADDR(_offsize, idx) \
+(MCP_REG_SCRATCH + SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx))
+
+/* Address of the offsize descriptor of _section within the public data */
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+(_pub_base + offsetof(struct mcp_public_data, sections[_section]))
+
+/* PHY configuration (requested link parameters, driver -> MFW) */
+struct pmm_phy_cfg {
+       u32 speed; /* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
+#define PMM_SPEED_AUTONEG   0
+#define PMM_SPEED_SMARTLINQ  0x8
+
+       u32 pause;              /* bitmask */
+#define PMM_PAUSE_NONE         0x0
+#define PMM_PAUSE_AUTONEG      0x1
+#define PMM_PAUSE_RX           0x2
+#define PMM_PAUSE_TX           0x4
+
+       u32 adv_speed;          /* Default should be the speed_cap_mask */
+       u32 loopback_mode;
+#define PMM_LOOPBACK_NONE              0
+#define PMM_LOOPBACK_INT_PHY           1
+#define PMM_LOOPBACK_EXT_PHY           2
+#define PMM_LOOPBACK_EXT               3
+#define PMM_LOOPBACK_MAC               4
+#define PMM_LOOPBACK_CNIG_AH_ONLY_0123 5       /* Port to itself */
+#define PMM_LOOPBACK_CNIG_AH_ONLY_2301 6       /* Port to Port */
+
+       /* features */
+       u32 feature_config_flags;
+
+};
+
+/* Per-port multi-function configuration */
+struct port_mf_cfg {
+
+       u32 dynamic_cfg;        /* device control channel */
+#define PORT_MF_CFG_OV_TAG_MASK              0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT             0
+/* All-ones outer-VLAN tag means "no tag configured" */
+#define PORT_MF_CFG_OV_TAG_DEFAULT         PORT_MF_CFG_OV_TAG_MASK
+
+       u32 reserved[1];
+};
+
+/* DO NOT add new fields in the middle
+ * MUST be synced with struct pmm_stats_map
+ *
+ * 64-bit MAC statistics block. In each comment, the first hex value is
+ * presumably the HW statistic index and the second is the byte offset of
+ * the field in this struct — confirm against the MFW spec.
+ */
+struct pmm_stats {
+       u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter */
+       u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter */
+       u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter */
+       u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter */
+       u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter */
+       u64 r1518; /* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
+       u64 r1522; /* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged  */
+       u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter */
+       u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter */
+       u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter */
+       u64 r16383; /* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame ctr */
+       u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter */
+       u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter */
+       u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter */
+       u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter */
+       u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter */
+       u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
+       u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter */
+       u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+       u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+       u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
+       u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+       u64 t127; /* 0x41 (Offset 0xb0 ) TX 65 to 127 byte frame counter */
+       u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter */
+       u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter */
+       u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter */
+       u64 t1518; /* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
+       u64 t2047; /* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
+       u64 t4095; /* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
+       u64 t9216; /* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
+       u64 t16383; /* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame ctr */
+       u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+       u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
+       u64 tlpiec; /* 0x6C (Offset 0x108) Transmit Logical Type LLFC */
+       u64 tncl; /* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+       u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
+       u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
+       u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
+       u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
+       u64 rxpok; /* 0x22 (Offset 0x138) RX good frame */
+       u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
+       u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
+       u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
+       u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
+       u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+};
+
+/* BRB (buffer block) statistics, one counter pair per priority/queue */
+struct brb_stats {
+       u64 brb_truncate[8];
+       u64 brb_discard[8];
+};
+
+/* Aggregate per-port statistics: BRB counters plus MAC counters */
+struct port_stats {
+       struct brb_stats brb;
+       struct pmm_stats pmm;
+};
+
+/*-----+-----------------------------------------------------------------------
+ * Chip | Number and       | Ports in| Ports in|2 PHY-s |# of ports|# of 
engines
+ *      | rate of physical | team #1 | team #2 |are used|per path  | (paths)
+ *      | ports            |         |         |        |          |
+ *======+==================+=========+=========+========+======================
+ * BB   | 1x100G           | This is special mode, where there are 2 HW func
+ * BB   | 2x10/20Gbps      | 0,1     | NA      |  No    | 1        | 1
+ * BB   | 2x40 Gbps        | 0,1     | NA      |  Yes   | 1        | 1
+ * BB   | 2x50Gbps         | 0,1     | NA      |  No    | 1        | 1
+ * BB   | 4x10Gbps         | 0,2     | 1,3     |  No    | 1/2      | 1,2
+ * BB   | 4x10Gbps         | 0,1     | 2,3     |  No    | 1/2      | 1,2
+ * BB   | 4x10Gbps         | 0,3     | 1,2     |  No    | 1/2      | 1,2
+ * BB   | 4x10Gbps         | 0,1,2,3 | NA      |  No    | 1        | 1
+ * AH   | 2x10/20Gbps      | 0,1     | NA      |  NA    | 1        | NA
+ * AH   | 4x10Gbps         | 0,1     | 2,3     |  NA    | 2        | NA
+ * AH   | 4x10Gbps         | 0,2     | 1,3     |  NA    | 2        | NA
+ * AH   | 4x10Gbps         | 0,3     | 1,2     |  NA    | 2        | NA
+ * AH   | 4x10Gbps         | 0,1,2,3 | NA      |  NA    | 1        | NA
+ 
*======+==================+=========+=========+========+=======================
+ */
+
+#define CMT_TEAM0 0
+#define CMT_TEAM1 1
+#define CMT_TEAM_MAX 2
+
+/* Couple-mode teaming: per-global-port bit flags describing team
+ * membership, port role and team assignment. */
+struct couple_mode_teaming {
+       u8 port_cmt[MCP_GLOB_PORT_MAX];
+/* Bit 0: port participates in a team */
+#define PORT_CMT_IN_TEAM            (1<<0)
+
+/* Bit 1: port role — active or inactive within the team */
+#define PORT_CMT_PORT_ROLE          (1<<1)
+#define PORT_CMT_PORT_INACTIVE      (0<<1)
+#define PORT_CMT_PORT_ACTIVE        (1<<1)
+
+/* Bit 2: which team (CMT_TEAM0/CMT_TEAM1) the port belongs to */
+#define PORT_CMT_TEAM_MASK          (1<<2)
+#define PORT_CMT_TEAM0              (0<<2)
+#define PORT_CMT_TEAM1              (1<<2)
+};
+
+/**************************************
+ *     LLDP and DCBX HSI structures
+ **************************************/
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL          32
+#define MAX_SYSTEM_LLDP_TLV_DATA    32
+
+typedef enum _lldp_agent_e {
+       LLDP_NEAREST_BRIDGE = 0,
+       LLDP_NEAREST_NON_TPMR_BRIDGE,
+       LLDP_NEAREST_CUSTOMER_BRIDGE,
+       LLDP_MAX_LLDP_AGENTS
+} lldp_agent_e;
+
+/* Per-agent LLDP configuration (timers, credits, Rx/Tx enables, local IDs) */
+struct lldp_config_params_s {
+       u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK        0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT       0
+#define LLDP_CONFIG_HOLD_MASK               0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT              8
+#define LLDP_CONFIG_MAX_CREDIT_MASK         0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT        12
+#define LLDP_CONFIG_ENABLE_RX_MASK          0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT         30
+#define LLDP_CONFIG_ENABLE_TX_MASK          0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT         31
+       /* Holds local Chassis ID TLV header, subtype and 9B of payload.
+          If first byte is 0, then we will use default chassis ID */
+       u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+       /* Holds local Port ID TLV header, subtype and 9B of payload.
+          If first byte is 0, then we will use default port ID */
+       u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+/* Per-agent LLDP status as learned from the link peer. The prefix/suffix
+ * sequence numbers bracket the data for consistent reads. */
+struct lldp_status_params_s {
+       u32 prefix_seq_num;
+       u32 status;             /* TBD */
+       /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+       u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+       /* Holds remote Port ID TLV header, subtype and 9B of payload. */
+       u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+       u32 suffix_seq_num;
+};
+
+/* DCBX ETS (Enhanced Transmission Selection) feature parameters */
+struct dcbx_ets_feature {
+       u32 flags;
+#define DCBX_ETS_ENABLED_MASK                   0x00000001
+#define DCBX_ETS_ENABLED_SHIFT                  0
+#define DCBX_ETS_WILLING_MASK                   0x00000002
+#define DCBX_ETS_WILLING_SHIFT                  1
+#define DCBX_ETS_ERROR_MASK                     0x00000004
+#define DCBX_ETS_ERROR_SHIFT                    2
+#define DCBX_ETS_CBS_MASK                       0x00000008
+#define DCBX_ETS_CBS_SHIFT                      3
+#define DCBX_ETS_MAX_TCS_MASK                   0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT                  4
+       /* Priority-to-TC mapping packed into one dword */
+       u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC                      4
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET                (DCBX_ISCSI_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY               0xf
+#define DCBX_CEE_STRICT_PRIORITY_TC            0x7
+       u32 tc_bw_tbl[2] /* Per-TC bandwidth table */;
+       u32 tc_tsa_tbl[2] /* Per-TC transmission selection algorithm */;
+#define DCBX_ETS_TSA_STRICT                    0
+#define DCBX_ETS_TSA_CBS                       1
+#define DCBX_ETS_TSA_ETS                       2
+};
+
+/* One DCBX application-priority table entry: protocol id, selection field
+ * (ethtype vs. port) and priority bitmap. */
+struct dcbx_app_priority_entry {
+       u32 entry;
+#define DCBX_APP_PRI_MAP_MASK       0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT      0
+#define DCBX_APP_PRI_0              0x01
+#define DCBX_APP_PRI_1              0x02
+#define DCBX_APP_PRI_2              0x04
+#define DCBX_APP_PRI_3              0x08
+#define DCBX_APP_PRI_4              0x10
+#define DCBX_APP_PRI_5              0x20
+#define DCBX_APP_PRI_6              0x40
+#define DCBX_APP_PRI_7              0x80
+#define DCBX_APP_SF_MASK            0x00000300
+#define DCBX_APP_SF_SHIFT           8
+#define DCBX_APP_SF_ETHTYPE         0
+#define DCBX_APP_SF_PORT            1
+#define DCBX_APP_PROTOCOL_ID_MASK   0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT  16
+};
+
+/* FW structure in BE (big-endian on the wire; host must byte-swap) */
+struct dcbx_app_priority_feature {
+       u32 flags;
+#define DCBX_APP_ENABLED_MASK           0x00000001
+#define DCBX_APP_ENABLED_SHIFT          0
+#define DCBX_APP_WILLING_MASK           0x00000002
+#define DCBX_APP_WILLING_SHIFT          1
+#define DCBX_APP_ERROR_MASK             0x00000004
+#define DCBX_APP_ERROR_SHIFT            2
+       /* Not in use
+          #define DCBX_APP_DEFAULT_PRI_MASK       0x00000f00
+          #define DCBX_APP_DEFAULT_PRI_SHIFT      8
+        */
+#define DCBX_APP_MAX_TCS_MASK           0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT          12
+#define DCBX_APP_NUM_ENTRIES_MASK       0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT      16
+       struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE (big-endian on the wire; host must byte-swap) */
+struct dcbx_features {
+       /* PG feature */
+       struct dcbx_ets_feature ets;
+       /* PFC feature */
+       u32 pfc;
+/* Bits 0-7: per-priority PFC enable bitmap */
+#define DCBX_PFC_PRI_EN_BITMAP_MASK             0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT            0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0            0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1            0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2            0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3            0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4            0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5            0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6            0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7            0x80
+
+/* Bits 8-15: umbrella flags field; CAPS/MBC/WILLING below are sub-fields */
+#define DCBX_PFC_FLAGS_MASK                     0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT                    8
+#define DCBX_PFC_CAPS_MASK                      0x00000f00
+#define DCBX_PFC_CAPS_SHIFT                     8
+#define DCBX_PFC_MBC_MASK                       0x00004000
+#define DCBX_PFC_MBC_SHIFT                      14
+#define DCBX_PFC_WILLING_MASK                   0x00008000
+#define DCBX_PFC_WILLING_SHIFT                  15
+#define DCBX_PFC_ENABLED_MASK                   0x00010000
+#define DCBX_PFC_ENABLED_SHIFT                  16
+#define DCBX_PFC_ERROR_MASK                     0x00020000
+#define DCBX_PFC_ERROR_SHIFT                    17
+
+       /* APP feature */
+       struct dcbx_app_priority_feature app;
+};
+
+/* Locally-administered DCBX configuration handed to the MFW. */
+struct dcbx_local_params {
+       u32 config;
+#define DCBX_CONFIG_VERSION_MASK            0x00000003
+#define DCBX_CONFIG_VERSION_SHIFT           0
+#define DCBX_CONFIG_VERSION_DISABLED        0
+#define DCBX_CONFIG_VERSION_IEEE            1
+#define DCBX_CONFIG_VERSION_CEE             2
+
+       u32 flags;
+       struct dcbx_features features;
+};
+
+/* DCBX MIB snapshot.  The feature data is bracketed by prefix/suffix
+ * sequence numbers - presumably so a reader can detect a concurrent MFW
+ * update (torn read) by comparing the two; TODO confirm against MFW docs.
+ */
+struct dcbx_mib {
+       u32 prefix_seq_num;
+       u32 flags;
+       /* 'flags' uses the same encoding as dcbx_local_params.config:
+          #define DCBX_CONFIG_VERSION_MASK            0x00000003
+          #define DCBX_CONFIG_VERSION_SHIFT           0
+          #define DCBX_CONFIG_VERSION_DISABLED        0
+          #define DCBX_CONFIG_VERSION_IEEE            1
+          #define DCBX_CONFIG_VERSION_CEE             2
+        */
+       struct dcbx_features features;
+       u32 suffix_seq_num;
+};
+
+/* Buffer carrying system LLDP TLVs ('length' counts valid payload). */
+struct lldp_system_tlvs_buffer_s {
+       u16 valid;
+       u16 length;
+       u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      G L O B A L   */
+/*                                    */
+/**************************************/
+/* Device-global section of the MFW shared memory. */
+struct public_global {
+       u32 max_path; /* 32 bits are wasteful here, but the field is read often */
+       u32 max_ports; /* (Global) 32 bits are wasteful, but read often */
+#define MODE_1P        1 /* TBD - NEED TO THINK OF A BETTER NAME */
+#define MODE_2P        2
+#define MODE_3P        3
+#define MODE_4P        4
+       u32 debug_mb_offset;
+       u32 phymod_dbg_mb_offset;
+       struct couple_mode_teaming cmt;
+       s32 internal_temperature;
+       u32 mfw_ver;
+       u32 running_bundle_id;
+       s32 external_temperature;
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      P A T H       */
+/*                                    */
+/**************************************/
+
+/****************************************************************************
+ * Shared Memory 2 Region                                                   *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way:                   */
+/* 8 bit:  PF ack                                                           */
+/* 128 bit: VF ack                                                          */
+/* 8 bit:  ios_dis_ack                                                      */
+/* In order to maintain endianness in the mailbox hsi, we want to keep      */
+/* using u32. The fw must have the VF right after the PF since this is how  */
+/* it accesses arrays (it always expects the VF to reside after the PF,     */
+/* and that makes the calculation much easier for it).                      */
+/* In order to answer both limitations, and keep the struct small, the code */
+/* will abuse the structure defined here to achieve the actual partition    */
+/* above                                                                    */
+/****************************************************************************/
+/* FLR acknowledgement mailbox; 'accum_ack' is partitioned per the comment
+ * block above (PF/VF/IOV_DIS bit ranges).
+ */
+struct fw_flr_mb {
+       u32 aggint;
+       u32 opgen_addr;
+       u32 accum_ack;          /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+#define ACCUM_ACK_PF_BASE      0
+#define ACCUM_ACK_PF_SHIFT     0
+
+#define ACCUM_ACK_VF_BASE      8
+#define ACCUM_ACK_VF_SHIFT     3
+
+#define ACCUM_ACK_IOV_DIS_BASE 256
+#define ACCUM_ACK_IOV_DIS_SHIFT        8
+
+};
+
+/* Per-path section of the MFW shared memory. */
+struct public_path {
+       struct fw_flr_mb flr_mb;
+       /*
+        * mcp_vf_disabled is set by the MCP to tell the driver which VFs
+        * were disabled/flred
+        */
+       u32 mcp_vf_disabled[VF_MAX_STATIC / 32];        /* 0x003c */
+
+       u32 process_kill;
+       /* Reset on mcp reset, and incremented for every process kill event. */
+#define PROCESS_KILL_COUNTER_MASK              0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT             0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK         0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT                16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id*32 + aeu_bit)
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      P O R T       */
+/*                                    */
+/**************************************/
+/* Sizes (bytes) of the FC NPIV world-wide port/node names */
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+/* One FC NPIV WWPN/WWNN name pair. */
+struct dci_npiv_settings {
+       u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+       u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+/* Header of the FC NPIV table stored in NVRAM. */
+struct dci_fc_npiv_cfg {
+       /* hdr used internally by the MFW */
+       u32 hdr;
+       u32 num_of_npiv;        /* number of valid entries that follow */
+};
+
+#define MAX_NUMBER_NPIV 64
+/* Full FC NPIV table: header plus up to MAX_NUMBER_NPIV name pairs. */
+struct dci_fc_npiv_tbl {
+       struct dci_fc_npiv_cfg fc_npiv_cfg;
+       struct dci_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+/****************************************************************************
+ * Driver <-> FW Mailbox                                                    *
+ ****************************************************************************/
+
+/* Per-port section of the MFW shared memory: validity/link status, media
+ * and transceiver info, LLDP parameters and the DCBX MIBs.
+ */
+struct public_port {
+       u32 validity_map;       /* 0x0 (4*2 = 0x8) */
+
+       /* validity bits */
+#define MCP_VALIDITY_PCI_CFG                    0x00100000
+#define MCP_VALIDITY_MB                         0x00200000
+#define MCP_VALIDITY_DEV_INFO                   0x00400000
+#define MCP_VALIDITY_RESERVED                   0x00000007
+
+       /* One licensing bit should be set */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK     0x00000038 /* yaniv - tbd  */
+#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT    0x00000008
+#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT  0x00000010
+#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT       0x00000020
+
+       /* Active MFW */
+#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN         0x00000000
+#define MCP_VALIDITY_ACTIVE_MFW_MASK            0x000001c0
+#define MCP_VALIDITY_ACTIVE_MFW_NCSI            0x00000040
+#define MCP_VALIDITY_ACTIVE_MFW_NONE            0x000001c0
+
+       u32 link_status;
+#define LINK_STATUS_LINK_UP                    0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK                      0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD           (1<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD           (2<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G                       (3<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G                       (4<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G                       (5<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G                       (6<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G                      (7<<1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G                       (8<<1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED                     0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE                    0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED                    0x00000080
+
+#define LINK_STATUS_PFC_ENABLED                                0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE       0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE       0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE           0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE           0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE           0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE           0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE          0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE           0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK     0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE     (0<<18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE       (1<<18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE      (2<<18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE                    (3<<18)
+
+#define LINK_STATUS_SFP_TX_FAULT                               0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED                    0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED                    0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT               0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT                 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT                0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ                0x04000000
+
+       u32 link_status1;
+       u32 ext_phy_fw_version;
+       u32 drv_phy_cfg_addr;   /* Points to pmm_phy_cfg (For READ-ONLY) */
+
+       u32 port_stx;
+
+       u32 stat_nig_timer;
+
+       struct port_mf_cfg port_mf_config;
+       struct port_stats stats;
+
+       u32 media_type;
+#define        MEDIA_UNSPECIFIED               0x0
+#define        MEDIA_SFPP_10G_FIBER    0x1
+#define        MEDIA_XFP_FIBER                 0x2
+#define        MEDIA_DA_TWINAX                 0x3
+#define        MEDIA_BASE_T                    0x4
+#define MEDIA_SFP_1G_FIBER             0x5
+#define MEDIA_MODULE_FIBER             0x6
+#define        MEDIA_KR                                0xf0
+#define        MEDIA_NOT_PRESENT               0xff
+
+       u32 lfa_status;
+#define LFA_LINK_FLAP_REASON_OFFSET            0
+#define LFA_LINK_FLAP_REASON_MASK              0x000000ff
+#define LFA_NO_REASON                                  (0<<0)
+#define LFA_LINK_DOWN                                  (1<<0)
+#define LFA_FORCE_INIT                                 (1<<1)
+#define LFA_LOOPBACK_MISMATCH                          (1<<2)
+#define LFA_SPEED_MISMATCH                             (1<<3)
+#define LFA_FLOW_CTRL_MISMATCH                         (1<<4)
+#define LFA_ADV_SPEED_MISMATCH                         (1<<5)
+#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET       8
+#define LINK_FLAP_AVOIDANCE_COUNT_MASK         0x0000ff00
+#define LINK_FLAP_COUNT_OFFSET                 16
+#define LINK_FLAP_COUNT_MASK                   0x00ff0000
+
+       u32 link_change_count;
+
+       /* LLDP params */
+       struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+       struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+       struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+       /* DCBX related MIB */
+       struct dcbx_local_params local_admin_dcbx_mib;
+       struct dcbx_mib remote_dcbx_mib;
+       struct dcbx_mib operational_dcbx_mib;
+
+       /* FC_NPIV table offset & size in NVRAM value of 0 means not present */
+       u32 fc_npiv_nvram_tbl_addr;
+       u32 fc_npiv_nvram_tbl_size;
+       u32 transceiver_data;
+#define PMM_TRANSCEIVER_STATE_MASK             0x000000FF
+#define PMM_TRANSCEIVER_STATE_SHIFT            0x00000000
+#define PMM_TRANSCEIVER_STATE_UNPLUGGED                0x00000000
+#define PMM_TRANSCEIVER_STATE_PRESENT          0x00000001
+#define PMM_TRANSCEIVER_STATE_VALID            0x00000003
+#define PMM_TRANSCEIVER_STATE_UPDATING         0x00000008
+#define PMM_TRANSCEIVER_TYPE_MASK              0x0000FF00
+#define PMM_TRANSCEIVER_TYPE_SHIFT             0x00000008
+#define PMM_TRANSCEIVER_TYPE_NONE              0x00000000
+#define PMM_TRANSCEIVER_TYPE_UNKNOWN           0x000000FF
+#define PMM_TRANSCEIVER_TYPE_1G_PCC    0x01    /* 1G Passive copper cable */
+#define PMM_TRANSCEIVER_TYPE_1G_ACC    0x02    /* 1G Active copper cable  */
+#define PMM_TRANSCEIVER_TYPE_1G_LX                             0x03
+#define PMM_TRANSCEIVER_TYPE_1G_SX                             0x04
+#define PMM_TRANSCEIVER_TYPE_10G_SR                            0x05
+#define PMM_TRANSCEIVER_TYPE_10G_LR                            0x06
+#define PMM_TRANSCEIVER_TYPE_10G_LRM                   0x07
+#define PMM_TRANSCEIVER_TYPE_10G_ER                            0x08
+#define PMM_TRANSCEIVER_TYPE_10G_PCC   0x09    /* 10G Passive copper cable */
+#define PMM_TRANSCEIVER_TYPE_10G_ACC   0x0a    /* 10G Active copper cable  */
+#define PMM_TRANSCEIVER_TYPE_XLPPI                             0x0b
+#define PMM_TRANSCEIVER_TYPE_40G_LR4                   0x0c
+#define PMM_TRANSCEIVER_TYPE_40G_SR4                   0x0d
+#define PMM_TRANSCEIVER_TYPE_40G_CR4                   0x0e
+#define PMM_TRANSCEIVER_TYPE_100G_AOC  0x0f    /* Active optical cable */
+#define PMM_TRANSCEIVER_TYPE_100G_SR4                  0x10
+#define PMM_TRANSCEIVER_TYPE_100G_LR4                  0x11
+#define PMM_TRANSCEIVER_TYPE_100G_ER4                  0x12
+#define PMM_TRANSCEIVER_TYPE_100G_ACC  0x13    /* Active copper cable */
+#define PMM_TRANSCEIVER_TYPE_100G_CR4                  0x14
+#define PMM_TRANSCEIVER_TYPE_4x10G_SR                  0x15
+#define PMM_TRANSCEIVER_TYPE_25G_PCC_S 0x16
+#define PMM_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define PMM_TRANSCEIVER_TYPE_25G_PCC_M 0x18
+#define PMM_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+#define PMM_TRANSCEIVER_TYPE_25G_PCC_L 0x1a
+#define PMM_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define PMM_TRANSCEIVER_TYPE_25G_SR                            0x1c
+#define PMM_TRANSCEIVER_TYPE_25G_LR                            0x1d
+#define PMM_TRANSCEIVER_TYPE_25G_AOC                   0x1e
+
+       /* NOTE(review): 0x1d/0x1e below collide with 25G_LR/25G_AOC above -
+        * looks like an HSI quirk inherited from the MFW; confirm before use.
+        */
+#define PMM_TRANSCEIVER_TYPE_4x10G                                     0x1d
+#define PMM_TRANSCEIVER_TYPE_4x25G_CR                                  0x1e
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40GR                       0x30
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR                     0x31
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR                     0x32
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR                    0x33
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR                    0x34
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR                    0x35
+#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC                   0x36
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C      F U N C       */
+/*                                    */
+/**************************************/
+
+/* Per-function (PF) section of the MFW shared memory. */
+struct public_func {
+
+       u32 iscsi_boot_signature;
+       u32 iscsi_boot_block_offset;
+
+       /* MTU size per function is needed for the OV feature */
+       u32 mtu_size;
+       /* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
+       /* For PCP values 0-3 use the map lower */
+       /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+        * 0x0000FF00 - PCP 2, 0x000000FF PCP 3
+        */
+       u32 c2s_pcp_map_lower;
+       /* For PCP values 4-7 use the map upper */
+       /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+        * 0x0000FF00 - PCP 6, 0x000000FF PCP 7
+        */
+       u32 c2s_pcp_map_upper;
+
+       /* For PCP default value get the MSB byte of the map default */
+       u32 c2s_pcp_map_default;
+
+       u32 reserved[4];
+
+       /* replace old mf_cfg */
+       u32 config;
+       /* E/R/I/D */
+       /* function 0 of each port cannot be hidden */
+#define FUNC_MF_CFG_FUNC_HIDE                   0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING          0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT    0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK               0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT              4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET           0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI              0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE              0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE               0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX               0x00000030
+
+       /* MINBW, MAXBW */
+       /* value range - 0..100, increments in 1 %  */
+#define FUNC_MF_CFG_MIN_BW_MASK                 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT                8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT              0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK                 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT                16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT              0x00640000
+
+       u32 status;
+#define FUNC_STATUS_VLINK_DOWN                 0x00000001
+
+       u32 mac_upper;          /* MAC */
+#define FUNC_MF_CFG_UPPERMAC_MASK               0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT              0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT            FUNC_MF_CFG_UPPERMAC_MASK
+       u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT            0xffffffff
+
+       u32 fcoe_wwn_port_name_upper;
+       u32 fcoe_wwn_port_name_lower;
+
+       u32 fcoe_wwn_node_name_upper;
+       u32 fcoe_wwn_node_name_lower;
+
+       u32 ovlan_stag;         /* tags */
+#define FUNC_MF_CFG_OV_STAG_MASK              0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT             0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT           FUNC_MF_CFG_OV_STAG_MASK
+
+       u32 pf_allocation;      /* vf per pf */
+
+       u32 preserve_data;      /* Will be used by CCM */
+
+       u32 driver_last_activity_ts;
+
+       /*
+        * drv_ack_vf_disabled is set by the PF driver to ack handled disabled
+        * VFs
+        */
+       u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];    /* 0x0044 */
+
+       u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK       0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT      0
+
+#define DRV_ID_MCP_HSI_VER_MASK                0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT       16
+#define DRV_ID_MCP_HSI_VER_CURRENT     (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+
+       /* OS/driver flavour reported to the MFW */
+#define DRV_ID_DRV_TYPE_MASK           0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT          24
+#define DRV_ID_DRV_TYPE_UNKNOWN                (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX          (1 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_WINDOWS                (2 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_DIAG           (3 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_PREBOOT                (4 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_SOLARIS                (5 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_VMWARE         (6 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_FREEBSD                (7 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_AIX            (8 << DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK                0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT       31
+#define DRV_ID_DRV_INIT_HW_FLAG                (1 << DRV_ID_DRV_INIT_HW_SHIFT)
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C       M B          */
+/*                                    */
+/**************************************/
+/* This is the only section that the driver can write to. */
+/* Each driver request to set feature parameters is done using a
+ * different command, which is linked to a specific data structure
+ * from the union below.
+ * For huge structures, the common blank structure should be used.
+ */
+
+/* MAC address split across two dwords. */
+struct mcp_mac {
+       u32 mac_upper;          /* Upper 16 bits are always zeroes */
+       u32 mac_lower;
+};
+
+/* Generic 64-bit value carried as a lo/hi dword pair. */
+struct mcp_val64 {
+       u32 lo;
+       u32 hi;
+};
+
+/* NVM file location attributes: start address and length in NVRAM. */
+struct mcp_file_att {
+       u32 nvm_start_addr;
+       u32 len;
+};
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+/* Driver version reported to the MFW.  'name' is truncated so that the
+ * struct stays MCP_DRV_VER_STR_SIZE bytes (4-byte version + 12-byte name).
+ */
+struct drv_version_stc {
+       u32 version;
+       u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+/* LAN statistics for NCSI */
+struct lan_stats_stc {
+       u64 ucast_rx_pkts;
+       u64 ucast_tx_pkts;
+       u32 fcs_err;
+       u32 rserved;    /* sic ("reserved"); pads the struct to 24 bytes */
+};
+
+/* FCoE statistics reported via the GET_STATS mailbox command. */
+struct fcoe_stats_stc {
+       u64 rx_pkts;
+       u64 tx_pkts;
+       u32 fcs_err;
+       u32 login_failure;
+};
+
+/* iSCSI statistics reported via the GET_STATS mailbox command. */
+struct iscsi_stats_stc {
+       u64 rx_pdus;
+       u64 tx_pdus;
+       u64 rx_bytes;
+       u64 tx_bytes;
+};
+
+/* RDMA statistics reported via the GET_STATS mailbox command. */
+struct rdma_stats_stc {
+       u64 rx_pkts;
+       u64 tx_pkts;
+       u64 rx_bytes;
+       u64 tx_bytes;
+};
+
+/* OCBB/OCSD host buffer addresses and update interval for the OCBB_DATA
+ * mailbox command (semantics defined by the MFW - confirm against MFW docs).
+ */
+struct ocbb_data_stc {
+       u32 ocbb_host_addr;
+       u32 ocsd_host_addr;
+       u32 ocsd_req_update_interval;
+};
+
+/* Data area accompanying a driver mailbox command; the active member
+ * corresponds to the command issued (see the per-member command notes).
+ */
+union drv_union_data {
+       u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];        /* LOAD_REQ */
+       struct mcp_mac wol_mac; /* UNLOAD_DONE */
+
+       struct pmm_phy_cfg drv_phy_cfg;
+
+       struct mcp_val64 val64; /* For PHY / AVS commands */
+
+       u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+       struct mcp_file_att file_att;
+
+       u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+
+       struct drv_version_stc drv_version;
+
+       struct lan_stats_stc lan_stats;
+       struct fcoe_stats_stc fcoe_stats;
+       struct iscsi_stats_stc icsci_stats;     /* sic ("iscsi"); name kept */
+       struct rdma_stats_stc rdma_stats;
+       struct ocbb_data_stc ocbb_info;
+
+       /* ... */
+};
+
+struct public_drv_mb {
+
+       u32 drv_mb_header;
+#define DRV_MSG_CODE_MASK                       0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ                   0x10000000
+#define DRV_MSG_CODE_LOAD_DONE                  0x11000000
+#define DRV_MSG_CODE_INIT_HW                    0x12000000
+#define DRV_MSG_CODE_UNLOAD_REQ                        0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE                0x21000000
+#define DRV_MSG_CODE_INIT_PHY                  0x22000000
+       /* Params - FORCE - Reinitialize the link regardless of LFA */
+       /*        - DONT_CARE - Don't flap the link if up */
+#define DRV_MSG_CODE_LINK_RESET                        0x23000000
+
+       /* Vitaly: LLDP commands */
+#define DRV_MSG_CODE_SET_LLDP                   0x24000000
+#define DRV_MSG_CODE_SET_DCBX                   0x25000000
+       /* OneView feature driver HSI */
+#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG                0x26000000
+#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM         0x27000000
+#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS   0x28000000
+#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER    0x29000000
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE    0x31000000
+#define DRV_MSG_CODE_BW_UPDATE_ACK             0x32000000
+#define DRV_MSG_CODE_OV_UPDATE_MTU             0x33000000
+
+#define DRV_MSG_CODE_NIG_DRAIN                 0x30000000
+
+#define DRV_MSG_CODE_INITIATE_FLR               0x02000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX                0xc0010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN                0x00010000
+#define DRV_MSG_CODE_NVM_PUT_FILE_DATA         0x00020000
+#define DRV_MSG_CODE_NVM_GET_FILE_ATT          0x00030000
+#define DRV_MSG_CODE_NVM_READ_NVRAM            0x00050000
+#define DRV_MSG_CODE_NVM_WRITE_NVRAM           0x00060000
+#define DRV_MSG_CODE_NVM_DEL_FILE              0x00080000
+#define DRV_MSG_CODE_MCP_RESET                 0x00090000
+#define DRV_MSG_CODE_SET_SECURE_MODE           0x000a0000
+#define DRV_MSG_CODE_PHY_RAW_READ              0x000b0000
+#define DRV_MSG_CODE_PHY_RAW_WRITE             0x000c0000
+#define DRV_MSG_CODE_PHY_CORE_READ             0x000d0000
+#define DRV_MSG_CODE_PHY_CORE_WRITE            0x000e0000
+#define DRV_MSG_CODE_SET_VERSION               0x000f0000
+#define DRV_MSG_CODE_MCP_HALT                  0x00100000
+#define DRV_MSG_CODE_PMD_DIAG_DUMP             0x00140000
+#define DRV_MSG_CODE_PMD_DIAG_EYE              0x00150000
+#define DRV_MSG_CODE_TRANSCEIVER_READ          0x00160000
+#define DRV_MSG_CODE_TRANSCEIVER_WRITE         0x00170000
+
+#define DRV_MSG_CODE_SET_VMAC                   0x00110000
+#define DRV_MSG_CODE_GET_VMAC                   0x00120000
+#define DRV_MSG_CODE_VMAC_TYPE_MAC              1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN             2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN             3
+
+#define DRV_MSG_CODE_GET_STATS                  0x00130000
+#define DRV_MSG_CODE_STATS_TYPE_LAN             1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE            2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI           3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA           4
+
+#define DRV_MSG_CODE_OCBB_DATA                 0x00180000
+#define DRV_MSG_CODE_SET_BW                    0x00190000
+#define DRV_MSG_CODE_MASK_PARITIES             0x001a0000
+#define DRV_MSG_CODE_INDUCE_FAILURE            0x001b0000
+#define DRV_MSG_FAN_FAILURE_TYPE               (1 << 0)
+#define DRV_MSG_TEMPERATURE_FAILURE_TYPE       (1 << 1)
+
+#define DRV_MSG_CODE_GPIO_READ                 0x001c0000
+#define DRV_MSG_CODE_GPIO_WRITE                        0x001d0000
+
+#define DRV_MSG_CODE_SET_LED_MODE              0x00200000
+#define DRV_MSG_CODE_EMPTY_MB                  0x00220000
+
+#define DRV_MSG_SEQ_NUMBER_MASK                 0x0000ffff
+
+       u32 drv_mb_param;
+       /* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN         0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP            0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED        0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED         0x00000003
+
+       /* UNLOAD_DONE_params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER        0x00000001
+
+       /* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE            0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE                0x00000002
+
+       /* LLDP / DCBX params */
+#define DRV_MB_PARAM_LLDP_SEND_MASK            0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT           0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK           0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT          1
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK          0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT         3
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK  0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW    0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE  0x2
+
+#define DRV_MB_PARAM_NVM_OFFSET_SHIFT          0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK           0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_SHIFT             24
+#define DRV_MB_PARAM_NVM_LEN_MASK              0xFF000000
+
+#define DRV_MB_PARAM_PHY_ADDR_SHIFT            0
+#define DRV_MB_PARAM_PHY_ADDR_MASK             0x1FF0FFFF
+#define DRV_MB_PARAM_PHY_LANE_SHIFT            16
+#define DRV_MB_PARAM_PHY_LANE_MASK             0x000F0000
+#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT     29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK      0x20000000
+#define DRV_MB_PARAM_PHY_PORT_SHIFT            30
+#define DRV_MB_PARAM_PHY_PORT_MASK             0xc0000000
+
+#define DRV_MB_PARAM_PHYMOD_LANE_SHIFT         0
+#define DRV_MB_PARAM_PHYMOD_LANE_MASK          0x000000FF
+#define DRV_MB_PARAM_PHYMOD_SIZE_SHIFT         8
+#define DRV_MB_PARAM_PHYMOD_SIZE_MASK          0x000FFF00
+       /* configure vf MSIX params */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT   0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK    0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT  8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK   0x0000FF00
+
+       /* OneView configuration parametres */
+#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT         0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK          0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE          0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS                    1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC   2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER         3
+#define DRV_MB_PARAM_OV_CURR_CFG_VC_CLP                4
+#define DRV_MB_PARAM_OV_CURR_CFG_CNU           5
+#define DRV_MB_PARAM_OV_CURR_CFG_DCI           6
+#define DRV_MB_PARAM_OV_CURR_CFG_HII           7
+
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_SHIFT                 0
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK                  0x000000FF
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE                  (1 << 0)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_IP_ACQUIRED     (1 << 1)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS     (1 << 1)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_TRARGET_FOUND                 (1 << 2)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_CHAP_SUCCESS            (1 << 3)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_FCOE_LUN_FOUND                        
(1 << 3)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_LOGGED_INTO_TGT               (1 << 4)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_IMG_DOWNLOADED                        
(1 << 5)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF                    (1 << 6)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED                          0
+
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_SHIFT              0
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK               0x000000FF
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT             0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK                      0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK                0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK                0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK                0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK         0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT              0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK               0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN            0x1
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING            0x3
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED   0x4
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE             0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT         0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK          0xFFFFFFFF
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER         0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON           0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF          0x2
+
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT            0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK             0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT            2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK             0x000000FC
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT     8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK      0x0000FF00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT          16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK           0xFFFF0000
+
+#define DRV_MB_PARAM_GPIO_NUMBER_SHIFT         0
+#define DRV_MB_PARAM_GPIO_NUMBER_MASK          0x0000FFFF
+#define DRV_MB_PARAM_GPIO_VALUE_SHIFT          16
+#define DRV_MB_PARAM_GPIO_VALUE_MASK           0xFFFF0000
+
+       u32 fw_mb_header;
+#define FW_MSG_CODE_MASK                        0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE            0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT               0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION           0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA        0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI        0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG       0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE               0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE           0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT             0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION         0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE             0x21100000
+#define FW_MSG_CODE_INIT_PHY_DONE              0x21200000
+#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS  0x21300000
+#define FW_MSG_CODE_LINK_RESET_DONE            0x23000000
+#define FW_MSG_CODE_SET_LLDP_DONE               0x24000000
+#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT  0x24010000
+#define FW_MSG_CODE_SET_DCBX_DONE               0x25000000
+#define FW_MSG_CODE_UPDATE_CURR_CFG_DONE        0x26000000
+#define FW_MSG_CODE_UPDATE_BUS_NUM_DONE         0x27000000
+#define FW_MSG_CODE_UPDATE_BOOT_PROGRESS_DONE   0x28000000
+#define FW_MSG_CODE_UPDATE_STORM_FW_VER_DONE    0x29000000
+#define FW_MSG_CODE_UPDATE_DRIVER_STATE_DONE    0x31000000
+#define FW_MSG_CODE_DRV_MSG_CODE_BW_UPDATE_DONE 0x32000000
+#define FW_MSG_CODE_DRV_MSG_CODE_MTU_SIZE_DONE  0x33000000
+#define FW_MSG_CODE_NIG_DRAIN_DONE              0x30000000
+#define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE        0xb0010000
+#define FW_MSG_CODE_FLR_ACK                     0x02000000
+#define FW_MSG_CODE_FLR_NACK                    0x02100000
+#define FW_MSG_CODE_SET_DRIVER_DONE            0x02200000
+#define FW_MSG_CODE_SET_VMAC_SUCCESS            0x02300000
+#define FW_MSG_CODE_SET_VMAC_FAIL               0x02400000
+
+#define FW_MSG_CODE_NVM_OK                     0x00010000
+#define FW_MSG_CODE_NVM_INVALID_MODE           0x00020000
+#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED      0x00030000
+#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE        0x00040000
+#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND      0x00050000
+#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND         0x00060000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
+#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
+#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC    0x00090000
+#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR    0x000a0000
+#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE    0x000b0000
+#define FW_MSG_CODE_NVM_FILE_NOT_FOUND         0x000c0000
+#define FW_MSG_CODE_NVM_OPERATION_FAILED       0x000d0000
+#define FW_MSG_CODE_NVM_FAILED_UNALIGNED       0x000e0000
+#define FW_MSG_CODE_NVM_BAD_OFFSET             0x000f0000
+#define FW_MSG_CODE_NVM_BAD_SIGNATURE          0x00100000
+#define FW_MSG_CODE_NVM_FILE_READ_ONLY         0x00200000
+#define FW_MSG_CODE_NVM_UNKNOWN_FILE           0x00300000
+#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK     0x00400000
+#define FW_MSG_CODE_MCP_RESET_REJECT           0x00600000
+#define FW_MSG_CODE_PHY_OK                     0x00110000
+#define FW_MSG_CODE_PHY_ERROR                  0x00120000
+#define FW_MSG_CODE_SET_SECURE_MODE_ERROR      0x00130000
+#define FW_MSG_CODE_SET_SECURE_MODE_OK         0x00140000
+#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR                0x00150000
+#define FW_MSG_CODE_OK                         0x00160000
+#define FW_MSG_CODE_LED_MODE_INVALID           0x00170000
+#define FW_MSG_CODE_PHY_DIAG_OK           0x00160000
+#define FW_MSG_CODE_PHY_DIAG_ERROR        0x00170000
+#define FW_MSG_CODE_INIT_HW_FAILED_TO_ALLOCATE_PAGE    0x00040000
+#define FW_MSG_CODE_INIT_HW_FAILED_BAD_STATE    0x00170000
+#define FW_MSG_CODE_INIT_HW_FAILED_TO_SET_WINDOW 0x000d0000
+#define FW_MSG_CODE_INIT_HW_FAILED_NO_IMAGE    0x000c0000
+#define FW_MSG_CODE_INIT_HW_FAILED_VERSION_MISMATCH    0x00100000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK           0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR        0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT            0x00020000
+#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE                0x000f0000
+#define FW_MSG_CODE_GPIO_OK           0x00160000
+#define FW_MSG_CODE_GPIO_DIRECTION_ERR        0x00170000
+#define FW_MSG_CODE_GPIO_CTRL_ERR              0x00020000
+#define FW_MSG_CODE_GPIO_INVALID               0x000f0000
+#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
+
+#define FW_MSG_SEQ_NUMBER_MASK                  0x0000ffff
+
+       u32 fw_mb_param;
+
+       u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK                      0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK              0xffff0000
+       /*
+        * The system time is in the format of
+        * (year-2001)*12*32 + month*32 + day.
+        */
+#define DRV_PULSE_ALWAYS_ALIVE                  0x00008000
+       /*
+        * Indicate to the firmware not to go into the
+        * OS-absent when it is not getting driver pulse.
+        * This is used for debugging as well for PXE(MBA).
+        */
+
+       u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK                      0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE                  0x00008000
+       /* Indicates to the driver not to assert due to lack
+        * of MCP response */
+#define MCP_EVENT_MASK                          0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ        0x00010000
+
+       union drv_union_data union_data;
+
+};
+
+/* MFW - DRV MB */
+/**********************************************************************
+ * Description
+ *   Incremental Aggregative
+ *   8-bit MFW counter per message
+ *   8-bit ack-counter per message
+ * Capabilities
+ *   Provides up to 256 aggregative message per type
+ *   Provides 4 message types in dword
+ *   Message type pointers to byte offset
+ *   Backward Compatibility by using sizeof for the counters.
+ *   No lock requires for 32bit messages
+ * Limitations:
+ * In case of messages greater than 32bit, a dedicated mechanism(e.g lock)
+ * is required to prevent data corruption.
+ **********************************************************************/
+/* Aggregative MFW -> driver message identifiers. Each ID selects one
+ * 8-bit counter slot inside public_mfw_mb.msg[]/ack[] (four per dword).
+ * Values are fixed ABI shared with the management firmware, so they are
+ * spelled out explicitly rather than left implicit.
+ */
+enum MFW_DRV_MSG_TYPE {
+       MFW_DRV_MSG_LINK_CHANGE = 0,
+       MFW_DRV_MSG_FLR_FW_ACK_FAILED = 1,
+       MFW_DRV_MSG_VF_DISABLED = 2,
+       MFW_DRV_MSG_LLDP_DATA_UPDATED = 3,
+       MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED = 4,
+       MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED = 5,
+       MFW_DRV_MSG_ERROR_RECOVERY = 6,
+       MFW_DRV_MSG_BW_UPDATE = 7,
+       MFW_DRV_MSG_S_TAG_UPDATE = 8,
+       MFW_DRV_MSG_GET_LAN_STATS = 9,
+       MFW_DRV_MSG_GET_FCOE_STATS = 10,
+       MFW_DRV_MSG_GET_ISCSI_STATS = 11,
+       MFW_DRV_MSG_GET_RDMA_STATS = 12,
+       MFW_DRV_MSG_FAILURE_DETECTED = 13,
+       MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE = 14,
+       MFW_DRV_MSG_MAX = 15
+};
+
+/* Each message type owns one byte within the msg[]/ack[] dword arrays
+ * (four 8-bit counters per dword). All macro parameters are fully
+ * parenthesized; the previous definitions expanded expression arguments
+ * incorrectly (e.g. MFW_DRV_MSG_DWORD(a + b) became a + (b >> 2)).
+ */
+/* Number of dwords needed to hold one byte-counter per message. */
+#define MFW_DRV_MSG_MAX_DWORDS(msgs)   ((((msgs) - 1) >> 2) + 1)
+/* Dword index holding the counter for msg_id. */
+#define MFW_DRV_MSG_DWORD(msg_id)      ((msg_id) >> 2)
+/* Bit offset of the counter for msg_id within its dword. */
+#define MFW_DRV_MSG_OFFSET(msg_id)     (((msg_id) & 0x3) << 3)
+/* 8-bit mask of the counter for msg_id within its dword. */
+#define MFW_DRV_MSG_MASK(msg_id)       (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+#ifdef BIG_ENDIAN              /* Like MFW */
+/* Acknowledge message msg_id by post-incrementing its 8-bit counter in
+ * the ack array pointed to by msg_p. Fix: the original expansion carried
+ * a stray ';' inside the parenthesized expression, which was a syntax
+ * error at any expansion site; arguments are now parenthesized as well.
+ */
+#define DRV_ACK_MSG(msg_p, msg_id) \
+((u8)((u8 *)(msg_p))[msg_id]++)
+#else
+/* Little-endian hosts flip the byte index within the dword so the byte
+ * touched matches the big-endian MFW layout.
+ */
+#define DRV_ACK_MSG(msg_p, msg_id) \
+((u8)((u8 *)(msg_p))[(((msg_id) & ~3) | ((~(msg_id)) & 3))]++)
+#endif
+
+/* Bump the MFW mailbox byte-counter for msg_id of the given shmem
+ * function. Fix: removed the stray ';' embedded in the expression, which
+ * made the macro a syntax error wherever it was expanded.
+ * NOTE(review): MFW_MB_P is defined elsewhere in this file; assumed to
+ * yield a pointer to struct public_mfw_mb.
+ */
+#define MFW_DRV_UPDATE(shmem_func, msg_id) \
+((u8)((u8 *)(MFW_MB_P(shmem_func)->msg))[msg_id]++)
+
+/* MFW -> driver aggregative mailbox: one 8-bit producer counter (msg)
+ * and one 8-bit driver acknowledge counter (ack) per message type,
+ * packed four counters per dword (see MFW_DRV_MSG_* helpers above).
+ */
+struct public_mfw_mb {
+       u32 sup_msgs;           /* Assigned with MFW_DRV_MSG_MAX */
+       u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];       /* MFW counters */
+       u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];       /* driver acks */
+};
+
+/**************************************/
+/*                                    */
+/*     P U B L I C       D A T A      */
+/*                                    */
+/**************************************/
+/* Indices into mcp_public_data.sections[]; each entry locates one public
+ * data section (see struct mcp_public_data below).
+ */
+enum public_sections {
+       PUBLIC_DRV_MB,          /* Points to the first drv_mb of path0 */
+       PUBLIC_MFW_MB,          /* Points to the first mfw_mb of path0 */
+       PUBLIC_GLOBAL,          /* Points to public_global */
+       PUBLIC_PATH,            /* Points to the first public_path */
+       PUBLIC_PORT,            /* Points to the first public_port */
+       PUBLIC_FUNC,            /* Points to the first public_func */
+       PUBLIC_MAX_SECTIONS     /* Section count; not a valid index */
+};
+
+/* Driver version information reported to the management firmware. */
+struct drv_ver_info_stc {
+       u32 ver;                /* driver version number */
+       u8 name[32];            /* driver name; presumably NUL-padded -- TODO confirm */
+};
+
+/* Runtime data needs about 1/2K. We use 2K to be on the safe side.
+ * Please make sure data does not exceed this size.
+ */
+#define NUM_RUNTIME_DWORDS 16
+/* Init-HW runtime data: a validity bitmask plus the data dwords.
+ * init_hw_bitmask holds 16*32 = 512 bits, one per entry of the
+ * 512-dword init_hw_data array.
+ */
+struct drv_init_hw_stc {
+       u32 init_hw_bitmask[NUM_RUNTIME_DWORDS];        /* one bit per data dword */
+       u32 init_hw_data[NUM_RUNTIME_DWORDS * 32];
+};
+
+/* Top-level layout of the MCP public data region. */
+struct mcp_public_data {
+       /* The sections field is an array, indexed by enum public_sections */
+       u32 num_sections;
+       offsize_t sections[PUBLIC_MAX_SECTIONS];
+       struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; /* per-function driver mailbox */
+       struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; /* per-function MFW mailbox */
+       struct public_global global;
+       struct public_path path[MCP_GLOB_PATH_MAX];
+       struct public_port port[MCP_GLOB_PORT_MAX];
+       struct public_func func[MCP_GLOB_FUNC_MAX];
+};
+
+/* I2C parameters for transceiver EEPROM access; 0xa0 is the standard
+ * SFP module EEPROM address (per SFF conventions -- confirm for this HW).
+ */
+#define I2C_TRANSCEIVER_ADDR   0xa0
+#define MAX_I2C_TRANSACTION_SIZE       16
+#define MAX_I2C_TRANSCEIVER_PAGE_SIZE  256
+
+#endif /* MCP_PUBLIC_H */
diff --git a/drivers/net/qede/ecore/nvm_cfg.h b/drivers/net/qede/ecore/nvm_cfg.h
new file mode 100644
index 0000000..9ef32ec
--- /dev/null
+++ b/drivers/net/qede/ecore/nvm_cfg.h
@@ -0,0 +1,935 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/****************************************************************************
+ *
+ * Name:        nvm_cfg.h
+ *
+ * Description: NVM config file - Generated file from nvm cfg excel.
+ *              DO NOT MODIFY !!!
+ *
+ * Created:     1/14/2016
+ *
+ ****************************************************************************/
+
+#ifndef NVM_CFG_H
+#define NVM_CFG_H
+
+/* MAC address as stored in NVM config: the upper 16 bits of the address
+ * live in mac_addr_hi (per NVM_CFG_MAC_ADDRESS_HI_MASK/_OFFSET), the
+ * lower 32 bits in mac_addr_lo.
+ */
+struct nvm_cfg_mac_address {
+       u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK                             0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET                           0
+       u32 mac_addr_lo;        /* lower 32 bits of the MAC address */
+};
+
+/******************************************
+ * nvm_cfg1 structs
+ ******************************************/
+struct nvm_cfg1_glob {
+       u32 generic_cont0;      /* 0x0 */
+#define NVM_CFG1_GLOB_BOARD_SWAP_MASK                           0x0000000F
+#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET                         0
+#define NVM_CFG1_GLOB_BOARD_SWAP_NONE                           0x0
+#define NVM_CFG1_GLOB_BOARD_SWAP_PATH                           0x1
+#define NVM_CFG1_GLOB_BOARD_SWAP_PORT                           0x2
+#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH                           0x3
+#define NVM_CFG1_GLOB_MF_MODE_MASK                              0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET                            4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED                        0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT                           0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4                             0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0                           0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5                           0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0                           0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD                                0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP                               0x7
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK              0x00001000
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET            12
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED          0x0
+#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED           0x1
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK                       0x001FE000
+#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET                     13
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK                      0x1FE00000
+#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET                    21
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK                         0x20000000
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET                       29
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED                     0x0
+#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED                      0x1
+#define NVM_CFG1_GLOB_ENABLE_ATC_MASK                           0x40000000
+#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET                         30
+#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED                       0x0
+#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED                        0x1
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK                       0x80000000
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET                     31
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED                   0x0
+#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED                    0x1
+       u32 engineering_change[3];      /* 0x4 */
+       u32 manufacturing_id;   /* 0x10 */
+       u32 serial_number[4];   /* 0x14 */
+       u32 pcie_cfg;           /* 0x24 */
+#define NVM_CFG1_GLOB_PCI_GEN_MASK                              0x00000003
+#define NVM_CFG1_GLOB_PCI_GEN_OFFSET                            0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1                          0x0
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2                          0x1
+#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3                          0x2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK                   0x00000004
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET                 2
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED               0x0
+#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED                0x1
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK                         0x00000018
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET                       3
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED               0x0
+#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED                  0x2
+#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_MASK     0x00000020
+#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_OFFSET   5
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK                 0x000003C0
+#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET               6
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK                     0x00001C00
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET                   10
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW                       0x0
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB                      0x1
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB                    0x2
+#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB                    0x3
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK                     0x001FE000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET                   13
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK                     0x1FE00000
+#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET                   21
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK                      0x60000000
+#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET                    29
+       /*  Set the duration, in seconds, fan failure signal should be
+          sampled */
+#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_MASK        0x80000000
+#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_OFFSET      31
+       u32 mgmt_traffic;       /* 0x28 */
+#define NVM_CFG1_GLOB_RESERVED60_MASK                           0x00000001
+#define NVM_CFG1_GLOB_RESERVED60_OFFSET                         0
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK                     0x000001FE
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET                   1
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK                     0x0001FE00
+#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET                   9
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK                        0x01FE0000
+#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET                      17
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK                        0x06000000
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET                      25
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED                    0x0
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII                        0x1
+#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII                       0x2
+#define NVM_CFG1_GLOB_AUX_MODE_MASK                             0x78000000
+#define NVM_CFG1_GLOB_AUX_MODE_OFFSET                           27
+#define NVM_CFG1_GLOB_AUX_MODE_DEFAULT                          0x0
+#define NVM_CFG1_GLOB_AUX_MODE_SMBUS_ONLY                       0x1
+       /*  Indicates whether external thermal sensor is available */
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_MASK              0x80000000
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_OFFSET            31
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_DISABLED          0x0
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ENABLED           0x1
+       u32 core_cfg;           /* 0x2C */
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK                    0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET                  0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G                0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G                0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G               0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F              0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E              0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G                0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G                0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G                0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G                0xD
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK             0x00000100
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET           8
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED         0x0
+#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED          0x1
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK            0x00000200
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET          9
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED        0x0
+#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED         0x1
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK                      0x0003FC00
+#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET                    10
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK                     0x03FC0000
+#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET                   18
+#define NVM_CFG1_GLOB_AVS_MODE_MASK                             0x1C000000
+#define NVM_CFG1_GLOB_AVS_MODE_OFFSET                           26
+#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP                       0x0
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_CFG                    0x1
+#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_OTP                    0x2
+#define NVM_CFG1_GLOB_AVS_MODE_DISABLED                         0x3
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK                 0x60000000
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET               29
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED             0x0
+#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED              0x1
+       u32 e_lane_cfg1;        /* 0x30 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK                        0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET                      0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK                        0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET                      4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK                        0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET                      8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK                        0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET                      12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK                        0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET                      16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK                        0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET                      20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK                        0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET                      24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK                        0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET                      28
+       u32 e_lane_cfg2;        /* 0x34 */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK                    0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET                  0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK                    0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET                  1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK                    0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET                  2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK                    0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET                  3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK                    0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET                  4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK                    0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET                  5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK                    0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET                  6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK                    0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET                  7
+#define NVM_CFG1_GLOB_SMBUS_MODE_MASK                           0x00000F00
+#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET                         8
+#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED                       0x0
+#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ                         0x1
+#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ                         0x2
+#define NVM_CFG1_GLOB_NCSI_MASK                                 0x0000F000
+#define NVM_CFG1_GLOB_NCSI_OFFSET                               12
+#define NVM_CFG1_GLOB_NCSI_DISABLED                             0x0
+#define NVM_CFG1_GLOB_NCSI_ENABLED                              0x1
+       /*  Maximum advertised pcie link width */
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_MASK                       0x000F0000
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_OFFSET                     16
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_16_LANES                   0x0
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_1_LANE                     0x1
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_2_LANES                    0x2
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_4_LANES                    0x3
+#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_8_LANES                    0x4
+       /*  ASPM L1 mode */
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_MASK                         0x00300000
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_OFFSET                       20
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_FORCED                       0x0
+#define NVM_CFG1_GLOB_ASPM_L1_MODE_DYNAMIC_LOW_LATENCY          0x1
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_MASK                  0x01C00000
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_OFFSET                22
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_DISABLED              0x0
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C           0x1
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY              0x2
+#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS         0x3
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK          0x06000000
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET        25
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE       0x0
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL      0x1
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_EXTERNAL      0x2
+#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_BOTH          0x3
+       /*  Set the PLDM sensor modes */
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_MASK                     0x38000000
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_OFFSET                   27
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL                 0x0
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL                 0x1
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH                     0x2
+       u32 f_lane_cfg1;        /* 0x38 */
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK                        0x0000000F
+#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET                      0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK                        0x000000F0
+#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET                      4
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK                        0x00000F00
+#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET                      8
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK                        0x0000F000
+#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET                      12
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK                        0x000F0000
+#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET                      16
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK                        0x00F00000
+#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET                      20
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK                        0x0F000000
+#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET                      24
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK                        0xF0000000
+#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET                      28
+       u32 f_lane_cfg2;        /* 0x3C */
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK                    0x00000001
+#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET                  0
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK                    0x00000002
+#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET                  1
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK                    0x00000004
+#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET                  2
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK                    0x00000008
+#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET                  3
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK                    0x00000010
+#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET                  4
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK                    0x00000020
+#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET                  5
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK                    0x00000040
+#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET                  6
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK                    0x00000080
+#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET                  7
+       /*  Control the period between two successive checks */
+#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK    0x0000FF00
+#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET  8
+       /*  Set shutdown temperature */
+#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK       0x00FF0000
+#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET     16
+       /*  Set max. count for over operational temperature */
+#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK             0xFF000000
+#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_OFFSET           24
+       u32 eagle_preemphasis;  /* 0x40 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK                         0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET                       0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK                         0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET                       8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK                         0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET                       16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK                         0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET                       24
+       u32 eagle_driver_current;       /* 0x44 */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK                            0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET                          0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK                            0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET                          8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK                            0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET                          16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK                            0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET                          24
+       u32 falcon_preemphasis; /* 0x48 */
+#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK                         0x000000FF
+#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET                       0
+#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK                         0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET                       8
+#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK                         0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET                       16
+#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK                         0xFF000000
+#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET                       24
+       u32 falcon_driver_current;      /* 0x4C */
+#define NVM_CFG1_GLOB_LANE0_AMP_MASK                            0x000000FF
+#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET                          0
+#define NVM_CFG1_GLOB_LANE1_AMP_MASK                            0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET                          8
+#define NVM_CFG1_GLOB_LANE2_AMP_MASK                            0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET                          16
+#define NVM_CFG1_GLOB_LANE3_AMP_MASK                            0xFF000000
+#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET                          24
+       u32 pci_id;             /* 0x50 */
+#define NVM_CFG1_GLOB_VENDOR_ID_MASK                            0x0000FFFF
+#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET                          0
+       /*  Set caution temperature */
+#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK        0x00FF0000
+#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET      16
+       /*  Set external thermal sensor I2C address */
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK      0xFF000000
+#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET    24
+       u32 pci_subsys_id;      /* 0x54 */
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK                  0x0000FFFF
+#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET                0
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK                  0xFFFF0000
+#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET                16
+       u32 bar;                /* 0x58 */
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK                   0x0000000F
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET                 0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED               0x0
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K                     0x1
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K                     0x2
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K                     0x3
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K                    0x4
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K                    0x5
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K                    0x6
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K                   0x7
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K                   0x8
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K                   0x9
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M                     0xA
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M                     0xB
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M                     0xC
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M                     0xD
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M                    0xE
+#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M                    0xF
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK                     0x000000F0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET                   4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED                 0x0
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K                       0x1
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K                       0x2
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K                      0x3
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K                      0x4
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K                      0x5
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K                     0x6
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K                     0x7
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K                     0x8
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M                       0x9
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M                       0xA
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M                       0xB
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M                       0xC
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M                      0xD
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M                      0xE
+#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M                      0xF
+#define NVM_CFG1_GLOB_BAR2_SIZE_MASK                            0x00000F00
+#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET                          8
+#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED                        0x0
+#define NVM_CFG1_GLOB_BAR2_SIZE_64K                             0x1
+#define NVM_CFG1_GLOB_BAR2_SIZE_128K                            0x2
+#define NVM_CFG1_GLOB_BAR2_SIZE_256K                            0x3
+#define NVM_CFG1_GLOB_BAR2_SIZE_512K                            0x4
+#define NVM_CFG1_GLOB_BAR2_SIZE_1M                              0x5
+#define NVM_CFG1_GLOB_BAR2_SIZE_2M                              0x6
+#define NVM_CFG1_GLOB_BAR2_SIZE_4M                              0x7
+#define NVM_CFG1_GLOB_BAR2_SIZE_8M                              0x8
+#define NVM_CFG1_GLOB_BAR2_SIZE_16M                             0x9
+#define NVM_CFG1_GLOB_BAR2_SIZE_32M                             0xA
+#define NVM_CFG1_GLOB_BAR2_SIZE_64M                             0xB
+#define NVM_CFG1_GLOB_BAR2_SIZE_128M                            0xC
+#define NVM_CFG1_GLOB_BAR2_SIZE_256M                            0xD
+#define NVM_CFG1_GLOB_BAR2_SIZE_512M                            0xE
+#define NVM_CFG1_GLOB_BAR2_SIZE_1G                              0xF
+       /*  Set the duration, in seconds, for which the fan failure
+          signal should be sampled */
+#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_MASK                 0x0000F000
+#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_OFFSET               12
+       u32 eagle_txfir_main;   /* 0x5C */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET                   24
+       u32 eagle_txfir_post;   /* 0x60 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET                   24
+       u32 falcon_txfir_main;  /* 0x64 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET                   24
+       u32 falcon_txfir_post;  /* 0x68 */
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK                     0x000000FF
+#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET                   0
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK                     0x0000FF00
+#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET                   8
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK                     0x00FF0000
+#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET                   16
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK                     0xFF000000
+#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET                   24
+       u32 manufacture_ver;    /* 0x6C */
+#define NVM_CFG1_GLOB_MANUF0_VER_MASK                           0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET                         0
+#define NVM_CFG1_GLOB_MANUF1_VER_MASK                           0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET                         6
+#define NVM_CFG1_GLOB_MANUF2_VER_MASK                           0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET                         12
+#define NVM_CFG1_GLOB_MANUF3_VER_MASK                           0x00FC0000
+#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET                         18
+#define NVM_CFG1_GLOB_MANUF4_VER_MASK                           0x3F000000
+#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET                         24
+       u32 manufacture_time;   /* 0x70 */
+#define NVM_CFG1_GLOB_MANUF0_TIME_MASK                          0x0000003F
+#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET                        0
+#define NVM_CFG1_GLOB_MANUF1_TIME_MASK                          0x00000FC0
+#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET                        6
+#define NVM_CFG1_GLOB_MANUF2_TIME_MASK                          0x0003F000
+#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET                        12
+       u32 led_global_settings;        /* 0x74 */
+#define NVM_CFG1_GLOB_LED_SWAP_0_MASK                           0x0000000F
+#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET                         0
+#define NVM_CFG1_GLOB_LED_SWAP_1_MASK                           0x000000F0
+#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET                         4
+#define NVM_CFG1_GLOB_LED_SWAP_2_MASK                           0x00000F00
+#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET                         8
+#define NVM_CFG1_GLOB_LED_SWAP_3_MASK                           0x0000F000
+#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET                         12
+       u32 generic_cont1;      /* 0x78 */
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK                         0x000003FF
+#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET                       0
+       u32 mbi_version;        /* 0x7C */
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK                        0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET                      0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK                        0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET                      8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK                        0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET                      16
+       u32 mbi_date;           /* 0x80 */
+       u32 misc_sig;           /* 0x84 */
+       /*  Define the GPIO mapping to switch i2c mux */
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK                   0x000000FF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET                 0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK                   0x0000FF00
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET                 8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA                      0x0
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0                   0x1
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1                   0x2
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2                   0x3
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3                   0x4
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4                   0x5
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5                   0x6
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6                   0x7
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7                   0x8
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8                   0x9
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9                   0xA
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10                  0xB
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11                  0xC
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12                  0xD
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13                  0xE
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14                  0xF
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15                  0x10
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16                  0x11
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17                  0x12
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18                  0x13
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19                  0x14
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20                  0x15
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21                  0x16
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22                  0x17
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23                  0x18
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24                  0x19
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25                  0x1A
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26                  0x1B
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27                  0x1C
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28                  0x1D
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29                  0x1E
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30                  0x1F
+#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31                  0x20
+       u32 device_capabilities;        /* 0x88 */
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET              0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE                  0x2
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI                 0x4
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE                  0x8
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP                 0x10
+       u32 power_dissipated;   /* 0x8C */
+#define NVM_CFG1_GLOB_POWER_DIS_D0_MASK                         0x000000FF
+#define NVM_CFG1_GLOB_POWER_DIS_D0_OFFSET                       0
+#define NVM_CFG1_GLOB_POWER_DIS_D1_MASK                         0x0000FF00
+#define NVM_CFG1_GLOB_POWER_DIS_D1_OFFSET                       8
+#define NVM_CFG1_GLOB_POWER_DIS_D2_MASK                         0x00FF0000
+#define NVM_CFG1_GLOB_POWER_DIS_D2_OFFSET                       16
+#define NVM_CFG1_GLOB_POWER_DIS_D3_MASK                         0xFF000000
+#define NVM_CFG1_GLOB_POWER_DIS_D3_OFFSET                       24
+       u32 power_consumed;     /* 0x90 */
+#define NVM_CFG1_GLOB_POWER_CONS_D0_MASK                        0x000000FF
+#define NVM_CFG1_GLOB_POWER_CONS_D0_OFFSET                      0
+#define NVM_CFG1_GLOB_POWER_CONS_D1_MASK                        0x0000FF00
+#define NVM_CFG1_GLOB_POWER_CONS_D1_OFFSET                      8
+#define NVM_CFG1_GLOB_POWER_CONS_D2_MASK                        0x00FF0000
+#define NVM_CFG1_GLOB_POWER_CONS_D2_OFFSET                      16
+#define NVM_CFG1_GLOB_POWER_CONS_D3_MASK                        0xFF000000
+#define NVM_CFG1_GLOB_POWER_CONS_D3_OFFSET                      24
+       u32 efi_version;        /* 0x94 */
+       u32 reserved[42];       /* 0x98 */
+};
+
+/* Per-path NVM configuration section; no fields are defined yet, the
+ * whole area is reserved (offset comment is relative to the struct start).
+ */
+struct nvm_cfg1_path {
+       u32 reserved[30];       /* 0x0 */
+};
+
+/* Per-port NVM configuration section.  Each u32 field is a packed register
+ * image; the MASK/OFFSET macro pairs below each field select its bitfields,
+ * and the remaining macros enumerate the legal values for those bitfields.
+ * The trailing hex comment on each field is its byte offset within this
+ * struct.  Layout is fixed firmware ABI — do not reorder or resize fields.
+ */
+struct nvm_cfg1_port {
+       u32 reserved__m_relocated_to_option_123;        /* 0x0 */
+       u32 reserved__m_relocated_to_option_124;        /* 0x4 */
+       u32 generic_cont0;      /* 0x8 */
+#define NVM_CFG1_PORT_LED_MODE_MASK                             0x000000FF
+#define NVM_CFG1_PORT_LED_MODE_OFFSET                           0
+#define NVM_CFG1_PORT_LED_MODE_MAC1                             0x0
+#define NVM_CFG1_PORT_LED_MODE_PHY1                             0x1
+#define NVM_CFG1_PORT_LED_MODE_PHY2                             0x2
+#define NVM_CFG1_PORT_LED_MODE_PHY3                             0x3
+#define NVM_CFG1_PORT_LED_MODE_MAC2                             0x4
+#define NVM_CFG1_PORT_LED_MODE_PHY4                             0x5
+#define NVM_CFG1_PORT_LED_MODE_PHY5                             0x6
+#define NVM_CFG1_PORT_LED_MODE_PHY6                             0x7
+#define NVM_CFG1_PORT_LED_MODE_MAC3                             0x8
+#define NVM_CFG1_PORT_LED_MODE_PHY7                             0x9
+#define NVM_CFG1_PORT_LED_MODE_PHY8                             0xA
+#define NVM_CFG1_PORT_LED_MODE_PHY9                             0xB
+#define NVM_CFG1_PORT_LED_MODE_MAC4                             0xC
+#define NVM_CFG1_PORT_LED_MODE_PHY10                            0xD
+#define NVM_CFG1_PORT_LED_MODE_PHY11                            0xE
+#define NVM_CFG1_PORT_LED_MODE_PHY12                            0xF
+#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK                        0x0000FF00
+#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET                      8
+#define NVM_CFG1_PORT_DCBX_MODE_MASK                            0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET                          16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED                        0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE                            0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE                             0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC                         0x3
+	/*  Bitmap of protocols enabled by default on this port */
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK            0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET          20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET        0x1
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE            0x2
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI           0x4
+       u32 pcie_cfg;           /* 0xC */
+#define NVM_CFG1_PORT_RESERVED15_MASK                           0x00000007
+#define NVM_CFG1_PORT_RESERVED15_OFFSET                         0
+       u32 features;           /* 0x10 */
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK           0x00000001
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET         0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED       0x0
+#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED        0x1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK                     0x00000002
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET                   1
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED                 0x0
+#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED                  0x1
+	/*  Speed capability bitmaps: low half for the driver, high half for
+	   the MFW; the *G values are bit flags, not an enumeration */
+       u32 speed_cap_mask;     /* 0x14 */
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK            0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET          0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G              0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G             0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G             0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G             0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G             0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G            0x40
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK            0xFFFF0000
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET          16
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G              0x1
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G             0x2
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G             0x8
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G             0x10
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G             0x20
+#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G            0x40
+	/*  NOTE: unlike speed_cap_mask above, the *LINK_SPEED values here are
+	   an enumeration (single selection), not bit flags */
+       u32 link_settings;      /* 0x18 */
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK                       0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET                     0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG                    0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G                         0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G                        0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G                        0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G                        0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G                        0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G                       0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ                  0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK                     0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET                   4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG                  0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX                       0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX                       0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK                       0x00000780
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET                     7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG                    0x0
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G                         0x1
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G                        0x2
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G                        0x4
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G                        0x5
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G                        0x6
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G                       0x7
+#define NVM_CFG1_PORT_MFW_LINK_SPEED_SMARTLINQ                  0x8
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK                     0x00003800
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET                   11
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG                  0x1
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX                       0x2
+#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX                       0x4
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK      0x00004000
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET    14
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED  0x0
+#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED   0x1
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_MASK                       0x00018000
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_OFFSET                     15
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_CONSORTIUM                 0x0
+#define NVM_CFG1_PORT_AN_25G_50G_OUI_BAM                        0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK                       0x000E0000
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET                     17
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_NONE             0x0
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_FIRECODE         0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_RS               0x2
+       u32 phy_cfg;            /* 0x1C */
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK                  0x0000FFFF
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET                0
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG                 0x1
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER             0x2
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER                 0x4
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN       0x8
+#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN        0x10
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK                 0x00FF0000
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET               16
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS               0x0
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR                   0x2
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2                  0x3
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4                  0x4
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI                  0x8
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI                  0x9
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X                0xB
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII                0xC
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI                0x11
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI                0x12
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI                 0x21
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI                 0x22
+#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI               0x31
+#define NVM_CFG1_PORT_AN_MODE_MASK                              0xFF000000
+#define NVM_CFG1_PORT_AN_MODE_OFFSET                            24
+#define NVM_CFG1_PORT_AN_MODE_NONE                              0x0
+#define NVM_CFG1_PORT_AN_MODE_CL73                              0x1
+#define NVM_CFG1_PORT_AN_MODE_CL37                              0x2
+#define NVM_CFG1_PORT_AN_MODE_CL73_BAM                          0x3
+#define NVM_CFG1_PORT_AN_MODE_CL37_BAM                          0x4
+#define NVM_CFG1_PORT_AN_MODE_HPAM                              0x5
+#define NVM_CFG1_PORT_AN_MODE_SGMII                             0x6
+       u32 mgmt_traffic;       /* 0x20 */
+#define NVM_CFG1_PORT_RESERVED61_MASK                           0x0000000F
+#define NVM_CFG1_PORT_RESERVED61_OFFSET                         0
+       u32 ext_phy;            /* 0x24 */
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK                    0x000000FF
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET                  0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE                    0x0
+#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844                0x1
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK                 0x0000FF00
+#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET               8
+       u32 mba_cfg1;           /* 0x28 */
+#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK                        0x00000001
+#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET                      0
+#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED                    0x0
+#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED                     0x1
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK            0x00000006
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET          1
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK                       0x00000078
+#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET                     3
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK                    0x00000080
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET                  7
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S                  0x0
+#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B                  0x1
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK                0x00000100
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET              8
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED            0x0
+#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED             0x1
+#define NVM_CFG1_PORT_RESERVED5_MASK                            0x0001FE00
+#define NVM_CFG1_PORT_RESERVED5_OFFSET                          9
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK                   0x001E0000
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET                 17
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG                0x0
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G                     0x1
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G                    0x2
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G                    0x4
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G                    0x5
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G                    0x6
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G                   0x7
+#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ              0x8
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK     0x00E00000
+#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET   21
+       u32 mba_cfg2;           /* 0x2C */
+#define NVM_CFG1_PORT_RESERVED65_MASK                           0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED65_OFFSET                         0
+#define NVM_CFG1_PORT_RESERVED66_MASK                           0x00010000
+#define NVM_CFG1_PORT_RESERVED66_OFFSET                         16
+       u32 vf_cfg;             /* 0x30 */
+#define NVM_CFG1_PORT_RESERVED8_MASK                            0x0000FFFF
+#define NVM_CFG1_PORT_RESERVED8_OFFSET                          0
+#define NVM_CFG1_PORT_RESERVED6_MASK                            0x000F0000
+#define NVM_CFG1_PORT_RESERVED6_OFFSET                          16
+       struct nvm_cfg_mac_address lldp_mac_address;    /* 0x34 */
+       u32 led_port_settings;  /* 0x3C */
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK                   0x000000FF
+#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET                 0
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK                   0x0000FF00
+#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET                 8
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK                   0x00FF0000
+#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET                 16
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G                      0x1
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G                     0x2
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G                     0x8
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G                     0x10
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G                     0x20
+#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G                    0x40
+       u32 transceiver_00;     /* 0x40 */
+       /*  Define for mapping of transceiver signal module absent */
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK                     0x000000FF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET                   0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA                       0x0
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0                    0x1
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1                    0x2
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2                    0x3
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3                    0x4
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4                    0x5
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5                    0x6
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6                    0x7
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7                    0x8
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8                    0x9
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9                    0xA
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10                   0xB
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11                   0xC
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12                   0xD
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13                   0xE
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14                   0xF
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15                   0x10
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16                   0x11
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17                   0x12
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18                   0x13
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19                   0x14
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20                   0x15
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21                   0x16
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22                   0x17
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23                   0x18
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24                   0x19
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25                   0x1A
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26                   0x1B
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27                   0x1C
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28                   0x1D
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29                   0x1E
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30                   0x1F
+#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31                   0x20
+       /*  Define the GPIO mux settings to switch i2c mux to this port */
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK                  0x00000F00
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET                8
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK                  0x0000F000
+#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET                12
+       u32 device_ids;         /* 0x44 */
+#define NVM_CFG1_PORT_ETH_DID_SUFFIX_MASK                       0x000000FF
+#define NVM_CFG1_PORT_ETH_DID_SUFFIX_OFFSET                     0
+#define NVM_CFG1_PORT_FCOE_DID_SUFFIX_MASK                      0x0000FF00
+#define NVM_CFG1_PORT_FCOE_DID_SUFFIX_OFFSET                    8
+#define NVM_CFG1_PORT_ISCSI_DID_SUFFIX_MASK                     0x00FF0000
+#define NVM_CFG1_PORT_ISCSI_DID_SUFFIX_OFFSET                   16
+#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_MASK                  0xFF000000
+#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_OFFSET                24
+       u32 board_cfg;          /* 0x48 */
+       /*  This field defines the board technology
+          (backplane, transceiver, external PHY) */
+#define NVM_CFG1_PORT_PORT_TYPE_MASK                            0x000000FF
+#define NVM_CFG1_PORT_PORT_TYPE_OFFSET                          0
+#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED                       0x0
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE                          0x1
+#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE                       0x2
+#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY                         0x3
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE                    0x4
+       /*  This field defines the GPIO mapped to tx_disable signal in SFP */
+#define NVM_CFG1_PORT_TX_DISABLE_MASK                           0x0000FF00
+#define NVM_CFG1_PORT_TX_DISABLE_OFFSET                         8
+#define NVM_CFG1_PORT_TX_DISABLE_NA                             0x0
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO0                          0x1
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO1                          0x2
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO2                          0x3
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO3                          0x4
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO4                          0x5
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO5                          0x6
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO6                          0x7
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO7                          0x8
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO8                          0x9
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO9                          0xA
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO10                         0xB
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO11                         0xC
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO12                         0xD
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO13                         0xE
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO14                         0xF
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO15                         0x10
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO16                         0x11
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO17                         0x12
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO18                         0x13
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO19                         0x14
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO20                         0x15
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO21                         0x16
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO22                         0x17
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO23                         0x18
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO24                         0x19
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO25                         0x1A
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO26                         0x1B
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO27                         0x1C
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO28                         0x1D
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO29                         0x1E
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO30                         0x1F
+#define NVM_CFG1_PORT_TX_DISABLE_GPIO31                         0x20
+       u32 reserved[131];      /* 0x4C */
+};
+
+/* Per-PF (per physical function) NVM configuration.
+ * The trailing hex comment on each member is its byte offset from the
+ * start of this struct; the *_MASK / *_OFFSET macros that follow a u32
+ * member describe bit-fields packed inside that member.  This is a
+ * fixed on-flash layout — members, offsets and widths must not change.
+ */
+struct nvm_cfg1_func {
+       struct nvm_cfg_mac_address mac_address; /* 0x0 */
+       u32 rsrv1;              /* 0x8 */
+#define NVM_CFG1_FUNC_RESERVED1_MASK                            0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED1_OFFSET                          0
+#define NVM_CFG1_FUNC_RESERVED2_MASK                            0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED2_OFFSET                          16
+       u32 rsrv2;              /* 0xC */
+#define NVM_CFG1_FUNC_RESERVED3_MASK                            0x0000FFFF
+#define NVM_CFG1_FUNC_RESERVED3_OFFSET                          0
+#define NVM_CFG1_FUNC_RESERVED4_MASK                            0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED4_OFFSET                          16
+       u32 device_id;          /* 0x10 */
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK                  0x0000FFFF
+#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET                0
+#define NVM_CFG1_FUNC_RESERVED77_MASK                           0xFFFF0000
+#define NVM_CFG1_FUNC_RESERVED77_OFFSET                         16
+       /* Boot protocol, VF device id, personality (protocol), bandwidth
+        * weight and pause-on-host-ring are packed into one u32.
+        */
+       u32 cmn_cfg;            /* 0x14 */
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK                0x00000007
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET              0
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE                 0x0
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT          0x3
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT           0x4
+#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE                0x7
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK                     0x0007FFF8
+#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET                   3
+#define NVM_CFG1_FUNC_PERSONALITY_MASK                          0x00780000
+#define NVM_CFG1_FUNC_PERSONALITY_OFFSET                        19
+#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET                      0x0
+#define NVM_CFG1_FUNC_PERSONALITY_ISCSI                         0x1
+#define NVM_CFG1_FUNC_PERSONALITY_FCOE                          0x2
+#define NVM_CFG1_FUNC_PERSONALITY_ROCE                          0x3
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK                     0x7F800000
+#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET                   23
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK                   0x80000000
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET                 31
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED               0x0
+#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED                0x1
+       u32 pci_cfg;            /* 0x18 */
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK                 0x0000007F
+#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET               0
+/* NOTE(review): "RESERVESD12" looks like a typo for "RESERVED12"; kept
+ * as-is because the name is part of the published register map.
+ */
+#define NVM_CFG1_FUNC_RESERVESD12_MASK                          0x00003F80
+#define NVM_CFG1_FUNC_RESERVESD12_OFFSET                        7
+#define NVM_CFG1_FUNC_BAR1_SIZE_MASK                            0x0003C000
+#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET                          14
+#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED                        0x0
+#define NVM_CFG1_FUNC_BAR1_SIZE_64K                             0x1
+#define NVM_CFG1_FUNC_BAR1_SIZE_128K                            0x2
+#define NVM_CFG1_FUNC_BAR1_SIZE_256K                            0x3
+#define NVM_CFG1_FUNC_BAR1_SIZE_512K                            0x4
+#define NVM_CFG1_FUNC_BAR1_SIZE_1M                              0x5
+#define NVM_CFG1_FUNC_BAR1_SIZE_2M                              0x6
+#define NVM_CFG1_FUNC_BAR1_SIZE_4M                              0x7
+#define NVM_CFG1_FUNC_BAR1_SIZE_8M                              0x8
+#define NVM_CFG1_FUNC_BAR1_SIZE_16M                             0x9
+#define NVM_CFG1_FUNC_BAR1_SIZE_32M                             0xA
+#define NVM_CFG1_FUNC_BAR1_SIZE_64M                             0xB
+#define NVM_CFG1_FUNC_BAR1_SIZE_128M                            0xC
+#define NVM_CFG1_FUNC_BAR1_SIZE_256M                            0xD
+#define NVM_CFG1_FUNC_BAR1_SIZE_512M                            0xE
+#define NVM_CFG1_FUNC_BAR1_SIZE_1G                              0xF
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK                        0x03FC0000
+#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET                      18
+       struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;      /* 0x1C */
+       struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;      /* 0x24 */
+       u32 preboot_generic_cfg;        /* 0x2C */
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_MASK                   0x0000FFFF
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET                 0
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK                         0x00010000
+#define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET                       16
+       u32 reserved[8];        /* 0x30 */
+};
+
+/* Complete nvm_cfg1 image: one global section followed by per-path,
+ * per-port and per-function arrays.  The hex comments are the byte
+ * offsets of each section within the image.
+ */
+struct nvm_cfg1 {
+       struct nvm_cfg1_glob glob;      /* 0x0 */
+       struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];   /* 0x140 */
+       struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];   /* 0x230 */
+       struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];   /* 0xB90 */
+};
+
+/******************************************
+ * nvm_cfg structs
+ ******************************************/
+/* Section ids indexing nvm_cfg.sections_offset[]; currently only the
+ * nvm_cfg1 section exists, so NVM_CFG_SECTION_MAX doubles as the array
+ * length.
+ */
+enum nvm_cfg_sections {
+       NVM_CFG_SECTION_NVM_CFG1,
+       NVM_CFG_SECTION_MAX
+};
+
+/* Top-level NVM configuration container: a section count, the offset
+ * of each section (indexed by enum nvm_cfg_sections), and the cfg1
+ * payload itself.
+ */
+struct nvm_cfg {
+       u32 num_sections;
+       u32 sections_offset[NVM_CFG_SECTION_MAX];
+       struct nvm_cfg1 cfg1;
+};
+
+#endif /* NVM_CFG_H */
diff --git a/drivers/net/qede/ecore/reg_addr.h 
b/drivers/net/qede/ecore/reg_addr.h
new file mode 100644
index 0000000..35081f6
--- /dev/null
+++ b/drivers/net/qede/ecore/reg_addr.h
@@ -0,0 +1,1112 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+/* Bit-field layout of the CDU_REG_CID_ADDR_PARAMS register:
+ *   [11:0]  CONTEXT_SIZE  (0xfff << 0)
+ *   [23:12] BLOCK_WASTE   (0xfff << 12)
+ *   [31:24] NCIB          (0xff  << 24)
+ */
+#define  CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+       0
+
+#define  CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE          ( \
+               0xfff << 0)
+
+#define  CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+       12
+
+#define  CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE           ( \
+               0xfff << 12)
+
+#define  CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+       24
+
+#define  CDU_REG_CID_ADDR_PARAMS_NCIB                  ( \
+               0xff << 24)
+
+#define  XSDM_REG_OPERATION_GEN \
+       0xf80408UL
+#define  NIG_REG_RX_BRB_OUT_EN \
+       0x500e18UL
+#define  NIG_REG_STORM_OUT_EN \
+       0x500e08UL
+#define  PSWRQ2_REG_L2P_VALIDATE_VFID \
+       0x240c50UL
+#define  PGLUE_B_REG_USE_CLIENTID_IN_TAG       \
+       0x2aae04UL
+#define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER       \
+       0x2aa16cUL
+#define  BAR0_MAP_REG_MSDM_RAM \
+       0x1d00000UL
+#define  BAR0_MAP_REG_USDM_RAM \
+       0x1d80000UL
+#define  BAR0_MAP_REG_PSDM_RAM \
+       0x1f00000UL
+#define  BAR0_MAP_REG_TSDM_RAM \
+       0x1c80000UL
+#define  NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+       0x5011f4UL
+#define  PRS_REG_SEARCH_TCP \
+       0x1f0400UL
+#define  PRS_REG_SEARCH_UDP \
+       0x1f0404UL
+#define  PRS_REG_SEARCH_FCOE \
+       0x1f0408UL
+#define  PRS_REG_SEARCH_ROCE \
+       0x1f040cUL
+#define  PRS_REG_SEARCH_OPENFLOW       \
+       0x1f0434UL
+#define  TM_REG_PF_ENABLE_CONN \
+       0x2c043cUL
+#define  TM_REG_PF_ENABLE_TASK \
+       0x2c0444UL
+#define  TM_REG_PF_SCAN_ACTIVE_CONN \
+       0x2c04fcUL
+#define  TM_REG_PF_SCAN_ACTIVE_TASK \
+       0x2c0500UL
+#define  IGU_REG_LEADING_EDGE_LATCH \
+       0x18082cUL
+#define  IGU_REG_TRAILING_EDGE_LATCH \
+       0x180830UL
+#define  QM_REG_USG_CNT_PF_TX \
+       0x2f2eacUL
+#define  QM_REG_USG_CNT_PF_OTHER       \
+       0x2f2eb0UL
+#define  DORQ_REG_PF_DB_ENABLE \
+       0x100508UL
+#define  QM_REG_PF_EN \
+       0x2f2ea4UL
+#define  TCFC_REG_STRONG_ENABLE_PF \
+       0x2d0708UL
+#define  CCFC_REG_STRONG_ENABLE_PF \
+       0x2e0708UL
+#define  PGLUE_B_REG_PGL_ADDR_88_F0 \
+       0x2aa404UL
+#define  PGLUE_B_REG_PGL_ADDR_8C_F0 \
+       0x2aa408UL
+#define  PGLUE_B_REG_PGL_ADDR_90_F0 \
+       0x2aa40cUL
+#define  PGLUE_B_REG_PGL_ADDR_94_F0 \
+       0x2aa410UL
+#define  PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+       0x2aa138UL
+#define  PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+       0x2aa174UL
+#define  MISC_REG_GEN_PURP_CR0 \
+       0x008c80UL
+#define  MCP_REG_SCRATCH       \
+       0xe20000UL
+#define  CNIG_REG_NW_PORT_MODE_BB_B0 \
+       0x218200UL
+#define  MISCS_REG_CHIP_NUM \
+       0x00976cUL
+#define  MISCS_REG_CHIP_REV \
+       0x009770UL
+#define  MISCS_REG_CMT_ENABLED_FOR_PAIR \
+       0x00971cUL
+#define  MISCS_REG_CHIP_TEST_REG       \
+       0x009778UL
+#define  MISCS_REG_CHIP_METAL \
+       0x009774UL
+#define  BRB_REG_HEADER_SIZE \
+       0x340804UL
+#define  BTB_REG_HEADER_SIZE \
+       0xdb0804UL
+#define  CAU_REG_LONG_TIMEOUT_THRESHOLD \
+       0x1c0708UL
+#define  CCFC_REG_ACTIVITY_COUNTER \
+       0x2e8800UL
+#define  CDU_REG_CID_ADDR_PARAMS       \
+       0x580900UL
+#define  DBG_REG_CLIENT_ENABLE \
+       0x010004UL
+#define  DMAE_REG_INIT \
+       0x00c000UL
+#define  DORQ_REG_IFEN \
+       0x100040UL
+#define  GRC_REG_TIMEOUT_EN \
+       0x050404UL
+#define  IGU_REG_BLOCK_CONFIGURATION \
+       0x180040UL
+#define  MCM_REG_INIT \
+       0x1200000UL
+#define  MCP2_REG_DBG_DWORD_ENABLE \
+       0x052404UL
+#define  MISC_REG_PORT_MODE \
+       0x008c00UL
+#define MISC_REG_BLOCK_256B_EN \
+       0x008c14UL
+#define MISCS_REG_RESET_PL_HV \
+       0x009060UL
+#define  MISCS_REG_CLK_100G_MODE       \
+       0x009070UL
+#define MISCS_REG_RESET_PL_HV_2 \
+       0x009150UL
+#define  MSDM_REG_ENABLE_IN1 \
+       0xfc0004UL
+#define  MSEM_REG_ENABLE_IN \
+       0x1800004UL
+#define  NIG_REG_CM_HDR \
+       0x500840UL
+#define  NCSI_REG_CONFIG       \
+       0x040200UL
+#define PSWRQ2_REG_RBC_DONE \
+       0x240000UL
+#define PSWRQ2_REG_CFG_DONE \
+       0x240004UL
+#define  PBF_REG_INIT \
+       0xd80000UL
+#define  PTU_REG_ATC_INIT_ARRAY \
+       0x560000UL
+#define  PCM_REG_INIT \
+       0x1100000UL
+#define  PGLUE_B_REG_ADMIN_PER_PF_REGION       \
+       0x2a9000UL
+#define  PRM_REG_DISABLE_PRM \
+       0x230000UL
+#define  PRS_REG_SOFT_RST \
+       0x1f0000UL
+#define  PSDM_REG_ENABLE_IN1 \
+       0xfa0004UL
+#define  PSEM_REG_ENABLE_IN \
+       0x1600004UL
+#define  PSWRQ_REG_DBG_SELECT \
+       0x280020UL
+#define  PSWRQ2_REG_CDUT_P_SIZE \
+       0x24000cUL
+#define  PSWHST_REG_DISCARD_INTERNAL_WRITES \
+       0x2a0040UL
+#define  PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+       0x29e050UL
+#define  PSWRD_REG_DBG_SELECT \
+       0x29c040UL
+#define  PSWRD2_REG_CONF11 \
+       0x29d064UL
+#define  PSWWR_REG_USDM_FULL_TH \
+       0x29a040UL
+#define  PSWWR2_REG_CDU_FULL_TH2       \
+       0x29b040UL
+#define  QM_REG_MAXPQSIZE_0 \
+       0x2f0434UL
+#define  RSS_REG_RSS_INIT_EN \
+       0x238804UL
+#define  RDIF_REG_STOP_ON_ERROR \
+       0x300040UL
+#define  SRC_REG_SOFT_RST \
+       0x23874cUL
+#define  TCFC_REG_ACTIVITY_COUNTER \
+       0x2d8800UL
+#define  TCM_REG_INIT \
+       0x1180000UL
+#define  TM_REG_PXP_READ_DATA_FIFO_INIT \
+       0x2c0014UL
+#define  TSDM_REG_ENABLE_IN1 \
+       0xfb0004UL
+#define  TSEM_REG_ENABLE_IN \
+       0x1700004UL
+#define  TDIF_REG_STOP_ON_ERROR \
+       0x310040UL
+#define  UCM_REG_INIT \
+       0x1280000UL
+#define  UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+       0x051004UL
+#define  USDM_REG_ENABLE_IN1 \
+       0xfd0004UL
+#define  USEM_REG_ENABLE_IN \
+       0x1900004UL
+#define  XCM_REG_INIT \
+       0x1000000UL
+#define  XSDM_REG_ENABLE_IN1 \
+       0xf80004UL
+#define  XSEM_REG_ENABLE_IN \
+       0x1400004UL
+#define  YCM_REG_INIT \
+       0x1080000UL
+#define  YSDM_REG_ENABLE_IN1 \
+       0xf90004UL
+#define  YSEM_REG_ENABLE_IN \
+       0x1500004UL
+#define  XYLD_REG_SCBD_STRICT_PRIO \
+       0x4c0000UL
+#define  TMLD_REG_SCBD_STRICT_PRIO \
+       0x4d0000UL
+#define  MULD_REG_SCBD_STRICT_PRIO \
+       0x4e0000UL
+#define  YULD_REG_SCBD_STRICT_PRIO \
+       0x4c8000UL
+#define  MISC_REG_SHARED_MEM_ADDR \
+       0x008c20UL
+#define  DMAE_REG_GO_C0 \
+       0x00c048UL
+#define  DMAE_REG_GO_C1 \
+       0x00c04cUL
+#define  DMAE_REG_GO_C2 \
+       0x00c050UL
+#define  DMAE_REG_GO_C3 \
+       0x00c054UL
+#define  DMAE_REG_GO_C4 \
+       0x00c058UL
+#define  DMAE_REG_GO_C5 \
+       0x00c05cUL
+#define  DMAE_REG_GO_C6 \
+       0x00c060UL
+#define  DMAE_REG_GO_C7 \
+       0x00c064UL
+#define  DMAE_REG_GO_C8 \
+       0x00c068UL
+#define  DMAE_REG_GO_C9 \
+       0x00c06cUL
+#define  DMAE_REG_GO_C10       \
+       0x00c070UL
+#define  DMAE_REG_GO_C11       \
+       0x00c074UL
+#define  DMAE_REG_GO_C12       \
+       0x00c078UL
+#define  DMAE_REG_GO_C13       \
+       0x00c07cUL
+#define  DMAE_REG_GO_C14       \
+       0x00c080UL
+#define  DMAE_REG_GO_C15       \
+       0x00c084UL
+#define  DMAE_REG_GO_C16       \
+       0x00c088UL
+#define  DMAE_REG_GO_C17       \
+       0x00c08cUL
+#define  DMAE_REG_GO_C18       \
+       0x00c090UL
+#define  DMAE_REG_GO_C19       \
+       0x00c094UL
+#define  DMAE_REG_GO_C20       \
+       0x00c098UL
+#define  DMAE_REG_GO_C21       \
+       0x00c09cUL
+#define  DMAE_REG_GO_C22       \
+       0x00c0a0UL
+#define  DMAE_REG_GO_C23       \
+       0x00c0a4UL
+#define  DMAE_REG_GO_C24       \
+       0x00c0a8UL
+#define  DMAE_REG_GO_C25       \
+       0x00c0acUL
+#define  DMAE_REG_GO_C26       \
+       0x00c0b0UL
+#define  DMAE_REG_GO_C27       \
+       0x00c0b4UL
+#define  DMAE_REG_GO_C28       \
+       0x00c0b8UL
+#define  DMAE_REG_GO_C29       \
+       0x00c0bcUL
+#define  DMAE_REG_GO_C30       \
+       0x00c0c0UL
+#define  DMAE_REG_GO_C31       \
+       0x00c0c4UL
+#define  DMAE_REG_CMD_MEM \
+       0x00c800UL
+#define  QM_REG_MAXPQSIZETXSEL_0       \
+       0x2f0440UL
+#define  QM_REG_SDMCMDREADY \
+       0x2f1e10UL
+#define  QM_REG_SDMCMDADDR \
+       0x2f1e04UL
+#define  QM_REG_SDMCMDDATALSB \
+       0x2f1e08UL
+#define  QM_REG_SDMCMDDATAMSB \
+       0x2f1e0cUL
+#define  QM_REG_SDMCMDGO       \
+       0x2f1e14UL
+#define  QM_REG_RLPFCRD \
+       0x2f4d80UL
+#define  QM_REG_RLPFINCVAL \
+       0x2f4c80UL
+#define  QM_REG_RLGLBLCRD \
+       0x2f4400UL
+#define  QM_REG_RLGLBLINCVAL \
+       0x2f3400UL
+#define  IGU_REG_ATTENTION_ENABLE \
+       0x18083cUL
+#define  IGU_REG_ATTN_MSG_ADDR_L       \
+       0x180820UL
+#define  IGU_REG_ATTN_MSG_ADDR_H       \
+       0x180824UL
+#define  MISC_REG_AEU_GENERAL_ATTN_0 \
+       0x008400UL
+#define  CAU_REG_SB_ADDR_MEMORY \
+       0x1c8000UL
+#define  CAU_REG_SB_VAR_MEMORY \
+       0x1c6000UL
+#define  CAU_REG_PI_MEMORY \
+       0x1d0000UL
+#define  IGU_REG_PF_CONFIGURATION \
+       0x180800UL
+#define  MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+       0x00849cUL
+#define  MISC_REG_AEU_MASK_ATTN_IGU \
+       0x008494UL
+#define  IGU_REG_CLEANUP_STATUS_0 \
+       0x180980UL
+#define  IGU_REG_CLEANUP_STATUS_1 \
+       0x180a00UL
+#define  IGU_REG_CLEANUP_STATUS_2 \
+       0x180a80UL
+#define  IGU_REG_CLEANUP_STATUS_3 \
+       0x180b00UL
+#define  IGU_REG_CLEANUP_STATUS_4 \
+       0x180b80UL
+#define  IGU_REG_COMMAND_REG_32LSB_DATA \
+       0x180840UL
+#define  IGU_REG_COMMAND_REG_CTRL \
+       0x180848UL
+#define  IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN     ( \
+               0x1 << 1)
+#define  IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN      ( \
+               0x1 << 0)
+#define  IGU_REG_MAPPING_MEMORY \
+       0x184000UL
+#define  MISCS_REG_GENERIC_POR_0       \
+       0x0096d4UL
+#define  MCP_REG_NVM_CFG4 \
+       0xe0642cUL
+#define  MCP_REG_NVM_CFG4_FLASH_SIZE   ( \
+               0x7 << 0)
+#define  MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+       0
+#define CCFC_REG_STRONG_ENABLE_VF 0x2e070cUL
+#define CNIG_REG_PMEG_IF_CMD_BB_B0 0x21821cUL
+#define CNIG_REG_PMEG_IF_ADDR_BB_B0 0x218224UL
+#define CNIG_REG_PMEG_IF_WRDATA_BB_B0 0x218228UL
+#define NWM_REG_MAC0 0x800400UL
+#define NWM_REG_MAC0_SIZE 256
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT 0
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT 1
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT 3
+#define ETH_MAC_REG_XIF_MODE 0x000080UL
+#define ETH_MAC_REG_XIF_MODE_XGMII_SHIFT 0
+#define ETH_MAC_REG_FRM_LENGTH 0x000014UL
+#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT 0
+#define ETH_MAC_REG_TX_IPG_LENGTH 0x000044UL
+#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT 0
+#define ETH_MAC_REG_RX_FIFO_SECTIONS 0x00001cUL
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT 0
+#define ETH_MAC_REG_TX_FIFO_SECTIONS 0x000020UL
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT 16
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT 0
+#define ETH_MAC_REG_COMMAND_CONFIG 0x000008UL
+#define MISC_REG_RESET_PL_PDA_VAUX 0x008090UL
+#define MISC_REG_XMAC_CORE_PORT_MODE 0x008c08UL
+#define MISC_REG_XMAC_PHY_PORT_MODE 0x008c04UL
+#define XMAC_REG_MODE 0x210008UL
+#define XMAC_REG_RX_MAX_SIZE 0x210040UL
+#define XMAC_REG_TX_CTRL_LO 0x210020UL
+#define XMAC_REG_CTRL 0x210000UL
+#define XMAC_REG_RX_CTRL 0x210030UL
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1<<12)
+#define MISC_REG_CLK_100G_MODE 0x008c10UL
+#define MISC_REG_OPTE_MODE 0x008c0cUL
+#define NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH 0x501b84UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define PRS_REG_SEARCH_TAG1 0x1f0444UL
+#define PRS_REG_SEARCH_TCP_FIRST_FRAG 0x1f0410UL
+#define MISCS_REG_PLL_MAIN_CTRL_4 0x00974cUL
+#define MISCS_REG_ECO_RESERVED 0x0097b4UL
+#define PGLUE_B_REG_PF_BAR0_SIZE 0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE 0x2aae64UL
+/* NIG LLH function-filter registers.  The original import repeated
+ * several of these #defines verbatim (identical redefinition is legal
+ * C11 6.10.3p2 but redundant); each register is now defined exactly
+ * once, with the same values.
+ */
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL
+#define XMAC_REG_CTRL_TX_EN (0x1<<0)
+#define XMAC_REG_CTRL_RX_EN (0x1<<1)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xff<<24)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff<<16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff<<16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xff<<24)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff<<0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff<<0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 0
+#define PSWRQ2_REG_ILT_MEMORY 0x260000UL
+#define PRS_REG_ROCE_DEST_QP_MAX_PF 0x1f0430UL
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
+#define NIG_REG_LB_ARB_CREDIT_WEIGHT_0 0x50160cUL
+#define NIG_REG_TX_ARB_CREDIT_WEIGHT_0 0x501f88UL
+#define NIG_REG_LB_ARB_CREDIT_WEIGHT_1 0x501610UL
+#define NIG_REG_TX_ARB_CREDIT_WEIGHT_1 0x501f8cUL
+#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 0x5015e4UL
+#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0 0x501f58UL
+#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 0x5015e8UL
+#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 0x501f5cUL
+#define NIG_REG_LB_ARB_CLIENT_IS_STRICT 0x5015c0UL
+#define NIG_REG_TX_ARB_CLIENT_IS_STRICT 0x501f34UL
+#define NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ 0x5015c4UL
+#define NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x501f38UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT 1
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL 0x501f1cUL
+#define NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD 0x501f20UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE 0x501f24UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE 0x501f28UL
+#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT 0
+#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT 1
+#define NIG_REG_LB_BRBRATELIMIT_CTRL 0x50150cUL
+#define NIG_REG_LB_BRBRATELIMIT_INC_PERIOD 0x501510UL
+#define NIG_REG_LB_BRBRATELIMIT_INC_VALUE 0x501514UL
+#define NIG_REG_LB_BRBRATELIMIT_MAX_VALUE 0x501518UL
+#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT 0
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT 1
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0 0x501520UL
+#define NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 0x501540UL
+#define NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 0x501560UL
+#define NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 0x501580UL
+#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT 0
+#define NIG_REG_PRIORITY_FOR_TC_0 0x501bccUL
+#define NIG_REG_RX_TC0_PRIORITY_MASK 0x501becUL
+#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 0x1f0540UL
+#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 0x1f0534UL
+#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 0x1f053cUL
+#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 0x1f0530UL
+#define PRS_REG_ETS_ARB_CLIENT_IS_STRICT 0x1f0514UL
+#define PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ 0x1f0518UL
+#define BRB_REG_TOTAL_MAC_SIZE 0x3408c0UL
+#define BRB_REG_SHARED_HR_AREA 0x340880UL
+#define BRB_REG_TC_GUARANTIED_0 0x340900UL
+#define BRB_REG_MAIN_TC_GUARANTIED_HYST_0 0x340978UL
+#define BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 0x340c60UL
+#define BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 0x340d38UL
+#define BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 0x340ab0UL
+#define BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 0x340b88UL
+#define BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 0x340c00UL
+#define BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 0x340cd8UL
+#define BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 0x340a50UL
+#define BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 0x340b28UL
+/* Tunnel-offload (VXLAN / GRE / NGE a.k.a. GENEVE) registers.  The
+ * original import re-#defined PRS_REG_ENCAPSULATION_TYPE_EN,
+ * PRS_REG_OUTPUT_FORMAT_4_0 and NIG_REG_ENC_TYPE_ENABLE once per
+ * tunnel group; the duplicates were identical (benign under C11
+ * 6.10.3p2) and are collapsed to a single definition each.
+ */
+/* Shared encapsulation controls */
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+/* VXLAN */
+#define PRS_REG_VXLAN_PORT 0x1f0738UL
+#define NIG_REG_VXLAN_PORT 0x50105cUL
+#define PBF_REG_VXLAN_PORT 0xd80518UL
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+/* GRE */
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+/* NGE (GENEVE) */
+#define PRS_REG_NGE_PORT 0x1f086cUL
+#define NIG_REG_NGE_PORT 0x508b38UL
+#define PBF_REG_NGE_PORT 0xd8051cUL
+#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
+#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
+#define NIG_REG_NGE_COMP_VER 0x508b30UL
+#define PBF_REG_NGE_COMP_VER 0xd80524UL
+#define PRS_REG_NGE_COMP_VER 0x1f0878UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
+#define NIG_REG_PKT_PRIORITY_TO_TC 0x501ba4UL
+#define PGLUE_B_REG_START_INIT_PTT_GTT 0x2a8008UL
+#define PGLUE_B_REG_INIT_DONE_PTT_GTT 0x2a800cUL
+#define MISC_REG_AEU_GENERAL_ATTN_35 0x00848cUL
+#define MCP_REG_CPU_STATE 0xe05004UL
+#define MCP_REG_CPU_MODE 0xe05000UL
+#define MCP_REG_CPU_MODE_SOFT_HALT (0x1<<10)
+#define MCP_REG_CPU_EVENT_MASK 0xe05008UL
+#define PSWHST_REG_VF_DISABLED_ERROR_VALID 0x2a0060UL
+#define PSWHST_REG_VF_DISABLED_ERROR_ADDRESS 0x2a0064UL
+#define PSWHST_REG_VF_DISABLED_ERROR_DATA 0x2a005cUL
+#define PSWHST_REG_INCORRECT_ACCESS_VALID 0x2a0070UL
+#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS 0x2a0074UL
+#define PSWHST_REG_INCORRECT_ACCESS_DATA 0x2a0068UL
+#define PSWHST_REG_INCORRECT_ACCESS_LENGTH 0x2a006cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID 0x050054UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 0x05004cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 0x050050UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x2aa150UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x2aa144UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x2aa148UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x2aa14cUL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x2aa160UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x2aa154UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x2aa158UL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x2aa15cUL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL 0x2aa164UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS 0x2aa54cUL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 0x2aa544UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 0x2aa548UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 0x2aae80UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 0x2aae74UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 0x2aae78UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS 0x2aae7cUL
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x2aa3bcUL
+#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1<<10)
+#define DORQ_REG_DB_DROP_REASON 0x100a2cUL
+#define DORQ_REG_DB_DROP_DETAILS 0x100a24UL
+#define TM_REG_INT_STS_1 0x2c0190UL
+#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1<<6)
+#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1<<5)
+#define TM_REG_INT_MASK_1 0x2c0194UL
+#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1<<5)
+#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1<<6)
+#define MISC_REG_AEU_AFTER_INVERT_1_IGU 0x0087b4UL
+#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 0x0084a8UL
+#define MISC_REG_AEU_ENABLE3_IGU_OUT_0 0x0084a4UL
+#define YSEM_REG_FAST_MEMORY 0x1540000UL
+#define NIG_REG_FLOWCTRL_MODE 0x501ba0UL
+#define TSEM_REG_FAST_MEMORY 0x1740000UL
+#define TSEM_REG_DBG_FRAME_MODE 0x1701408UL
+#define TSEM_REG_SLOW_DBG_ACTIVE 0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE 0x1701404UL
+#define TSEM_REG_DBG_MODE1_CFG 0x1701420UL
+#define TSEM_REG_SYNC_DBG_EMPTY 0x1701160UL
+#define TSEM_REG_SLOW_DBG_EMPTY 0x1701140UL
+#define TCM_REG_CTX_RBC_ACCS 0x11814c0UL
+#define TCM_REG_AGG_CON_CTX 0x11814c4UL
+#define TCM_REG_SM_CON_CTX 0x11814ccUL
+#define TCM_REG_AGG_TASK_CTX 0x11814c8UL
+#define TCM_REG_SM_TASK_CTX 0x11814d0UL
+#define MSEM_REG_FAST_MEMORY 0x1840000UL
+#define MSEM_REG_DBG_FRAME_MODE 0x1801408UL
+#define MSEM_REG_SLOW_DBG_ACTIVE 0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE 0x1801404UL
+#define MSEM_REG_DBG_MODE1_CFG 0x1801420UL
+#define MSEM_REG_SYNC_DBG_EMPTY 0x1801160UL
+#define MSEM_REG_SLOW_DBG_EMPTY 0x1801140UL
+#define MCM_REG_CTX_RBC_ACCS 0x1201800UL
+#define MCM_REG_AGG_CON_CTX 0x1201804UL
+#define MCM_REG_SM_CON_CTX 0x120180cUL
+#define MCM_REG_AGG_TASK_CTX 0x1201808UL
+#define MCM_REG_SM_TASK_CTX 0x1201810UL
+#define USEM_REG_FAST_MEMORY 0x1940000UL
+#define USEM_REG_DBG_FRAME_MODE 0x1901408UL
+#define USEM_REG_SLOW_DBG_ACTIVE 0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE 0x1901404UL
+#define USEM_REG_DBG_MODE1_CFG 0x1901420UL
+#define USEM_REG_SYNC_DBG_EMPTY 0x1901160UL
+#define USEM_REG_SLOW_DBG_EMPTY 0x1901140UL
+#define UCM_REG_CTX_RBC_ACCS 0x1281700UL
+#define UCM_REG_AGG_CON_CTX 0x1281704UL
+#define UCM_REG_SM_CON_CTX 0x128170cUL
+#define UCM_REG_AGG_TASK_CTX 0x1281708UL
+#define UCM_REG_SM_TASK_CTX 0x1281710UL
+#define XSEM_REG_FAST_MEMORY 0x1440000UL
+#define XSEM_REG_DBG_FRAME_MODE 0x1401408UL
+#define XSEM_REG_SLOW_DBG_ACTIVE 0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE 0x1401404UL
+#define XSEM_REG_DBG_MODE1_CFG 0x1401420UL
+#define XSEM_REG_SYNC_DBG_EMPTY 0x1401160UL
+#define XSEM_REG_SLOW_DBG_EMPTY 0x1401140UL
+#define XCM_REG_CTX_RBC_ACCS 0x1001800UL
+#define XCM_REG_AGG_CON_CTX 0x1001804UL
+#define XCM_REG_SM_CON_CTX 0x1001808UL
+#define YSEM_REG_DBG_FRAME_MODE 0x1501408UL
+#define YSEM_REG_SLOW_DBG_ACTIVE 0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE 0x1501404UL
+#define YSEM_REG_DBG_MODE1_CFG 0x1501420UL
+#define YSEM_REG_SYNC_DBG_EMPTY 0x1501160UL
+#define YCM_REG_CTX_RBC_ACCS 0x1081800UL
+#define YCM_REG_AGG_CON_CTX 0x1081804UL
+#define YCM_REG_SM_CON_CTX 0x108180cUL
+#define YCM_REG_AGG_TASK_CTX 0x1081808UL
+#define YCM_REG_SM_TASK_CTX 0x1081810UL
+#define PSEM_REG_FAST_MEMORY 0x1640000UL
+#define PSEM_REG_DBG_FRAME_MODE 0x1601408UL
+#define PSEM_REG_SLOW_DBG_ACTIVE 0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE 0x1601404UL
+#define PSEM_REG_DBG_MODE1_CFG 0x1601420UL
+#define PSEM_REG_SYNC_DBG_EMPTY 0x1601160UL
+#define PSEM_REG_SLOW_DBG_EMPTY 0x1601140UL
+#define PCM_REG_CTX_RBC_ACCS 0x1101440UL
+#define PCM_REG_SM_CON_CTX 0x1101444UL
+#define GRC_REG_DBG_SELECT 0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE 0x0500a8UL
+#define GRC_REG_DBG_SHIFT 0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID 0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME 0x0500b4UL
+#define PGLUE_B_REG_DBG_SELECT 0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE 0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT 0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID 0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME 0x2a8410UL
+#define CNIG_REG_DBG_SELECT_K2 0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2 0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2 0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2 0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2 0x218264UL
+#define NCSI_REG_DBG_SELECT 0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE 0x040478UL
+#define NCSI_REG_DBG_SHIFT 0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID 0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME 0x040484UL
+#define BMB_REG_DBG_SELECT 0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE 0x540a80UL
+#define BMB_REG_DBG_SHIFT 0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID 0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME 0x540a8cUL
+#define PCIE_REG_DBG_SELECT 0x0547e8UL
+#define PHY_PCIE_REG_DBG_SELECT 0x629fe8UL
+/* Debug-bus access registers.  Each HW block exposes the same small register
+ * set -- SELECT / DWORD_ENABLE / SHIFT / FORCE_VALID / FORCE_FRAME -- at
+ * consecutive dword offsets inside that block's GRC address range.  The
+ * values look auto-generated from the HW register database; do not edit by
+ * hand (NOTE(review): confirm generation source before modifying).
+ */
+#define PCIE_REG_DBG_DWORD_ENABLE 0x0547ecUL
+#define PHY_PCIE_REG_DBG_DWORD_ENABLE 0x629fecUL
+#define PCIE_REG_DBG_SHIFT 0x0547f0UL
+#define PHY_PCIE_REG_DBG_SHIFT 0x629ff0UL
+#define PCIE_REG_DBG_FORCE_VALID 0x0547f4UL
+#define PHY_PCIE_REG_DBG_FORCE_VALID 0x629ff4UL
+#define PCIE_REG_DBG_FORCE_FRAME 0x0547f8UL
+#define PHY_PCIE_REG_DBG_FORCE_FRAME 0x629ff8UL
+#define MCP2_REG_DBG_SELECT 0x052400UL
+#define MCP2_REG_DBG_SHIFT 0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID 0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME 0x052444UL
+#define PSWHST_REG_DBG_SELECT 0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE 0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT 0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID 0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME 0x2a0110UL
+#define PSWHST2_REG_DBG_SELECT 0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE 0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT 0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID 0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME 0x29e068UL
+#define PSWRD_REG_DBG_DWORD_ENABLE 0x29c044UL
+#define PSWRD_REG_DBG_SHIFT 0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID 0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME 0x29c050UL
+#define PSWRD2_REG_DBG_SELECT 0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE 0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT 0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID 0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME 0x29d410UL
+#define PSWWR_REG_DBG_SELECT 0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE 0x29a088UL
+#define PSWWR_REG_DBG_SHIFT 0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID 0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME 0x29a094UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE 0x280024UL
+#define PSWRQ_REG_DBG_SHIFT 0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID 0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME 0x280030UL
+#define PSWRQ2_REG_DBG_SELECT 0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE 0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT 0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID 0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME 0x240110UL
+#define PGLCS_REG_DBG_SELECT 0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE 0x001d18UL
+#define PGLCS_REG_DBG_SHIFT 0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID 0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME 0x001d24UL
+#define PTU_REG_DBG_SELECT 0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE 0x560104UL
+#define PTU_REG_DBG_SHIFT 0x560108UL
+#define PTU_REG_DBG_FORCE_VALID 0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME 0x560110UL
+#define DMAE_REG_DBG_SELECT 0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE 0x00c514UL
+#define DMAE_REG_DBG_SHIFT 0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID 0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME 0x00c520UL
+/* Debug bus of the T/M/U/X/Y/P CM blocks */
+#define TCM_REG_DBG_SELECT 0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE 0x1180044UL
+#define TCM_REG_DBG_SHIFT 0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID 0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME 0x1180050UL
+#define MCM_REG_DBG_SELECT 0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE 0x1200044UL
+#define MCM_REG_DBG_SHIFT 0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID 0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME 0x1200050UL
+#define UCM_REG_DBG_SELECT 0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE 0x1280054UL
+#define UCM_REG_DBG_SHIFT 0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID 0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME 0x1280060UL
+#define XCM_REG_DBG_SELECT 0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE 0x1000044UL
+#define XCM_REG_DBG_SHIFT 0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID 0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME 0x1000050UL
+#define YCM_REG_DBG_SELECT 0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE 0x1080044UL
+#define YCM_REG_DBG_SHIFT 0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID 0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME 0x1080050UL
+#define PCM_REG_DBG_SELECT 0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE 0x1100044UL
+#define PCM_REG_DBG_SHIFT 0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID 0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME 0x1100050UL
+#define QM_REG_DBG_SELECT 0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE 0x2f2e78UL
+#define QM_REG_DBG_SHIFT 0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID 0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME 0x2f2e84UL
+#define TM_REG_DBG_SELECT 0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE 0x2c07acUL
+#define TM_REG_DBG_SHIFT 0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID 0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME 0x2c07b8UL
+#define DORQ_REG_DBG_SELECT 0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE 0x100ad4UL
+#define DORQ_REG_DBG_SHIFT 0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID 0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME 0x100ae0UL
+#define BRB_REG_DBG_SELECT 0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE 0x340ed4UL
+#define BRB_REG_DBG_SHIFT 0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID 0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME 0x340ee0UL
+#define SRC_REG_DBG_SELECT 0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE 0x238704UL
+#define SRC_REG_DBG_SHIFT 0x238708UL
+#define SRC_REG_DBG_FORCE_VALID 0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME 0x238710UL
+#define PRS_REG_DBG_SELECT 0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE 0x1f0b70UL
+#define PRS_REG_DBG_SHIFT 0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID 0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME 0x1f0ba4UL
+/* Debug bus of the T/M/U/X/Y/P SDM blocks */
+#define TSDM_REG_DBG_SELECT 0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE 0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT 0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID 0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME 0xfb0e38UL
+#define MSDM_REG_DBG_SELECT 0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE 0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT 0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID 0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME 0xfc0e38UL
+#define USDM_REG_DBG_SELECT 0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE 0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT 0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID 0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME 0xfd0e38UL
+#define XSDM_REG_DBG_SELECT 0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE 0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT 0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID 0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME 0xf80e38UL
+#define YSDM_REG_DBG_SELECT 0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE 0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT 0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID 0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME 0xf90e38UL
+#define PSDM_REG_DBG_SELECT 0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE 0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT 0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID 0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME 0xfa0e38UL
+/* Debug bus of the T/M/U/X/Y/P SEM blocks */
+#define TSEM_REG_DBG_SELECT 0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE 0x170152cUL
+#define TSEM_REG_DBG_SHIFT 0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID 0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME 0x1701538UL
+#define MSEM_REG_DBG_SELECT 0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE 0x180152cUL
+#define MSEM_REG_DBG_SHIFT 0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID 0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME 0x1801538UL
+#define USEM_REG_DBG_SELECT 0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE 0x190152cUL
+#define USEM_REG_DBG_SHIFT 0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID 0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME 0x1901538UL
+#define XSEM_REG_DBG_SELECT 0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE 0x140152cUL
+#define XSEM_REG_DBG_SHIFT 0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID 0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME 0x1401538UL
+#define YSEM_REG_DBG_SELECT 0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE 0x150152cUL
+#define YSEM_REG_DBG_SHIFT 0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID 0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME 0x1501538UL
+#define PSEM_REG_DBG_SELECT 0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE 0x160152cUL
+#define PSEM_REG_DBG_SHIFT 0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID 0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME 0x1601538UL
+#define RSS_REG_DBG_SELECT 0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE 0x238c50UL
+#define RSS_REG_DBG_SHIFT 0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID 0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME 0x238c5cUL
+#define TMLD_REG_DBG_SELECT 0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE 0x4d1604UL
+#define TMLD_REG_DBG_SHIFT 0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID 0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME 0x4d1610UL
+#define MULD_REG_DBG_SELECT 0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE 0x4e1604UL
+#define MULD_REG_DBG_SHIFT 0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID 0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME 0x4e1610UL
+#define YULD_REG_DBG_SELECT 0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE 0x4c9604UL
+#define YULD_REG_DBG_SHIFT 0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID 0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME 0x4c9610UL
+#define XYLD_REG_DBG_SELECT 0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE 0x4c1604UL
+#define XYLD_REG_DBG_SHIFT 0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID 0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME 0x4c1610UL
+#define PRM_REG_DBG_SELECT 0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE 0x2306acUL
+#define PRM_REG_DBG_SHIFT 0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID 0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME 0x2306b8UL
+#define PBF_PB1_REG_DBG_SELECT 0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE 0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT 0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID 0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME 0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT 0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE 0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT 0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID 0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME 0xda4738UL
+#define RPB_REG_DBG_SELECT 0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE 0x23c72cUL
+#define RPB_REG_DBG_SHIFT 0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID 0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME 0x23c738UL
+#define BTB_REG_DBG_SELECT 0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE 0xdb08ccUL
+#define BTB_REG_DBG_SHIFT 0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID 0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME 0xdb08d8UL
+#define PBF_REG_DBG_SELECT 0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE 0xd80064UL
+#define PBF_REG_DBG_SHIFT 0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID 0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME 0xd80070UL
+#define RDIF_REG_DBG_SELECT 0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE 0x300504UL
+#define RDIF_REG_DBG_SHIFT 0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID 0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME 0x300510UL
+#define TDIF_REG_DBG_SELECT 0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE 0x310504UL
+#define TDIF_REG_DBG_SHIFT 0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID 0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME 0x310510UL
+#define CDU_REG_DBG_SELECT 0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE 0x580708UL
+#define CDU_REG_DBG_SHIFT 0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID 0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME 0x580714UL
+#define CCFC_REG_DBG_SELECT 0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE 0x2e0504UL
+#define CCFC_REG_DBG_SHIFT 0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID 0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME 0x2e0510UL
+#define TCFC_REG_DBG_SELECT 0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE 0x2d0504UL
+#define TCFC_REG_DBG_SHIFT 0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID 0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME 0x2d0510UL
+#define IGU_REG_DBG_SELECT 0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE 0x18157cUL
+#define IGU_REG_DBG_SHIFT 0x181580UL
+#define IGU_REG_DBG_FORCE_VALID 0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME 0x181588UL
+#define CAU_REG_DBG_SELECT 0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE 0x1c0eacUL
+#define CAU_REG_DBG_SHIFT 0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID 0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME 0x1c0eb8UL
+#define UMAC_REG_DBG_SELECT 0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE 0x051098UL
+#define UMAC_REG_DBG_SHIFT 0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID 0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME 0x0510a4UL
+#define NIG_REG_DBG_SELECT 0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE 0x502144UL
+#define NIG_REG_DBG_SHIFT 0x502148UL
+#define NIG_REG_DBG_FORCE_VALID 0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME 0x502150UL
+#define WOL_REG_DBG_SELECT 0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE 0x600144UL
+#define WOL_REG_DBG_SHIFT 0x600148UL
+#define WOL_REG_DBG_FORCE_VALID 0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME 0x600150UL
+#define BMBN_REG_DBG_SELECT 0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE 0x610144UL
+#define BMBN_REG_DBG_SHIFT 0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID 0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME 0x610150UL
+#define NWM_REG_DBG_SELECT 0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE 0x8000f0UL
+#define NWM_REG_DBG_SHIFT 0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID 0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME 0x8000fcUL
+/* Big-RAM access, reset, DBG block, SEM_FAST, GRC and VF-related register
+ * offsets.  Identical duplicate #defines that appeared in the original list
+ * (SEM_FAST_REG_DEBUG_ACTIVE, SEM_FAST_REG_DBG_MODE6_SRC_DISABLE,
+ * SEM_FAST_REG_RECORD_FILTER_ENABLE, SEM_FAST_REG_VFC_*, DBG_REG_OUTPUT_ENABLE,
+ * DBG_REG_DEBUG_TARGET, DBG_REG_TARGET_PACKET_SIZE, DBG_REG_TIMESTAMP_VALID_EN,
+ * DBG_REG_TRIGGER_ENABLE, DBG_REG_FILTER_ENABLE,
+ * GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) have been removed -- each macro is now
+ * defined exactly once with its original value, so no user of these names is
+ * affected.
+ */
+#define BRB_REG_BIG_RAM_ADDRESS 0x340800UL
+#define BRB_REG_BIG_RAM_DATA 0x341500UL
+#define BTB_REG_BIG_RAM_ADDRESS 0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA 0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS 0x540800UL
+#define BMB_REG_BIG_RAM_DATA 0x540f00UL
+#define MISCS_REG_RESET_PL_UA 0x009050UL
+#define MISC_REG_RESET_PL_UA 0x008050UL
+#define MISC_REG_RESET_PL_HV 0x008060UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 0x008080UL
+#define SEM_FAST_REG_INT_RAM 0x020000UL
+#define DBG_REG_DBG_BLOCK_ON 0x010454UL
+#define DBG_REG_FRAMING_MODE 0x010058UL
+/* SEM_FAST debug recording / filtering registers */
+#define SEM_FAST_REG_DEBUG_MODE 0x000744UL
+#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL
+#define SEM_FAST_REG_FILTER_CID 0x000754UL
+#define SEM_FAST_REG_EVENT_ID_RANGE_STRT 0x000760UL
+#define SEM_FAST_REG_EVENT_ID_RANGE_END 0x000764UL
+#define SEM_FAST_REG_FILTER_EVENT_ID 0x000758UL
+#define SEM_FAST_REG_EVENT_ID_MASK 0x00075cUL
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL
+/* DBG block filter / trigger constraint registers (offset _0 = first entry) */
+#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL
+#define DBG_REG_FILTER_ENABLE 0x0109d0UL
+#define DBG_REG_TRIGGER_ENABLE 0x01054cUL
+#define DBG_REG_FILTER_CNSTR_OPRTN_0 0x010a28UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0 0x01071cUL
+#define DBG_REG_FILTER_CNSTR_DATA_0 0x0109d8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0 0x01059cUL
+#define DBG_REG_FILTER_CNSTR_DATA_MASK_0 0x0109f8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0 0x01065cUL
+#define DBG_REG_FILTER_CNSTR_FRAME_0 0x0109e8UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0 0x0105fcUL
+#define DBG_REG_FILTER_CNSTR_FRAME_MASK_0 0x010a08UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0 0x0106bcUL
+#define DBG_REG_FILTER_CNSTR_OFFSET_0 0x010a18UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0 0x0107dcUL
+#define DBG_REG_FILTER_CNSTR_RANGE_0 0x010a38UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0 0x01077cUL
+#define DBG_REG_FILTER_CNSTR_CYCLIC_0 0x010a68UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0 0x0108fcUL
+#define DBG_REG_FILTER_CNSTR_MUST_0 0x010a48UL
+#define DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0 0x01083cUL
+#define DBG_REG_INTR_BUFFER 0x014000UL
+#define DBG_REG_INTR_BUFFER_WR_PTR 0x010404UL
+#define DBG_REG_WRAP_ON_INT_BUFFER 0x010418UL
+#define DBG_REG_INTR_BUFFER_RD_PTR 0x010400UL
+#define DBG_REG_EXT_BUFFER_WR_PTR 0x010410UL
+#define DBG_REG_WRAP_ON_EXT_BUFFER 0x01041cUL
+#define SEM_FAST_REG_STALL_0 0x000488UL
+#define SEM_FAST_REG_STALLED 0x000494UL
+#define SEM_FAST_REG_STORM_REG_FILE 0x008000UL
+#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL
+#define RSS_REG_RSS_RAM_ADDR 0x238c30UL
+#define RSS_REG_RSS_RAM_DATA 0x238c20UL
+#define MISCS_REG_BLOCK_256B_EN 0x009074UL
+#define MCP_REG_CPU_REG_FILE 0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE 32
+#define DBG_REG_CALENDAR_OUT_DATA 0x010480UL
+#define DBG_REG_FULL_MODE 0x010060UL
+#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB 0x010430UL
+#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB 0x010434UL
+#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL
+#define DBG_REG_PCI_EXT_BUFFER_SIZE 0x010438UL
+#define DBG_REG_PCI_FUNC_NUM 0x010a98UL
+#define DBG_REG_PCI_LOGIC_ADDR 0x010460UL
+#define DBG_REG_PCI_REQ_CREDIT 0x010440UL
+#define DBG_REG_DEBUG_TARGET 0x01005cUL
+#define DBG_REG_OUTPUT_ENABLE 0x01000cUL
+#define DBG_REG_OTHER_ENGINE_MODE 0x010010UL
+#define NIG_REG_DEBUG_PORT 0x5020d0UL
+#define DBG_REG_ETHERNET_HDR_WIDTH 0x010b38UL
+#define DBG_REG_ETHERNET_HDR_7 0x010b34UL
+#define DBG_REG_ETHERNET_HDR_6 0x010b30UL
+#define DBG_REG_ETHERNET_HDR_5 0x010b2cUL
+#define DBG_REG_ETHERNET_HDR_4 0x010b28UL
+#define DBG_REG_NIG_DATA_LIMIT_SIZE 0x01043cUL
+#define DBG_REG_TIMESTAMP_FRAME_EN 0x010b54UL
+#define DBG_REG_TIMESTAMP_TICK 0x010b50UL
+#define DBG_REG_FILTER_ID_NUM 0x0109d4UL
+#define DBG_REG_FILTER_MSG_LENGTH_ENABLE 0x010a78UL
+#define DBG_REG_FILTER_MSG_LENGTH 0x010a7cUL
+#define DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS 0x010a90UL
+#define DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES 0x010a94UL
+#define DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE 0x010a88UL
+#define DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE 0x010a8cUL
+#define DBG_REG_TRIGGER_STATE_ID_0 0x010554UL
+#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 0x01095cUL
+#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 0x010968UL
+#define DBG_REG_TRIGGER_STATE_SET_COUNT_0 0x010584UL
+#define DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 0x01056cUL
+#define DBG_REG_NO_GRANT_ON_FULL 0x010458UL
+#define DBG_REG_STORM_ID_NUM 0x010b14UL
+#define DBG_REG_CALENDAR_SLOT0 0x010014UL
+#define DBG_REG_HW_ID_NUM 0x010b10UL
+#define DBG_REG_TIMESTAMP 0x010b4cUL
+#define DBG_REG_CPU_TIMEOUT 0x010450UL
+#define DBG_REG_TRIGGER_STATUS_CUR_STATE 0x010b60UL
+#define GRC_REG_TRACE_FIFO_VALID_DATA 0x050064UL
+#define GRC_REG_TRACE_FIFO 0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x181530UL
+#define IGU_REG_ERROR_HANDLING_MEMORY 0x181520UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW 0x050500UL
+/* Per-storm SEM VF error registers (same offset within each SEM block) */
+#define TSEM_REG_VF_ERROR 0x1700408UL
+#define USEM_REG_VF_ERROR 0x1900408UL
+#define MSEM_REG_VF_ERROR 0x1800408UL
+#define XSEM_REG_VF_ERROR 0x1400408UL
+#define YSEM_REG_VF_ERROR 0x1500408UL
+#define PSEM_REG_VF_ERROR 0x1600408UL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x2aa118UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT 0x180408UL
+#define IGU_REG_VF_CONFIGURATION 0x180804UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE 0x2a0800UL
+#define DORQ_REG_VF_USAGE_CNT 0x1009c4UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 0xd806ccUL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 0xd806c8UL
+#define PRS_REG_MSG_CT_MAIN_0 0x1f0a24UL
+#define PRS_REG_MSG_CT_LB_0 0x1f0a28UL
+#define BRB_REG_PER_TC_COUNTERS 0x341a00UL
+
+/* added */
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define MISCS_REG_FUNCTION_HIDE 0x0096f0UL
+#define PCIE_REG_PRTY_MASK 0x0547b4UL
+#define PGLUE_B_REG_VF_BAR0_SIZE 0x2aaeb4UL
+#define BAR0_MAP_REG_YSDM_RAM 0x1e80000UL
+/* Sizes are in bytes (NOTE(review): units inferred from names -- confirm) */
+#define SEM_FAST_REG_INT_RAM_SIZE 20480
+#define MCP_REG_SCRATCH_SIZE 57344
+
+/* Field shift values within CDU segment-parameter registers */
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT 24
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT 24
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT 16
+#define DORQ_REG_DB_DROP_DETAILS_ADDRESS 0x100a1cUL
-- 
1.8.3.1

Reply via email to