Add code for nic business, including qps structures, qps configuration,
wqs configuration for qps, nic business configuration functionalities.

Signed-off-by: Ziyang Xuan <xuanziya...@huawei.com>
---
 drivers/net/hinic/base/hinic_pmd_nic.h    |   85 ++
 drivers/net/hinic/base/hinic_pmd_niccfg.c | 1408 +++++++++++++++++++++++++++++
 drivers/net/hinic/base/hinic_pmd_niccfg.h |  333 +++++++
 drivers/net/hinic/base/hinic_pmd_nicio.c  |  920 +++++++++++++++++++
 drivers/net/hinic/base/hinic_pmd_nicio.h  |   53 ++
 drivers/net/hinic/base/hinic_pmd_qp.c     |   26 +
 drivers/net/hinic/base/hinic_pmd_qp.h     |   76 ++
 drivers/net/hinic/base/hinic_pmd_wq.c     |  164 ++++
 drivers/net/hinic/base/hinic_pmd_wq.h     |   52 ++
 9 files changed, 3117 insertions(+)
 create mode 100644 drivers/net/hinic/base/hinic_pmd_nic.h
 create mode 100644 drivers/net/hinic/base/hinic_pmd_niccfg.c
 create mode 100644 drivers/net/hinic/base/hinic_pmd_niccfg.h
 create mode 100644 drivers/net/hinic/base/hinic_pmd_nicio.c
 create mode 100644 drivers/net/hinic/base/hinic_pmd_nicio.h
 create mode 100644 drivers/net/hinic/base/hinic_pmd_qp.c
 create mode 100644 drivers/net/hinic/base/hinic_pmd_qp.h
 create mode 100644 drivers/net/hinic/base/hinic_pmd_wq.c
 create mode 100644 drivers/net/hinic/base/hinic_pmd_wq.h

diff --git a/drivers/net/hinic/base/hinic_pmd_nic.h b/drivers/net/hinic/base/hinic_pmd_nic.h
new file mode 100644
index 0000000..7bea294
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_nic.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_NIC_H_
+#define _HINIC_PMD_NIC_H_
+
+#define HINIC_FLUSH_QUEUE_TIMEOUT 3000
+
+struct hinic_hwdev;
+struct hinic_wq;
+
+struct hinic_sq {
+       struct hinic_wq         *wq;
+       volatile u16            *cons_idx_addr;
+       void __iomem            *db_addr;
+
+       u16     q_id;
+       u16     owner;
+       u16     sq_depth;
+};
+
+struct hinic_rq {
+       struct hinic_wq         *wq;
+       volatile u16            *pi_virt_addr;
+       dma_addr_t              pi_dma_addr;
+
+       u16                     irq_id;
+       u16                     msix_entry_idx;
+       u16                     q_id;
+       u16                     rq_depth;
+};
+
+struct hinic_qp {
+       struct hinic_sq         sq;
+       struct hinic_rq         rq;
+};
+
+struct vf_data_storage {
+       u8 vf_mac_addr[ETH_ALEN];
+       bool registered;
+       bool pf_set_mac;
+       u16 pf_vlan;
+       u8 pf_qos;
+
+       bool link_forced;
+       bool link_up;           /* only valid if VF link is forced */
+};
+
+struct hinic_nic_io {
+       struct hinic_hwdev      *hwdev;
+
+       u16                     global_qpn;
+       u8                      link_status;
+
+       struct hinic_wq         *sq_wq;
+       struct hinic_wq         *rq_wq;
+
+       u16                     max_qps;
+       u16                     num_qps;
+
+       u16                     num_sqs;
+       u16                     num_rqs;
+
+       u16                     sq_depth;
+       u16                     rq_depth;
+
+       u16                     rq_buf_size;
+       u16                     vhd_mode;
+
+       struct hinic_qp         *qps;
+       /* sq ci mem base addr of the function */
+       void                    *ci_vaddr_base;
+       dma_addr_t              ci_dma_base;
+
+       struct hinic_event      event;
+       void                    *event_handle;
+
+       u16                     max_vfs;
+       u16                     num_vfs;
+       u8                      vf_link_mode;
+       struct vf_data_storage  *vf_infos;
+};
+
+#endif /* _HINIC_PMD_NIC_H_ */
diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.c b/drivers/net/hinic/base/hinic_pmd_niccfg.c
new file mode 100644
index 0000000..6da2172
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_niccfg.c
@@ -0,0 +1,1408 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_pmd_dpdev.h"
+
+#define l2nic_msg_to_mgmt_sync(hwdev, cmd, buf_in,             \
+                              in_size, buf_out, out_size)      \
+       hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC, cmd,     \
+                       buf_in, in_size,                        \
+                       buf_out, out_size, 0)
+
+int hinic_init_function_table(void *hwdev, u16 rx_buf_sz)
+{
+       struct hinic_function_table function_table;
+       u16 out_size = sizeof(function_table);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&function_table, 0, sizeof(function_table));
+       function_table.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       function_table.func_id = hinic_global_func_id(hwdev);
+       function_table.mtu = 0x3FFF;    /* default, max mtu */
+       function_table.rx_wqe_buf_size = rx_buf_sz;
+
+       err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+                                    HINIC_PORT_CMD_INIT_FUNC,
+                                    &function_table, sizeof(function_table),
+                                    &function_table, &out_size, 0);
+       if (err || function_table.mgmt_msg_head.status || !out_size) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to init func table, ret = %d",
+                       function_table.mgmt_msg_head.status);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_get_base_qpn - get global number of queue
+ * @hwdev: the hardware interface of a nic device
+ * @global_qpn: output parameter, the global base queue pair number of the function
+ * @return
+ *   0 on success,
+ *   negative error value otherwise.
+ **/
+int hinic_get_base_qpn(void *hwdev, u16 *global_qpn)
+{
+       struct hinic_cmd_qpn cmd_qpn;
+       u16 out_size = sizeof(cmd_qpn);
+       int err;
+
+       if (!hwdev || !global_qpn) {
+               PMD_DRV_LOG(ERR, "Hwdev or global_qpn is NULL");
+               return -EINVAL;
+       }
+
+       memset(&cmd_qpn, 0, sizeof(cmd_qpn));
+       cmd_qpn.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       cmd_qpn.func_id = hinic_global_func_id(hwdev);
+
+       err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_L2NIC,
+                                    HINIC_PORT_CMD_GET_GLOBAL_QPN,
+                                    &cmd_qpn, sizeof(cmd_qpn), &cmd_qpn,
+                                    &out_size, 0);
+       if (err || !out_size || cmd_qpn.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to get base qpn, status(%d)",
+                       cmd_qpn.mgmt_msg_head.status);
+               return -EINVAL;
+       }
+
+       *global_qpn = cmd_qpn.base_qpn;
+
+       return 0;
+}
+
+/**
+ * hinic_set_mac - Init mac_vlan table in NIC.
+ * @hwdev: the hardware interface of a nic device
+ * @mac_addr: mac address
+ * @vlan_id: set 0 for mac_vlan table initialization
+ * @func_id: global function id of NIC
+ * @return
+ *   0 on success and stats is filled,
+ *   negative error value otherwise.
+ */
+int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_port_mac_set mac_info;
+       u16 out_size = sizeof(mac_info);
+       int err;
+
+       if (!hwdev || !mac_addr) {
+               PMD_DRV_LOG(ERR, "Hwdev or mac_addr is NULL");
+               return -EINVAL;
+       }
+
+       memset(&mac_info, 0, sizeof(mac_info));
+       mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       mac_info.func_id = func_id;
+       mac_info.vlan_id = vlan_id;
+       memmove(mac_info.mac, mac_addr, ETH_ALEN);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_MAC, &mac_info,
+                                    sizeof(mac_info), &mac_info, &out_size);
+       if (err || !out_size || (mac_info.mgmt_msg_head.status &&
+           mac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) {
+               PMD_DRV_LOG(ERR, "Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x",
+                       err, mac_info.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+       if (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) {
+               PMD_DRV_LOG(WARNING, "PF has already set vf mac, Ignore set operation.");
+               return HINIC_PF_SET_VF_ALREADY;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_del_mac - Uninit mac_vlan table in NIC.
+ * @hwdev: the hardware interface of a nic device
+ * @mac_addr: mac address
+ * @vlan_id: set 0 for mac_vlan table initialization
+ * @func_id: global function id of NIC
+ * @return
+ *   0 on success and stats is filled,
+ *   negative error value otherwise.
+ */
+int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id,
+                 u16 func_id)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_port_mac_set mac_info;
+       u16 out_size = sizeof(mac_info);
+       int err;
+
+       if (!hwdev || !mac_addr) {
+               PMD_DRV_LOG(ERR, "Hwdev or mac_addr is NULL");
+               return -EINVAL;
+       }
+
+       if (vlan_id >= VLAN_N_VID) {
+               PMD_DRV_LOG(ERR, "Invalid VLAN number");
+               return -EINVAL;
+       }
+
+       memset(&mac_info, 0, sizeof(mac_info));
+       mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       mac_info.func_id = func_id;
+       mac_info.vlan_id = vlan_id;
+       memmove(mac_info.mac, mac_addr, ETH_ALEN);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_DEL_MAC, &mac_info,
+                                    sizeof(mac_info), &mac_info, &out_size);
+       if (err || !out_size || (mac_info.mgmt_msg_head.status &&
+               mac_info.mgmt_msg_head.status != HINIC_PF_SET_VF_ALREADY)) {
+               PMD_DRV_LOG(ERR, "Failed to delete MAC, err: %d, status: 0x%x, out size: 0x%x",
+                       err, mac_info.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+       if (mac_info.mgmt_msg_head.status == HINIC_PF_SET_VF_ALREADY) {
+               PMD_DRV_LOG(WARNING, "PF has already set vf mac, Ignore delete operation.");
+               return HINIC_PF_SET_VF_ALREADY;
+       }
+
+       return 0;
+}
+
+int hinic_get_default_mac(void *hwdev, u8 *mac_addr)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_port_mac_set mac_info;
+       u16 out_size = sizeof(mac_info);
+       int err;
+
+       if (!hwdev || !mac_addr) {
+               PMD_DRV_LOG(ERR, "Hwdev or mac_addr is NULL");
+               return -EINVAL;
+       }
+
+       memset(&mac_info, 0, sizeof(mac_info));
+       mac_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       mac_info.func_id = hinic_global_func_id(hwdev);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MAC,
+                                    &mac_info, sizeof(mac_info),
+                                    &mac_info, &out_size);
+       if (err || !out_size || mac_info.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x",
+                       err, mac_info.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       memmove(mac_addr, mac_info.mac, ETH_ALEN);
+
+       return 0;
+}
+
+int hinic_set_port_mtu(void *hwdev, u32 new_mtu)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_mtu mtu_info;
+       u16 out_size = sizeof(mtu_info);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&mtu_info, 0, sizeof(mtu_info));
+       mtu_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       mtu_info.func_id = hinic_global_func_id(hwdev);
+       mtu_info.mtu = new_mtu;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CHANGE_MTU,
+                                    &mtu_info, sizeof(mtu_info),
+                                    &mtu_info, &out_size);
+       if (err || !out_size || mtu_info.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x",
+                       err, mtu_info.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int hinic_get_link_status(void *hwdev, u8 *link_state)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_get_link get_link;
+       u16 out_size = sizeof(get_link);
+       int err;
+
+       if (!hwdev || !link_state) {
+               PMD_DRV_LOG(ERR, "Hwdev or link_state is NULL");
+               return -EINVAL;
+       }
+
+       memset(&get_link, 0, sizeof(get_link));
+       get_link.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       get_link.func_id = hinic_global_func_id(hwdev);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
+                                    &get_link, sizeof(get_link),
+                                    &get_link, &out_size);
+       if (err || !out_size || get_link.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x",
+                       err, get_link.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       *link_state = get_link.link_status;
+
+       return 0;
+}
+
+/**
+ * hinic_set_vport_enable - Notify firmware that driver is ready or not.
+ * @hwdev: the hardware interface of a nic device
+ * @enable: 1: driver is ready; 0: driver is not ok.
+ * Return: 0 on success and state is filled, negative error value otherwise.
+ **/
+int hinic_set_vport_enable(void *hwdev, bool enable)
+{
+       struct hinic_hwdev *hardware_dev = (struct hinic_hwdev *)hwdev;
+       struct hinic_vport_state en_state;
+       u16 out_size = sizeof(en_state);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&en_state, 0, sizeof(en_state));
+       en_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       en_state.func_id = hinic_global_func_id(hwdev);
+       en_state.state = (enable ? 1 : 0);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VPORT_ENABLE,
+                                    &en_state, sizeof(en_state),
+                                    &en_state, &out_size);
+       if (err || !out_size || en_state.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to set vport state, err: %d, status: 0x%x, out size: 0x%x",
+                       err, en_state.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_set_port_enable - open MAG to receive packets.
+ * @hwdev: the hardware interface of a nic device
+ * @enable: 1: open MAG; 0: close MAG.
+ * @return
+ *   0 on success and stats is filled,
+ *   negative error value otherwise.
+ */
+int hinic_set_port_enable(void *hwdev, bool enable)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_port_state en_state;
+       u16 out_size = sizeof(en_state);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&en_state, 0, sizeof(en_state));
+       en_state.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       en_state.state = (enable ? HINIC_PORT_ENABLE : HINIC_PORT_DISABLE);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PORT_ENABLE,
+                                    &en_state, sizeof(en_state),
+                                    &en_state, &out_size);
+       if (err || !out_size || en_state.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to set phy port state, err: %d, status: 0x%x, out size: 0x%x",
+                       err, en_state.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_port_info port_msg;
+       u16 out_size = sizeof(port_msg);
+       int err;
+
+       if (!hwdev || !port_info) {
+               PMD_DRV_LOG(ERR, "Hwdev or port_info is NULL");
+               return -EINVAL;
+       }
+
+       memset(&port_msg, 0, sizeof(port_msg));
+       port_msg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       port_msg.func_id = hinic_global_func_id(hwdev);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_INFO,
+                                    &port_msg, sizeof(port_msg),
+                                    &port_msg, &out_size);
+       if (err || !out_size || port_msg.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to get port info, err: %d, status: 0x%x, out size: 0x%x",
+                       err, port_msg.mgmt_msg_head.status, out_size);
+               return err;
+       }
+
+       port_info->autoneg_cap = port_msg.autoneg_cap;
+       port_info->autoneg_state = port_msg.autoneg_state;
+       port_info->duplex = port_msg.duplex;
+       port_info->port_type = port_msg.port_type;
+       port_info->speed = port_msg.speed;
+
+       return 0;
+}
+
+int hinic_set_pause_config(void *hwdev, struct nic_pause_config nic_pause)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_pause_config pause_info;
+       u16 out_size = sizeof(pause_info);
+       int err;
+
+       if (!nic_hwdev) {
+               PMD_DRV_LOG(ERR, "Nic_hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&pause_info, 0, sizeof(pause_info));
+       pause_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       pause_info.func_id = hinic_global_func_id(hwdev);
+       pause_info.auto_neg = nic_pause.auto_neg;
+       pause_info.rx_pause = nic_pause.rx_pause;
+       pause_info.tx_pause = nic_pause.tx_pause;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
+                                    &pause_info, sizeof(pause_info),
+                                    &pause_info, &out_size);
+       if (err || !out_size || pause_info.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to set pause info, err: %d, status: 0x%x, out size: 0x%x",
+                       err, pause_info.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw,
+                     u8 *pgid, u8 *up_bw, u8 *prio)
+{
+       struct hinic_up_ets_cfg ets;
+       u16 out_size = sizeof(ets);
+       u16 up_bw_t = 0;
+       u8 pg_bw_t = 0;
+       int i, err;
+
+       if (!hwdev || !up_tc || !pg_bw || !pgid || !up_bw || !prio) {
+               PMD_DRV_LOG(ERR, "Hwdev, up_tc, pg_bw, pgid, up_bw or prio is NULL");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < HINIC_DCB_TC_MAX; i++) {
+               up_bw_t += *(up_bw + i);
+               pg_bw_t += *(pg_bw + i);
+
+               if (*(up_tc + i) > HINIC_DCB_TC_MAX) {
+                       PMD_DRV_LOG(ERR,
+                               "Invalid up %d mapping tc: %d", i,
+                               *(up_tc + i));
+                       return -EINVAL;
+               }
+       }
+
+       if (pg_bw_t != 100 || (up_bw_t % 100) != 0) {
+               PMD_DRV_LOG(ERR,
+                       "Invalid pg_bw: %d or up_bw: %d", pg_bw_t, up_bw_t);
+               return -EINVAL;
+       }
+
+       memset(&ets, 0, sizeof(ets));
+       ets.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       ets.port_id = 0;    /* reserved */
+       memcpy(ets.up_tc, up_tc, HINIC_DCB_TC_MAX);
+       memcpy(ets.pg_bw, pg_bw, HINIC_DCB_UP_MAX);
+       memcpy(ets.pgid, pgid, HINIC_DCB_UP_MAX);
+       memcpy(ets.up_bw, up_bw, HINIC_DCB_UP_MAX);
+       memcpy(ets.prio, prio, HINIC_DCB_UP_MAX);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ETS,
+                                    &ets, sizeof(ets), &ets, &out_size);
+       if (err || ets.mgmt_msg_head.status || !out_size) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to set ets, err: %d, status: 0x%x, out size: 0x%x",
+                       err, ets.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats)
+{
+       struct hinic_port_stats_info vport_stats_cmd;
+       struct hinic_cmd_vport_stats vport_stats_rsp;
+       u16 out_size = sizeof(vport_stats_rsp);
+       int err;
+
+       if (!hwdev || !stats) {
+               PMD_DRV_LOG(ERR, "Hwdev or stats is NULL");
+               return -EINVAL;
+       }
+
+       memset(&vport_stats_rsp, 0, sizeof(vport_stats_rsp));
+       memset(&vport_stats_cmd, 0, sizeof(vport_stats_cmd));
+       vport_stats_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       vport_stats_cmd.stats_version = HINIC_PORT_STATS_VERSION;
+       vport_stats_cmd.func_id = hinic_global_func_id(hwdev);
+       vport_stats_cmd.stats_size = sizeof(vport_stats_rsp);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_VPORT_STAT,
+                                    &vport_stats_cmd, sizeof(vport_stats_cmd),
+                                    &vport_stats_rsp, &out_size);
+       if (err || !out_size || vport_stats_rsp.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Get vport stats from fw failed, err: %d, status: 0x%x, out size: 0x%x",
+                       err, vport_stats_rsp.mgmt_msg_head.status, out_size);
+               return -EFAULT;
+       }
+
+       memcpy(stats, &vport_stats_rsp.stats, sizeof(*stats));
+
+       return 0;
+}
+
+int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats)
+{
+       struct hinic_port_stats_info port_stats_cmd;
+       struct hinic_port_stats port_stats_rsp;
+       u16 out_size = sizeof(port_stats_rsp);
+       int err;
+
+       if (!hwdev || !stats) {
+               PMD_DRV_LOG(ERR, "Hwdev or stats is NULL");
+               return -EINVAL;
+       }
+
+       memset(&port_stats_rsp, 0, sizeof(port_stats_rsp));
+       memset(&port_stats_cmd, 0, sizeof(port_stats_cmd));
+       port_stats_cmd.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       port_stats_cmd.stats_version = HINIC_PORT_STATS_VERSION;
+       port_stats_cmd.stats_size = sizeof(port_stats_rsp);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_PORT_STATISTICS,
+                                    &port_stats_cmd, sizeof(port_stats_cmd),
+                                    &port_stats_rsp, &out_size);
+       if (err || !out_size || port_stats_rsp.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to get port statistics, err: %d, status: 0x%x, out size: 0x%x",
+                       err, port_stats_rsp.mgmt_msg_head.status, out_size);
+               return -EFAULT;
+       }
+
+       memcpy(stats, &port_stats_rsp.stats, sizeof(*stats));
+
+       return 0;
+}
+
+int hinic_set_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type rss_type)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct nic_rss_context_tbl *ctx_tbl;
+       struct hinic_cmd_buf *cmd_buf;
+       u32 ctx = 0;
+       u64 out_param;
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       cmd_buf = hinic_alloc_cmd_buf(hwdev);
+       if (!cmd_buf) {
+               PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
+               return -ENOMEM;
+       }
+
+       ctx |= HINIC_RSS_TYPE_SET(1, VALID) |
+               HINIC_RSS_TYPE_SET(rss_type.ipv4, IPV4) |
+               HINIC_RSS_TYPE_SET(rss_type.ipv6, IPV6) |
+               HINIC_RSS_TYPE_SET(rss_type.ipv6_ext, IPV6_EXT) |
+               HINIC_RSS_TYPE_SET(rss_type.tcp_ipv4, TCP_IPV4) |
+               HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6, TCP_IPV6) |
+               HINIC_RSS_TYPE_SET(rss_type.tcp_ipv6_ext, TCP_IPV6_EXT) |
+               HINIC_RSS_TYPE_SET(rss_type.udp_ipv4, UDP_IPV4) |
+               HINIC_RSS_TYPE_SET(rss_type.udp_ipv6, UDP_IPV6);
+
+       cmd_buf->size = sizeof(struct nic_rss_context_tbl);
+
+       ctx_tbl = (struct nic_rss_context_tbl *)cmd_buf->buf;
+       ctx_tbl->group_index = cpu_to_be32(tmpl_idx);
+       ctx_tbl->offset = 0;
+       ctx_tbl->size = sizeof(u32);
+       ctx_tbl->size = cpu_to_be32(ctx_tbl->size);
+       ctx_tbl->rsvd = 0;
+       ctx_tbl->ctx = cpu_to_be32(ctx);
+
+       /* cfg the rss context table by command queue */
+       err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+                                    HINIC_MOD_L2NIC,
+                                    HINIC_UCODE_CMD_SET_RSS_CONTEXT_TABLE,
+                                    cmd_buf, &out_param, 0);
+
+       hinic_free_cmd_buf(hwdev, cmd_buf);
+
+       if (err || out_param != 0) {
+               PMD_DRV_LOG(ERR, "Failed to set rss context table");
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+int hinic_get_rss_type(void *hwdev, u32 tmpl_idx, struct nic_rss_type *rss_type)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_rss_context_table ctx_tbl;
+       u16 out_size = sizeof(ctx_tbl);
+       int err;
+
+       if (!hwdev || !rss_type) {
+               PMD_DRV_LOG(ERR, "Hwdev or rss_type is NULL");
+               return -EINVAL;
+       }
+
+       ctx_tbl.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       ctx_tbl.func_id = hinic_global_func_id(hwdev);
+       ctx_tbl.template_id = (u8)tmpl_idx;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_CTX_TBL,
+                                    &ctx_tbl, sizeof(ctx_tbl),
+                                    &ctx_tbl, &out_size);
+       if (err || !out_size || ctx_tbl.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to get hash type, err: %d, status: 0x%x, out size: 0x%x",
+                       err, ctx_tbl.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       rss_type->ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV4);
+       rss_type->ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6);
+       rss_type->ipv6_ext = HINIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT);
+       rss_type->tcp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4);
+       rss_type->tcp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6);
+       rss_type->tcp_ipv6_ext =
+                       HINIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6_EXT);
+       rss_type->udp_ipv4 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4);
+       rss_type->udp_ipv6 = HINIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6);
+
+       return 0;
+}
+
+int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_rss_template_key temp_key;
+       u16 out_size = sizeof(temp_key);
+       int err;
+
+       if (!hwdev || !temp) {
+               PMD_DRV_LOG(ERR, "Hwdev or temp is NULL");
+               return -EINVAL;
+       }
+
+       memset(&temp_key, 0, sizeof(temp_key));
+       temp_key.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       temp_key.func_id = hinic_global_func_id(hwdev);
+       temp_key.template_id = (u8)tmpl_idx;
+       memcpy(temp_key.key, temp, HINIC_RSS_KEY_SIZE);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL,
+                                    &temp_key, sizeof(temp_key),
+                                    &temp_key, &out_size);
+       if (err || !out_size || temp_key.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to set hash key, err: %d, status: 0x%x, out size: 0x%x",
+                       err, temp_key.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_rss_template_key temp_key;
+       u16 out_size = sizeof(temp_key);
+       int err;
+
+       if (!hwdev || !temp) {
+               PMD_DRV_LOG(ERR, "Hwdev or temp is NULL");
+               return -EINVAL;
+       }
+
+       memset(&temp_key, 0, sizeof(temp_key));
+       temp_key.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       temp_key.func_id = hinic_global_func_id(hwdev);
+       temp_key.template_id = (u8)tmpl_idx;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL,
+                                    &temp_key, sizeof(temp_key),
+                                    &temp_key, &out_size);
+       if (err || !out_size || temp_key.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to get hash key, err: %d, status: 0x%x, out size: 0x%x",
+                       err, temp_key.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       memcpy(temp, temp_key.key, HINIC_RSS_KEY_SIZE);
+
+       return 0;
+}
+
+/**
+ * hinic_rss_set_hash_engine - Init rss hash function .
+ * @hwdev: the hardware interface of a nic device
+ * @tmpl_idx: index of rss template from NIC.
+ * @type: hash function, such as Toeplitz or XOR.
+ * @return
+ *   0 on success and stats is filled,
+ *   negative error value otherwise.
+ */
+int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type)
+{
+       struct hinic_hwdev *nic_hwdev = (struct hinic_hwdev *)hwdev;
+       struct hinic_rss_engine_type hash_type;
+       u16 out_size = sizeof(hash_type);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&hash_type, 0, sizeof(hash_type));
+       hash_type.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       hash_type.func_id = hinic_global_func_id(hwdev);
+       hash_type.hash_engine = type;
+       hash_type.template_id = tmpl_idx;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RSS_HASH_ENGINE,
+                                    &hash_type, sizeof(hash_type),
+                                    &hash_type, &out_size);
+       if (err || !out_size || hash_type.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to get hash engine, err: %d, status: 0x%x, out size: 0x%x",
+                       err, hash_type.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_rss_set_indir_tbl - set the rss indirection table via command queue.
+ * The 256-entry table is sent in two halves of HINIC_RSS_INDIR_SIZE / 2.
+ * @hwdev: the hardware interface of a nic device
+ * @tmpl_idx: index of rss template from chip
+ * @indir_table: indirection table, HINIC_RSS_INDIR_SIZE entries
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table)
+{
+       struct nic_rss_indirect_tbl *indir_tbl;
+       struct hinic_cmd_buf *cmd_buf;
+       int i;
+       u32 *temp;
+       u32 indir_size;
+       u64 out_param;
+       int err;
+
+       if (!hwdev || !indir_table) {
+               PMD_DRV_LOG(ERR, "Hwdev or indir_table is NULL");
+               return -EINVAL;
+       }
+
+       cmd_buf = hinic_alloc_cmd_buf(hwdev);
+       if (!cmd_buf) {
+               PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
+               return -ENOMEM;
+       }
+
+       cmd_buf->size = sizeof(struct nic_rss_indirect_tbl);
+       indir_tbl = (struct nic_rss_indirect_tbl *)cmd_buf->buf;
+       indir_tbl->group_index = cpu_to_be32(tmpl_idx);
+
+       /* entries are bytes; swap each completed 4-byte group to big endian */
+       for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++) {
+               indir_tbl->entry[i] = (u8)(*(indir_table + i));
+
+               if (0x3 == (i & 0x3)) {
+                       temp = (u32 *)&indir_tbl->entry[i - 3];
+                       *temp = cpu_to_be32(*temp);
+               }
+       }
+
+       /* configure the rss indirect table by command queue */
+       indir_size = HINIC_RSS_INDIR_SIZE / 2;
+       indir_tbl->offset = 0;
+       indir_tbl->size = cpu_to_be32(indir_size);
+
+       err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+                                    HINIC_MOD_L2NIC,
+                                    HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+                                    cmd_buf, &out_param, 0);
+       if (err || out_param != 0) {
+               PMD_DRV_LOG(ERR, "Failed to set rss indir table");
+               err = -EFAULT;
+               goto free_buf;
+       }
+
+       /* second half: move the upper entries down and resend with offset */
+       indir_tbl->offset = cpu_to_be32(indir_size);
+       indir_tbl->size = cpu_to_be32(indir_size);
+       memcpy(indir_tbl->entry, &indir_tbl->entry[indir_size], indir_size);
+
+       err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+                                    HINIC_MOD_L2NIC,
+                                    HINIC_UCODE_CMD_SET_RSS_INDIR_TABLE,
+                                    cmd_buf, &out_param, 0);
+       if (err || out_param != 0) {
+               PMD_DRV_LOG(ERR, "Failed to set rss indir table");
+               err = -EFAULT;
+       }
+
+free_buf:
+       hinic_free_cmd_buf(hwdev, cmd_buf);
+
+       return err;
+}
+
+/**
+ * hinic_rss_get_indir_tbl - read the rss indirection table from the chip.
+ * @hwdev: the hardware interface of a nic device
+ * @tmpl_idx: index of rss template from chip
+ * @indir_table: output buffer, HINIC_RSS_INDIR_SIZE entries
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table)
+{
+       struct hinic_rss_indir_table rss_cfg;
+       u16 out_size = sizeof(rss_cfg);
+       int err = 0, i;
+
+       if (!hwdev || !indir_table) {
+               PMD_DRV_LOG(ERR, "Hwdev or indir_table is NULL");
+               return -EINVAL;
+       }
+
+       memset(&rss_cfg, 0, sizeof(rss_cfg));
+       rss_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       rss_cfg.func_id = hinic_global_func_id(hwdev);
+       rss_cfg.template_id = (u8)tmpl_idx;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev,
+                                    HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL,
+                                    &rss_cfg, sizeof(rss_cfg), &rss_cfg,
+                                    &out_size);
+       if (err || !out_size || rss_cfg.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to get indir table, err: %d, status: 0x%x, out size: 0x%x",
+                       err, rss_cfg.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       /* firmware returns big-endian entries; convert before copy out */
+       hinic_be32_to_cpu(rss_cfg.indir, HINIC_RSS_INDIR_SIZE);
+       for (i = 0; i < HINIC_RSS_INDIR_SIZE; i++)
+               indir_table[i] = rss_cfg.indir[i];
+
+       return 0;
+}
+
+/**
+ * hinic_rss_cfg - enable/disable rss and set priority-to-tc mapping.
+ * @hwdev: the hardware interface of a nic device
+ * @rss_en: rss enable flag
+ * @tmpl_idx: index of rss template from chip
+ * @tc_num: number of TCs, must be a power of 2 (micro code requirement)
+ * @prio_tc: priority-to-tc map, HINIC_DCB_UP_MAX entries
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc)
+{
+       struct hinic_rss_config rss_cfg;
+       u16 out_size = sizeof(rss_cfg);
+       int err;
+
+       /* micro code required: number of TC should be power of 2 */
+       if (!hwdev || !prio_tc || (tc_num & (tc_num - 1))) {
+               PMD_DRV_LOG(ERR, "Hwdev or prio_tc is NULL, or tc_num: %u Not power of 2",
+                       tc_num);
+               return -EINVAL;
+       }
+
+       memset(&rss_cfg, 0, sizeof(rss_cfg));
+       rss_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       rss_cfg.func_id = hinic_global_func_id(hwdev);
+       rss_cfg.rss_en = rss_en;
+       rss_cfg.template_id = tmpl_idx;
+       rss_cfg.rq_priority_number = tc_num ? (u8)ilog2(tc_num) : 0;
+
+       memcpy(rss_cfg.prio_tc, prio_tc, HINIC_DCB_UP_MAX);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_CFG,
+                                    &rss_cfg, sizeof(rss_cfg), &rss_cfg,
+                                    &out_size);
+       if (err || !out_size || rss_cfg.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to set rss cfg, err: %d, status: 0x%x, out size: 0x%x",
+                       err, rss_cfg.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_rss_template_alloc - get rss template id from the chip,
+ *                           all functions share 96 templates.
+ * @hwdev: the pointer to the private hardware device object
+ * @tmpl_idx: output, index of rss template allocated by chip.
+ * Return: 0 on success and tmpl_idx is filled, negative error value otherwise.
+ **/
+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx)
+{
+       struct hinic_rss_template_mgmt template_mgmt;
+       u16 out_size = sizeof(template_mgmt);
+       int err;
+
+       if (!hwdev || !tmpl_idx) {
+               PMD_DRV_LOG(ERR, "Hwdev or tmpl_idx is NULL");
+               return -EINVAL;
+       }
+
+       memset(&template_mgmt, 0, sizeof(template_mgmt));
+       template_mgmt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       template_mgmt.func_id = hinic_global_func_id(hwdev);
+       template_mgmt.cmd = NIC_RSS_CMD_TEMP_ALLOC;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+                                    &template_mgmt, sizeof(template_mgmt),
+                                    &template_mgmt, &out_size);
+       if (err || !out_size || template_mgmt.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to alloc rss template, err: %d, status: 0x%x, out size: 0x%x",
+                       err, template_mgmt.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       *tmpl_idx = template_mgmt.template_id;
+
+       return 0;
+}
+
+/**
+ * hinic_rss_template_free - free rss template id to the chip
+ * @hwdev: the hardware interface of a nic device
+ * @tmpl_idx: index of rss template from NIC.
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx)
+{
+       struct hinic_rss_template_mgmt template_mgmt;
+       u16 out_size = sizeof(template_mgmt);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&template_mgmt, 0, sizeof(template_mgmt));
+       template_mgmt.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       template_mgmt.func_id = hinic_global_func_id(hwdev);
+       template_mgmt.template_id = tmpl_idx;
+       template_mgmt.cmd = NIC_RSS_CMD_TEMP_FREE;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RSS_TEMP_MGR,
+                                    &template_mgmt, sizeof(template_mgmt),
+                                    &template_mgmt, &out_size);
+       if (err || !out_size || template_mgmt.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to free rss template, err: %d, status: 0x%x, out size: 0x%x",
+                       err, template_mgmt.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_set_rx_vhd_mode - change rx buffer size after initialization.
+ * @hwdev: the hardware interface of a nic device
+ * @mode: vhd type, passed through to firmware.
+ * @rx_buf_sz: receive buffer size.
+ * @return
+ *   0 on success,
+ *   negative error value otherwise.
+ */
+int hinic_set_rx_vhd_mode(void *hwdev, u16 mode, u16 rx_buf_sz)
+{
+       struct hinic_set_vhd_mode vhd_mode_cfg;
+       u16 out_size = sizeof(vhd_mode_cfg);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&vhd_mode_cfg, 0, sizeof(vhd_mode_cfg));
+
+       vhd_mode_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       vhd_mode_cfg.func_id = hinic_global_func_id(hwdev);
+       vhd_mode_cfg.vhd_type = mode;
+       vhd_mode_cfg.rx_wqe_buffer_size = rx_buf_sz;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_VHD_CFG,
+                                    &vhd_mode_cfg, sizeof(vhd_mode_cfg),
+                                    &vhd_mode_cfg, &out_size);
+       if (err || !out_size || vhd_mode_cfg.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to set vhd mode, err: %d, status: 0x%x, out size: 0x%x",
+                       err, vhd_mode_cfg.mgmt_msg_head.status, out_size);
+
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_set_rx_mode - set the rx mode bits (uc/mc/bc/promisc) of a function.
+ * @hwdev: the hardware interface of a nic device
+ * @enable: bitmask of enum hinic_rx_mod values
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_set_rx_mode(void *hwdev, u32 enable)
+{
+       struct hinic_rx_mode_config rx_mode_cfg;
+       u16 out_size = sizeof(rx_mode_cfg);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&rx_mode_cfg, 0, sizeof(rx_mode_cfg));
+       rx_mode_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       rx_mode_cfg.func_id = hinic_global_func_id(hwdev);
+       rx_mode_cfg.rx_mode = enable;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_MODE,
+                                    &rx_mode_cfg, sizeof(rx_mode_cfg),
+                                    &rx_mode_cfg, &out_size);
+       if (err || !out_size || rx_mode_cfg.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to set rx mode, err: %d, status: 0x%x, out size: 0x%x",
+                       err, rx_mode_cfg.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_set_rx_csum_offload - enable/disable rx checksum offload.
+ * @hwdev: the hardware interface of a nic device
+ * @en: rx checksum offload enable flag
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_set_rx_csum_offload(void *hwdev, u32 en)
+{
+       struct hinic_checksum_offload rx_csum_cfg;
+       u16 out_size = sizeof(rx_csum_cfg);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&rx_csum_cfg, 0, sizeof(rx_csum_cfg));
+       rx_csum_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       rx_csum_cfg.func_id = hinic_global_func_id(hwdev);
+       rx_csum_cfg.rx_csum_offload = en;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,
+                                    &rx_csum_cfg, sizeof(rx_csum_cfg),
+                                    &rx_csum_cfg, &out_size);
+       if (err || !out_size || rx_csum_cfg.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x",
+                       err, rx_csum_cfg.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_set_tx_tso - enable/disable tx TSO offload.
+ * @hwdev: the hardware interface of a nic device
+ * @tso_en: TSO enable flag
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_set_tx_tso(void *hwdev, u8 tso_en)
+{
+       struct hinic_tso_config tso_cfg;
+       u16 out_size = sizeof(tso_cfg);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&tso_cfg, 0, sizeof(tso_cfg));
+       tso_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       tso_cfg.func_id = hinic_global_func_id(hwdev);
+       tso_cfg.tso_en = tso_en;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_TSO,
+                                    &tso_cfg, sizeof(tso_cfg), &tso_cfg,
+                                    &out_size);
+       if (err || !out_size || tso_cfg.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to set tso, err: %d, status: 0x%x, out size: 0x%x",
+                       err, tso_cfg.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_set_rx_lro - configure rx LRO (large receive offload).
+ * @hwdev: the hardware interface of a nic device
+ * @ipv4_en: enable LRO for ipv4
+ * @ipv6_en: enable LRO for ipv6
+ * @max_wqe_num: max number of wqes merged per LRO session
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num)
+{
+       struct hinic_lro_config lro_cfg;
+       u16 out_size = sizeof(lro_cfg);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&lro_cfg, 0, sizeof(lro_cfg));
+       lro_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       lro_cfg.func_id = hinic_global_func_id(hwdev);
+       lro_cfg.lro_ipv4_en = ipv4_en;
+       lro_cfg.lro_ipv6_en = ipv6_en;
+       lro_cfg.lro_max_wqe_num = max_wqe_num;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LRO,
+                                    &lro_cfg, sizeof(lro_cfg), &lro_cfg,
+                                    &out_size);
+       if (err || !out_size || lro_cfg.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x",
+                       err, lro_cfg.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_set_anti_attack - enable/disable the port anti-attack rate limiter
+ * using the default CIR/XIR/CBS/XBS parameters.
+ * @hwdev: the hardware interface of a nic device
+ * @enable: enable flag
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_set_anti_attack(void *hwdev, bool enable)
+{
+       struct hinic_port_anti_attack_rate rate;
+       u16 out_size = sizeof(rate);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&rate, 0, sizeof(rate));
+       rate.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       rate.func_id = hinic_global_func_id(hwdev);
+       rate.enable = enable;
+       rate.cir = ANTI_ATTACK_DEFAULT_CIR;
+       rate.xir = ANTI_ATTACK_DEFAULT_XIR;
+       rate.cbs = ANTI_ATTACK_DEFAULT_CBS;
+       rate.xbs = ANTI_ATTACK_DEFAULT_XBS;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE,
+                                    &rate, sizeof(rate), &rate,
+                                    &out_size);
+       if (err || !out_size || rate.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "can't %s port Anti-Attack rate limit, err: %d, status: 0x%x, out size: 0x%x",
+                       (enable ? "enable" : "disable"), err,
+                       rate.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Set autoneg status and restart port link status */
+int hinic_reset_port_link_cfg(void *hwdev)
+{
+       struct hinic_reset_link_cfg reset_cfg;
+       u16 out_size = sizeof(reset_cfg);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&reset_cfg, 0, sizeof(reset_cfg));
+       reset_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       reset_cfg.func_id = hinic_global_func_id(hwdev);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_RESET_LINK_CFG,
+                                    &reset_cfg, sizeof(reset_cfg),
+                                    &reset_cfg, &out_size);
+       if (err || !out_size || reset_cfg.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Reset port link configure failed, err: %d, status: 0x%x, out size: 0x%x",
+                       err, reset_cfg.mgmt_msg_head.status, out_size);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/* Configure the fast recycle mode of a function via the COMM module. */
+int hinic_set_fast_recycle_mode(void *hwdev, u8 mode)
+{
+       struct hinic_fast_recycled_mode recycle_cfg;
+       u16 out_size = sizeof(recycle_cfg);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&recycle_cfg, 0, sizeof(recycle_cfg));
+       recycle_cfg.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       recycle_cfg.func_id = hinic_global_func_id(hwdev);
+       recycle_cfg.fast_recycled_mode = mode;
+
+       err = hinic_msg_to_mgmt_sync(hwdev, HINIC_MOD_COMM,
+                                    HINIC_MGMT_CMD_FAST_RECYCLE_MODE_SET,
+                                    &recycle_cfg,
+                                    sizeof(recycle_cfg),
+                                    &recycle_cfg, &out_size, 0);
+       if (err || recycle_cfg.mgmt_msg_head.status || !out_size) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to set recycle mode, ret = %d",
+                       recycle_cfg.mgmt_msg_head.status);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+/*
+ * Apply a new rx mode bitmask to the device and, on success, remember it
+ * in the nic device's rx_mode_status field.
+ */
+int hinic_config_rx_mode(void *nic_dev, u32 rx_mode_ctrl)
+{
+       hinic_nic_dev *dev;
+
+       if (!nic_dev) {
+               PMD_DRV_LOG(ERR, "nic_dev is NULL");
+               return -EINVAL;
+       }
+
+       dev = (hinic_nic_dev *)nic_dev;
+       if (hinic_set_rx_mode((struct hinic_hwdev *)dev->hwdev,
+                             rx_mode_ctrl)) {
+               PMD_DRV_LOG(ERR, "Failed to set rx mode");
+               return -EINVAL;
+       }
+
+       dev->rx_mode_status = rx_mode_ctrl;
+
+       return 0;
+}
+
+/**
+ * hinic_clear_vport_stats - reset the vport statistics counters of this
+ * function in firmware; failures are only logged.
+ * @hwdev: the hardware interface of a nic device
+ **/
+void hinic_clear_vport_stats(struct hinic_hwdev *hwdev)
+{
+       struct hinic_clear_vport_stats clear_vport_stats;
+       u16 out_size = sizeof(clear_vport_stats);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return;
+       }
+
+       memset(&clear_vport_stats, 0, sizeof(clear_vport_stats));
+       clear_vport_stats.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       clear_vport_stats.func_id = hinic_global_func_id(hwdev);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAN_VPORT_STAT,
+                                    &clear_vport_stats,
+                                    sizeof(clear_vport_stats),
+                                    &clear_vport_stats, &out_size);
+       if (err || !out_size || clear_vport_stats.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to clear vport statistics, err: %d, status: 0x%x, out size: 0x%x",
+                       err, clear_vport_stats.mgmt_msg_head.status, out_size);
+       }
+}
+
+/**
+ * hinic_clear_phy_port_stats - reset the physical port statistics counters
+ * in firmware; failures are only logged.
+ * @hwdev: the hardware interface of a nic device
+ **/
+void hinic_clear_phy_port_stats(struct hinic_hwdev *hwdev)
+{
+       struct hinic_clear_port_stats clear_phy_port_stats;
+       u16 out_size = sizeof(clear_phy_port_stats);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return;
+       }
+
+       memset(&clear_phy_port_stats, 0, sizeof(clear_phy_port_stats));
+       clear_phy_port_stats.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       clear_phy_port_stats.func_id = hinic_global_func_id(hwdev);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev,
+                                    HINIC_PORT_CMD_CLEAR_PORT_STATISTICS,
+                                    &clear_phy_port_stats,
+                                    sizeof(clear_phy_port_stats),
+                                    &clear_phy_port_stats, &out_size);
+       if (err || !out_size || clear_phy_port_stats.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to clear phy port statistics, err: %d, status: 0x%x, out size: 0x%x",
+                       err, clear_phy_port_stats.mgmt_msg_head.status,
+                       out_size);
+       }
+}
+
+/**
+ * hinic_set_link_status_follow - set whether the function link status follows
+ * the physical port status. HINIC_MGMT_CMD_UNSUPPORTED from firmware is
+ * tolerated and returned to the caller.
+ * @hwdev: the hardware interface of a nic device
+ * @status: one of enum hinic_link_follow_status
+ * Return: 0 or firmware status on success, negative error value otherwise.
+ **/
+int hinic_set_link_status_follow(void *hwdev,
+                                enum hinic_link_follow_status status)
+{
+       struct hinic_set_link_follow follow;
+       u16 out_size = sizeof(follow);
+       int err;
+
+       if (!hwdev)
+               return -EINVAL;
+
+       if (status >= HINIC_LINK_FOLLOW_STATUS_MAX) {
+               PMD_DRV_LOG(ERR,
+                       "Invalid link follow status: %d", status);
+               return -EINVAL;
+       }
+
+       memset(&follow, 0, sizeof(follow));
+       follow.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       follow.func_id = hinic_global_func_id(hwdev);
+       follow.follow_status = status;
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_SET_LINK_FOLLOW,
+                                    &follow, sizeof(follow),
+                                    &follow, &out_size);
+       if ((follow.mgmt_msg_head.status != HINIC_MGMT_CMD_UNSUPPORTED &&
+            follow.mgmt_msg_head.status) || err || !out_size) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to set link status follow phy port status, err: %d, status: 0x%x, out size: 0x%x",
+                       err, follow.mgmt_msg_head.status, out_size);
+               return -EFAULT;
+       }
+
+       return follow.mgmt_msg_head.status;
+}
+
+/**
+ * hinic_get_link_mode - query the supported and advertised link mode bitmaps.
+ * @hwdev: the hardware interface of a nic device
+ * @supported: output, supported link mode bitmap
+ * @advertised: output, advertised link mode bitmap
+ * Return: 0 on success and outputs are filled, negative error otherwise.
+ **/
+int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised)
+{
+       struct hinic_link_mode_cmd link_mode;
+       u16 out_size = sizeof(link_mode);
+       int err;
+
+       if (!hwdev || !supported || !advertised)
+               return -EINVAL;
+
+       memset(&link_mode, 0, sizeof(link_mode));
+       link_mode.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       link_mode.func_id = hinic_global_func_id(hwdev);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
+                                    &link_mode, sizeof(link_mode),
+                                    &link_mode, &out_size);
+       if (err || !out_size || link_mode.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to get link mode, err: %d, status: 0x%x, out size: 0x%x",
+                       err, link_mode.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       *supported = link_mode.supported;
+       *advertised = link_mode.advertised;
+
+       return 0;
+}
+
+/**
+ * hinic_flush_qp_res - Flush tx && rx chip resources in case of set vport fake
+ * failed when device start.
+ * @hwdev: the hardware interface of a nic device
+ * Return: 0 on success, negative error value otherwise.
+ **/
+int hinic_flush_qp_res(void *hwdev)
+{
+       struct hinic_clear_qp_resource qp_res;
+       u16 out_size = sizeof(qp_res);
+       int err;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "Hwdev is NULL");
+               return -EINVAL;
+       }
+
+       memset(&qp_res, 0, sizeof(qp_res));
+       qp_res.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       qp_res.func_id = hinic_global_func_id(hwdev);
+
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_CLEAR_QP_RES,
+                                    &qp_res, sizeof(qp_res), &qp_res,
+                                    &out_size);
+       if (err || !out_size || qp_res.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR, "Failed to clear sq resources, err: %d, status: 0x%x, out size: 0x%x",
+                       err, qp_res.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_get_fw_version - read mgmt, boot and microcode version strings
+ * from firmware into @fw_ver.
+ * @hwdev: the hardware interface of a nic device
+ * @fw_ver: output version structure
+ * Return: 0 on success and fw_ver is filled, negative error value otherwise.
+ **/
+int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver)
+{
+       struct hinic_version_info ver_info;
+       u16 out_size = sizeof(ver_info);
+       int err;
+
+       if (!hwdev || !fw_ver)
+               return -EINVAL;
+
+       memset(&ver_info, 0, sizeof(ver_info));
+       ver_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_MGMT_VERSION,
+                                    &ver_info, sizeof(ver_info), &ver_info,
+                                    &out_size);
+       if (err || !out_size || ver_info.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+               "Failed to get mgmt version, err: %d, status: 0x%x, out size: 0x%x",
+               err, ver_info.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       memcpy(fw_ver->mgmt_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+       memset(&ver_info, 0, sizeof(ver_info));
+       ver_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       out_size = sizeof(ver_info);
+       err = l2nic_msg_to_mgmt_sync(hwdev, HINIC_PORT_CMD_GET_BOOT_VERSION,
+                                    &ver_info, sizeof(ver_info), &ver_info,
+                                    &out_size);
+       if (err || !out_size || ver_info.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to get boot version,err: %d, status: 0x%x, out size: 0x%x",
+                       err, ver_info.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       memcpy(fw_ver->boot_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+       memset(&ver_info, 0, sizeof(ver_info));
+       ver_info.mgmt_msg_head.resp_aeq_num = HINIC_AEQ1;
+       out_size = sizeof(ver_info);
+       err = l2nic_msg_to_mgmt_sync(hwdev,
+                                    HINIC_PORT_CMD_GET_MICROCODE_VERSION,
+                                    &ver_info, sizeof(ver_info), &ver_info,
+                                    &out_size);
+       if (err || !out_size || ver_info.mgmt_msg_head.status) {
+               PMD_DRV_LOG(ERR,
+                       "Failed to get microcode version, err: %d, status: 0x%x, out size: 0x%x",
+                       err, ver_info.mgmt_msg_head.status, out_size);
+               return -EINVAL;
+       }
+
+       memcpy(fw_ver->microcode_ver, ver_info.ver, HINIC_FW_VERSION_NAME);
+
+       return 0;
+}
+
diff --git a/drivers/net/hinic/base/hinic_pmd_niccfg.h 
b/drivers/net/hinic/base/hinic_pmd_niccfg.h
new file mode 100644
index 0000000..0cc143e
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_niccfg.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_NICCFG_H_
+#define _HINIC_PMD_NICCFG_H_
+
+#define OS_VF_ID_TO_HW(os_vf_id) ((os_vf_id) + 1)
+#define HW_VF_ID_TO_OS(hw_vf_id) ((hw_vf_id) - 1)
+
+#define HINIC_VLAN_PRIORITY_SHIFT      13
+
+#define HINIC_RSS_INDIR_SIZE           256
+#define HINIC_DCB_TC_MAX               0x8
+#define HINIC_DCB_UP_MAX               0x8
+#define HINIC_DCB_PG_MAX               0x8
+#define HINIC_RSS_KEY_SIZE             40
+
+#define HINIC_MAX_NUM_RQ               64
+
+/* RSS hash engine algorithms selectable per template */
+enum hinic_rss_hash_type {
+       HINIC_RSS_HASH_ENGINE_TYPE_XOR = 0,
+       HINIC_RSS_HASH_ENGINE_TYPE_TOEP,
+
+       HINIC_RSS_HASH_ENGINE_TYPE_MAX,
+};
+
+/* physical port attributes reported by firmware */
+struct nic_port_info {
+       u8      port_type;
+       u8      autoneg_cap;    /* autonegotiation capability */
+       u8      autoneg_state;  /* current autonegotiation state */
+       u8      duplex;
+       u8      speed;          /* encoded as enum nic_speed_level */
+};
+
+/* link speed levels used by the speed field of nic_port_info */
+enum nic_speed_level {
+       LINK_SPEED_10MB = 0,
+       LINK_SPEED_100MB,
+       LINK_SPEED_1GB,
+       LINK_SPEED_10GB,
+       LINK_SPEED_25GB,
+       LINK_SPEED_40GB,
+       LINK_SPEED_100GB,
+       LINK_SPEED_MAX
+};
+
+/* link state values */
+enum hinic_link_status {
+       HINIC_LINK_DOWN = 0,
+       HINIC_LINK_UP
+};
+
+/* pause (flow control) configuration of a port */
+struct nic_pause_config {
+       u32 auto_neg;
+       u32 rx_pause;
+       u32 tx_pause;
+};
+
+/* per-traffic-type RSS enable flags */
+struct nic_rss_type {
+       u8 tcp_ipv6_ext;
+       u8 ipv6_ext;
+       u8 tcp_ipv6;
+       u8 ipv6;
+       u8 tcp_ipv4;
+       u8 ipv4;
+       u8 udp_ipv6;
+       u8 udp_ipv4;
+};
+
+/* rx mode bitmask passed to hinic_set_rx_mode() */
+enum hinic_rx_mod {
+       HINIC_RX_MODE_UC = 1 << 0,
+       HINIC_RX_MODE_MC = 1 << 1,
+       HINIC_RX_MODE_BC = 1 << 2,
+       HINIC_RX_MODE_MC_ALL = 1 << 3,
+       HINIC_RX_MODE_PROMISC = 1 << 4,
+};
+
+/* link mode bit indices for the supported/advertised bitmaps */
+enum hinic_link_mode {
+       HINIC_10GE_BASE_KR = 0,
+       HINIC_40GE_BASE_KR4 = 1,
+       HINIC_40GE_BASE_CR4 = 2,
+       HINIC_100GE_BASE_KR4 = 3,
+       HINIC_100GE_BASE_CR4 = 4,
+       HINIC_25GE_BASE_KR_S = 5,
+       HINIC_25GE_BASE_CR_S = 6,
+       HINIC_25GE_BASE_KR = 7,
+       HINIC_25GE_BASE_CR = 8,
+       HINIC_GE_BASE_KX = 9,
+       HINIC_LINK_MODE_NUMBERS,
+
+       HINIC_SUPPORTED_UNKNOWN = 0xFFFF,
+};
+
+#define HINIC_DEFAULT_RX_MODE  (HINIC_RX_MODE_UC | HINIC_RX_MODE_MC |  \
+                               HINIC_RX_MODE_BC)
+
+#define HINIC_MAX_MTU_SIZE             (9600)
+#define HINIC_MIN_MTU_SIZE             (256)
+
+/* MIN_MTU + ETH_HLEN + CRC (256+14+4) */
+#define HINIC_MIN_FRAME_SIZE           274
+
+/* MAX_MTU + ETH_HLEN + CRC + VLAN(9600+14+4+4) */
+#define HINIC_MAX_JUMBO_FRAME_SIZE     (9622)
+
+#define HINIC_PORT_DISABLE             0x0
+#define HINIC_PORT_ENABLE              0x3
+
+/* per-function (vport) tx/rx statistics returned by firmware */
+struct hinic_vport_stats {
+       u64 tx_unicast_pkts_vport;
+       u64 tx_unicast_bytes_vport;
+       u64 tx_multicast_pkts_vport;
+       u64 tx_multicast_bytes_vport;
+       u64 tx_broadcast_pkts_vport;
+       u64 tx_broadcast_bytes_vport;
+
+       u64 rx_unicast_pkts_vport;
+       u64 rx_unicast_bytes_vport;
+       u64 rx_multicast_pkts_vport;
+       u64 rx_multicast_bytes_vport;
+       u64 rx_broadcast_pkts_vport;
+       u64 rx_broadcast_bytes_vport;
+
+       u64 tx_discard_vport;
+       u64 rx_discard_vport;
+       u64 tx_err_vport;
+       u64 rx_err_vport; /* rx checksum err pkts in ucode */
+};
+
+/* physical port MAC-level statistics returned by firmware */
+struct hinic_phy_port_stats {
+       /* MAC rx totals */
+       u64 mac_rx_total_pkt_num;
+       u64 mac_rx_total_oct_num;
+       u64 mac_rx_bad_pkt_num;
+       u64 mac_rx_bad_oct_num;
+       u64 mac_rx_good_pkt_num;
+       u64 mac_rx_good_oct_num;
+       u64 mac_rx_uni_pkt_num;
+       u64 mac_rx_multi_pkt_num;
+       u64 mac_rx_broad_pkt_num;
+
+       /* MAC tx totals */
+       u64 mac_tx_total_pkt_num;
+       u64 mac_tx_total_oct_num;
+       u64 mac_tx_bad_pkt_num;
+       u64 mac_tx_bad_oct_num;
+       u64 mac_tx_good_pkt_num;
+       u64 mac_tx_good_oct_num;
+       u64 mac_tx_uni_pkt_num;
+       u64 mac_tx_multi_pkt_num;
+       u64 mac_tx_broad_pkt_num;
+
+       /* rx packet-size histogram and error counters */
+       u64 mac_rx_fragment_pkt_num;
+       u64 mac_rx_undersize_pkt_num;
+       u64 mac_rx_undermin_pkt_num;
+       u64 mac_rx_64_oct_pkt_num;
+       u64 mac_rx_65_127_oct_pkt_num;
+       u64 mac_rx_128_255_oct_pkt_num;
+       u64 mac_rx_256_511_oct_pkt_num;
+       u64 mac_rx_512_1023_oct_pkt_num;
+       u64 mac_rx_1024_1518_oct_pkt_num;
+       u64 mac_rx_1519_2047_oct_pkt_num;
+       u64 mac_rx_2048_4095_oct_pkt_num;
+       u64 mac_rx_4096_8191_oct_pkt_num;
+       u64 mac_rx_8192_9216_oct_pkt_num;
+       u64 mac_rx_9217_12287_oct_pkt_num;
+       u64 mac_rx_12288_16383_oct_pkt_num;
+       u64 mac_rx_1519_max_bad_pkt_num;
+       u64 mac_rx_1519_max_good_pkt_num;
+       u64 mac_rx_oversize_pkt_num;
+       u64 mac_rx_jabber_pkt_num;
+
+       /* rx pause/PFC and control counters */
+       u64 mac_rx_mac_pause_num;
+       u64 mac_rx_pfc_pkt_num;
+       u64 mac_rx_pfc_pri0_pkt_num;
+       u64 mac_rx_pfc_pri1_pkt_num;
+       u64 mac_rx_pfc_pri2_pkt_num;
+       u64 mac_rx_pfc_pri3_pkt_num;
+       u64 mac_rx_pfc_pri4_pkt_num;
+       u64 mac_rx_pfc_pri5_pkt_num;
+       u64 mac_rx_pfc_pri6_pkt_num;
+       u64 mac_rx_pfc_pri7_pkt_num;
+       u64 mac_rx_mac_control_pkt_num;
+       u64 mac_rx_y1731_pkt_num;
+       u64 mac_rx_sym_err_pkt_num;
+       u64 mac_rx_fcs_err_pkt_num;
+       u64 mac_rx_send_app_good_pkt_num;
+       u64 mac_rx_send_app_bad_pkt_num;
+
+       /* tx packet-size histogram and error counters */
+       u64 mac_tx_fragment_pkt_num;
+       u64 mac_tx_undersize_pkt_num;
+       u64 mac_tx_undermin_pkt_num;
+       u64 mac_tx_64_oct_pkt_num;
+       u64 mac_tx_65_127_oct_pkt_num;
+       u64 mac_tx_128_255_oct_pkt_num;
+       u64 mac_tx_256_511_oct_pkt_num;
+       u64 mac_tx_512_1023_oct_pkt_num;
+       u64 mac_tx_1024_1518_oct_pkt_num;
+       u64 mac_tx_1519_2047_oct_pkt_num;
+       u64 mac_tx_2048_4095_oct_pkt_num;
+       u64 mac_tx_4096_8191_oct_pkt_num;
+       u64 mac_tx_8192_9216_oct_pkt_num;
+       u64 mac_tx_9217_12287_oct_pkt_num;
+       u64 mac_tx_12288_16383_oct_pkt_num;
+       u64 mac_tx_1519_max_bad_pkt_num;
+       u64 mac_tx_1519_max_good_pkt_num;
+       u64 mac_tx_oversize_pkt_num;
+       u64 mac_trans_jabber_pkt_num;
+
+       /* tx pause/PFC and control counters */
+       u64 mac_tx_mac_pause_num;
+       u64 mac_tx_pfc_pkt_num;
+       u64 mac_tx_pfc_pri0_pkt_num;
+       u64 mac_tx_pfc_pri1_pkt_num;
+       u64 mac_tx_pfc_pri2_pkt_num;
+       u64 mac_tx_pfc_pri3_pkt_num;
+       u64 mac_tx_pfc_pri4_pkt_num;
+       u64 mac_tx_pfc_pri5_pkt_num;
+       u64 mac_tx_pfc_pri6_pkt_num;
+       u64 mac_tx_pfc_pri7_pkt_num;
+       u64 mac_tx_mac_control_pkt_num;
+       u64 mac_tx_y1731_pkt_num;
+       u64 mac_tx_1588_pkt_num;
+       u64 mac_tx_err_all_pkt_num;
+       u64 mac_tx_from_app_good_pkt_num;
+       u64 mac_tx_from_app_bad_pkt_num;
+
+       /* higig2 rx counters */
+       u64 rx_higig2_ext_pkts_port;
+       u64 rx_higig2_message_pkts_port;
+       u64 rx_higig2_error_pkts_port;
+       u64 rx_higig2_cpu_ctrl_pkts_port;
+       u64 rx_higig2_unicast_pkts_port;
+       u64 rx_higig2_broadcast_pkts_port;
+       u64 rx_higig2_l2_multicast_pkts;
+       u64 rx_higig2_l3_multicast_pkts;
+
+       /* higig2 tx counters */
+       u64 tx_higig2_message_pkts_port;
+       u64 tx_higig2_ext_pkts_port;
+       u64 tx_higig2_cpu_ctrl_pkts_port;
+       u64 tx_higig2_unicast_pkts_port;
+       u64 tx_higig2_broadcast_pkts_port;
+       u64 tx_higig2_l2_multicast_pkts;
+       u64 tx_higig2_l3_multicast_pkts;
+};
+
+/* values for hinic_set_link_status_follow() */
+enum hinic_link_follow_status {
+       HINIC_LINK_FOLLOW_DEFAULT,
+       HINIC_LINK_FOLLOW_PORT,
+       HINIC_LINK_FOLLOW_SEPARATE,
+       HINIC_LINK_FOLLOW_STATUS_MAX,
+};
+
+/* length of each firmware version string, including terminator */
+#define HINIC_FW_VERSION_NAME  16
+/* firmware version strings filled by hinic_get_fw_version() */
+struct hinic_fw_version {
+       u8      mgmt_ver[HINIC_FW_VERSION_NAME];
+       u8      microcode_ver[HINIC_FW_VERSION_NAME];
+       u8      boot_ver[HINIC_FW_VERSION_NAME];
+};
+
+int hinic_set_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id);
+
+int hinic_del_mac(void *hwdev, u8 *mac_addr, u16 vlan_id, u16 func_id);
+
+int hinic_update_mac(void *hwdev, u8 *old_mac, u8 *new_mac, u16 vlan_id,
+                    u16 func_id);
+
+int hinic_get_default_mac(void *hwdev, u8 *mac_addr);
+
+int hinic_set_port_mtu(void *hwdev, u32 new_mtu);
+
+int hinic_set_vport_enable(void *hwdev, bool enable);
+
+int hinic_set_port_enable(void *hwdev, bool enable);
+
+int hinic_get_link_status(void *hwdev, u8 *link_state);
+
+int hinic_get_port_info(void *hwdev, struct nic_port_info *port_info);
+
+int hinic_config_rx_mode(void *nic_dev, u32 rx_mode_ctrl);
+
+int hinic_set_rx_vhd_mode(void *hwdev, u16 vhd_mode, u16 rx_buf_sz);
+
+int hinic_set_pause_config(void *hwdev, struct nic_pause_config nic_pause);
+
+int hinic_reset_port_link_cfg(void *hwdev);
+
+int hinic_dcb_set_ets(void *hwdev, u8 *up_tc, u8 *pg_bw, u8 *pgid, u8 *up_bw,
+                     u8 *prio);
+
+int hinic_set_anti_attack(void *hwdev, bool enable);
+
+/* offload feature */
+int hinic_set_rx_lro(void *hwdev, u8 ipv4_en, u8 ipv6_en, u8 max_wqe_num);
+
+int hinic_get_vport_stats(void *hwdev, struct hinic_vport_stats *stats);
+
+int hinic_get_phy_port_stats(void *hwdev, struct hinic_phy_port_stats *stats);
+
+/* rss */
+int hinic_set_rss_type(void *hwdev, u32 tmpl_idx,
+                      struct nic_rss_type rss_type);
+
+int hinic_get_rss_type(void *hwdev, u32 tmpl_idx,
+                      struct nic_rss_type *rss_type);
+
+int hinic_rss_set_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp);
+
+int hinic_rss_get_template_tbl(void *hwdev, u32 tmpl_idx, u8 *temp);
+
+int hinic_rss_set_hash_engine(void *hwdev, u8 tmpl_idx, u8 type);
+
+int hinic_rss_get_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table);
+
+int hinic_rss_set_indir_tbl(void *hwdev, u32 tmpl_idx, u32 *indir_table);
+
+int hinic_rss_cfg(void *hwdev, u8 rss_en, u8 tmpl_idx, u8 tc_num, u8 *prio_tc);
+
+int hinic_rss_template_alloc(void *hwdev, u8 *tmpl_idx);
+
+int hinic_rss_template_free(void *hwdev, u8 tmpl_idx);
+
+int hinic_set_rx_mode(void *hwdev, u32 enable);
+
+int hinic_set_rx_csum_offload(void *hwdev, u32 en);
+
+int hinic_set_tx_tso(void *hwdev, u8 tso_en);
+
+int hinic_set_link_status_follow(void *hwdev,
+                                enum hinic_link_follow_status status);
+
+int hinic_get_link_mode(void *hwdev, u32 *supported, u32 *advertised);
+
+int hinic_flush_qp_res(void *hwdev);
+
+int hinic_get_fw_version(void *hwdev, struct hinic_fw_version *fw_ver);
+
+#endif /* _HINIC_PMD_NICCFG_H_ */
diff --git a/drivers/net/hinic/base/hinic_pmd_nicio.c 
b/drivers/net/hinic/base/hinic_pmd_nicio.c
new file mode 100644
index 0000000..7989fc7
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_nicio.c
@@ -0,0 +1,920 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_pmd_dpdev.h"
+#include "../hinic_pmd_rx.h"
+
+#define WQ_PREFETCH_MAX                        6
+#define WQ_PREFETCH_MIN                        1
+#define WQ_PREFETCH_THRESHOLD          256
+
+/* Header prepended to each batch of queue contexts sent over the cmdq:
+ * number of contexts in the batch, context type (SQ/RQ) and the offset
+ * (in 16-byte units) of the first context in the HW context table.
+ */
+struct hinic_qp_ctxt_header {
+       u16     num_queues;
+       u16     queue_type;
+       u32     addr_offset;
+};
+
+/* Hardware SQ context image; filled by hinic_sq_prepare_ctxt() and
+ * byte-swapped to big-endian before being sent to the device.
+ */
+struct hinic_sq_ctxt {
+       u32     ceq_attr;
+
+       u32     ci_owner;
+
+       u32     wq_pfn_hi;
+       u32     wq_pfn_lo;
+
+       u32     pref_cache;
+       u32     pref_owner;
+       u32     pref_wq_pfn_hi_ci;
+       u32     pref_wq_pfn_lo;
+
+       u32     rsvd8;
+       u32     rsvd9;
+
+       u32     wq_block_pfn_hi;
+       u32     wq_block_pfn_lo;
+};
+
+/* Hardware RQ context image; filled by hinic_rq_prepare_ctxt(). */
+struct hinic_rq_ctxt {
+       u32     ceq_attr;
+
+       u32     pi_intr_attr;
+
+       u32     wq_pfn_hi_ci;
+       u32     wq_pfn_lo;
+
+       u32     pref_cache;
+       u32     pref_owner;
+
+       u32     pref_wq_pfn_hi_ci;
+       u32     pref_wq_pfn_lo;
+
+       u32     pi_paddr_hi;
+       u32     pi_paddr_lo;
+
+       u32     wq_block_pfn_hi;
+       u32     wq_block_pfn_lo;
+};
+
+/* Cmdq header plus up to HINIC_Q_CTXT_MAX SQ contexts per command. */
+struct hinic_sq_ctxt_block {
+       struct hinic_qp_ctxt_header     cmdq_hdr;
+       struct hinic_sq_ctxt            sq_ctxt[HINIC_Q_CTXT_MAX];
+};
+
+/* Cmdq header plus up to HINIC_Q_CTXT_MAX RQ contexts per command. */
+struct hinic_rq_ctxt_block {
+       struct hinic_qp_ctxt_header     cmdq_hdr;
+       struct hinic_rq_ctxt            rq_ctxt[HINIC_Q_CTXT_MAX];
+};
+
+/* Command payload for cleaning the per-queue TSO/LRO context space. */
+struct hinic_clean_queue_ctxt {
+       struct hinic_qp_ctxt_header     cmdq_hdr;
+       u32                             ctxt_size;
+};
+
+/* Initialize the software SQ descriptor: attach its work queue, record
+ * the queue id, set the initial owner bit to 1 and store the HW
+ * consumer-index address and doorbell address.
+ */
+static void init_sq(struct hinic_sq *sq, struct hinic_wq *wq, u16 q_id,
+                  volatile void *cons_idx_addr, void __iomem *db_addr)
+{
+       sq->wq = wq;
+       sq->q_id = q_id;
+       sq->owner = 1;
+
+       sq->cons_idx_addr = (volatile u16 *)cons_idx_addr;
+       sq->db_addr = db_addr;
+}
+
+/* Initialize the software RQ descriptor and allocate one DMA page the
+ * hardware writes the producer index into (rq->pi_virt_addr).
+ * rq_msix_idx is currently unused.
+ * Returns 0 on success, -ENOMEM if the PI page cannot be allocated.
+ */
+static int init_rq(struct hinic_rq *rq, void *dev_hdl, struct hinic_wq *wq,
+                  u16 q_id, __rte_unused u16 rq_msix_idx)
+{
+       rq->wq = wq;
+       rq->q_id = q_id;
+
+       rq->pi_virt_addr = (volatile u16 *)dma_zalloc_coherent(dev_hdl,
+                                                              PAGE_SIZE,
+                                                              &rq->pi_dma_addr,
+                                                              GFP_KERNEL);
+       if (!rq->pi_virt_addr) {
+               PMD_DRV_LOG(ERR, "Failed to allocate pi virt addr");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* Release the PI DMA page allocated by init_rq(). */
+static void clean_rq(struct hinic_rq *rq, void *dev_hdl)
+{
+       dma_free_coherent_volatile(dev_hdl, PAGE_SIZE,
+                                  (volatile void *)rq->pi_virt_addr,
+                                  rq->pi_dma_addr);
+}
+
+/* Fill the cmdq header for a batch of queue contexts and convert it to
+ * big-endian in place.  addr_offset is first computed in bytes via the
+ * SQ/RQ context-table layout macros, then converted to 16-byte units
+ * as hardware expects.
+ */
+static void hinic_qp_prepare_cmdq_header(
+                               struct hinic_qp_ctxt_header *qp_ctxt_hdr,
+                               enum hinic_qp_ctxt_type ctxt_type,
+                               u16 num_queues, u16 max_queues, u16 q_id)
+{
+       qp_ctxt_hdr->queue_type = ctxt_type;
+       qp_ctxt_hdr->num_queues = num_queues;
+
+       if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
+               qp_ctxt_hdr->addr_offset =
+                               SQ_CTXT_OFFSET(max_queues, max_queues, q_id);
+       else
+               qp_ctxt_hdr->addr_offset =
+                               RQ_CTXT_OFFSET(max_queues, max_queues, q_id);
+
+       qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);
+
+       hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
+}
+
+/* Build the HW SQ context from the current WQ state: starting PI/CI,
+ * WQ page and block PFNs, and prefetch settings.  CEQ generation is
+ * deliberately disabled for SQs.  The context is converted to
+ * big-endian in place.
+ */
+static void hinic_sq_prepare_ctxt(struct hinic_sq *sq, u16 global_qpn,
+                          struct hinic_sq_ctxt *sq_ctxt)
+{
+       struct hinic_wq *wq = sq->wq;
+       u64 wq_page_addr;
+       u64 wq_page_pfn, wq_block_pfn;
+       u32 wq_page_pfn_hi, wq_page_pfn_lo;
+       u32 wq_block_pfn_hi, wq_block_pfn_lo;
+       u16 pi_start, ci_start;
+
+       ci_start = (u16)(wq->cons_idx);
+       pi_start = (u16)(wq->prod_idx);
+
+       /* read the first page from the HW table */
+       wq_page_addr = wq->queue_buf_paddr;
+
+       wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+       wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+       wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+       wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
+       wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+       wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+       /* must config as ceq disabled */
+       sq_ctxt->ceq_attr = SQ_CTXT_CEQ_ATTR_SET(global_qpn, GLOBAL_SQ_ID) |
+                               SQ_CTXT_CEQ_ATTR_SET(0, ARM) |
+                               SQ_CTXT_CEQ_ATTR_SET(0, CEQ_ID) |
+                               SQ_CTXT_CEQ_ATTR_SET(0, EN);
+
+       /* initial owner bit is 1, matching init_sq() */
+       sq_ctxt->ci_owner = SQ_CTXT_CI_SET(ci_start, IDX) |
+                               SQ_CTXT_CI_SET(1, OWNER);
+
+       sq_ctxt->wq_pfn_hi =
+                       SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+                       SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
+
+       sq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
+
+       sq_ctxt->pref_cache =
+               SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+               SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+               SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+       sq_ctxt->pref_owner = 1;
+
+       sq_ctxt->pref_wq_pfn_hi_ci =
+               SQ_CTXT_PREF_SET(ci_start, CI) |
+               SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI);
+
+       sq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
+
+       sq_ctxt->wq_block_pfn_hi =
+               SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
+
+       sq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
+
+       hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
+}
+
+/* Build the HW RQ context from the current WQ state.  CEQ is enabled
+ * (hardware requirement) but configured so no event is generated; the
+ * PI DMA page address is programmed so hardware can publish its PI.
+ * The context is converted to big-endian in place.
+ */
+static void hinic_rq_prepare_ctxt(struct hinic_rq *rq,
+                       struct hinic_rq_ctxt *rq_ctxt)
+{
+       struct hinic_wq *wq = rq->wq;
+       u64 wq_page_addr;
+       u64 wq_page_pfn, wq_block_pfn;
+       u32 wq_page_pfn_hi, wq_page_pfn_lo;
+       u32 wq_block_pfn_hi, wq_block_pfn_lo;
+       u16 pi_start, ci_start;
+
+       ci_start = (u16)(wq->cons_idx);
+       pi_start = (u16)(wq->prod_idx);
+
+       /* read the first page from the HW table */
+       wq_page_addr = wq->queue_buf_paddr;
+
+       wq_page_pfn = WQ_PAGE_PFN(wq_page_addr);
+       wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
+       wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
+
+       wq_block_pfn = WQ_BLOCK_PFN(wq_page_addr);
+       wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
+       wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
+
+       /* must config as ceq enable but do not generate ceq */
+       rq_ctxt->ceq_attr = RQ_CTXT_CEQ_ATTR_SET(1, EN) |
+                           RQ_CTXT_CEQ_ATTR_SET(1, OWNER);
+
+       /* CEQ arm disabled; PI reported through the DMA page instead */
+       rq_ctxt->pi_intr_attr = RQ_CTXT_PI_SET(pi_start, IDX) |
+                               RQ_CTXT_PI_SET(rq->msix_entry_idx, INTR) |
+                               RQ_CTXT_PI_SET(0, CEQ_ARM);
+
+       rq_ctxt->wq_pfn_hi_ci = RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
+                               RQ_CTXT_WQ_PAGE_SET(ci_start, CI);
+
+       rq_ctxt->wq_pfn_lo = wq_page_pfn_lo;
+
+       rq_ctxt->pref_cache =
+               RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
+               RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
+               RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
+
+       rq_ctxt->pref_owner = 1;
+
+       rq_ctxt->pref_wq_pfn_hi_ci =
+               RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_PFN_HI) |
+               RQ_CTXT_PREF_SET(ci_start, CI);
+
+       rq_ctxt->pref_wq_pfn_lo = wq_page_pfn_lo;
+
+       /* page where HW DMA-writes the RQ producer index (see init_rq) */
+       rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
+       rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
+
+       rq_ctxt->wq_block_pfn_hi =
+               RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, PFN_HI);
+
+       rq_ctxt->wq_block_pfn_lo = wq_block_pfn_lo;
+
+       hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
+}
+
+/* Push all SQ contexts to hardware in batches of HINIC_Q_CTXT_MAX via
+ * a cmdq direct command.  Returns 0 on success, -ENOMEM if the command
+ * buffer cannot be allocated, -EFAULT on a command/microcode failure.
+ */
+static int init_sq_ctxts(struct hinic_nic_io *nic_io)
+{
+       struct hinic_hwdev *hwdev = nic_io->hwdev;
+       struct hinic_sq_ctxt_block *sq_ctxt_block;
+       struct hinic_sq_ctxt *sq_ctxt;
+       struct hinic_cmd_buf *cmd_buf;
+       struct hinic_qp *qp;
+       u64 out_param = EIO;
+       u16 q_id, curr_id, global_qpn, max_ctxts, i;
+       int err = 0;
+
+       cmd_buf = hinic_alloc_cmd_buf(hwdev);
+       if (!cmd_buf) {
+               PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
+               return -ENOMEM;
+       }
+
+       q_id = 0;
+       /* sq and rq number may not equal */
+       while (q_id < nic_io->num_sqs) {
+               sq_ctxt_block = (struct hinic_sq_ctxt_block *)cmd_buf->buf;
+               sq_ctxt = sq_ctxt_block->sq_ctxt;
+
+               max_ctxts = (nic_io->num_sqs - q_id) > HINIC_Q_CTXT_MAX ?
+                               HINIC_Q_CTXT_MAX : (nic_io->num_sqs - q_id);
+
+               hinic_qp_prepare_cmdq_header(&sq_ctxt_block->cmdq_hdr,
+                                            HINIC_QP_CTXT_TYPE_SQ, max_ctxts,
+                                            nic_io->max_qps, q_id);
+
+               for (i = 0; i < max_ctxts; i++) {
+                       curr_id = q_id + i;
+                       qp = &nic_io->qps[curr_id];
+                       global_qpn = nic_io->global_qpn + curr_id;
+
+                       hinic_sq_prepare_ctxt(&qp->sq, global_qpn, &sq_ctxt[i]);
+               }
+
+               cmd_buf->size = SQ_CTXT_SIZE(max_ctxts);
+
+               err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+                                            HINIC_MOD_L2NIC,
+                                            HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
+                                            cmd_buf, &out_param, 0);
+               if ((err) || out_param != 0) {
+                       /* NOTE(review): %lx vs u64 is only correct where
+                        * unsigned long is 64-bit - consider PRIx64
+                        */
+                       PMD_DRV_LOG(ERR, "Failed to set SQ ctxts, err:%d, out_param:0x%lx",
+                               err, out_param);
+                       err = -EFAULT;
+                       break;
+               }
+
+               q_id += max_ctxts;
+       }
+
+       hinic_free_cmd_buf(hwdev, cmd_buf);
+
+       return err;
+}
+
+/* Push all RQ contexts to hardware in batches of HINIC_Q_CTXT_MAX via
+ * a cmdq direct command.  Returns 0 on success, -ENOMEM if the command
+ * buffer cannot be allocated, -EFAULT on a command/microcode failure.
+ */
+static int init_rq_ctxts(struct hinic_nic_io *nic_io)
+{
+       struct hinic_hwdev *hwdev = nic_io->hwdev;
+       struct hinic_rq_ctxt_block *rq_ctxt_block;
+       struct hinic_rq_ctxt *rq_ctxt;
+       struct hinic_cmd_buf *cmd_buf;
+       struct hinic_qp *qp;
+       u64 out_param = 0;
+       u16 q_id, curr_id, max_ctxts, i;
+       int err = 0;
+
+       cmd_buf = hinic_alloc_cmd_buf(hwdev);
+       if (!cmd_buf) {
+               PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
+               return -ENOMEM;
+       }
+
+       q_id = 0;
+       /* sq and rq number may not equal */
+       while (q_id < nic_io->num_rqs) {
+               rq_ctxt_block = (struct hinic_rq_ctxt_block *)cmd_buf->buf;
+               rq_ctxt = rq_ctxt_block->rq_ctxt;
+
+               max_ctxts = (nic_io->num_rqs - q_id) > HINIC_Q_CTXT_MAX ?
+                               HINIC_Q_CTXT_MAX : (nic_io->num_rqs - q_id);
+
+               hinic_qp_prepare_cmdq_header(&rq_ctxt_block->cmdq_hdr,
+                                            HINIC_QP_CTXT_TYPE_RQ, max_ctxts,
+                                            nic_io->max_qps, q_id);
+
+               for (i = 0; i < max_ctxts; i++) {
+                       curr_id = q_id + i;
+                       qp = &nic_io->qps[curr_id];
+
+                       hinic_rq_prepare_ctxt(&qp->rq, &rq_ctxt[i]);
+               }
+
+               cmd_buf->size = RQ_CTXT_SIZE(max_ctxts);
+
+               err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+                                            HINIC_MOD_L2NIC,
+                                            HINIC_UCODE_CMD_MDY_QUEUE_CONTEXT,
+                                            cmd_buf, &out_param, 0);
+
+               if ((err) || out_param != 0) {
+                       PMD_DRV_LOG(ERR, "Failed to set RQ ctxts");
+                       err = -EFAULT;
+                       break;
+               }
+
+               q_id += max_ctxts;
+       }
+
+       hinic_free_cmd_buf(hwdev, cmd_buf);
+
+       return err;
+}
+
+/* Program both SQ and RQ contexts.  Note: the logical OR collapses the
+ * callee errnos, so the caller only sees zero/non-zero.
+ */
+static int init_qp_ctxts(struct hinic_nic_io *nic_io)
+{
+       return (init_sq_ctxts(nic_io) || init_rq_ctxts(nic_io));
+}
+
+/* Ask the microcode to clean (zero) the TSO/LRO offload context space
+ * for all queues of the given type.  Returns 0 on success, -ENOMEM or
+ * -EFAULT on failure.
+ */
+static int clean_queue_offload_ctxt(struct hinic_nic_io *nic_io,
+                                   enum hinic_qp_ctxt_type ctxt_type)
+{
+       struct hinic_hwdev *hwdev = nic_io->hwdev;
+       struct hinic_clean_queue_ctxt *ctxt_block;
+       struct hinic_cmd_buf *cmd_buf;
+       u64 out_param = 0;
+       int err;
+
+       cmd_buf = hinic_alloc_cmd_buf(hwdev);
+       if (!cmd_buf) {
+               PMD_DRV_LOG(ERR, "Failed to allocate cmd buf");
+               return -ENOMEM;
+       }
+
+       ctxt_block = (struct hinic_clean_queue_ctxt *)cmd_buf->buf;
+       ctxt_block->cmdq_hdr.num_queues = nic_io->max_qps;
+       ctxt_block->cmdq_hdr.queue_type = ctxt_type;
+       ctxt_block->cmdq_hdr.addr_offset = 0;
+
+       /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
+       ctxt_block->ctxt_size = 0x3;
+
+       hinic_cpu_to_be32(ctxt_block, sizeof(*ctxt_block));
+
+       cmd_buf->size = sizeof(*ctxt_block);
+
+       err = hinic_cmdq_direct_resp(hwdev, HINIC_ACK_TYPE_CMDQ,
+                                    HINIC_MOD_L2NIC,
+                                    HINIC_UCODE_CMD_CLEAN_QUEUE_CONTEXT,
+                                    cmd_buf, &out_param, 0);
+
+       if ((err) || (out_param)) {
+               PMD_DRV_LOG(ERR, "Failed to clean queue offload ctxts");
+               err = -EFAULT;
+       }
+
+       hinic_free_cmd_buf(hwdev, cmd_buf);
+
+       return err;
+}
+
+/* Clean the TSO/LRO context space for both SQ and RQ sides; non-zero
+ * if either step fails.
+ */
+static int clean_qp_offload_ctxt(struct hinic_nic_io *nic_io)
+{
+       /* clean LRO/TSO context space */
+       return (clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_SQ) ||
+               clean_queue_offload_ctxt(nic_io, HINIC_QP_CTXT_TYPE_RQ));
+}
+
+/* Cache the function-wide RX buffer size in nic_io->rq_buf_size as the
+ * minimum buf_len across all configured rxqs (0 if none configured).
+ */
+static void hinic_get_func_rx_buf_size(hinic_nic_dev *nic_dev)
+{
+       struct hinic_rxq *rxq;
+       u16 q_id;
+       u16 buf_size = 0;
+
+       for (q_id = 0; q_id < nic_dev->num_rq; q_id++) {
+               rxq = nic_dev->rxqs[q_id];
+
+               if (rxq == NULL)
+                       continue;
+
+               /* seed with the first queue, then keep the minimum */
+               if (q_id == 0)
+                       buf_size = rxq->buf_len;
+
+               buf_size = buf_size > rxq->buf_len ? rxq->buf_len : buf_size;
+       }
+
+       nic_dev->nic_io->rq_buf_size = buf_size;
+}
+
+/* init qps ctxt and set sq ci attr and arm all sq and set vat page_size.
+ * Full datapath bring-up sequence: VAT page size, SQ/RQ contexts,
+ * offload-context clean, RX buffer size / root context, and per-SQ CI
+ * tables.  Returns 0 on success; on CI-table failure the root context
+ * is cleaned before returning the error.
+ */
+int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       struct hinic_sq_attr sq_attr;
+       u16 q_id;
+       int err, rx_buf_sz;
+
+       /* set vat page size to max queue depth page_size */
+       err = hinic_set_pagesize(hwdev, HINIC_PAGE_SIZE_DPDK);
+       if (err != HINIC_OK) {
+               PMD_DRV_LOG(ERR, "Set vat page size: %d failed, rc: %d",
+                       HINIC_PAGE_SIZE_DPDK, err);
+               return err;
+       }
+
+       err = init_qp_ctxts(nic_io);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Init QP ctxts failed, rc: %d", err);
+               return err;
+       }
+
+       /* clean LRO/TSO context space */
+       err = clean_qp_offload_ctxt(nic_io);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Clean qp offload ctxts failed, rc: %d",
+                       err);
+               return err;
+       }
+
+       /* get func rx buf size */
+       hinic_get_func_rx_buf_size((hinic_nic_dev *)(hwdev->dev_hdl));
+       rx_buf_sz = nic_io->rq_buf_size;
+
+       /* update rx buf size to function table */
+       err = hinic_set_rx_vhd_mode(hwdev, 0, rx_buf_sz);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Set rx vhd mode failed, rc: %d",
+                       err);
+               return err;
+       }
+
+       err = hinic_set_root_ctxt(hwdev, nic_io->rq_depth,
+                                 nic_io->sq_depth, rx_buf_sz);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Set root context failed, rc: %d",
+                       err);
+               return err;
+       }
+
+       for (q_id = 0; q_id < nic_io->num_sqs; q_id++) {
+               sq_attr.ci_dma_base =
+                       HINIC_CI_PADDR(nic_io->ci_dma_base, q_id) >> 2;
+               /* NOTE(review): earlier comment claimed a CI update
+                * threshold of 8 but pending_limit is 1 - confirm which
+                * value is intended
+                */
+               sq_attr.pending_limit = 1;
+               sq_attr.coalescing_time = 1;
+               sq_attr.intr_en = 0;
+               sq_attr.l2nic_sqn = q_id;
+               sq_attr.dma_attr_off = 0;
+               err = hinic_set_ci_table(hwdev, q_id, &sq_attr);
+               if (err) {
+                       PMD_DRV_LOG(ERR, "Set ci table failed, rc: %d",
+                               err);
+                       goto set_cons_idx_table_err;
+               }
+       }
+
+       return 0;
+
+set_cons_idx_table_err:
+       (void)hinic_clean_root_ctxt(hwdev);
+       return err;
+}
+
+/* Tear down the root context; failure is only logged (void return). */
+void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev)
+{
+       int err;
+
+       err = hinic_clean_root_ctxt(hwdev);
+       if (err)
+               PMD_DRV_LOG(ERR, "Failed to clean root ctxt");
+}
+
+/* One-time NIC init: query the function's base global qpn, initialize
+ * the HW function table (VFs get 1.5K RX buffers, PFs 16K) and select
+ * the DPDK fast-recycle mode.  Returns 0, or the errno of the failing
+ * step (no unwinding is needed - earlier steps allocate nothing).
+ */
+static int hinic_init_nic_hwdev(struct hinic_hwdev *hwdev)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       u16 global_qpn, rx_buf_sz;
+       int err;
+
+       err = hinic_get_base_qpn(hwdev, &global_qpn);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to get base qpn");
+               goto err_init_nic_hwdev;
+       }
+
+       nic_io->global_qpn = global_qpn;
+       rx_buf_sz = HINIC_IS_VF(hwdev) ? RX_BUF_LEN_1_5K : RX_BUF_LEN_16K;
+       err = hinic_init_function_table(hwdev, rx_buf_sz);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to init function table");
+               goto err_init_nic_hwdev;
+       }
+
+       err = hinic_set_fast_recycle_mode(hwdev, RECYCLE_MODE_DPDK);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to set fast recycle mode");
+               goto err_init_nic_hwdev;
+       }
+
+       return 0;
+
+err_init_nic_hwdev:
+       return err;
+}
+
+/* Detach nic_io from the hwdev; the memory itself is freed elsewhere
+ * (see hinic_deinit_nicio()).
+ */
+static void hinic_free_nic_hwdev(struct hinic_hwdev *hwdev)
+{
+       hwdev->nic_io = NULL;
+}
+
+/* Flush this function's RX/TX resources in hardware (thin wrapper). */
+int hinic_rx_tx_flush(struct hinic_hwdev *hwdev)
+{
+       return hinic_func_rx_tx_flush(hwdev);
+}
+
+/* Free WQEBBs in the SQ; one slot is kept back so PI never catches CI. */
+int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       struct hinic_wq *wq = &nic_io->sq_wq[q_id];
+
+       return (wq->delta) - 1;
+}
+
+/* Free WQEBBs in the RQ (same one-slot reservation as the SQ). */
+int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       struct hinic_wq *wq = &nic_io->rq_wq[q_id];
+
+       return (wq->delta) - 1;
+}
+
+/* Current SQ software consumer index, wrapped to the ring size. */
+u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       struct hinic_wq *wq = &nic_io->sq_wq[q_id];
+
+       return (wq->cons_idx) & wq->mask;
+}
+
+/* Undo an SQ WQE reservation: give WQEBBs back and restore the owner
+ * bit captured before the reservation.
+ */
+void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,
+                        int num_wqebbs, u16 owner)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+       if (owner != sq->owner)
+               sq->owner = owner;
+
+       sq->wq->delta += num_wqebbs;
+       sq->wq->prod_idx -= num_wqebbs;
+}
+
+/* Advance the SQ consumer index after wqebb_cnt WQEBBs complete. */
+void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev,
+                             u16 q_id, int wqebb_cnt)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       struct hinic_sq *sq = &nic_io->qps[q_id].sq;
+
+       hinic_put_wqe(sq->wq, wqebb_cnt);
+}
+
+/* Reserve one RQ WQE; returns its address, producer index via *pi. */
+void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+       return hinic_get_wqe(rq->wq, 1, pi);
+}
+
+/* Undo an RQ WQE reservation. */
+void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+       rq->wq->delta += num_wqebbs;
+       rq->wq->prod_idx -= num_wqebbs;
+}
+
+/* Current RQ software consumer index, wrapped to the ring size. */
+u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       struct hinic_wq *wq = &nic_io->rq_wq[q_id];
+
+       return (wq->cons_idx) & wq->mask;
+}
+
+/* Advance the RQ consumer index after wqe_cnt WQEs are consumed. */
+void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt)
+{
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+       struct hinic_rq *rq = &nic_io->qps[q_id].rq;
+
+       hinic_put_wqe(rq->wq, wqe_cnt);
+}
+
+/* Allocate the RQ work queue for queue q_id and its PI DMA page.
+ * Returns HINIC_OK or a negative errno; on init failure the work queue
+ * is freed before returning.
+ */
+int hinic_create_rq(hinic_nic_dev *nic_dev, u16 q_id, u16 rq_depth)
+{
+       int err;
+       struct hinic_nic_io *nic_io;
+       struct hinic_qp *qp;
+       struct hinic_rq *rq;
+       struct hinic_hwdev *hwdev;
+
+       hwdev = nic_dev->hwdev;
+       nic_io = hwdev->nic_io;
+       qp = &nic_io->qps[q_id];
+       rq = &qp->rq;
+
+       /* in case of hardware still generate interrupt, do not use msix 0 */
+       rq->msix_entry_idx = 1;
+
+       rq->rq_depth = rq_depth;
+       nic_io->rq_depth = rq_depth;
+
+       err = hinic_wq_allocate(hwdev->dev_hdl, &nic_io->rq_wq[q_id],
+                               HINIC_RQ_WQEBB_SHIFT, nic_io->rq_depth);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to allocate WQ for RQ");
+               goto rq_alloc_err;
+       }
+
+       err = init_rq(rq, hwdev->dev_hdl, &nic_io->rq_wq[q_id],
+                     q_id, 0);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to init RQ");
+               goto rq_init_err;
+       }
+
+       return HINIC_OK;
+
+rq_init_err:
+       hinic_wq_free(hwdev->dev_hdl, &nic_io->rq_wq[q_id]);
+
+rq_alloc_err:
+       return err;
+}
+
+/* Free the RQ's PI page and work queue; no-op if never created. */
+void hinic_destroy_rq(hinic_nic_dev *nic_dev, u16 q_id)
+{
+       struct hinic_nic_io *nic_io;
+       struct hinic_qp *qp;
+       struct hinic_hwdev *hwdev;
+
+       hwdev = nic_dev->hwdev;
+       nic_io = hwdev->nic_io;
+       qp = &nic_io->qps[q_id];
+
+       if (qp->rq.wq == NULL)
+               return;
+
+       clean_rq(&qp->rq, nic_io->hwdev->dev_hdl);
+       hinic_wq_free(nic_io->hwdev->dev_hdl, qp->rq.wq);
+       qp->rq.wq = NULL;
+}
+
+/* Allocate the SQ work queue and a doorbell address for queue q_id,
+ * clear the queue's HW consumer-index slot and initialize the SQ
+ * descriptor.  Returns HINIC_OK or a negative errno; on doorbell
+ * failure the work queue is freed before returning.
+ */
+int hinic_create_sq(hinic_nic_dev *nic_dev, u16 q_id, u16 sq_depth)
+{
+       int err;
+       struct hinic_nic_io *nic_io;
+       struct hinic_qp *qp;
+       struct hinic_sq *sq;
+       void __iomem *db_addr;
+       struct hinic_hwdev *hwdev;
+       volatile u32 *ci_addr;
+
+       hwdev = nic_dev->hwdev;
+       nic_io = hwdev->nic_io;
+       qp = &nic_io->qps[q_id];
+       sq = &qp->sq;
+
+       sq->sq_depth = sq_depth;
+       nic_io->sq_depth = sq_depth;
+
+       /* alloc wq */
+       err = hinic_wq_allocate(nic_io->hwdev->dev_hdl, &nic_io->sq_wq[q_id],
+                               HINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to allocate WQ for SQ");
+               return err;
+       }
+
+       /* alloc sq doorbell space */
+       err = hinic_alloc_db_addr(nic_io->hwdev, &db_addr, NULL);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to init db addr");
+               goto alloc_db_err;
+       }
+
+       /* clear hardware ci */
+       ci_addr = (volatile u32 *)HINIC_CI_VADDR(nic_io->ci_vaddr_base, q_id);
+       *ci_addr = 0;
+
+       /* init sq qheader */
+       init_sq(sq, &nic_io->sq_wq[q_id], q_id,
+             (volatile void *)ci_addr, db_addr);
+
+       return HINIC_OK;
+
+alloc_db_err:
+       hinic_wq_free(nic_io->hwdev->dev_hdl, &nic_io->sq_wq[q_id]);
+
+       return err;
+}
+
+/* Release the SQ's doorbell address and work queue; no-op if never
+ * created.
+ */
+void hinic_destroy_sq(hinic_nic_dev *nic_dev, u16 q_id)
+{
+       struct hinic_nic_io *nic_io;
+       struct hinic_qp *qp;
+       struct hinic_hwdev *hwdev;
+
+       hwdev = nic_dev->hwdev;
+       nic_io = hwdev->nic_io;
+       qp = &nic_io->qps[q_id];
+
+       if (qp->sq.wq == NULL)
+               return;
+
+       hinic_free_db_addr(nic_io->hwdev, qp->sq.db_addr, NULL);
+       hinic_wq_free(nic_io->hwdev->dev_hdl, qp->sq.wq);
+       qp->sq.wq = NULL;
+}
+
+/* Allocate the per-function queue resources: qp array, the CI table
+ * DMA area and the SQ/RQ work-queue arrays.  max_qps must be a power
+ * of two.  Returns HINIC_OK, or a negative errno after unwinding any
+ * partial allocations.
+ */
+static int hinic_alloc_nicio(hinic_nic_dev *nic_dev)
+{
+       int err;
+       u16 max_qps, num_qp;
+       struct hinic_nic_io *nic_io;
+       struct hinic_hwdev *hwdev = nic_dev->hwdev;
+
+       if (!hwdev) {
+               PMD_DRV_LOG(ERR, "hwdev is NULL");
+               return -EFAULT;
+       }
+
+       nic_io = hwdev->nic_io;
+
+       max_qps = hinic_func_max_qnum(hwdev);
+       /* power-of-two check: the ring masks rely on it */
+       if ((max_qps & (max_qps - 1))) {
+               PMD_DRV_LOG(ERR, "wrong number of max_qps: %d",
+                       max_qps);
+               return -EINVAL;
+       }
+
+       nic_io->max_qps = max_qps;
+       nic_io->num_qps = max_qps;
+       num_qp = max_qps;
+
+       nic_io->qps = kzalloc_aligned(num_qp * sizeof(*nic_io->qps),
+                                     GFP_KERNEL);
+       if (!nic_io->qps) {
+               PMD_DRV_LOG(ERR, "Failed to allocate qps");
+               err = -ENOMEM;
+               goto alloc_qps_err;
+       }
+
+       nic_io->ci_vaddr_base = dma_zalloc_coherent(hwdev->dev_hdl,
+                                                   CI_TABLE_SIZE(num_qp,
+                                                   PAGE_SIZE),
+                                                   &nic_io->ci_dma_base,
+                                                   GFP_KERNEL);
+       if (!nic_io->ci_vaddr_base) {
+               PMD_DRV_LOG(ERR, "Failed to allocate ci area");
+               err = -ENOMEM;
+               goto ci_base_err;
+       }
+
+       nic_io->sq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->sq_wq),
+                                       GFP_KERNEL);
+       if (!nic_io->sq_wq) {
+               PMD_DRV_LOG(ERR, "Failed to allocate sq wq array");
+               err = -ENOMEM;
+               goto sq_wq_err;
+       }
+
+       nic_io->rq_wq = kzalloc_aligned(num_qp * sizeof(*nic_io->rq_wq),
+                                       GFP_KERNEL);
+       if (!nic_io->rq_wq) {
+               PMD_DRV_LOG(ERR, "Failed to allocate rq wq array");
+               err = -ENOMEM;
+               goto rq_wq_err;
+       }
+
+       return HINIC_OK;
+
+rq_wq_err:
+       kfree(nic_io->sq_wq);
+
+sq_wq_err:
+       dma_free_coherent(hwdev->dev_hdl, CI_TABLE_SIZE(num_qp, PAGE_SIZE),
+                         nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+ci_base_err:
+       kfree(nic_io->qps);
+
+alloc_qps_err:
+       return err;
+}
+
+/* Free everything allocated by hinic_alloc_nicio(), reverse order. */
+static void hinic_free_nicio(hinic_nic_dev *nic_dev)
+{
+       struct hinic_hwdev *hwdev = nic_dev->hwdev;
+       struct hinic_nic_io *nic_io = hwdev->nic_io;
+
+       /* nic_io->rq_wq */
+       kfree(nic_io->rq_wq);
+
+       /* nic_io->sq_wq */
+       kfree(nic_io->sq_wq);
+
+       /* nic_io->ci_vaddr_base */
+       dma_free_coherent(hwdev->dev_hdl,
+                         CI_TABLE_SIZE(nic_io->max_qps, PAGE_SIZE),
+                         nic_io->ci_vaddr_base, nic_io->ci_dma_base);
+
+       /* nic_io->qps */
+       kfree(nic_io->qps);
+}
+
+/* alloc nic hwdev and init function table.
+ * Allocate the nic_io structure, wire it to the hwdev, allocate the
+ * root working-queue set and run one-time NIC hardware init.
+ * Returns 0, or a negative errno after undoing earlier steps.
+ */
+int hinic_init_nicio(hinic_nic_dev *nic_dev)
+{
+       int rc;
+
+       nic_dev->nic_io =
+               (struct hinic_nic_io *)rte_zmalloc("hinic_nicio",
+                                                  sizeof(*nic_dev->nic_io),
+                                                  RTE_CACHE_LINE_SIZE);
+       if (!nic_dev->nic_io) {
+               PMD_DRV_LOG(ERR, "Allocate nic_io failed, dev_name: %s",
+                           nic_dev->proc_dev_name);
+               return -ENOMEM;
+       }
+       nic_dev->nic_io->hwdev = nic_dev->hwdev;
+       nic_dev->hwdev->nic_io = nic_dev->nic_io;
+
+       /* alloc root working queue set */
+       rc = hinic_alloc_nicio(nic_dev);
+       if (rc) {
+               PMD_DRV_LOG(ERR, "Allocate nic_io failed, dev_name: %s",
+                           nic_dev->proc_dev_name);
+               goto allc_nicio_fail;
+       }
+
+       rc = hinic_init_nic_hwdev(nic_dev->nic_io->hwdev);
+       if (rc) {
+               PMD_DRV_LOG(ERR, "Initialize hwdev failed, dev_name: %s",
+                           nic_dev->proc_dev_name);
+               goto init_nic_hwdev_fail;
+       }
+
+       return 0;
+
+init_nic_hwdev_fail:
+       hinic_free_nicio(nic_dev);
+
+allc_nicio_fail:
+       rte_free(nic_dev->nic_io);
+       return rc;
+}
+
+/* Reverse of hinic_init_nicio(): free queue resources, detach nic_io
+ * from the hwdev, then release the nic_io structure itself.
+ */
+void hinic_deinit_nicio(hinic_nic_dev *nic_dev)
+{
+       hinic_free_nicio(nic_dev);
+
+       hinic_free_nic_hwdev(nic_dev->nic_io->hwdev);
+
+       rte_free(nic_dev->nic_io);
+       nic_dev->nic_io = NULL;
+}
diff --git a/drivers/net/hinic/base/hinic_pmd_nicio.h b/drivers/net/hinic/base/hinic_pmd_nicio.h
new file mode 100644
index 0000000..ae9c008
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_nicio.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_NICIO_H_
+#define _HINIC_PMD_NICIO_H_
+
+/* RX buffer sizes (bytes) programmed into the function table. */
+#define RX_BUF_LEN_16K 16384
+#define RX_BUF_LEN_4K  4096
+#define RX_BUF_LEN_1_5K        1536
+
+/* Mask an SQ control field value and shift it into its ctrl position. */
+#define SQ_CTRL_SET(val, member)       (((val) & SQ_CTRL_##member##_MASK) \
+                                       << SQ_CTRL_##member##_SHIFT)
+
+/* SQ doorbell record written to the queue's doorbell address. */
+struct hinic_sq_db {
+       u32     db_info;
+};
+
+/* Scatter-gather element: 64-bit DMA address split hi/lo, plus length. */
+struct hinic_sge {
+       u32             hi_addr;
+       u32             lo_addr;
+       u32             len;
+};
+
+/* Callbacks the upper layer registers for TX ack and link changes. */
+struct hinic_event {
+       void (*tx_ack)(void *handle, u16 q_id);
+       /* status: 0 - link down; 1 - link up */
+       void (*link_change)(void *handle, int status);
+};
+
+/* init qps ctxt and set sq ci attr and arm all sq */
+int hinic_init_qp_ctxts(struct hinic_hwdev *hwdev);
+void hinic_free_qp_ctxts(struct hinic_hwdev *hwdev);
+int hinic_rx_tx_flush(struct hinic_hwdev *hwdev);
+
+int hinic_get_sq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id);
+u16 hinic_get_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id);
+void hinic_update_sq_local_ci(struct hinic_hwdev *hwdev, u16 q_id,
+                             int wqebb_cnt);
+void hinic_return_sq_wqe(struct hinic_hwdev *hwdev, u16 q_id,
+                        int num_wqebbs, u16 owner);
+
+int hinic_get_rq_free_wqebbs(struct hinic_hwdev *hwdev, u16 q_id);
+void *hinic_get_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, u16 *pi);
+void hinic_return_rq_wqe(struct hinic_hwdev *hwdev, u16 q_id, int num_wqebbs);
+u16 hinic_get_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id);
+void hinic_update_rq_local_ci(struct hinic_hwdev *hwdev, u16 q_id, int wqe_cnt);
+
+void hinic_cpu_to_be32(void *data, int len);
+void hinic_be32_to_cpu(void *data, int len);
+void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, u32 len);
+
+#endif /* _HINIC_PMD_NICIO_H_ */
diff --git a/drivers/net/hinic/base/hinic_pmd_qp.c b/drivers/net/hinic/base/hinic_pmd_qp.c
new file mode 100644
index 0000000..ac1b9f2
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_qp.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_pmd_dpdev.h"
+
+/* Fill a receive WQE: the control section lengths (in 8-byte units),
+ * the CQE section SGE pointing at the per-packet CQE DMA area, and the
+ * packet buffer address split into high/low 32 bits.  pi is unused.
+ */
+void hinic_prepare_rq_wqe(void *wqe, __rte_unused u16 pi, dma_addr_t buf_addr,
+                         dma_addr_t cqe_dma)
+{
+       struct hinic_rq_wqe *rq_wqe = (struct hinic_rq_wqe *)wqe;
+       struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
+       struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
+       struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
+       u32 rq_ceq_len = sizeof(struct hinic_rq_cqe);
+
+       ctrl->ctrl_fmt =
+               RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)),  LEN) |
+               RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), COMPLETE_LEN) |
+               RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), BUFDESC_SECT_LEN) |
+               RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
+
+       hinic_set_sge(&cqe_sect->sge, cqe_dma, rq_ceq_len);
+
+       buf_desc->addr_high = upper_32_bits(buf_addr);
+       buf_desc->addr_low = lower_32_bits(buf_addr);
+}
diff --git a/drivers/net/hinic/base/hinic_pmd_qp.h b/drivers/net/hinic/base/hinic_pmd_qp.h
new file mode 100644
index 0000000..a63ae04
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_qp.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_QP_H_
+#define _HINIC_PMD_QP_H_
+
+#define HINIC_MAX_QUEUE_DEPTH          4096
+#define HINIC_MIN_QUEUE_DEPTH          128
+#define HINIC_TXD_ALIGN                 1
+#define HINIC_RXD_ALIGN                 1
+
+/* Send-queue WQE control section: format word plus queue info word. */
+struct hinic_sq_ctrl {
+       u32     ctrl_fmt;
+       u32     queue_info;
+};
+
+/* Send-queue WQE task section: per-packet offload metadata words. */
+struct hinic_sq_task {
+       u32             pkt_info0;
+       u32             pkt_info1;
+       u32             pkt_info2;
+       u32             ufo_v6_identify;
+       u32             pkt_info4;
+       u32             rsvd5;
+};
+
+/* One SQ buffer descriptor: an SGE plus reserved padding. */
+struct hinic_sq_bufdesc {
+       struct hinic_sge sge;
+       u32     rsvd;
+};
+
+/* Complete send-queue WQE layout as consumed by hardware. */
+struct hinic_sq_wqe {
+       /* sq wqe control section */
+       struct hinic_sq_ctrl            ctrl;
+
+       /* sq task control section */
+       struct hinic_sq_task            task;
+
+       /* sq sge section start address, 1~127 sges */
+       /* NOTE(review): [0] is a GNU zero-length array; the C99 flexible
+        * array member "buf_descs[]" is the portable spelling — confirm
+        * against the project's compiler baseline before changing.
+        */
+       struct hinic_sq_bufdesc     buf_descs[0];
+};
+
+/* Receive-queue WQE control section: single format word. */
+struct hinic_rq_ctrl {
+       u32     ctrl_fmt;
+};
+
+/* Receive completion entry written back by hardware for one packet. */
+struct hinic_rq_cqe {
+       u32 status;
+       u32 vlan_len;
+       u32 offload_type;
+       u32 rss_hash;
+
+       u32 rsvd[4];
+};
+
+/* CQE section of an RQ WQE: SGE pointing at the CQE's DMA location. */
+struct hinic_rq_cqe_sect {
+       struct hinic_sge        sge;
+       u32                     rsvd;
+};
+
+/* Split 64-bit DMA address of the receive buffer. */
+struct hinic_rq_bufdesc {
+       u32     addr_high;
+       u32     addr_low;
+};
+
+/* Complete receive-queue WQE layout; see hinic_prepare_rq_wqe(). */
+struct hinic_rq_wqe {
+       struct hinic_rq_ctrl            ctrl;
+       u32                             rsvd;
+       struct hinic_rq_cqe_sect        cqe_sect;
+       struct hinic_rq_bufdesc         buf_desc;
+};
+
+void hinic_prepare_rq_wqe(void *wqe, u16 pi, dma_addr_t buf_addr,
+                         dma_addr_t cqe_dma);
+
+#endif /* _HINIC_PMD_QP_H_ */
diff --git a/drivers/net/hinic/base/hinic_pmd_wq.c 
b/drivers/net/hinic/base/hinic_pmd_wq.c
new file mode 100644
index 0000000..05813bf
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_wq.c
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include "hinic_pmd_dpdev.h"
+
+/* Release the DMA-coherent queue buffer of @wq and clear the stored
+ * virtual/physical addresses so stale pointers are not reused.
+ */
+static void free_wq_pages(void *handle, struct hinic_wq *wq)
+{
+       dma_free_coherent(handle, wq->wq_buf_size, (void *)wq->queue_buf_vaddr,
+                       (dma_addr_t)wq->queue_buf_paddr);
+
+       wq->queue_buf_paddr = 0;
+       wq->queue_buf_vaddr = 0;
+}
+
+/* Allocate the DMA-coherent buffer backing @wq (wq->wq_buf_size bytes).
+ * The buffer must be 256 KB aligned for the hardware; the aligned
+ * allocator is expected to guarantee this, and the explicit check below
+ * frees the buffer and fails rather than hand a misaligned page to HW.
+ * Returns 0 on success or -ENOMEM on allocation/alignment failure.
+ */
+static int alloc_wq_pages(void *dev_hdl, struct hinic_wq *wq)
+{
+       dma_addr_t dma_addr = 0;
+
+       wq->queue_buf_vaddr = (u64)(u64 *)
+               dma_zalloc_coherent_aligned256k(dev_hdl, wq->wq_buf_size,
+                                               &dma_addr, GFP_KERNEL);
+       if (!wq->queue_buf_vaddr) {
+               PMD_DRV_LOG(ERR, "Failed to allocate wq page");
+               return -ENOMEM;
+       }
+
+       if (!ADDR_256K_ALIGNED(dma_addr)) {
+               PMD_DRV_LOG(ERR, "Wqe pages is not 256k aligned!");
+               dma_free_coherent(dev_hdl, wq->wq_buf_size,
+                                 (void *)wq->queue_buf_vaddr,
+                                 dma_addr);
+               return -ENOMEM;
+       }
+
+       wq->queue_buf_paddr = dma_addr;
+
+       return 0;
+}
+
+/**
+ * hinic_wq_allocate - initialize a work queue and allocate its buffer.
+ * @dev_hdl:     device handle passed through to the DMA allocator
+ * @wq:          work queue to initialize
+ * @wqebb_shift: log2 of the WQE basic-block size in bytes
+ * @q_depth:     number of WQEBBs; must be a power of two
+ *
+ * Computes the buffer size (q_depth << wqebb_shift), rejects depths
+ * whose buffer would exceed one hardware page, allocates the DMA
+ * buffer, and resets the producer/consumer indexes (delta == q_depth
+ * means the queue is empty).
+ * Returns 0 on success, -EINVAL on bad depth, -ENOMEM on allocation
+ * failure.
+ */
+int hinic_wq_allocate(void *dev_hdl, struct hinic_wq *wq,
+                     u32 wqebb_shift, u16 q_depth)
+{
+       int err;
+
+       /* Power-of-two depth is required so MASKED_WQE_IDX() can use
+        * a bitmask instead of a modulo.
+        */
+       if (q_depth & (q_depth - 1)) {
+               PMD_DRV_LOG(ERR, "WQ q_depth isn't power of 2");
+               return -EINVAL;
+       }
+
+       wq->wqebb_size = 1 << wqebb_shift;
+       wq->wqebb_shift = wqebb_shift;
+       wq->wq_buf_size = ((u32)q_depth) << wqebb_shift;
+       wq->q_depth = q_depth;
+
+       if (wq->wq_buf_size > (PAGE_SIZE << HINIC_PAGE_SIZE_DPDK)) {
+               PMD_DRV_LOG(ERR, "Invalid q_depth %u which one page_size can 
not hold",
+                       q_depth);
+               return -EINVAL;
+       }
+
+       err = alloc_wq_pages(dev_hdl, wq);
+       if (err) {
+               PMD_DRV_LOG(ERR, "Failed to allocate wq pages");
+               return err;
+       }
+
+       wq->cons_idx = 0;
+       wq->prod_idx = 0;
+       wq->delta = q_depth;
+       wq->mask = q_depth - 1;
+
+       return 0;
+}
+
+/* Free the buffer allocated by hinic_wq_allocate(). */
+void hinic_wq_free(void *dev_hdl, struct hinic_wq *wq)
+{
+       free_wq_pages(dev_hdl, wq);
+}
+
+/* Return @num_wqebbs consumed WQEBBs to the queue: advance the consumer
+ * index and grow delta (the count of free WQEBBs).
+ */
+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs)
+{
+       wq->cons_idx += num_wqebbs;
+       wq->delta += num_wqebbs;
+}
+
+/* Peek at the WQE at the current consumer position without consuming it.
+ * Returns NULL when fewer than @num_wqebbs entries are outstanding
+ * (delta + num_wqebbs would exceed q_depth, i.e. not enough produced).
+ * On success *@cons_idx receives the masked consumer index and the
+ * WQE's virtual address is returned.
+ */
+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx)
+{
+       u16 curr_cons_idx;
+
+       if ((wq->delta + num_wqebbs) > wq->q_depth)
+               return NULL;
+
+       curr_cons_idx = (u16)(wq->cons_idx);
+
+       curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
+
+       *cons_idx = curr_cons_idx;
+
+       return WQ_WQE_ADDR(wq, (u32)(*cons_idx));
+}
+
+/**
+ * hinic_cmdq_alloc - initialize an array of @cmdq_blocks command queues.
+ * @wq:          array of cmdq_blocks work queues to initialize
+ * @dev_hdl:     device handle passed through to the DMA allocator
+ * @cmdq_blocks: number of command queues
+ * @wq_buf_size: buffer size per queue, in bytes
+ * @wqebb_shift: log2 of the WQE basic-block size
+ * @q_depth:     number of WQEBBs per queue
+ *
+ * On any allocation failure, frees the queues allocated so far and
+ * returns the error; returns 0 on success.
+ */
+int hinic_cmdq_alloc(struct hinic_wq *wq, void *dev_hdl,
+                    int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,
+                    u16 q_depth)
+{
+       int i, j, err = -ENOMEM;
+
+       /* NOTE(review): unlike hinic_wq_allocate(), q_depth is NOT
+        * validated as a power of two here; callers must guarantee it so
+        * the mask below is correct.
+        */
+       for (i = 0; i < cmdq_blocks; i++) {
+               wq[i].wqebb_size = 1 << wqebb_shift;
+               wq[i].wqebb_shift = wqebb_shift;
+               wq[i].wq_buf_size = wq_buf_size;
+               wq[i].q_depth = q_depth;
+
+               err = alloc_wq_pages(dev_hdl, &wq[i]);
+               if (err) {
+                       PMD_DRV_LOG(ERR, "Failed to alloc CMDQ blocks");
+                       goto cmdq_block_err;
+               }
+
+               wq[i].cons_idx = 0;
+               wq[i].prod_idx = 0;
+               wq[i].delta = q_depth;
+
+               wq[i].mask = q_depth - 1;
+       }
+
+       return 0;
+
+cmdq_block_err:
+       /* Unwind: free only the queues successfully allocated (0..i-1). */
+       for (j = 0; j < i; j++)
+               free_wq_pages(dev_hdl, &wq[j]);
+
+       return err;
+}
+
+/* Free all @cmdq_blocks queue buffers allocated by hinic_cmdq_alloc(). */
+void hinic_cmdq_free(void *dev_hdl, struct hinic_wq *wq, int cmdq_blocks)
+{
+       int i;
+
+       for (i = 0; i < cmdq_blocks; i++)
+               free_wq_pages(dev_hdl, &wq[i]);
+}
+
+/* Reset the queue indexes and zero the whole WQE buffer.
+ * NOTE(review): delta is not reset here — presumably callers reset it
+ * or it is already q_depth when this is used; confirm at call sites.
+ */
+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq)
+{
+       wq->cons_idx = 0;
+       wq->prod_idx = 0;
+
+       memset((void *)wq->queue_buf_vaddr, 0, wq->wq_buf_size);
+}
+
+/* Claim @num_wqebbs WQEBBs at the producer position and return the
+ * address of the first one; *@prod_idx receives the masked start index.
+ * No free-space check is performed here (delta is decremented blindly),
+ * so the caller must first verify enough free WQEBBs are available.
+ */
+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx)
+{
+       u16 curr_prod_idx;
+
+       wq->delta -= num_wqebbs;
+       curr_prod_idx = wq->prod_idx;
+       wq->prod_idx += num_wqebbs;
+       *prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
+
+       return WQ_WQE_ADDR(wq, (u32)(*prod_idx));
+}
diff --git a/drivers/net/hinic/base/hinic_pmd_wq.h 
b/drivers/net/hinic/base/hinic_pmd_wq.h
new file mode 100644
index 0000000..8cc7525
--- /dev/null
+++ b/drivers/net/hinic/base/hinic_pmd_wq.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef _HINIC_PMD_WQ_H_
+#define _HINIC_PMD_WQ_H_
+
+/* Virtual address of the WQE at @idx within @wq's buffer; @idx must
+ * already be masked into [0, q_depth).
+ */
+#define        WQ_WQE_ADDR(wq, idx) ((void *)((u64)((wq)->queue_buf_vaddr) + \
+                             ((idx) << (wq)->wqebb_shift)))
+
+/* Working Queue */
+struct hinic_wq {
+       /* The addresses are 64 bit in the HW */
+       /* Virtual address of the DMA-coherent WQE buffer. */
+       u64     queue_buf_vaddr;
+
+       /* Number of WQEBBs; power of two (see hinic_wq_allocate()). */
+       u16             q_depth;
+       /* q_depth - 1, used by MASKED_WQE_IDX(). */
+       u16             mask;
+       /* Count of free WQEBBs; q_depth when the queue is empty. */
+       u32             delta;
+
+       /* Unmasked consumer/producer indexes; mask on use. */
+       u32             cons_idx;
+       u32             prod_idx;
+
+       /* DMA (physical) address of the WQE buffer; 256 KB aligned. */
+       u64     queue_buf_paddr;
+
+       /* WQE basic-block size in bytes and its log2. */
+       u32             wqebb_size;
+       u32             wqebb_shift;
+
+       /* Total buffer size: q_depth << wqebb_shift. */
+       u32             wq_buf_size;
+
+       u32             rsvd[5];
+};
+
+void hinic_wq_wqe_pg_clear(struct hinic_wq *wq);
+
+int hinic_cmdq_alloc(struct hinic_wq *wq, void *dev_hdl,
+                    int cmdq_blocks, u32 wq_buf_size, u32 wqebb_shift,
+                    u16 q_depth);
+
+void hinic_cmdq_free(void *dev_hdl, struct hinic_wq *wq, int cmdq_blocks);
+
+int hinic_wq_allocate(void *dev_hdl, struct hinic_wq *wq,
+                     u32 wqebb_shift, u16 q_depth);
+
+void hinic_wq_free(void *dev_hdl, struct hinic_wq *wq);
+
+void *hinic_get_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *prod_idx);
+
+void hinic_put_wqe(struct hinic_wq *wq, int num_wqebbs);
+
+void *hinic_read_wqe(struct hinic_wq *wq, int num_wqebbs, u16 *cons_idx);
+
+#endif /* _HINIC_PMD_WQ_H_ */
-- 
1.8.3.1

Reply via email to