On 03/09, Haiyue Wang wrote:
>The DCF (Device Config Function) feature works as a standalone PMD. Its
>hardware entity is the trusted VF with ID 0, and its software role is to
>control the flow configuration of the other VFs through the mailbox with
>the PF.
>
>It doesn't handle packet Rx/Tx. This PMD uses the iAVF virtchnl to send
>the ICE PF's AdminQ commands.
>
>Also, for security, it needs to acquire the DCF capability from the PF.
>The flow-related support will be added later and will share most of the
>ICE flow functions; this patch mainly handles hardware initialization.
>
>                               .-------------.
>                             .-|             |
>                           .-| |             |
>                           | | |  iAVF PMD   |
>                           | | |             |
>                           | | |             |
>                           | | '-------------'
>                           | '-------------'
>                           '-------------'
>                          ^   Other VFs
>                         /
>                        / Flow Distribution
>  .------------------. /
>  |                  |/  DCF cap req   .-------------.
>  |      CVL         .  <----------    |             |
>  |     Kernel       |  ---------->    |             |
>  .                  .   DCF cap rsp   |   DCF PMD   |
>  |                  |                 |             |
>  '------------------'  <-----------   |             |
>                          rte_flow     '-------------'
>                                             VF0

It'd be clearer if this diagram kept `DCF PMD` and `iAVF PMD` at the same
level and `CVL Kernel` at the bottom. What do you think?
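
Something like this, just a rough sketch of the same components:

   .-------------.          .-------------.
   |   DCF PMD   |          |  iAVF PMD   | x N
   |    (VF0)    |          | (other VFs) |
   '------+------'          '------+------'
          | DCF cap req/rsp        |
          | rte_flow               | Flow Distribution
          v                        v
   .--------------------------------------.
   |              CVL Kernel              |
   '--------------------------------------'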

>
>Signed-off-by: Haiyue Wang <haiyue.w...@intel.com>
>---
> doc/guides/nics/ice.rst          |  47 +++
> doc/guides/nics/img/ice_dcf.png  | Bin 0 -> 39168 bytes
> drivers/common/Makefile          |   1 +
> drivers/net/ice/Makefile         |   6 +
> drivers/net/ice/ice_dcf.c        | 651 +++++++++++++++++++++++++++++++
> drivers/net/ice/ice_dcf.h        |  61 +++
> drivers/net/ice/ice_dcf_ethdev.c | 319 +++++++++++++++
> drivers/net/ice/ice_dcf_ethdev.h |  33 ++
> drivers/net/ice/ice_dcf_parent.c | 348 +++++++++++++++++
> drivers/net/ice/meson.build      |   8 +-
> mk/rte.app.mk                    |   1 +
> 11 files changed, 1473 insertions(+), 2 deletions(-)
> create mode 100644 doc/guides/nics/img/ice_dcf.png
> create mode 100644 drivers/net/ice/ice_dcf.c
> create mode 100644 drivers/net/ice/ice_dcf.h
> create mode 100644 drivers/net/ice/ice_dcf_ethdev.c
> create mode 100644 drivers/net/ice/ice_dcf_ethdev.h
> create mode 100644 drivers/net/ice/ice_dcf_parent.c
>
>diff --git a/drivers/common/Makefile b/drivers/common/Makefile
>index 96bd7ac6e..df2e840cf 100644
>--- a/drivers/common/Makefile
>+++ b/drivers/common/Makefile
>@@ -31,6 +31,7 @@ DIRS-y += dpaax
> endif
> 
> IAVF-y := $(CONFIG_RTE_LIBRTE_IAVF_PMD)
>+IAVF-y += $(CONFIG_RTE_LIBRTE_ICE_PMD)
> ifneq (,$(findstring y,$(IAVF-y)))
> DIRS-y += iavf
> endif
>diff --git a/drivers/net/ice/Makefile b/drivers/net/ice/Makefile
>index e22c34287..643639a50 100644
>--- a/drivers/net/ice/Makefile
>+++ b/drivers/net/ice/Makefile
>@@ -11,9 +11,11 @@ LIB = librte_pmd_ice.a
> CFLAGS += -O3
> CFLAGS += $(WERROR_FLAGS)
> CFLAGS += -DALLOW_EXPERIMENTAL_API
>+CFLAGS += -I$(RTE_SDK)/drivers/common/iavf
> 
> LDLIBS += -lrte_eal -lrte_mbuf -lrte_ethdev -lrte_kvargs
> LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_hash
>+LDLIBS += -lrte_net -lrte_common_iavf
> 
> EXPORT_MAP := rte_pmd_ice_version.map
> 
>@@ -84,6 +86,10 @@ ifeq ($(CC_AVX2_SUPPORT), 1)
> endif
> SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_generic_flow.c
> 
>+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_dcf.c
>+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_dcf_parent.c
>+SRCS-$(CONFIG_RTE_LIBRTE_ICE_PMD) += ice_dcf_ethdev.c
>+
> # install this header file
> SYMLINK-$(CONFIG_RTE_LIBRTE_ICE_PMD)-include := rte_pmd_ice.h
> 
>diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
>new file mode 100644
>index 000000000..669122331
>--- /dev/null
>+++ b/drivers/net/ice/ice_dcf.c
>@@ -0,0 +1,651 @@
>+/* SPDX-License-Identifier: BSD-3-Clause
>+ * Copyright(c) 2019 Intel Corporation

Should be 2020?

>+ */
>+
>+#include <sys/queue.h>
>+#include <stdio.h>
>+#include <errno.h>
>+#include <stdint.h>
>+#include <string.h>
>+#include <unistd.h>
>+#include <stdarg.h>
>+#include <inttypes.h>
>+#include <rte_byteorder.h>
>+#include <rte_common.h>
>+
>+#include <rte_pci.h>
>+#include <rte_atomic.h>
>+#include <rte_eal.h>
>+#include <rte_ether.h>
>+#include <rte_ethdev_driver.h>
>+#include <rte_ethdev_pci.h>
>+#include <rte_malloc.h>
>+#include <rte_memzone.h>
>+#include <rte_dev.h>
>+
>+#include "ice_dcf.h"
>+
>+#define ICE_DCF_AQ_LEN     32
>+#define ICE_DCF_AQ_BUF_SZ  4096
>+
>+#define ICE_DCF_ARQ_MAX_RETRIES 200
>+#define ICE_DCF_ARQ_CHECK_TIME  2   /* msecs */
>+
>+#define ICE_DCF_VF_RES_BUF_SZ \
>+      (sizeof(struct virtchnl_vf_resource) +  \
>+              IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource))
>+
>+static __rte_always_inline int
>+ice_dcf_send_cmd_req_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
>+                          uint8_t *req_msg, uint16_t req_msglen)
>+{
>+      return iavf_aq_send_msg_to_pf(&hw->avf, op, IAVF_SUCCESS,
>+                                    req_msg, req_msglen, NULL);
>+}
>+
>+static int
>+ice_dcf_recv_cmd_rsp_no_irq(struct ice_dcf_hw *hw, enum virtchnl_ops op,
>+                          uint8_t *rsp_msgbuf, uint16_t rsp_buflen,
>+                          uint16_t *rsp_msglen)
>+{
>+      struct iavf_arq_event_info event;
>+      enum virtchnl_ops v_op;
>+      int i = 0;
>+      int err;
>+
>+      event.buf_len = rsp_buflen;
>+      event.msg_buf = rsp_msgbuf;
>+
>+      do {
>+              err = iavf_clean_arq_element(&hw->avf, &event, NULL);
>+              if (err != IAVF_SUCCESS)
>+                      goto again;
>+
>+              v_op = rte_le_to_cpu_32(event.desc.cookie_high);
>+              if (v_op != op)
>+                      goto again;
>+
>+              if (rsp_msglen != NULL)
>+                      *rsp_msglen = event.msg_len;
>+              return rte_le_to_cpu_32(event.desc.cookie_low);
>+
>+again:
>+              rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
>+      } while (i++ < ICE_DCF_ARQ_MAX_RETRIES);
>+
>+      return -EIO;
>+}
>+
>+static __rte_always_inline void
>+ice_dcf_aq_cmd_clear(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
>+{
>+      rte_spinlock_lock(&hw->vc_cmd_queue_lock);
>+
>+      TAILQ_REMOVE(&hw->vc_cmd_queue, cmd, next);
>+
>+      rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
>+}
>+
>+static __rte_always_inline void
>+ice_dcf_vc_cmd_set(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
>+{
>+      cmd->v_ret = IAVF_ERR_NOT_READY;
>+      cmd->rsp_msglen = 0;
>+      cmd->pending = 1;
>+
>+      rte_spinlock_lock(&hw->vc_cmd_queue_lock);
>+
>+      TAILQ_INSERT_TAIL(&hw->vc_cmd_queue, cmd, next);
>+
>+      rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
>+}
>+
>+static __rte_always_inline int
>+ice_dcf_vc_cmd_send(struct ice_dcf_hw *hw, struct dcf_virtchnl_cmd *cmd)
>+{
>+      return iavf_aq_send_msg_to_pf(&hw->avf,
>+                                    cmd->v_op, IAVF_SUCCESS,
>+                                    cmd->req_msg, cmd->req_msglen, NULL);
>+}
>+
>+static __rte_always_inline void
>+ice_dcf_aq_cmd_handle(struct ice_dcf_hw *hw, struct iavf_arq_event_info *info)
>+{
>+      struct dcf_virtchnl_cmd *cmd;
>+      enum virtchnl_ops v_op;
>+      enum iavf_status v_ret;
>+      uint16_t aq_op;
>+
>+      aq_op = rte_le_to_cpu_16(info->desc.opcode);
>+      if (unlikely(aq_op != iavf_aqc_opc_send_msg_to_vf)) {
>+              PMD_DRV_LOG(ERR,
>+                          "Request %u is not supported yet", aq_op);
>+              return;
>+      }
>+
>+      v_op = rte_le_to_cpu_32(info->desc.cookie_high);
>+      if (unlikely(v_op == VIRTCHNL_OP_EVENT)) {
>+              if (hw->vc_event_msg_cb != NULL)
>+                      hw->vc_event_msg_cb(hw,
>+                                          info->msg_buf,
>+                                          info->msg_len);
>+              return;
>+      }
>+
>+      v_ret = rte_le_to_cpu_32(info->desc.cookie_low);
>+
>+      rte_spinlock_lock(&hw->vc_cmd_queue_lock);
>+
>+      TAILQ_FOREACH(cmd, &hw->vc_cmd_queue, next) {
>+              if (cmd->v_op == v_op && cmd->pending) {
>+                      cmd->v_ret = v_ret;
>+                      cmd->rsp_msglen = RTE_MIN(info->msg_len,
>+                                                cmd->rsp_buflen);
>+                      if (likely(cmd->rsp_msglen != 0))
>+                              rte_memcpy(cmd->rsp_msgbuf, info->msg_buf,
>+                                         cmd->rsp_msglen);
>+
>+                      /* prevent compiler reordering */
>+                      rte_compiler_barrier();
>+                      cmd->pending = 0;
>+                      break;
>+              }
>+      }
>+
>+      rte_spinlock_unlock(&hw->vc_cmd_queue_lock);
>+}
>+
>+static void
>+ice_dcf_handle_virtchnl_msg(struct ice_dcf_hw *hw)
>+{
>+      struct iavf_arq_event_info info;
>+      uint16_t pending = 1;
>+      int ret;
>+
>+      info.buf_len = ICE_DCF_AQ_BUF_SZ;
>+      info.msg_buf = hw->arq_buf;
>+
>+      while (pending) {
>+              ret = iavf_clean_arq_element(&hw->avf, &info, &pending);
>+              if (ret != IAVF_SUCCESS)
>+                      break;
>+
>+              ice_dcf_aq_cmd_handle(hw, &info);
>+      }
>+}
>+
>+static int
>+ice_dcf_init_check_api_version(struct ice_dcf_hw *hw)
>+{
>+#define ICE_CPF_VIRTCHNL_VERSION_MAJOR_START  1
>+#define ICE_CPF_VIRTCHNL_VERSION_MINOR_START  1
>+      struct virtchnl_version_info version, *pver;
>+      int err;
>+
>+      version.major = VIRTCHNL_VERSION_MAJOR;
>+      version.minor = VIRTCHNL_VERSION_MINOR;
>+      err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_VERSION,
>+                                        (uint8_t *)&version, sizeof(version));
>+      if (err) {
>+              PMD_INIT_LOG(ERR, "Fail to send OP_VERSION");
>+              return err;
>+      }
>+
>+      pver = &hw->virtchnl_version;
>+      err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_VERSION,
>+                                        (uint8_t *)pver, sizeof(*pver), NULL);
>+      if (err) {
>+              PMD_INIT_LOG(ERR, "Fail to get response of OP_VERSION");
>+              return -1;
>+      }
>+
>+      PMD_DRV_LOG(DEBUG,
>+                  "Peer PF API version: %u.%u", pver->major, pver->minor);

Use PMD_INIT_LOG instead, since this runs in the init path, e.g.:
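
    PMD_INIT_LOG(DEBUG,
                 "Peer PF API version: %u.%u", pver->major, pver->minor);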

>+
>+      if (pver->major < ICE_CPF_VIRTCHNL_VERSION_MAJOR_START ||
>+          (pver->major == ICE_CPF_VIRTCHNL_VERSION_MAJOR_START &&
>+           pver->minor < ICE_CPF_VIRTCHNL_VERSION_MINOR_START)) {
>+              PMD_INIT_LOG(ERR,
>+                           "VIRTCHNL API version should not be lower than (%u.%u)",
>+                           ICE_CPF_VIRTCHNL_VERSION_MAJOR_START,
>+                           ICE_CPF_VIRTCHNL_VERSION_MINOR_START);
>+              return -1;
>+      } else if (pver->major > VIRTCHNL_VERSION_MAJOR ||
>+                 (pver->major == VIRTCHNL_VERSION_MAJOR &&
>+                  pver->minor > VIRTCHNL_VERSION_MINOR)) {
>+              PMD_INIT_LOG(ERR,
>+                           "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
>+                           pver->major, pver->minor,
>+                           VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
>+              return -1;
>+      }
>+
>+      PMD_DRV_LOG(DEBUG, "Peer is supported PF host");

Use PMD_INIT_LOG instead.

>+
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_get_vf_resource(struct ice_dcf_hw *hw)
>+{
>+      uint32_t caps;
>+      int err, i;
>+
>+      caps = VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RX_POLLING |
>+             VIRTCHNL_VF_CAP_ADV_LINK_SPEED | VIRTCHNL_VF_CAP_DCF |
>+             VF_BASE_MODE_OFFLOADS;
>+
>+      err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
>+                                        (uint8_t *)&caps, sizeof(caps));
>+      if (err) {
>+              PMD_DRV_LOG(ERR, "Fail to send msg OP_GET_VF_RESOURCE");
>+              return err;
>+      }
>+
>+      err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_GET_VF_RESOURCES,
>+                                        (uint8_t *)hw->vf_res,
>+                                        ICE_DCF_VF_RES_BUF_SZ, NULL);
>+      if (err) {
>+              PMD_DRV_LOG(ERR, "Fail to get response of OP_GET_VF_RESOURCE");
>+              return -1;
>+      }
>+
>+      iavf_vf_parse_hw_config(&hw->avf, hw->vf_res);
>+
>+      hw->vsi_res = NULL;
>+      for (i = 0; i < hw->vf_res->num_vsis; i++) {
>+              if (hw->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
>+                      hw->vsi_res = &hw->vf_res->vsi_res[i];
>+      }
>+
>+      if (!hw->vsi_res) {
>+              PMD_DRV_LOG(ERR, "no LAN VSI found");
>+              return -1;
>+      }
>+
>+      hw->vsi_id = hw->vsi_res->vsi_id;
>+      PMD_DRV_LOG(DEBUG, "VSI ID is %u", hw->vsi_id);
>+
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_get_vf_vsi_map(struct ice_dcf_hw *hw)
>+{
>+      struct virtchnl_dcf_vsi_map *vsi_map;
>+      uint16_t len;
>+      int err;
>+
>+      err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
>+                                        NULL, 0);
>+      if (err) {
>+              PMD_DRV_LOG(ERR, "Fail to send msg OP_DCF_GET_VSI_MAP");
>+              return err;
>+      }
>+
>+      err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_GET_VSI_MAP,
>+                                        hw->arq_buf, ICE_DCF_AQ_BUF_SZ,
>+                                        &len);
>+      if (err) {
>+              PMD_DRV_LOG(ERR, "Fail to get response of OP_DCF_GET_VSI_MAP");
>+              return err;
>+      }
>+
>+      vsi_map = (struct virtchnl_dcf_vsi_map *)hw->arq_buf;
>+      if (len < sizeof(*vsi_map) || !vsi_map->num_vfs ||
>+          len < sizeof(*vsi_map) +
>+                      (vsi_map->num_vfs - 1) * sizeof(vsi_map->vf_vsi[0])) {
>+              PMD_DRV_LOG(ERR, "invalid vf vsi map response with length %u",
>+                          len);
>+              return -EINVAL;
>+      }
>+
>+      if (hw->num_vfs != 0 && hw->num_vfs != vsi_map->num_vfs) {
>+              PMD_DRV_LOG(ERR, "The number of VSI maps (%u) doesn't match the number of VFs (%u)",
>+                          vsi_map->num_vfs, hw->num_vfs);
>+              return -EINVAL;
>+      }
>+
>+      len = vsi_map->num_vfs * sizeof(vsi_map->vf_vsi[0]);
>+      if (!hw->vf_vsi_map) {
>+              hw->num_vfs = vsi_map->num_vfs;
>+              hw->vf_vsi_map = rte_zmalloc("vf_vsi_ctx", len, 0);
>+      }
>+
>+      if (!hw->vf_vsi_map) {
>+              PMD_DRV_LOG(ERR, "Fail to alloc memory for VSI context");
>+              return -ENOMEM;
>+      }
>+
>+      if (!memcmp(hw->vf_vsi_map, vsi_map->vf_vsi, len)) {
>+              PMD_DRV_LOG(DEBUG, "VF VSI map doesn't change");
>+              return 1;
>+      }
>+
>+      rte_memcpy(hw->vf_vsi_map, vsi_map->vf_vsi, len);
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_mode_disable(struct ice_dcf_hw *hw)
>+{
>+      int err;
>+
>+      err = ice_dcf_send_cmd_req_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
>+                                        NULL, 0);
>+      if (err) {
>+              PMD_DRV_LOG(ERR, "Fail to send msg OP_DCF_DISABLE");
>+              return err;
>+      }
>+
>+      err = ice_dcf_recv_cmd_rsp_no_irq(hw, VIRTCHNL_OP_DCF_DISABLE,
>+                                        (uint8_t *)hw->arq_buf,
>+                                        ICE_DCF_AQ_BUF_SZ, NULL);
>+      if (err) {
>+              PMD_DRV_LOG(ERR,
>+                          "Fail to get response of OP_DCF_DISABLE %d",
>+                          err);
>+              return -1;
>+      }
>+
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_check_reset_done(struct ice_dcf_hw *hw)
>+{
>+#define ICE_DCF_RESET_WAIT_CNT       50
>+      struct iavf_hw *avf = &hw->avf;
>+      int i, reset;
>+
>+      for (i = 0; i < ICE_DCF_RESET_WAIT_CNT; i++) {
>+              reset = IAVF_READ_REG(avf, IAVF_VFGEN_RSTAT) &
>+                                      IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
>+              reset = reset >> IAVF_VFGEN_RSTAT_VFR_STATE_SHIFT;
>+
>+              if (reset == VIRTCHNL_VFR_VFACTIVE ||
>+                  reset == VIRTCHNL_VFR_COMPLETED)
>+                      break;
>+
>+              rte_delay_ms(20);
>+      }
>+
>+      if (i >= ICE_DCF_RESET_WAIT_CNT)
>+              return -1;
>+
>+      return 0;
>+}
>+
>+static inline void
>+ice_dcf_enable_irq0(struct ice_dcf_hw *hw)
>+{
>+      struct iavf_hw *avf = &hw->avf;
>+
>+      /* Enable admin queue interrupt trigger */
>+      IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1,
>+                     IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
>+      IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
>+                     IAVF_VFINT_DYN_CTL01_INTENA_MASK |
>+                     IAVF_VFINT_DYN_CTL01_CLEARPBA_MASK |
>+                     IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
>+
>+      IAVF_WRITE_FLUSH(avf);
>+}
>+
>+static inline void
>+ice_dcf_disable_irq0(struct ice_dcf_hw *hw)
>+{
>+      struct iavf_hw *avf = &hw->avf;
>+
>+      /* Disable all interrupt types */
>+      IAVF_WRITE_REG(avf, IAVF_VFINT_ICR0_ENA1, 0);
>+      IAVF_WRITE_REG(avf, IAVF_VFINT_DYN_CTL01,
>+                     IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
>+
>+      IAVF_WRITE_FLUSH(avf);
>+}
>+
>+static void
>+ice_dcf_dev_interrupt_handler(void *param)
>+{
>+      struct ice_dcf_hw *hw = param;
>+
>+      ice_dcf_disable_irq0(hw);
>+
>+      ice_dcf_handle_virtchnl_msg(hw);
>+
>+      ice_dcf_enable_irq0(hw);
>+}
>+
>+int
>+ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
>+                           struct dcf_virtchnl_cmd *cmd)
>+{
>+      int i = 0;
>+      int err;
>+
>+      if ((cmd->req_msg && !cmd->req_msglen) ||
>+          (!cmd->req_msg && cmd->req_msglen) ||
>+          (cmd->rsp_msgbuf && !cmd->rsp_buflen) ||
>+          (!cmd->rsp_msgbuf && cmd->rsp_buflen))
>+              return -EINVAL;
>+
>+      rte_spinlock_lock(&hw->vc_cmd_send_lock);
>+      ice_dcf_vc_cmd_set(hw, cmd);
>+
>+      err = ice_dcf_vc_cmd_send(hw, cmd);
>+      if (err) {
>+              PMD_DRV_LOG(ERR, "fail to send cmd %d", cmd->v_op);
>+              goto ret;
>+      }
>+
>+      do {
>+              if (!cmd->pending)
>+                      break;
>+
>+              rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
>+      } while (i++ < ICE_DCF_ARQ_MAX_RETRIES);
>+
>+      if (cmd->v_ret != IAVF_SUCCESS) {
>+              err = -1;
>+              PMD_DRV_LOG(ERR,
>+                          "No response (%d times) or return failure (%d) for cmd %d",
>+                          i, cmd->v_ret, cmd->v_op);
>+      }
>+
>+ret:
>+      ice_dcf_aq_cmd_clear(hw, cmd);
>+      rte_spinlock_unlock(&hw->vc_cmd_send_lock);
>+      return err;
>+}
>+
>+int
>+ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
>+                  void *buf, uint16_t buf_size)
>+{
>+      struct dcf_virtchnl_cmd desc_cmd, buff_cmd;
>+      struct ice_dcf_hw *hw = dcf_hw;
>+      int err = 0;
>+      int i = 0;
>+
>+      if ((buf && !buf_size) || (!buf && buf_size) ||
>+          buf_size > ICE_DCF_AQ_BUF_SZ)
>+              return -EINVAL;
>+
>+      desc_cmd.v_op = VIRTCHNL_OP_DCF_CMD_DESC;
>+      desc_cmd.req_msglen = sizeof(*desc);
>+      desc_cmd.req_msg = (uint8_t *)desc;
>+      desc_cmd.rsp_buflen = sizeof(*desc);
>+      desc_cmd.rsp_msgbuf = (uint8_t *)desc;
>+
>+      if (buf == NULL)
>+              return ice_dcf_execute_virtchnl_cmd(hw, &desc_cmd);
>+
>+      desc->flags |= rte_cpu_to_le_16(ICE_AQ_FLAG_BUF);
>+
>+      buff_cmd.v_op = VIRTCHNL_OP_DCF_CMD_BUFF;
>+      buff_cmd.req_msglen = buf_size;
>+      buff_cmd.req_msg = buf;
>+      buff_cmd.rsp_buflen = buf_size;
>+      buff_cmd.rsp_msgbuf = buf;
>+
>+      rte_spinlock_lock(&hw->vc_cmd_send_lock);
>+      ice_dcf_vc_cmd_set(hw, &desc_cmd);
>+      ice_dcf_vc_cmd_set(hw, &buff_cmd);
>+
>+      if (ice_dcf_vc_cmd_send(hw, &desc_cmd) ||
>+          ice_dcf_vc_cmd_send(hw, &buff_cmd)) {
>+              err = -1;
>+              PMD_DRV_LOG(ERR, "fail to send OP_DCF_CMD_DESC/BUFF");
>+              goto ret;
>+      }
>+
>+      do {
>+              if ((!desc_cmd.pending && !buff_cmd.pending) ||
>+                  (!desc_cmd.pending && desc_cmd.v_ret != IAVF_SUCCESS) ||
>+                  (!buff_cmd.pending && buff_cmd.v_ret != IAVF_SUCCESS))
>+                      break;
>+
>+              rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
>+      } while (i++ < ICE_DCF_ARQ_MAX_RETRIES);
>+
>+      if (desc_cmd.v_ret != IAVF_SUCCESS || buff_cmd.v_ret != IAVF_SUCCESS) {
>+              err = -1;
>+              PMD_DRV_LOG(ERR,
>+                          "No response (%d times) or return failure (desc: %d / buff: %d)",
>+                          i, desc_cmd.v_ret, buff_cmd.v_ret);
>+      }
>+
>+ret:
>+      ice_dcf_aq_cmd_clear(hw, &desc_cmd);
>+      ice_dcf_aq_cmd_clear(hw, &buff_cmd);
>+      rte_spinlock_unlock(&hw->vc_cmd_send_lock);
>+
>+      return err;
>+}
>+
>+int
>+ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw)
>+{
>+      int err = 0;
>+
>+      rte_spinlock_lock(&hw->vc_cmd_send_lock);
>+      ice_dcf_disable_irq0(hw);
>+
>+      if (ice_dcf_get_vf_resource(hw) || ice_dcf_get_vf_vsi_map(hw))
>+              err = -1;
>+
>+      ice_dcf_enable_irq0(hw);
>+      rte_spinlock_unlock(&hw->vc_cmd_send_lock);
>+
>+      return err;
>+}
>+
>+int
>+ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
>+{
>+      struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
>+      int ret;
>+
>+      hw->avf.hw_addr = pci_dev->mem_resource[0].addr;
>+      hw->avf.back = hw;
>+
>+      hw->avf.bus.bus_id = pci_dev->addr.bus;
>+      hw->avf.bus.device = pci_dev->addr.devid;
>+      hw->avf.bus.func = pci_dev->addr.function;
>+
>+      hw->avf.device_id = pci_dev->id.device_id;
>+      hw->avf.vendor_id = pci_dev->id.vendor_id;
>+      hw->avf.subsystem_device_id = pci_dev->id.subsystem_device_id;
>+      hw->avf.subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
>+
>+      hw->avf.aq.num_arq_entries = ICE_DCF_AQ_LEN;
>+      hw->avf.aq.num_asq_entries = ICE_DCF_AQ_LEN;
>+      hw->avf.aq.arq_buf_size = ICE_DCF_AQ_BUF_SZ;
>+      hw->avf.aq.asq_buf_size = ICE_DCF_AQ_BUF_SZ;
>+
>+      rte_spinlock_init(&hw->vc_cmd_send_lock);
>+      rte_spinlock_init(&hw->vc_cmd_queue_lock);
>+      TAILQ_INIT(&hw->vc_cmd_queue);
>+
>+      hw->arq_buf = rte_zmalloc("arq_buf", ICE_DCF_AQ_BUF_SZ, 0);
>+      if (hw->arq_buf == NULL) {
>+              PMD_INIT_LOG(ERR, "unable to allocate AdminQ buffer memory");
>+              goto err;
>+      }
>+
>+      ret = iavf_set_mac_type(&hw->avf);
>+      if (ret) {
>+              PMD_INIT_LOG(ERR, "set_mac_type failed: %d", ret);
>+              goto err;
>+      }
>+
>+      ret = ice_dcf_check_reset_done(hw);
>+      if (ret) {
>+              PMD_INIT_LOG(ERR, "VF is still resetting");
>+              goto err;
>+      }
>+
>+      ret = iavf_init_adminq(&hw->avf);
>+      if (ret) {
>+              PMD_INIT_LOG(ERR, "init_adminq failed: %d", ret);
>+              goto err;
>+      }
>+
>+      if (ice_dcf_init_check_api_version(hw)) {
>+              PMD_INIT_LOG(ERR, "check_api version failed");
>+              goto err_api;
>+      }
>+
>+      hw->vf_res = rte_zmalloc("vf_res", ICE_DCF_VF_RES_BUF_SZ, 0);
>+      if (hw->vf_res == NULL) {
>+              PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
>+              goto err_api;
>+      }
>+
>+      if (ice_dcf_get_vf_resource(hw)) {
>+              PMD_INIT_LOG(ERR, "Failed to get VF resource");
>+              goto err_alloc;
>+      }
>+
>+      if (ice_dcf_get_vf_vsi_map(hw) < 0) {
>+              PMD_INIT_LOG(ERR, "Failed to get VF VSI map");
>+              ice_dcf_mode_disable(hw);
>+              goto err_alloc;
>+      }
>+
>+      rte_intr_callback_register(&pci_dev->intr_handle,
>+                                 ice_dcf_dev_interrupt_handler, hw);
>+      rte_intr_enable(&pci_dev->intr_handle);
>+      ice_dcf_enable_irq0(hw);
>+
>+      return 0;
>+
>+err_alloc:
>+      rte_free(hw->vf_res);
>+err_api:
>+      iavf_shutdown_adminq(&hw->avf);
>+err:
>+      rte_free(hw->arq_buf);
>+
>+      return -1;
>+}
>+
>+void
>+ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw)
>+{
>+      struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
>+      struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
>+
>+      ice_dcf_disable_irq0(hw);
>+      rte_intr_disable(intr_handle);
>+      rte_intr_callback_unregister(intr_handle,
>+                                   ice_dcf_dev_interrupt_handler, hw);
>+
>+      ice_dcf_mode_disable(hw);
>+      iavf_shutdown_adminq(&hw->avf);
>+
>+      rte_free(hw->arq_buf);
>+      rte_free(hw->vf_vsi_map);
>+      rte_free(hw->vf_res);
>+}
>diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h
>new file mode 100644
>index 000000000..58647d87f
>--- /dev/null
>+++ b/drivers/net/ice/ice_dcf.h
>@@ -0,0 +1,61 @@
>+/* SPDX-License-Identifier: BSD-3-Clause
>+ * Copyright(c) 2019 Intel Corporation
>+ */
>+
>+#ifndef _ICE_DCF_H_
>+#define _ICE_DCF_H_
>+
>+#include <rte_ethdev_driver.h>
>+
>+#include <iavf_prototype.h>
>+#include <iavf_adminq_cmd.h>
>+#include <iavf_type.h>
>+
>+#include "base/ice_type.h"
>+#include "ice_logs.h"
>+
>+struct dcf_virtchnl_cmd {
>+      TAILQ_ENTRY(dcf_virtchnl_cmd) next;
>+
>+      enum virtchnl_ops v_op;
>+      enum iavf_status v_ret;
>+
>+      uint16_t req_msglen;
>+      uint8_t *req_msg;
>+
>+      uint16_t rsp_msglen;
>+      uint16_t rsp_buflen;
>+      uint8_t *rsp_msgbuf;
>+
>+      volatile int pending;
>+};
>+
>+struct ice_dcf_hw {
>+      struct iavf_hw avf;
>+
>+      rte_spinlock_t vc_cmd_send_lock;
>+      rte_spinlock_t vc_cmd_queue_lock;
>+      TAILQ_HEAD(, dcf_virtchnl_cmd) vc_cmd_queue;
>+      void (*vc_event_msg_cb)(struct ice_dcf_hw *dcf_hw,
>+                              uint8_t *msg, uint16_t msglen);
>+
>+      uint8_t *arq_buf;
>+
>+      uint16_t num_vfs;
>+      uint16_t *vf_vsi_map;
>+
>+      struct virtchnl_version_info virtchnl_version;
>+      struct virtchnl_vf_resource *vf_res; /* VF resource */
>+      struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
>+      uint16_t vsi_id;
>+};
>+
>+int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw *hw,
>+                               struct dcf_virtchnl_cmd *cmd);
>+int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
>+                      void *buf, uint16_t buf_size);
>+int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
>+int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
>+void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
>+
>+#endif /* _ICE_DCF_H_ */
>diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
>new file mode 100644
>index 000000000..f65b962d4
>--- /dev/null
>+++ b/drivers/net/ice/ice_dcf_ethdev.c
>@@ -0,0 +1,319 @@
>+/* SPDX-License-Identifier: BSD-3-Clause
>+ * Copyright(c) 2019 Intel Corporation
>+ */
>+
>+#include <errno.h>
>+#include <stdbool.h>
>+#include <sys/types.h>
>+#include <sys/ioctl.h>
>+#include <unistd.h>
>+
>+#include <rte_interrupts.h>
>+#include <rte_debug.h>
>+#include <rte_pci.h>
>+#include <rte_atomic.h>
>+#include <rte_eal.h>
>+#include <rte_ether.h>
>+#include <rte_ethdev_pci.h>
>+#include <rte_kvargs.h>
>+#include <rte_malloc.h>
>+#include <rte_memzone.h>
>+#include <rte_dev.h>
>+
>+#include <iavf_devids.h>
>+
>+#include "ice_generic_flow.h"
>+#include "ice_dcf_ethdev.h"
>+
>+static uint16_t
>+ice_dcf_recv_pkts(__rte_unused void *rx_queue,
>+                __rte_unused struct rte_mbuf **bufs,
>+                __rte_unused uint16_t nb_pkts)
>+{
>+      return 0;
>+}
>+
>+static uint16_t
>+ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
>+                __rte_unused struct rte_mbuf **bufs,
>+                __rte_unused uint16_t nb_pkts)
>+{
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_dev_start(struct rte_eth_dev *dev)
>+{
>+      dev->data->dev_link.link_status = ETH_LINK_UP;
>+
>+      return 0;
>+}
>+
>+static void
>+ice_dcf_dev_stop(struct rte_eth_dev *dev)
>+{
>+      dev->data->dev_link.link_status = ETH_LINK_DOWN;
>+}
>+
>+static int
>+ice_dcf_dev_configure(__rte_unused struct rte_eth_dev *dev)
>+{
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_dev_info_get(struct rte_eth_dev *dev,
>+                   struct rte_eth_dev_info *dev_info)
>+{
>+      struct ice_dcf_adapter *adapter = dev->data->dev_private;
>+
>+      dev_info->max_mac_addrs = 1;
>+      dev_info->max_rx_pktlen = (uint32_t)-1;
>+      dev_info->max_rx_queues = RTE_DIM(adapter->rxqs);
>+      dev_info->max_tx_queues = RTE_DIM(adapter->txqs);
>+
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_stats_get(__rte_unused struct rte_eth_dev *dev,
>+                __rte_unused struct rte_eth_stats *igb_stats)
>+{
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_stats_reset(__rte_unused struct rte_eth_dev *dev)
>+{
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev)
>+{
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_dev_promiscuous_disable(__rte_unused struct rte_eth_dev *dev)
>+{
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_dev_allmulticast_enable(__rte_unused struct rte_eth_dev *dev)
>+{
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_dev_allmulticast_disable(__rte_unused struct rte_eth_dev *dev)
>+{
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
>+                      enum rte_filter_type filter_type,
>+                      __rte_unused enum rte_filter_op filter_op,
>+                      __rte_unused void *arg)
>+{

The legacy filter API will be deprecated; I think we should avoid adding
support for it in a new PMD.
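
If the shared ice rte_flow support gets hooked up later, a minimal sketch
could look like this (assuming the common ice_flow_ops exported by
ice_generic_flow.c; an illustration only, not the final code):

    static int
    ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev,
                            enum rte_filter_type filter_type,
                            enum rte_filter_op filter_op,
                            void *arg)
    {
            if (!dev)
                    return -EINVAL;

            switch (filter_type) {
            case RTE_ETH_FILTER_GENERIC:
                    /* hand out the generic rte_flow ops */
                    if (filter_op != RTE_ETH_FILTER_GET)
                            return -EINVAL;
                    *(const void **)arg = &ice_flow_ops;
                    break;
            default:
                    return -ENOTSUP;
            }

            return 0;
    }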

>+      int ret = 0;
>+
>+      if (!dev)
>+              return -EINVAL;
>+
>+      switch (filter_type) {
>+      default:
>+              PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
>+                          filter_type);
>+              ret = -EINVAL;
>+              break;
>+      }
>+
>+      return ret;
>+}
>+
>+static void
>+ice_dcf_dev_close(struct rte_eth_dev *dev)
>+{
>+      struct ice_dcf_adapter *adapter = dev->data->dev_private;
>+
>+      if (rte_eal_process_type() != RTE_PROC_PRIMARY)
>+              return;
>+
>+      dev->dev_ops = NULL;
>+      dev->rx_pkt_burst = NULL;
>+      dev->tx_pkt_burst = NULL;
>+
>+      ice_dcf_uninit_parent_adapter(dev);
>+      ice_dcf_uninit_hw(dev, &adapter->real_hw);
>+}
>+
>+static void
>+ice_dcf_queue_release(__rte_unused void *q)
>+{
>+}
>+
>+static int
>+ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev,
>+                  __rte_unused int wait_to_complete)
>+{
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_rx_queue_setup(struct rte_eth_dev *dev,
>+                     uint16_t rx_queue_id,
>+                     __rte_unused uint16_t nb_rx_desc,
>+                     __rte_unused unsigned int socket_id,
>+                     __rte_unused const struct rte_eth_rxconf *rx_conf,
>+                     __rte_unused struct rte_mempool *mb_pool)
>+{
>+      struct ice_dcf_adapter *adapter = dev->data->dev_private;
>+
>+      dev->data->rx_queues[rx_queue_id] = &adapter->rxqs[rx_queue_id];
>+
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_tx_queue_setup(struct rte_eth_dev *dev,
>+                     uint16_t tx_queue_id,
>+                     __rte_unused uint16_t nb_tx_desc,
>+                     __rte_unused unsigned int socket_id,
>+                     __rte_unused const struct rte_eth_txconf *tx_conf)
>+{
>+      struct ice_dcf_adapter *adapter = dev->data->dev_private;
>+
>+      dev->data->tx_queues[tx_queue_id] = &adapter->txqs[tx_queue_id];
>+
>+      return 0;
>+}
>+
>+static const struct eth_dev_ops ice_dcf_eth_dev_ops = {
>+      .dev_start               = ice_dcf_dev_start,
>+      .dev_stop                = ice_dcf_dev_stop,
>+      .dev_close               = ice_dcf_dev_close,
>+      .dev_configure           = ice_dcf_dev_configure,
>+      .dev_infos_get           = ice_dcf_dev_info_get,
>+      .rx_queue_setup          = ice_dcf_rx_queue_setup,
>+      .tx_queue_setup          = ice_dcf_tx_queue_setup,
>+      .rx_queue_release        = ice_dcf_queue_release,
>+      .tx_queue_release        = ice_dcf_queue_release,
>+      .link_update             = ice_dcf_link_update,
>+      .stats_get               = ice_dcf_stats_get,
>+      .stats_reset             = ice_dcf_stats_reset,
>+      .promiscuous_enable      = ice_dcf_dev_promiscuous_enable,
>+      .promiscuous_disable     = ice_dcf_dev_promiscuous_disable,
>+      .allmulticast_enable     = ice_dcf_dev_allmulticast_enable,
>+      .allmulticast_disable    = ice_dcf_dev_allmulticast_disable,
>+      .filter_ctrl             = ice_dcf_dev_filter_ctrl,
>+};
>+
>+static int
>+ice_dcf_dev_init(struct rte_eth_dev *eth_dev)
>+{
>+      struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
>+
>+      eth_dev->dev_ops = &ice_dcf_eth_dev_ops;
>+      eth_dev->rx_pkt_burst = ice_dcf_recv_pkts;
>+      eth_dev->tx_pkt_burst = ice_dcf_xmit_pkts;
>+
>+      if (rte_eal_process_type() != RTE_PROC_PRIMARY)
>+              return 0;
>+
>+      eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
>+
>+      adapter->real_hw.vc_event_msg_cb = ice_dcf_handle_pf_event_msg;
>+      if (ice_dcf_init_hw(eth_dev, &adapter->real_hw) != 0) {
>+              PMD_DRV_LOG(ERR, "Failed to init DCF hardware");

Use PMD_INIT_LOG instead.

>+              return -1;
>+      }
>+
>+      if (ice_dcf_init_parent_adapter(eth_dev) != 0) {
>+              PMD_DRV_LOG(ERR, "Failed to init DCF parent adapter");

Use PMD_INIT_LOG instead.

>+              ice_dcf_uninit_hw(eth_dev, &adapter->real_hw);
>+              return -1;
>+      }
>+
>+      return 0;
>+}
>+
>+static int
>+ice_dcf_dev_uninit(struct rte_eth_dev *eth_dev)
>+{
>+      ice_dcf_dev_close(eth_dev);
>+
>+      return 0;
>+}
>+
>+static int
>+handle_dcf_arg(__rte_unused const char *key, const char *value,
>+             void *arg)
>+{
>+      bool *dcf = arg;
>+
>+      if (arg == NULL || value == NULL)
>+              return -EINVAL;
>+
>+      if (strcmp(value, "dcf") == 0)
>+              *dcf = true;
>+      else
>+              *dcf = false;
>+
>+      return 0;
>+}
>+
>+static bool
>+check_cap_dcf_enable(struct rte_devargs *devargs)
>+{
>+      struct rte_kvargs *kvlist;
>+      bool enable = false;
>+
>+      if (devargs == NULL)
>+              return false;
>+
>+      kvlist = rte_kvargs_parse(devargs->args, NULL);
>+      if (kvlist == NULL)
>+              return false;
>+
>+      rte_kvargs_process(kvlist, "cap", handle_dcf_arg, &enable);

Need error handling for the failure case; rte_kvargs_process() returns a
negative value on failure, so something like this (sketch):
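
    if (rte_kvargs_process(kvlist, "cap", handle_dcf_arg, &enable) < 0)
            enable = false;
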
>+
>+      rte_kvargs_free(kvlist);
>+
>+      return enable;
>+}
>+
>+static int eth_ice_dcf_pci_probe(__rte_unused struct rte_pci_driver *pci_drv,
>+                           struct rte_pci_device *pci_dev)
>+{
>+      if (!check_cap_dcf_enable(pci_dev->device.devargs))
>+              return 1; /* continue to probe */

I think the code is self-explanatory; this straightforward comment is
unnecessary.

>+
>+      return rte_eth_dev_pci_generic_probe(pci_dev,
>+                                           sizeof(struct ice_dcf_adapter),
>+                                           ice_dcf_dev_init);
>+}
>+
>+static int eth_ice_dcf_pci_remove(struct rte_pci_device *pci_dev)
>+{
>+      return rte_eth_dev_pci_generic_remove(pci_dev, ice_dcf_dev_uninit);
>+}
>+
>+static const struct rte_pci_id pci_id_ice_dcf_map[] = {
>+      { RTE_PCI_DEVICE(IAVF_INTEL_VENDOR_ID, IAVF_DEV_ID_ADAPTIVE_VF) },
>+      { .vendor_id = 0, /* sentinel */ },
>+};
>+
>+static struct rte_pci_driver rte_ice_dcf_pmd = {
>+      .id_table = pci_id_ice_dcf_map,
>+      .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
>+      .probe = eth_ice_dcf_pci_probe,
>+      .remove = eth_ice_dcf_pci_remove,
>+};
>+
>+RTE_PMD_REGISTER_PCI(net_ice_dcf, rte_ice_dcf_pmd);
>+RTE_PMD_REGISTER_PCI_TABLE(net_ice_dcf, pci_id_ice_dcf_map);
>+RTE_PMD_REGISTER_KMOD_DEP(net_ice_dcf, "* igb_uio | vfio-pci");
>+RTE_PMD_REGISTER_PARAM_STRING(net_ice_dcf, "cap=dcf");
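
BTW, it may be worth showing the devargs usage in ice.rst too, e.g.
(hypothetical PCI address; VF0 must be created from a CVL PF and set as
trusted first):

    testpmd -w 18:01.0,cap=dcf -- -i
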
>diff --git a/drivers/net/ice/ice_dcf_ethdev.h b/drivers/net/ice/ice_dcf_ethdev.h
>new file mode 100644
>index 000000000..e95266599
>--- /dev/null
>+++ b/drivers/net/ice/ice_dcf_ethdev.h
>@@ -0,0 +1,33 @@
>+/* SPDX-License-Identifier: BSD-3-Clause
>+ * Copyright(c) 2019 Intel Corporation
>+ */
>+
>+#ifndef _ICE_DCF_ETHDEV_H_
>+#define _ICE_DCF_ETHDEV_H_
>+
>+#include "base/ice_common.h"
>+#include "base/ice_adminq_cmd.h"
>+
>+#include "ice_ethdev.h"
>+#include "ice_dcf.h"
>+
>+#define ICE_DCF_MAX_RINGS  1
>+
>+struct ice_dcf_queue {
>+      uint64_t dummy;
>+};
>+
>+struct ice_dcf_adapter {
>+      struct ice_adapter parent; /* Must be first */
>+
>+      struct ice_dcf_hw real_hw;
>+      struct ice_dcf_queue rxqs[ICE_DCF_MAX_RINGS];
>+      struct ice_dcf_queue txqs[ICE_DCF_MAX_RINGS];
>+};
>+
>+void ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
>+                               uint8_t *msg, uint16_t msglen);
>+int ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev);
>+void ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev);
>+
>+#endif /* _ICE_DCF_ETHDEV_H_ */
>diff --git a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
>new file mode 100644
>index 000000000..bca9cd34a
>--- /dev/null
>+++ b/drivers/net/ice/ice_dcf_parent.c
>@@ -0,0 +1,348 @@
>+/* SPDX-License-Identifier: BSD-3-Clause
>+ * Copyright(c) 2019 Intel Corporation
>+ */
>+#include <sys/types.h>
>+#include <sys/stat.h>
>+#include <unistd.h>
>+
>+#include <rte_alarm.h>
>+
>+#include "ice_dcf_ethdev.h"
>+#include "ice_generic_flow.h"
>+
>+#define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL   100000 /* us */
>+
>+static __rte_always_inline void
>+ice_dcf_update_vsi_ctx(struct ice_hw *hw, uint16_t vsi_handle,
>+                     uint16_t vsi_map)
>+{
>+      struct ice_vsi_ctx *vsi_ctx;
>+
>+      if (unlikely(vsi_handle >= ICE_MAX_VSI)) {
>+              PMD_DRV_LOG(ERR, "Invalid vsi handle %u", vsi_handle);
>+              return;
>+      }
>+
>+      vsi_ctx = hw->vsi_ctx[vsi_handle];
>+
>+      if (vsi_map & VIRTCHNL_DCF_VF_VSI_VALID) {
>+              if (!vsi_ctx)
>+                      vsi_ctx = ice_malloc(hw, sizeof(*vsi_ctx));
>+
>+              if (!vsi_ctx) {
>+                      PMD_DRV_LOG(ERR, "No memory for vsi context %u",
>+                                  vsi_handle);
>+                      return;
>+              }
>+
>+              vsi_ctx->vsi_num = (vsi_map & VIRTCHNL_DCF_VF_VSI_ID_M) >>
>+                                            VIRTCHNL_DCF_VF_VSI_ID_S;
>+              hw->vsi_ctx[vsi_handle] = vsi_ctx;
>+
>+              PMD_DRV_LOG(DEBUG, "VF%u is assigned with vsi number %u",
>+                          vsi_handle, vsi_ctx->vsi_num);
>+      } else {
>+              hw->vsi_ctx[vsi_handle] = NULL;
>+
>+              if (vsi_ctx)
>+                      ice_free(hw, vsi_ctx);

No need to check whether vsi_ctx is NULL; ice_free (rte_free) handles that
internally, so the branch can simply be:
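
    hw->vsi_ctx[vsi_handle] = NULL;
    ice_free(hw, vsi_ctx);
    PMD_DRV_LOG(NOTICE, "VF%u is disabled", vsi_handle);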

>+
>+              PMD_DRV_LOG(NOTICE, "VF%u is disabled", vsi_handle);
>+      }
>+}
>+
>+static void
>+ice_dcf_update_vf_vsi_map(struct ice_hw *hw, uint16_t num_vfs,
>+                        uint16_t *vf_vsi_map)
>+{
>+      uint16_t vf_id;
>+
>+      for (vf_id = 0; vf_id < num_vfs; vf_id++)
>+              ice_dcf_update_vsi_ctx(hw, vf_id, vf_vsi_map[vf_id]);
>+}
>+
>+static void
>+ice_dcf_vsi_update_service_handler(void *param)
>+{
>+      struct ice_dcf_hw *hw = param;
>+
>+      if (!ice_dcf_handle_vsi_update_event(hw)) {
>+              struct ice_dcf_adapter *dcf_ad =
>+                      container_of(hw, struct ice_dcf_adapter, real_hw);
>+
>+              ice_dcf_update_vf_vsi_map(&dcf_ad->parent.hw,
>+                                        hw->num_vfs, hw->vf_vsi_map);
>+      }
>+}
>+
>+void
>+ice_dcf_handle_pf_event_msg(struct ice_dcf_hw *dcf_hw,
>+                          uint8_t *msg, uint16_t msglen)
>+{
>+      struct virtchnl_pf_event *pf_msg = (struct virtchnl_pf_event *)msg;
>+
>+      if (msglen < sizeof(struct virtchnl_pf_event)) {
>+              PMD_DRV_LOG(DEBUG, "Invalid event message length : %u", msglen);
>+              return;
>+      }
>+
>+      switch (pf_msg->event) {
>+      case VIRTCHNL_EVENT_RESET_IMPENDING:
>+              PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
>+              rte_eal_alarm_set(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL * 2,
>+                                ice_dcf_vsi_update_service_handler, dcf_hw);
>+              break;
>+      case VIRTCHNL_EVENT_LINK_CHANGE:
>+              PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
>+              break;
>+      case VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
>+              PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
>+              break;
>+      case VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE:
>+              PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
>+                          pf_msg->event_data.vf_vsi_map.vf_id,
>+                          pf_msg->event_data.vf_vsi_map.vsi_id);
>+              rte_eal_alarm_set(ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL,
>+                                ice_dcf_vsi_update_service_handler, dcf_hw);
>+              break;
>+      default:
>+              PMD_DRV_LOG(ERR, "Unknown event received %u", pf_msg->event);
>+              break;
>+      }
>+}
>+
>+static int
>+ice_dcf_init_parent_hw(struct ice_hw *hw)
>+{
>+      struct ice_aqc_get_phy_caps_data *pcaps;
>+      enum ice_status status;
>+
>+      status = ice_aq_get_fw_ver(hw, NULL);
>+      if (status)
>+              return status;
>+
>+      status = ice_get_caps(hw);
>+      if (status)
>+              return status;
>+
>+      hw->port_info = (struct ice_port_info *)
>+                      ice_malloc(hw, sizeof(*hw->port_info));
>+      if (!hw->port_info)
>+              return ICE_ERR_NO_MEMORY;
>+
>+      /* set the back pointer to HW */
>+      hw->port_info->hw = hw;
>+
>+      /* Initialize port_info struct with switch configuration data */
>+      status = ice_get_initial_sw_cfg(hw);
>+      if (status)
>+              goto err_unroll_alloc;
>+
>+      pcaps = (struct ice_aqc_get_phy_caps_data *)
>+              ice_malloc(hw, sizeof(*pcaps));
>+      if (!pcaps) {
>+              status = ICE_ERR_NO_MEMORY;
>+              goto err_unroll_alloc;
>+      }
>+
>+      /* Initialize port_info struct with PHY capabilities */
>+      status = ice_aq_get_phy_caps(hw->port_info, false,
>+                                   ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
>+      ice_free(hw, pcaps);
>+      if (status)
>+              goto err_unroll_alloc;
>+
>+      /* Initialize port_info struct with link information */
>+      status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
>+      if (status)
>+              goto err_unroll_alloc;
>+
>+      status = ice_init_fltr_mgmt_struct(hw);
>+      if (status)
>+              goto err_unroll_alloc;
>+
>+      status = ice_init_hw_tbls(hw);
>+      if (status)
>+              goto err_unroll_fltr_mgmt_struct;
>+
>+      PMD_DRV_LOG(INFO,

Use PMD_INIT_LOG instead.

>+                  "firmware %d.%d.%d api %d.%d.%d build 0x%08x",
>+                  hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
>+                  hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
>+                  hw->fw_build);
>+
>+      return ICE_SUCCESS;
>+
>+err_unroll_fltr_mgmt_struct:
>+      ice_cleanup_fltr_mgmt_struct(hw);
>+err_unroll_alloc:
>+      ice_free(hw, hw->port_info);
>+      hw->port_info = NULL;
>+
>+      return status;
>+}
>+
>+static void ice_dcf_uninit_parent_hw(struct ice_hw *hw)
>+{
>+      ice_cleanup_fltr_mgmt_struct(hw);
>+
>+      ice_free_seg(hw);
>+      ice_free_hw_tbls(hw);
>+
>+      if (hw->port_info) {
>+              ice_free(hw, hw->port_info);

No need to check whether hw->port_info is NULL; ice_free (rte_free) handles
that internally, so the if can be dropped.

>+              hw->port_info = NULL;
>+      }
>+
>+      ice_clear_all_vsi_ctx(hw);
>+}
>+
>+static int
>+ice_dcf_request_pkg_name(struct ice_hw *hw, char *pkg_name)
>+{
>+      struct ice_dcf_adapter *dcf_adapter =
>+                      container_of(hw, struct ice_dcf_adapter, parent.hw);
>+
>+      /* TODO: check with DSN firstly by iAVF */
>+      PMD_DRV_LOG(DEBUG,
>+                  "DCF VSI_ID = %u",
>+                  dcf_adapter->real_hw.vsi_id);
>+
>+      snprintf(pkg_name,
>+               ICE_MAX_PKG_FILENAME_SIZE, "%s", ICE_PKG_FILE_UPDATES);
>+      if (!access(pkg_name, 0))
>+              return 0;
>+
>+      snprintf(pkg_name,
>+               ICE_MAX_PKG_FILENAME_SIZE, "%s", ICE_PKG_FILE_DEFAULT);
>+      if (!access(pkg_name, 0))
>+              return 0;
>+
>+      return -1;
>+}
>+
>+static int
>+ice_dcf_load_pkg(struct ice_hw *hw)
>+{
>+      char pkg_name[ICE_MAX_PKG_FILENAME_SIZE];
>+      uint8_t *pkg_buf;
>+      uint32_t buf_len;
>+      struct stat st;
>+      FILE *fp;
>+      int err;
>+
>+      if (ice_dcf_request_pkg_name(hw, pkg_name)) {
>+              PMD_INIT_LOG(ERR, "failed to locate the package file");
>+              return -ENOENT;
>+      }
>+
>+      PMD_INIT_LOG(DEBUG, "DDP package name: %s", pkg_name);
>+
>+      err = stat(pkg_name, &st);
>+      if (err) {
>+              PMD_INIT_LOG(ERR, "failed to get file status");
>+              return err;
>+      }
>+
>+      buf_len = st.st_size;
>+      pkg_buf = rte_malloc(NULL, buf_len, 0);
>+      if (!pkg_buf) {
>+              PMD_INIT_LOG(ERR, "failed to allocate buffer of size %u for package",
>+                           buf_len);
>+              return -1;
>+      }
>+
>+      fp = fopen(pkg_name, "rb");
>+      if (!fp)  {
>+              PMD_INIT_LOG(ERR, "failed to open file: %s", pkg_name);
>+              err = -1;
>+              goto ret;
>+      }
>+
>+      err = fread(pkg_buf, buf_len, 1, fp);
>+      fclose(fp);
>+      if (err != 1) {
>+              PMD_INIT_LOG(ERR, "failed to read package data");
>+              err = -1;
>+              goto ret;
>+      }
>+
>+      err = ice_copy_and_init_pkg(hw, pkg_buf, buf_len);
>+      if (err)
>+              PMD_INIT_LOG(ERR, "ice_copy_and_init_hw failed: %d", err);
>+
>+ret:
>+      rte_free(pkg_buf);
>+      return err;
>+}
>+
>+int
>+ice_dcf_init_parent_adapter(struct rte_eth_dev *eth_dev)
>+{
>+      struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
>+      struct ice_adapter *parent_adapter = &adapter->parent;
>+      struct ice_hw *parent_hw = &parent_adapter->hw;
>+      struct ice_dcf_hw *hw = &adapter->real_hw;
>+      const struct rte_ether_addr *mac;
>+      int err;
>+
>+      parent_adapter->eth_dev = eth_dev;
>+      parent_adapter->pf.adapter = parent_adapter;
>+      parent_adapter->pf.dev_data = eth_dev->data;
>+      parent_hw->back = parent_adapter;
>+      parent_hw->mac_type = ICE_MAC_GENERIC;
>+      parent_hw->vendor_id = ICE_INTEL_VENDOR_ID;
>+
>+      ice_init_lock(&parent_hw->adminq.sq_lock);
>+      ice_init_lock(&parent_hw->adminq.rq_lock);
>+      parent_hw->aq_send_cmd_fn = ice_dcf_send_aq_cmd;
>+      parent_hw->aq_send_cmd_param = &adapter->real_hw;
>+      parent_hw->dcf_enabled = true;
>+
>+      err = ice_dcf_init_parent_hw(parent_hw);
>+      if (err) {
>+              PMD_DRV_LOG(ERR, "failed to init the DCF parent hardware with error %d",

Use PMD_INIT_LOG instead.

>+                          err);
>+              return err;
>+      }
>+
>+      err = ice_dcf_load_pkg(parent_hw);
>+      if (err) {
>+              PMD_DRV_LOG(ERR, "failed to load package with error %d",

Use PMD_INIT_LOG instead.

>+                          err);
>+              goto uninit_hw;
>+      }
>+      parent_adapter->active_pkg_type = ice_load_pkg_type(parent_hw);
>+
>+      ice_dcf_update_vf_vsi_map(parent_hw,
>+                                hw->num_vfs, hw->vf_vsi_map);
>+
>+      mac = (const struct rte_ether_addr *)hw->avf.mac.addr;
>+      if (rte_is_valid_assigned_ether_addr(mac))
>+              rte_ether_addr_copy(mac, &parent_adapter->pf.dev_addr);
>+      else
>+              rte_eth_random_addr(parent_adapter->pf.dev_addr.addr_bytes);
>+
>+      eth_dev->data->mac_addrs = &parent_adapter->pf.dev_addr;
>+
>+      return 0;
>+
>+uninit_hw:
>+      ice_dcf_uninit_parent_hw(parent_hw);
>+      return err;
>+}
>+
>+void
>+ice_dcf_uninit_parent_adapter(struct rte_eth_dev *eth_dev)
>+{
>+      struct ice_dcf_adapter *adapter = eth_dev->data->dev_private;
>+      struct ice_adapter *parent_adapter = &adapter->parent;
>+      struct ice_hw *parent_hw = &parent_adapter->hw;
>+
>+      eth_dev->data->mac_addrs = NULL;
>+
>+      rte_eal_alarm_cancel(ice_dcf_vsi_update_service_handler,
>+                           &adapter->real_hw);
>+
>+      ice_dcf_uninit_parent_hw(parent_hw);
>+}
>diff --git a/drivers/net/ice/meson.build b/drivers/net/ice/meson.build
>index f9e897bbc..0df8ddc0f 100644
>--- a/drivers/net/ice/meson.build
>+++ b/drivers/net/ice/meson.build
>@@ -15,8 +15,8 @@ sources = files(
>       'ice_hash.c'
>       )
> 
>-deps += ['hash']
>-includes += include_directories('base')
>+deps += ['hash', 'net', 'common_iavf']
>+includes += include_directories('base', '../../common/iavf')
> 
> if arch_subdir == 'x86'
>       sources += files('ice_rxtx_vec_sse.c')
>@@ -37,4 +37,8 @@ if arch_subdir == 'x86'
>       endif
> endif
> 
>+sources += files('ice_dcf.c',
>+               'ice_dcf_parent.c',
>+               'ice_dcf_ethdev.c')
>+
> install_headers('rte_pmd_ice.h')
>diff --git a/mk/rte.app.mk b/mk/rte.app.mk
>index d295ca0a5..f3798a09f 100644
>--- a/mk/rte.app.mk
>+++ b/mk/rte.app.mk
>@@ -185,6 +185,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_I40E_PMD)       += -lrte_pmd_i40e
> _LDLIBS-$(CONFIG_RTE_LIBRTE_IAVF_PMD)       += -lrte_pmd_iavf
> _LDLIBS-$(CONFIG_RTE_LIBRTE_ICE_PMD)        += -lrte_pmd_ice
> IAVF-y := $(CONFIG_RTE_LIBRTE_IAVF_PMD)
>+IAVF-y += $(CONFIG_RTE_LIBRTE_ICE_PMD)
> ifeq ($(findstring y,$(IAVF-y)),y)
> _LDLIBS-y += -lrte_common_iavf
> endif
>-- 
>2.25.1
>
