> -----Original Message-----
> From: Junfeng Guo <junfeng....@intel.com>
> Sent: Monday, September 5, 2022 6:58 PM
> To: Zhang, Qi Z <qi.z.zh...@intel.com>; Wu, Jingjing
> <jingjing...@intel.com>; Xing, Beilei <beilei.x...@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.w...@intel.com>; Guo, Junfeng
> <junfeng....@intel.com>; Li, Xiaoyun <xiaoyun...@intel.com>
> Subject: [PATCH v2 03/14] net/idpf: add support for device initialization
>
> Support device init and the following dev ops:
> - dev_configure
> - dev_start
> - dev_stop
> - dev_close
>
> Signed-off-by: Beilei Xing <beilei.x...@intel.com>
> Signed-off-by: Xiaoyun Li <xiaoyun...@intel.com>
> Signed-off-by: Xiao Wang <xiao.w.w...@intel.com>
> Signed-off-by: Junfeng Guo <junfeng....@intel.com>
> ---
>  drivers/net/idpf/idpf_ethdev.c | 810 +++++++++++++++++++++++++++++++++
>  drivers/net/idpf/idpf_ethdev.h | 229 ++++++++++
>  drivers/net/idpf/idpf_vchnl.c  | 495 ++++++++++++++++++++
>  drivers/net/idpf/meson.build   |  18 +
>  drivers/net/idpf/version.map   |   3 +
>  drivers/net/meson.build        |   1 +
>  6 files changed, 1556 insertions(+)
>  create mode 100644 drivers/net/idpf/idpf_ethdev.c
>  create mode 100644 drivers/net/idpf/idpf_ethdev.h
>  create mode 100644 drivers/net/idpf/idpf_vchnl.c
>  create mode 100644 drivers/net/idpf/meson.build
>  create mode 100644 drivers/net/idpf/version.map
>
> diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
> new file mode 100644
> index 0000000000..f0452de09e
> --- /dev/null
> +++ b/drivers/net/idpf/idpf_ethdev.c
> @@ -0,0 +1,810 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2022 Intel Corporation
> + */
> +
> +#include <rte_atomic.h>
> +#include <rte_eal.h>
> +#include <rte_ether.h>
> +#include <rte_malloc.h>
> +#include <rte_memzone.h>
> +#include <rte_dev.h>
> +
> +#include "idpf_ethdev.h"
> +
> +#define IDPF_VPORT "vport"
> +
> +struct idpf_adapter_list adapter_list;
> +bool adapter_list_init;
> +
> +uint64_t idpf_timestamp_dynflag;
> +
> +static const char * const idpf_valid_args[] = {
> + IDPF_VPORT,
> + NULL
> +};
> +
> +static int idpf_dev_configure(struct rte_eth_dev *dev);
> +static int idpf_dev_start(struct rte_eth_dev *dev);
> +static int idpf_dev_stop(struct rte_eth_dev *dev);
> +static int idpf_dev_close(struct rte_eth_dev *dev);
> +
> +static const struct eth_dev_ops idpf_eth_dev_ops = {
> + .dev_configure = idpf_dev_configure,
> + .dev_start = idpf_dev_start,
> + .dev_stop = idpf_dev_stop,
> + .dev_close = idpf_dev_close,
> +};
> +
> +static int
> +idpf_init_vport_req_info(struct rte_eth_dev *dev)
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
> + struct idpf_adapter *adapter = vport->adapter;
> + struct virtchnl2_create_vport *vport_info;
> + uint16_t idx = adapter->next_vport_idx;
> +
> + if (!adapter->vport_req_info[idx]) {
> + adapter->vport_req_info[idx] = rte_zmalloc(NULL,
> + sizeof(struct virtchnl2_create_vport), 0);
> + if (!adapter->vport_req_info[idx]) {
> +			PMD_INIT_LOG(ERR, "Failed to allocate vport_req_info");
> + return -1;
> + }
> + }
> +
> +	vport_info =
> +		(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
> +
> +	vport_info->vport_type = rte_cpu_to_le_16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
> +	if (!adapter->txq_model) {
> +		vport_info->txq_model =
> +			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
> +		vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
> +		vport_info->num_tx_complq =
> +			IDPF_DEFAULT_TXQ_NUM * IDPF_TX_COMPLQ_PER_GRP;
> +	} else {
> +		vport_info->txq_model =
> +			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
> +		vport_info->num_tx_q = IDPF_DEFAULT_TXQ_NUM;
> +		vport_info->num_tx_complq = 0;
> +	}
> +	if (!adapter->rxq_model) {
> +		vport_info->rxq_model =
> +			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
> +		vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
> +		vport_info->num_rx_bufq =
> +			IDPF_DEFAULT_RXQ_NUM * IDPF_RX_BUFQ_PER_GRP;
> +	} else {
> +		vport_info->rxq_model =
> +			rte_cpu_to_le_16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
> +		vport_info->num_rx_q = IDPF_DEFAULT_RXQ_NUM;
> +		vport_info->num_rx_bufq = 0;
> +	}
> +
> + return 0;
> +}
> +
> +static uint16_t
> +idpf_get_next_vport_idx(struct idpf_vport **vports, uint16_t max_vport_nb,
> +			uint16_t cur_vport_idx)
> +{
> + uint16_t vport_idx;
> + uint16_t i;
> +
> + if (cur_vport_idx < max_vport_nb && !vports[cur_vport_idx + 1]) {
> + vport_idx = cur_vport_idx + 1;
> + return vport_idx;
> + }
> +
> + for (i = 0; i < max_vport_nb; i++) {
> + if (!vports[i])
> + break;
> + }
> +
> + if (i == max_vport_nb)
> + vport_idx = IDPF_INVALID_VPORT_IDX;
> + else
> + vport_idx = i;
> +
> + return vport_idx;
> +}
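One issue here: when cur_vport_idx == max_vport_nb - 1, the first check
reads vports[max_vport_nb], one element past the end of the array. A
bounds-safe sketch (same fallback scan, just guarding the fast path):

static uint16_t
idpf_get_next_vport_idx(struct idpf_vport **vports, uint16_t max_vport_nb,
			uint16_t cur_vport_idx)
{
	uint16_t i;

	/* Fast path: the slot right after the current one, if it exists
	 * and is free.
	 */
	if (cur_vport_idx + 1 < max_vport_nb && !vports[cur_vport_idx + 1])
		return cur_vport_idx + 1;

	/* Otherwise scan for the first free slot. */
	for (i = 0; i < max_vport_nb; i++) {
		if (!vports[i])
			return i;
	}

	return IDPF_INVALID_VPORT_IDX;
}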
> +
> +static uint16_t
> +idpf_parse_devarg_id(char *name)
> +{
> + uint16_t val;
> + char *p;
> +
> + p = strstr(name, "vport_");
> + p += sizeof("vport_") - 1;
> +
> + val = strtoul(p, NULL, 10);
> +
> + return val;
> +}
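strstr() returns NULL if the ethdev name somehow lacks "vport_", and the
unconditional pointer arithmetic would then crash. A defensive sketch
(assuming IDPF_INVALID_VPORT_IDX is an acceptable error value for the
caller to check):

static uint16_t
idpf_parse_devarg_id(char *name)
{
	char *p = strstr(name, "vport_");

	if (!p)
		return IDPF_INVALID_VPORT_IDX;

	return (uint16_t)strtoul(p + sizeof("vport_") - 1, NULL, 10);
}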
> +
> +#ifndef IDPF_RSS_KEY_LEN
> +#define IDPF_RSS_KEY_LEN 52
> +#endif
> +
> +static int
> +idpf_init_vport(struct rte_eth_dev *dev)
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
> + struct idpf_adapter *adapter = vport->adapter;
> + uint16_t idx = adapter->next_vport_idx;
> +	struct virtchnl2_create_vport *vport_info =
> +		(struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];
> + int i;
> +
> + vport->vport_id = vport_info->vport_id;
> + vport->txq_model = vport_info->txq_model;
> + vport->rxq_model = vport_info->rxq_model;
> + vport->num_tx_q = vport_info->num_tx_q;
> + vport->num_tx_complq = vport_info->num_tx_complq;
> + vport->num_rx_q = vport_info->num_rx_q;
> + vport->num_rx_bufq = vport_info->num_rx_bufq;
> + vport->max_mtu = vport_info->max_mtu;
> + rte_memcpy(vport->default_mac_addr,
> + vport_info->default_mac_addr, ETH_ALEN);
> + vport->rss_algorithm = vport_info->rss_algorithm;
> + vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
> + vport_info->rss_key_size);
> + vport->rss_lut_size = vport_info->rss_lut_size;
> + vport->sw_idx = idx;
> +
> + for (i = 0; i < vport_info->chunks.num_chunks; i++) {
> + if (vport_info->chunks.chunks[i].type ==
> + VIRTCHNL2_QUEUE_TYPE_TX) {
> + vport->chunks_info.tx_start_qid =
> + vport_info->chunks.chunks[i].start_queue_id;
> + vport->chunks_info.tx_qtail_start =
> + vport_info->chunks.chunks[i].qtail_reg_start;
> + vport->chunks_info.tx_qtail_spacing =
> +				vport_info->chunks.chunks[i].qtail_reg_spacing;
> + } else if (vport_info->chunks.chunks[i].type ==
> + VIRTCHNL2_QUEUE_TYPE_RX) {
> + vport->chunks_info.rx_start_qid =
> + vport_info->chunks.chunks[i].start_queue_id;
> + vport->chunks_info.rx_qtail_start =
> + vport_info->chunks.chunks[i].qtail_reg_start;
> + vport->chunks_info.rx_qtail_spacing =
> +				vport_info->chunks.chunks[i].qtail_reg_spacing;
> + } else if (vport_info->chunks.chunks[i].type ==
> + VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION) {
> + vport->chunks_info.tx_compl_start_qid =
> + vport_info->chunks.chunks[i].start_queue_id;
> + vport->chunks_info.tx_compl_qtail_start =
> + vport_info->chunks.chunks[i].qtail_reg_start;
> + vport->chunks_info.tx_compl_qtail_spacing =
> +				vport_info->chunks.chunks[i].qtail_reg_spacing;
> + } else if (vport_info->chunks.chunks[i].type ==
> + VIRTCHNL2_QUEUE_TYPE_RX_BUFFER) {
> + vport->chunks_info.rx_buf_start_qid =
> + vport_info->chunks.chunks[i].start_queue_id;
> + vport->chunks_info.rx_buf_qtail_start =
> + vport_info->chunks.chunks[i].qtail_reg_start;
> + vport->chunks_info.rx_buf_qtail_spacing =
> +				vport_info->chunks.chunks[i].qtail_reg_spacing;
> + }
> + }
> +
> + adapter->vports[idx] = vport;
> +	adapter->next_vport_idx = idpf_get_next_vport_idx(adapter->vports,
> +							  adapter->max_vport_nb, idx);
> + if (adapter->next_vport_idx == IDPF_INVALID_VPORT_IDX) {
> + PMD_INIT_LOG(ERR, "Failed to get next vport id");
> + return -1;
> + }
> +
> + vport->devarg_id = idpf_parse_devarg_id(dev->data->name);
> + vport->dev_data = dev->data;
> +
> + return 0;
> +}
> +
> +static int
> +idpf_dev_configure(__rte_unused struct rte_eth_dev *dev)
> +{
> + return 0;
> +}
> +
> +static int
> +idpf_dev_start(struct rte_eth_dev *dev)
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
> +
> + PMD_INIT_FUNC_TRACE();
> +
> + vport->stopped = 0;
> +
> + if (idpf_ena_dis_vport(vport, true)) {
> + PMD_DRV_LOG(ERR, "Failed to enable vport");
> + goto err_vport;
> + }
> +
> + return 0;
> +
> +err_vport:
> + return -1;
> +}
> +
> +static int
> +idpf_dev_stop(struct rte_eth_dev *dev)
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
> +
> + PMD_INIT_FUNC_TRACE();
> +
> + if (vport->stopped == 1)
> + return 0;
> +
> + if (idpf_ena_dis_vport(vport, false))
> + PMD_DRV_LOG(ERR, "disable vport failed");
> +
> + vport->stopped = 1;
> + dev->data->dev_started = 0;
> +
> + return 0;
> +}
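Minor: if I read rte_eth_dev_stop() right, the ethdev layer already
clears dev->data->dev_started once the dev_stop callback returns
success, so the explicit assignment above should be redundant. Worth
double-checking against the ethdev version this series targets.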
> +
> +static int
> +idpf_dev_close(struct rte_eth_dev *dev)
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
> + struct idpf_adapter *adapter = vport->adapter;
> +
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> + return 0;
> +
> + idpf_dev_stop(dev);
> + idpf_destroy_vport(vport);
> +
> + adapter->cur_vports &= ~BIT(vport->devarg_id);
> +
> + rte_free(vport);
> + dev->data->dev_private = NULL;
> +
> + return 0;
> +}
> +
> +static int
> +insert_value(struct idpf_adapter *adapter, uint16_t id)
> +{
> + uint16_t i;
> +
> + for (i = 0; i < adapter->req_vport_nb; i++) {
> + if (adapter->req_vports[i] == id)
> + return 0;
> + }
> +
> + if (adapter->req_vport_nb >= RTE_DIM(adapter->req_vports)) {
> + PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
> + IDPF_MAX_VPORT_NUM);
> + return -1;
> + }
> +
> + adapter->req_vports[adapter->req_vport_nb] = id;
> + adapter->req_vport_nb++;
> +
> + return 0;
> +}
> +
> +static const char *
> +parse_range(const char *value, struct idpf_adapter *adapter)
> +{
> + uint16_t lo, hi, i;
> + int n = 0;
> + int result;
> + const char *pos = value;
> +
> + result = sscanf(value, "%hu%n-%hu%n", &lo, &n, &hi, &n);
> + if (result == 1) {
> + if (lo >= IDPF_MAX_VPORT_NUM)
> + return NULL;
> + if (insert_value(adapter, lo))
> + return NULL;
> + } else if (result == 2) {
> + if (lo > hi || hi >= IDPF_MAX_VPORT_NUM)
> + return NULL;
> + for (i = lo; i <= hi; i++) {
> + if (insert_value(adapter, i))
> + return NULL;
> + }
> + } else {
> + return NULL;
> + }
> +
> + return pos + n;
> +}
> +
> +static int
> +parse_vport(const char *key, const char *value, void *args)
> +{
> + struct idpf_adapter *adapter = (struct idpf_adapter *)args;
> + const char *pos = value;
> + int i;
> +
> + adapter->req_vport_nb = 0;
> +
> + if (*pos == '[')
> + pos++;
> +
> + while (1) {
> + pos = parse_range(pos, adapter);
> + if (pos == NULL) {
> +			PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", ",
> +				     value, key);
> + return -1;
> + }
> + if (*pos != ',')
> + break;
> + pos++;
> + }
> +
> + if (*value == '[' && *pos != ']') {
> + PMD_INIT_LOG(ERR, "invalid value:\"%s\" for key:\"%s\", ",
> + value, key);
> + return -1;
> + }
> +
> + if (adapter->cur_vport_nb + adapter->req_vport_nb >
> + IDPF_MAX_VPORT_NUM) {
> + PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
> + IDPF_MAX_VPORT_NUM);
> + return -1;
> + }
> +
> + for (i = 0; i < adapter->req_vport_nb; i++) {
> + if (!(adapter->cur_vports & BIT(adapter->req_vports[i]))) {
> + adapter->cur_vports |= BIT(adapter->req_vports[i]);
> + adapter->cur_vport_nb++;
> + } else {
> +			PMD_INIT_LOG(ERR, "Vport %d has been created",
> +				     adapter->req_vports[i]);
> + return -1;
> + }
> + }
> +
> + return 0;
> +}
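For readers of the parser, the accepted grammar is a single ID, a lo-hi
range, or a bracketed comma-separated list of either; illustrative
(hypothetical) EAL invocations that would all parse:

    -a ca:00.0,vport=1
    -a ca:00.0,vport=0-3
    -a ca:00.0,vport=[0,2-4]

It would be good to spell this grammar out in the driver guide as well.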
> +
> +static int
> +idpf_parse_devargs(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
> +{
> + struct rte_devargs *devargs = pci_dev->device.devargs;
> + struct rte_kvargs *kvlist;
> + int ret;
> +
> + if (!devargs)
> + return 0;
> +
> + kvlist = rte_kvargs_parse(devargs->args, idpf_valid_args);
> + if (!kvlist) {
> + PMD_INIT_LOG(ERR, "invalid kvargs key");
> + return -EINVAL;
> + }
> +
> + ret = rte_kvargs_process(kvlist, IDPF_VPORT, &parse_vport,
> + adapter);
> +
> + rte_kvargs_free(kvlist);
> + return ret;
> +}
> +
> +static void
> +idpf_reset_pf(struct iecm_hw *hw)
> +{
> + uint32_t reg;
> +
> + reg = IECM_READ_REG(hw, PFGEN_CTRL);
> +	IECM_WRITE_REG(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR));
> +}
> +
> +#define IDPF_RESET_WAIT_CNT 100
> +static int
> +idpf_check_pf_reset_done(struct iecm_hw *hw)
> +{
> + uint32_t reg;
> + int i;
> +
> + for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
> + reg = IECM_READ_REG(hw, PFGEN_RSTAT);
> + if (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))
> + return 0;
> + rte_delay_ms(1000);
> + }
> +
> + PMD_INIT_LOG(ERR, "IDPF reset timeout");
> + return -EBUSY;
> +}
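Worst case this blocks probe for 100 * rte_delay_ms(1000) = 100 seconds.
If the intended tick was milliseconds rather than seconds, a sketch with
a ~10 s total budget (the right budget is a guess; pick whatever the
hardware actually needs):

	for (i = 0; i < IDPF_RESET_WAIT_CNT; i++) {
		reg = IECM_READ_REG(hw, PFGEN_RSTAT);
		if (reg != 0xFFFFFFFF && (reg & PFGEN_RSTAT_PFR_STATE_M))
			return 0;
		rte_delay_ms(100);	/* 100 ms tick instead of 1 s */
	}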
> +
> +#define CTLQ_NUM 2
> +static int
> +idpf_init_mbx(struct iecm_hw *hw)
> +{
> + struct iecm_ctlq_create_info ctlq_info[CTLQ_NUM] = {
> + {
> + .type = IECM_CTLQ_TYPE_MAILBOX_TX,
> + .id = IDPF_CTLQ_ID,
> + .len = IDPF_CTLQ_LEN,
> + .buf_size = IDPF_DFLT_MBX_BUF_SIZE,
> + .reg = {
> + .head = PF_FW_ATQH,
> + .tail = PF_FW_ATQT,
> + .len = PF_FW_ATQLEN,
> + .bah = PF_FW_ATQBAH,
> + .bal = PF_FW_ATQBAL,
> + .len_mask = PF_FW_ATQLEN_ATQLEN_M,
> +				.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M,
> + .head_mask = PF_FW_ATQH_ATQH_M,
> + }
> + },
> + {
> + .type = IECM_CTLQ_TYPE_MAILBOX_RX,
> + .id = IDPF_CTLQ_ID,
> + .len = IDPF_CTLQ_LEN,
> + .buf_size = IDPF_DFLT_MBX_BUF_SIZE,
> + .reg = {
> + .head = PF_FW_ARQH,
> + .tail = PF_FW_ARQT,
> + .len = PF_FW_ARQLEN,
> + .bah = PF_FW_ARQBAH,
> + .bal = PF_FW_ARQBAL,
> + .len_mask = PF_FW_ARQLEN_ARQLEN_M,
> +				.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M,
> + .head_mask = PF_FW_ARQH_ARQH_M,
> + }
> + }
> + };
> + struct iecm_ctlq_info *ctlq;
> + int ret;
> +
> + ret = iecm_ctlq_init(hw, CTLQ_NUM, ctlq_info);
> + if (ret)
> + return ret;
> +
> +	LIST_FOR_EACH_ENTRY_SAFE(ctlq, NULL, &hw->cq_list_head,
> +				 struct iecm_ctlq_info, cq_list) {
> +		if (ctlq->q_id == IDPF_CTLQ_ID &&
> +		    ctlq->cq_type == IECM_CTLQ_TYPE_MAILBOX_TX)
> +			hw->asq = ctlq;
> +		if (ctlq->q_id == IDPF_CTLQ_ID &&
> +		    ctlq->cq_type == IECM_CTLQ_TYPE_MAILBOX_RX)
> +			hw->arq = ctlq;
> +	}
> +
> + if (!hw->asq || !hw->arq) {
> + iecm_ctlq_deinit(hw);
> + ret = -ENOENT;
> + }
> +
> + return ret;
> +}
> +
> +static int
> +idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)
> +{
> + struct iecm_hw *hw = &adapter->hw;
> + int ret = 0;
> +
> + hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
> + hw->hw_addr_len = pci_dev->mem_resource[0].len;
> + hw->back = adapter;
> + hw->vendor_id = pci_dev->id.vendor_id;
> + hw->device_id = pci_dev->id.device_id;
> + hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
> +
> + strncpy(adapter->name, pci_dev->device.name, PCI_PRI_STR_SIZE);
> +
> + idpf_reset_pf(hw);
> + ret = idpf_check_pf_reset_done(hw);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "IDPF is still resetting");
> + goto err;
> + }
> +
> + ret = idpf_init_mbx(hw);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to init mailbox");
> + goto err;
> + }
> +
> +	adapter->mbx_resp = rte_zmalloc("idpf_adapter_mbx_resp",
> +					IDPF_DFLT_MBX_BUF_SIZE, 0);
> + if (!adapter->mbx_resp) {
> +		PMD_INIT_LOG(ERR, "Failed to allocate idpf_adapter_mbx_resp memory");
> + goto err_mbx;
> + }
> +
> + if (idpf_check_api_version(adapter)) {
> + PMD_INIT_LOG(ERR, "Failed to check api version");
> + goto err_api;
> + }
> +
> + adapter->caps = rte_zmalloc("idpf_caps",
> + sizeof(struct virtchnl2_get_capabilities), 0);
> + if (!adapter->caps) {
> + PMD_INIT_LOG(ERR, "Failed to allocate idpf_caps memory");
> + goto err_api;
> + }
> +
> + if (idpf_get_caps(adapter)) {
> + PMD_INIT_LOG(ERR, "Failed to get capabilities");
> + goto err_caps;
> + }
> +
> + adapter->max_vport_nb = adapter->caps->max_vports;
> +
> + adapter->vport_req_info = rte_zmalloc("vport_req_info",
> + adapter->max_vport_nb *
> + sizeof(*adapter->vport_req_info),
> + 0);
> + if (!adapter->vport_req_info) {
> +		PMD_INIT_LOG(ERR, "Failed to allocate vport_req_info memory");
> + goto err_caps;
> + }
> +
> +	adapter->vport_recv_info = rte_zmalloc("vport_recv_info",
> +					       adapter->max_vport_nb *
> +					       sizeof(*adapter->vport_recv_info),
> +					       0);
> + if (!adapter->vport_recv_info) {
> +		PMD_INIT_LOG(ERR, "Failed to allocate vport_recv_info memory");
> + goto err_vport_recv_info;
> + }
> +
> + adapter->vports = rte_zmalloc("vports",
> + adapter->max_vport_nb *
> + sizeof(*adapter->vports),
> + 0);
> + if (!adapter->vports) {
> + PMD_INIT_LOG(ERR, "Failed to allocate vports memory");
> + goto err_vports;
> + }
> +
> + adapter->max_rxq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -
> + sizeof(struct virtchnl2_config_rx_queues)) /
> + sizeof(struct virtchnl2_rxq_info);
> + adapter->max_txq_per_msg = (IDPF_DFLT_MBX_BUF_SIZE -
> + sizeof(struct virtchnl2_config_tx_queues)) /
> + sizeof(struct virtchnl2_txq_info);
> +
> + adapter->cur_vports = 0;
> + adapter->cur_vport_nb = 0;
> + adapter->next_vport_idx = 0;
> +
> + return ret;
> +
> +err_vports:
> + rte_free(adapter->vports);
> + adapter->vports = NULL;
> +err_vport_recv_info:
> + rte_free(adapter->vport_req_info);
> + adapter->vport_req_info = NULL;
> +err_caps:
> + rte_free(adapter->caps);
> + adapter->caps = NULL;
> +err_api:
> + rte_free(adapter->mbx_resp);
> + adapter->mbx_resp = NULL;
> +err_mbx:
> + iecm_ctlq_deinit(hw);
> +err:
> + return -1;
> +}
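The unwind here leaks vport_recv_info: when the "vports" allocation
fails, the code jumps to err_vports, frees the still-NULL vports
pointer, and falls through without ever releasing vport_recv_info. A
sketch of the usual pattern, each label releasing the allocation that
succeeded before it (labels renamed accordingly):

err_vports:
	rte_free(adapter->vport_recv_info);
	adapter->vport_recv_info = NULL;
err_vport_recv_info:
	rte_free(adapter->vport_req_info);
	adapter->vport_req_info = NULL;
err_caps:
	rte_free(adapter->caps);
	adapter->caps = NULL;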
> +
> +static int
> +idpf_dev_init(struct rte_eth_dev *dev, void *init_params)
> +{
> + struct idpf_vport *vport = dev->data->dev_private;
> + struct idpf_adapter *adapter = init_params;
> + int ret = 0;
> +
> + PMD_INIT_FUNC_TRACE();
> +
> + dev->dev_ops = &idpf_eth_dev_ops;
> + vport->adapter = adapter;
> +
> + /* for secondary processes, we don't initialise any further as primary
> + * has already done this work.
> + */
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> + return ret;
> +
> + dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
> +
> + ret = idpf_init_vport_req_info(dev);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to init vport req_info.");
> + goto err;
> + }
> +
> + ret = idpf_create_vport(dev);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to create vport.");
> + goto err_create_vport;
> + }
> +
> + ret = idpf_init_vport(dev);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to init vports.");
> + goto err_init_vport;
> + }
> +
> +	dev->data->mac_addrs = rte_zmalloc(NULL, RTE_ETHER_ADDR_LEN, 0);
> + if (dev->data->mac_addrs == NULL) {
> + PMD_INIT_LOG(ERR, "Cannot allocate mac_addr memory.");
> + ret = -ENOMEM;
> + goto err_init_vport;
> + }
> +
> +	rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
> +			    &dev->data->mac_addrs[0]);
> +
> + return 0;
> +
> +err_init_vport:
> + idpf_destroy_vport(vport);
> +err_create_vport:
> +	rte_free(vport->adapter->vport_req_info[vport->adapter->next_vport_idx]);
> +err:
> + return ret;
> +}
> +
> +static int
> +idpf_dev_uninit(struct rte_eth_dev *dev)
> +{
> + if (rte_eal_process_type() != RTE_PROC_PRIMARY)
> + return -EPERM;
> +
> + idpf_dev_close(dev);
> +
> + return 0;
> +}
> +
> +static const struct rte_pci_id pci_id_idpf_map[] = {
> + { RTE_PCI_DEVICE(IECM_INTEL_VENDOR_ID, IECM_DEV_ID_PF) },
> + { .vendor_id = 0, /* sentinel */ },
> +};
> +
> +struct idpf_adapter *
> +idpf_find_adapter(struct rte_pci_device *pci_dev)
> +{
> + struct idpf_adapter *adapter;
> +
> +	TAILQ_FOREACH(adapter, &adapter_list, next) {
> +		if (!strncmp(adapter->name, pci_dev->device.name,
> +			     PCI_PRI_STR_SIZE))
> +			return adapter;
> +	}
> +
> + return NULL;
> +}
> +
> +static int
> +idpf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
> +	       struct rte_pci_device *pci_dev)
> +{
> + struct idpf_adapter *adapter;
> + char name[RTE_ETH_NAME_MAX_LEN];
> + int i, retval;
> +
> + if (!adapter_list_init) {
> + TAILQ_INIT(&adapter_list);
> + adapter_list_init = true;
> + }
> +
> + adapter = idpf_find_adapter(pci_dev);
> + if (!adapter) {
> + adapter = (struct idpf_adapter *)rte_zmalloc("idpf_adapter",
> + sizeof(struct idpf_adapter), 0);
> + if (!adapter) {
> + PMD_INIT_LOG(ERR, "Failed to allocate adapter.");
> + return -1;
> + }
> +
> + retval = idpf_adapter_init(pci_dev, adapter);
> + if (retval) {
> + PMD_INIT_LOG(ERR, "Failed to init adapter.");
> + return retval;
> + }
> +
> + TAILQ_INSERT_TAIL(&adapter_list, adapter, next);
> + }
> +
> + retval = idpf_parse_devargs(pci_dev, adapter);
> + if (retval) {
> + PMD_INIT_LOG(ERR, "Failed to parse devargs");
> + return retval;
> + }
> +
> + for (i = 0; i < adapter->req_vport_nb; i++) {
> + snprintf(name, sizeof(name), "idpf_%s_vport_%d",
> + pci_dev->device.name,
> + adapter->req_vports[i]);
> + retval = rte_eth_dev_create(&pci_dev->device, name,
> + sizeof(struct idpf_vport),
> + NULL, NULL, idpf_dev_init,
> + adapter);
> +		if (retval)
> +			PMD_DRV_LOG(ERR, "failed to create vport %d",
> +				    adapter->req_vports[i]);
> + }
> +
> + return 0;
> +}
> +
> +static void
> +idpf_adapter_rel(struct idpf_adapter *adapter)
> +{
> + struct iecm_hw *hw = &adapter->hw;
> + int i;
> +
> + iecm_ctlq_deinit(hw);
> +
> + rte_free(adapter->caps);
> + adapter->caps = NULL;
> +
> + rte_free(adapter->mbx_resp);
> + adapter->mbx_resp = NULL;
> +
> + if (adapter->vport_req_info) {
> + for (i = 0; i < adapter->max_vport_nb; i++) {
> + rte_free(adapter->vport_req_info[i]);
> + adapter->vport_req_info[i] = NULL;
> + }
> + rte_free(adapter->vport_req_info);
> + adapter->vport_req_info = NULL;
> + }
> +
> + if (adapter->vport_recv_info) {
> + for (i = 0; i < adapter->max_vport_nb; i++) {
> + rte_free(adapter->vport_recv_info[i]);
> + adapter->vport_recv_info[i] = NULL;
> + }
> + }
> +
> + rte_free(adapter->vports);
> + adapter->vports = NULL;
> +}
> +
> +static int
> +idpf_pci_remove(struct rte_pci_device *pci_dev)
> +{
> + struct idpf_adapter *adapter = idpf_find_adapter(pci_dev);
> + char name[RTE_ETH_NAME_MAX_LEN];
> + struct rte_eth_dev *eth_dev;
> + int i;
> +
> + for (i = 0; i < IDPF_MAX_VPORT_NUM; i++) {
> + if (adapter->cur_vports & BIT(i)) {
> + snprintf(name, sizeof(name), "idpf_%s_vport_%d",
> + pci_dev->device.name, i);
> + eth_dev = rte_eth_dev_allocated(name);
> + if (eth_dev)
> +				rte_eth_dev_destroy(eth_dev, idpf_dev_uninit);
> + }
> + }
> +
> + TAILQ_REMOVE(&adapter_list, adapter, next);
> + idpf_adapter_rel(adapter);
> + rte_free(adapter);
> +
> + return 0;
> +}
> +
> +static struct rte_pci_driver rte_idpf_pmd = {
> + .id_table = pci_id_idpf_map,
> +	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_PROBE_AGAIN,
> + .probe = idpf_pci_probe,
> + .remove = idpf_pci_remove,
> +};
> +
> +/**
> + * Driver initialization routine.
> + * Invoked once at EAL init time.
> + * Register itself as the [Poll Mode] Driver of PCI devices.
> + */
> +RTE_PMD_REGISTER_PCI(net_idpf, rte_idpf_pmd);
> +RTE_PMD_REGISTER_PCI_TABLE(net_idpf, pci_id_idpf_map);
> +RTE_PMD_REGISTER_KMOD_DEP(net_idpf, "* igb_uio | uio_pci_generic | vfio-pci");
> +
> +RTE_LOG_REGISTER_SUFFIX(idpf_logtype_init, init, NOTICE);
> +RTE_LOG_REGISTER_SUFFIX(idpf_logtype_driver, driver, NOTICE);
> diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
> new file mode 100644
> index 0000000000..e60ba73ec1
> --- /dev/null
> +++ b/drivers/net/idpf/idpf_ethdev.h
> @@ -0,0 +1,229 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2022 Intel Corporation
> + */
> +
> +#ifndef _IDPF_ETHDEV_H_
> +#define _IDPF_ETHDEV_H_
> +
> +#include <stdint.h>
> +#include <rte_mbuf.h>
> +#include <rte_mempool.h>
> +#include <rte_malloc.h>
> +#include <rte_spinlock.h>
> +#include <rte_ethdev.h>
> +#include <rte_kvargs.h>
> +#include <ethdev_driver.h>
> +#include <ethdev_pci.h>
> +
> +#include "base/iecm_osdep.h"
> +#include "base/iecm_type.h"
> +#include "base/iecm_devids.h"
> +#include "base/iecm_lan_txrx.h"
> +#include "base/iecm_lan_pf_regs.h"
> +#include "base/virtchnl.h"
> +#include "base/virtchnl2.h"
> +
> +#define IDPF_MAX_VPORT_NUM 8
> +
> +#define IDPF_DEFAULT_RXQ_NUM 16
> +#define IDPF_DEFAULT_TXQ_NUM 16
> +
> +#define IDPF_INVALID_VPORT_IDX 0xffff
> +#define IDPF_TXQ_PER_GRP 1
> +#define IDPF_TX_COMPLQ_PER_GRP 1
> +#define IDPF_RXQ_PER_GRP 1
> +#define IDPF_RX_BUFQ_PER_GRP 2
> +
> +#define IDPF_CTLQ_ID -1
> +#define IDPF_CTLQ_LEN 64
> +#define IDPF_DFLT_MBX_BUF_SIZE 4096
> +
> +#define IDPF_DFLT_Q_VEC_NUM 1
> +#define IDPF_DFLT_INTERVAL 16
> +
> +#define IDPF_MAX_NUM_QUEUES 256
> +#define IDPF_MIN_BUF_SIZE 1024
> +#define IDPF_MAX_FRAME_SIZE 9728
> +
> +#define IDPF_NUM_MACADDR_MAX 64
> +
> +#define IDPF_MAX_PKT_TYPE 1024
> +
> +#define IDPF_VLAN_TAG_SIZE 4
> +#define IDPF_ETH_OVERHEAD \
> +	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)
> +
> +#ifndef ETH_ADDR_LEN
> +#define ETH_ADDR_LEN 6
> +#endif
> +
> +#define IDPF_ADAPTER_NAME_LEN (PCI_PRI_STR_SIZE + 1)
> +
> +/* Message type read in virtual channel from PF */
> +enum idpf_vc_result {
> + IDPF_MSG_ERR = -1, /* Meet error when accessing admin queue */
> + IDPF_MSG_NON, /* Read nothing from admin queue */
> + IDPF_MSG_SYS, /* Read system msg from admin queue */
> + IDPF_MSG_CMD, /* Read async command result */
> +};
> +
> +struct idpf_chunks_info {
> + uint32_t tx_start_qid;
> + uint32_t rx_start_qid;
> + /* Valid only if split queue model */
> + uint32_t tx_compl_start_qid;
> + uint32_t rx_buf_start_qid;
> +
> + uint64_t tx_qtail_start;
> + uint32_t tx_qtail_spacing;
> + uint64_t rx_qtail_start;
> + uint32_t rx_qtail_spacing;
> + uint64_t tx_compl_qtail_start;
> + uint32_t tx_compl_qtail_spacing;
> + uint64_t rx_buf_qtail_start;
> + uint32_t rx_buf_qtail_spacing;
> +};
> +
> +struct idpf_vport {
> +	struct idpf_adapter *adapter; /* Backreference to associated adapter */
> + uint16_t vport_id;
> + uint32_t txq_model;
> + uint32_t rxq_model;
> + uint16_t num_tx_q;
> + /* valid only if txq_model is split Q */
> + uint16_t num_tx_complq;
> + uint16_t num_rx_q;
> + /* valid only if rxq_model is split Q */
> + uint16_t num_rx_bufq;
> +
> + uint16_t max_mtu;
> + uint8_t default_mac_addr[VIRTCHNL_ETH_LENGTH_OF_ADDRESS];
> +
> + enum virtchnl_rss_algorithm rss_algorithm;
> + uint16_t rss_key_size;
> + uint16_t rss_lut_size;
> +
> + uint16_t sw_idx; /* SW idx */
> +
> + struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
> + uint16_t max_pkt_len; /* Maximum packet length */
> +
> + /* RSS info */
> + uint32_t *rss_lut;
> + uint8_t *rss_key;
> + uint64_t rss_hf;
> +
> + /* MSIX info*/
> + struct virtchnl2_queue_vector *qv_map; /* queue vector mapping */
> + uint16_t max_vectors;
> + struct virtchnl2_alloc_vectors *recv_vectors;
> +
> + /* Chunk info */
> + struct idpf_chunks_info chunks_info;
> +
> + /* Event from ipf */
> + bool link_up;
> + uint32_t link_speed;
> +
> + uint16_t devarg_id;
> + bool stopped;
> +	struct virtchnl2_vport_stats eth_stats_offset;
> +};
> +
> +struct idpf_adapter {
> + TAILQ_ENTRY(idpf_adapter) next;
> + struct iecm_hw hw;
> + char name[IDPF_ADAPTER_NAME_LEN];
> +
> + struct virtchnl_version_info virtchnl_version;
> + struct virtchnl2_get_capabilities *caps;
> +
> +	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
> + uint32_t cmd_retval; /* return value of the cmd response from ipf */
> +	uint8_t *mbx_resp; /* buffer to store the mailbox response from ipf */
> +
> + uint32_t txq_model;
> + uint32_t rxq_model;
> +
> + /* Vport info */
> + uint8_t **vport_req_info;
> + uint8_t **vport_recv_info;
> + struct idpf_vport **vports;
> + uint16_t max_vport_nb;
> + uint16_t req_vports[IDPF_MAX_VPORT_NUM];
> + uint16_t req_vport_nb;
> + uint16_t cur_vports;
> + uint16_t cur_vport_nb;
> + uint16_t next_vport_idx;
> +
> + uint16_t used_vecs_num;
> +
> + /* Max config queue number per VC message */
> + uint32_t max_rxq_per_msg;
> + uint32_t max_txq_per_msg;
> +
> + uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;
> +
> + bool stopped;
> +};
> +
> +TAILQ_HEAD(idpf_adapter_list, idpf_adapter);
> +extern struct idpf_adapter_list adapter_list;
> +
> +#define IDPF_DEV_TO_PCI(eth_dev) \
> + RTE_DEV_TO_PCI((eth_dev)->device)
> +
> +/* structure used for sending and checking response of virtchnl ops */
> +struct idpf_cmd_info {
> + uint32_t ops;
> + uint8_t *in_args; /* buffer for sending */
> + uint32_t in_args_size; /* buffer size for sending */
> + uint8_t *out_buffer; /* buffer for response */
> + uint32_t out_size; /* buffer size for response */
> +};
> +
> +/* Notify that the current command is done. Only call this after
> + * _atomic_set_cmd() has succeeded.
> + */
> +static inline void
> +_notify_cmd(struct idpf_adapter *adapter, int msg_ret)
> +{
> + adapter->cmd_retval = msg_ret;
> + rte_wmb();
> +	adapter->pend_cmd = VIRTCHNL_OP_UNKNOWN;
> +}
> +
> +/* Clear the current command. Only call this after
> + * _atomic_set_cmd() has succeeded.
> + */
> +static inline void
> +_clear_cmd(struct idpf_adapter *adapter)
> +{
> + rte_wmb();
> + adapter->pend_cmd = VIRTCHNL_OP_UNKNOWN;
> +	adapter->cmd_retval = VIRTCHNL_STATUS_SUCCESS;
> +}
> +
> +/* Check whether there is a pending cmd in execution. If none, set the
> + * new command.
> + */
> +static inline int
> +_atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops)
> +{
> +	int ret = rte_atomic32_cmpset(&adapter->pend_cmd,
> +				      VIRTCHNL_OP_UNKNOWN, ops);
> +
> + if (!ret)
> +		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", adapter->pend_cmd);
> +
> + return !ret;
> +}
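rte_atomic32_cmpset() expects a volatile uint32_t *, while pend_cmd is a
volatile enum virtchnl_ops, so this only builds through an implicit
pointer conversion; the rte_atomic API is also being phased out. A
sketch using the compiler builtins newer DPDK code prefers:

static inline int
_atomic_set_cmd(struct idpf_adapter *adapter, enum virtchnl_ops ops)
{
	enum virtchnl_ops op_unknown = VIRTCHNL_OP_UNKNOWN;
	bool set = __atomic_compare_exchange_n(&adapter->pend_cmd,
					       &op_unknown, ops, false,
					       __ATOMIC_ACQUIRE,
					       __ATOMIC_ACQUIRE);

	if (!set)
		PMD_DRV_LOG(ERR, "There is incomplete cmd %d",
			    adapter->pend_cmd);

	return !set;
}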
> +
> +struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev);
> +void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
> +int idpf_check_api_version(struct idpf_adapter *adapter);
> +int idpf_get_caps(struct idpf_adapter *adapter);
> +int idpf_create_vport(struct rte_eth_dev *dev);
> +int idpf_destroy_vport(struct idpf_vport *vport);
> +int idpf_ena_dis_vport(struct idpf_vport *vport, bool enable);
> +int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
> +		      uint16_t buf_len, uint8_t *buf);
> +
> +#endif /* _IDPF_ETHDEV_H_ */
> diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
> new file mode 100644
> index 0000000000..4708fb0666
> --- /dev/null
> +++ b/drivers/net/idpf/idpf_vchnl.c
> @@ -0,0 +1,495 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2022 Intel Corporation
> + */
> +
> +#include <stdio.h>
> +#include <errno.h>
> +#include <stdint.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <stdarg.h>
> +#include <inttypes.h>
> +#include <rte_byteorder.h>
> +#include <rte_common.h>
> +
> +#include <rte_debug.h>
> +#include <rte_atomic.h>
> +#include <rte_eal.h>
> +#include <rte_ether.h>
> +#include <ethdev_driver.h>
> +#include <ethdev_pci.h>
> +#include <rte_dev.h>
> +
> +#include "idpf_ethdev.h"
> +
> +#include "base/iecm_prototype.h"
> +
> +#define IDPF_CTLQ_LEN 64
> +
> +static int
> +idpf_vc_clean(struct idpf_adapter *adapter)
> +{
> + struct iecm_ctlq_msg *q_msg[IDPF_CTLQ_LEN];
> + uint16_t num_q_msg = IDPF_CTLQ_LEN;
> + struct iecm_dma_mem *dma_mem;
> + int err = 0;
> + uint32_t i;
> +
> + for (i = 0; i < 10; i++) {
> +		err = iecm_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
> + msleep(20);
> + if (num_q_msg)
> + break;
> + }
> + if (err)
> + goto error;
> +
> + /* Empty queue is not an error */
> + for (i = 0; i < num_q_msg; i++) {
> + dma_mem = q_msg[i]->ctx.indirect.payload;
> + if (dma_mem) {
> + iecm_free_dma_mem(&adapter->hw, dma_mem);
> + rte_free(dma_mem);
> + }
> + rte_free(q_msg[i]);
> + }
> +
> +error:
> + return err;
> +}
> +
> +static int
> +idpf_send_vc_msg(struct idpf_adapter *adapter, enum virtchnl_ops op,
> + uint16_t msg_size, uint8_t *msg)
> +{
> + struct iecm_ctlq_msg *ctlq_msg;
> + struct iecm_dma_mem *dma_mem;
> + int err = 0;
> +
> + err = idpf_vc_clean(adapter);
> + if (err)
> + goto err;
> +
> + ctlq_msg = (struct iecm_ctlq_msg *)rte_zmalloc(NULL,
> + sizeof(struct iecm_ctlq_msg), 0);
> + if (!ctlq_msg) {
> + err = -ENOMEM;
> + goto err;
> + }
> +
> + dma_mem = (struct iecm_dma_mem *)rte_zmalloc(NULL,
> + sizeof(struct iecm_dma_mem), 0);
> + if (!dma_mem) {
> + err = -ENOMEM;
> + goto dma_mem_error;
> + }
> +
> + dma_mem->size = IDPF_DFLT_MBX_BUF_SIZE;
> + iecm_alloc_dma_mem(&adapter->hw, dma_mem, dma_mem->size);
> + if (!dma_mem->va) {
> + err = -ENOMEM;
> + goto dma_alloc_error;
> + }
> +
> + memcpy(dma_mem->va, msg, msg_size);
> +
> + ctlq_msg->opcode = iecm_mbq_opc_send_msg_to_pf;
> + ctlq_msg->func_id = 0;
> + ctlq_msg->data_len = msg_size;
> + ctlq_msg->cookie.mbx.chnl_opcode = op;
> + ctlq_msg->cookie.mbx.chnl_retval = VIRTCHNL_STATUS_SUCCESS;
> + ctlq_msg->ctx.indirect.payload = dma_mem;
> +
> + err = iecm_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
> + if (err)
> + goto send_error;
> +
> + return err;
> +
> +send_error:
> + iecm_free_dma_mem(&adapter->hw, dma_mem);
> +dma_alloc_error:
> + rte_free(dma_mem);
> +dma_mem_error:
> + rte_free(ctlq_msg);
> +err:
> + return err;
> +}
> +
> +static enum idpf_vc_result
> +idpf_read_msg_from_ipf(struct idpf_adapter *adapter, uint16_t buf_len,
> + uint8_t *buf)
> +{
> + struct iecm_hw *hw = &adapter->hw;
> + struct iecm_ctlq_msg ctlq_msg;
> + struct iecm_dma_mem *dma_mem = NULL;
> + enum idpf_vc_result result = IDPF_MSG_NON;
> + enum virtchnl_ops opcode;
> + uint16_t pending = 1;
> + int ret;
> +
> + ret = iecm_ctlq_recv(hw->arq, &pending, &ctlq_msg);
> + if (ret) {
> + PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
> + if (ret != IECM_ERR_CTLQ_NO_WORK)
> + result = IDPF_MSG_ERR;
> + return result;
> + }
> +
> + rte_memcpy(buf, ctlq_msg.ctx.indirect.payload->va, buf_len);
> +
> +	opcode = (enum virtchnl_ops)
> +		 rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_opcode);
> +	adapter->cmd_retval = (enum virtchnl_status_code)
> +			      rte_le_to_cpu_32(ctlq_msg.cookie.mbx.chnl_retval);
> +
> + PMD_DRV_LOG(DEBUG, "CQ from ipf carries opcode %u, retval %d",
> + opcode, adapter->cmd_retval);
> +
> + if (opcode == VIRTCHNL2_OP_EVENT) {
> +		struct virtchnl2_event *ve =
> +			(struct virtchnl2_event *)ctlq_msg.ctx.indirect.payload->va;
> +
> + result = IDPF_MSG_SYS;
> + switch (ve->event) {
> + case VIRTCHNL2_EVENT_LINK_CHANGE:
> + /* TBD */
> + break;
> + default:
> +			PMD_DRV_LOG(ERR, "%s: Unknown event %d from ipf",
> +				    __func__, ve->event);
> + break;
> + }
> + } else {
> + /* async reply msg on command issued by pf previously */
> + result = IDPF_MSG_CMD;
> + if (opcode != adapter->pend_cmd) {
> +			PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
> +				    adapter->pend_cmd, opcode);
> + result = IDPF_MSG_ERR;
> + }
> + }
> +
> + if (ctlq_msg.data_len)
> + dma_mem = ctlq_msg.ctx.indirect.payload;
> + else
> + pending = 0;
> +
> + ret = iecm_ctlq_post_rx_buffs(hw, hw->arq, &pending, &dma_mem);
> + if (ret && dma_mem)
> + iecm_free_dma_mem(hw, dma_mem);
> +
> + return result;
> +}
> +
> +#define MAX_TRY_TIMES 200
> +#define ASQ_DELAY_MS 10
> +
> +int
> +idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops, uint16_t buf_len,
> +		  uint8_t *buf)
> +{
> + int ret, err = 0, i = 0;
> +
> + do {
> + ret = idpf_read_msg_from_ipf(adapter, buf_len, buf);
> + if (ret == IDPF_MSG_CMD)
> + break;
> + rte_delay_ms(ASQ_DELAY_MS);
> + } while (i++ < MAX_TRY_TIMES);
> + if (i >= MAX_TRY_TIMES ||
> + adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
> + err = -1;
> +		PMD_DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
> +			    adapter->cmd_retval, ops);
> + }
> +
> + return err;
> +}
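With MAX_TRY_TIMES 200 and ASQ_DELAY_MS 10 this polls for up to ~2
seconds, and an IDPF_MSG_ERR result (a dead admin queue) still burns the
whole budget. A sketch that fails fast on hard errors, assuming a
mismatch is not considered retryable:

	do {
		ret = idpf_read_msg_from_ipf(adapter, buf_len, buf);
		if (ret == IDPF_MSG_CMD)
			break;
		if (ret == IDPF_MSG_ERR)
			return -1;	/* no point retrying a hard error */
		rte_delay_ms(ASQ_DELAY_MS);
	} while (i++ < MAX_TRY_TIMES);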
> +
> +static int
> +idpf_execute_vc_cmd(struct idpf_adapter *adapter, struct idpf_cmd_info *args)
> +{
> + int err = 0;
> + int i = 0;
> + int ret;
> +
> + if (_atomic_set_cmd(adapter, args->ops))
> + return -1;
> +
> +	ret = idpf_send_vc_msg(adapter, args->ops, args->in_args_size,
> +			       args->in_args);
> + if (ret) {
> + PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
> + _clear_cmd(adapter);
> + return ret;
> + }
> +
> + switch (args->ops) {
> + case VIRTCHNL_OP_VERSION:
> + case VIRTCHNL2_OP_GET_CAPS:
> + case VIRTCHNL2_OP_CREATE_VPORT:
> + case VIRTCHNL2_OP_DESTROY_VPORT:
> + case VIRTCHNL2_OP_SET_RSS_KEY:
> + case VIRTCHNL2_OP_SET_RSS_LUT:
> + case VIRTCHNL2_OP_SET_RSS_HASH:
> + case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
> + case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
> + case VIRTCHNL2_OP_ENABLE_QUEUES:
> + case VIRTCHNL2_OP_DISABLE_QUEUES:
> + case VIRTCHNL2_OP_ENABLE_VPORT:
> + case VIRTCHNL2_OP_DISABLE_VPORT:
> + case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
> + case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
> + case VIRTCHNL2_OP_ALLOC_VECTORS:
> + case VIRTCHNL2_OP_DEALLOC_VECTORS:
> + case VIRTCHNL2_OP_GET_STATS:
> + /* for init virtchnl ops, need to poll the response */
> +		err = idpf_read_one_msg(adapter, args->ops, args->out_size,
> +					args->out_buffer);
> + _clear_cmd(adapter);
> + break;
> + case VIRTCHNL2_OP_GET_PTYPE_INFO:
> +		/* for multiple response messages,
> +		 * do not handle the response here.
> +		 */
> + break;
> + default:
> + /* For other virtchnl ops in running time,
> + * wait for the cmd done flag.
> + */
> + do {
> + if (adapter->pend_cmd == VIRTCHNL_OP_UNKNOWN)
> + break;
> + rte_delay_ms(ASQ_DELAY_MS);
> +			/* If no msg is read, or a sys event is read, continue */
> + } while (i++ < MAX_TRY_TIMES);
> +		/* If no response is received, clear the command */
> + if (i >= MAX_TRY_TIMES ||
> + adapter->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
> + err = -1;
> +			PMD_DRV_LOG(ERR, "No response or return failure (%d) for cmd %d",
> +				    adapter->cmd_retval, args->ops);
> + _clear_cmd(adapter);
> + }
> + break;
> + }
> +
> + return err;
> +}
> +
> +int
> +idpf_check_api_version(struct idpf_adapter *adapter)
> +{
> + struct virtchnl_version_info version;
> + struct idpf_cmd_info args;
> + int err;
> +
> + memset(&version, 0, sizeof(struct virtchnl_version_info));
> + version.major = VIRTCHNL_VERSION_MAJOR_2;
> + version.minor = VIRTCHNL_VERSION_MINOR_0;
> +
> + args.ops = VIRTCHNL_OP_VERSION;
> + args.in_args = (uint8_t *)&version;
> + args.in_args_size = sizeof(version);
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_execute_vc_cmd(adapter, &args);
> + if (err) {
> +		PMD_DRV_LOG(ERR,
> +			    "Failed to execute command of VIRTCHNL_OP_VERSION");
> + return err;
> + }
> +
> + return err;
> +}
> +
> +int
> +idpf_get_caps(struct idpf_adapter *adapter)
> +{
> + struct virtchnl2_get_capabilities caps_msg;
> + struct idpf_cmd_info args;
> + int err;
> +
> + memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));
> + caps_msg.csum_caps =
> + VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |
> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP |
> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |
> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |
> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP |
> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |
> + VIRTCHNL2_CAP_TX_CSUM_GENERIC |
> + VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |
> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |
> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |
> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |
> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |
> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP |
> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP |
> + VIRTCHNL2_CAP_RX_CSUM_GENERIC;
> +
> + caps_msg.seg_caps =
> + VIRTCHNL2_CAP_SEG_IPV4_TCP |
> + VIRTCHNL2_CAP_SEG_IPV4_UDP |
> + VIRTCHNL2_CAP_SEG_IPV4_SCTP |
> + VIRTCHNL2_CAP_SEG_IPV6_TCP |
> + VIRTCHNL2_CAP_SEG_IPV6_UDP |
> + VIRTCHNL2_CAP_SEG_IPV6_SCTP |
> + VIRTCHNL2_CAP_SEG_GENERIC;
> +
> + caps_msg.rss_caps =
> + VIRTCHNL2_CAP_RSS_IPV4_TCP |
> + VIRTCHNL2_CAP_RSS_IPV4_UDP |
> + VIRTCHNL2_CAP_RSS_IPV4_SCTP |
> + VIRTCHNL2_CAP_RSS_IPV4_OTHER |
> + VIRTCHNL2_CAP_RSS_IPV6_TCP |
> + VIRTCHNL2_CAP_RSS_IPV6_UDP |
> + VIRTCHNL2_CAP_RSS_IPV6_SCTP |
> + VIRTCHNL2_CAP_RSS_IPV6_OTHER |
> + VIRTCHNL2_CAP_RSS_IPV4_AH |
> + VIRTCHNL2_CAP_RSS_IPV4_ESP |
> + VIRTCHNL2_CAP_RSS_IPV4_AH_ESP |
> + VIRTCHNL2_CAP_RSS_IPV6_AH |
> + VIRTCHNL2_CAP_RSS_IPV6_ESP |
> + VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;
> +
> + caps_msg.hsplit_caps =
> + VIRTCHNL2_CAP_RX_HSPLIT_AT_L2 |
> + VIRTCHNL2_CAP_RX_HSPLIT_AT_L3 |
> + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
> + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6;
> +
> + caps_msg.rsc_caps =
> + VIRTCHNL2_CAP_RSC_IPV4_TCP |
> + VIRTCHNL2_CAP_RSC_IPV4_SCTP |
> + VIRTCHNL2_CAP_RSC_IPV6_TCP |
> + VIRTCHNL2_CAP_RSC_IPV6_SCTP;
> +
> + caps_msg.other_caps =
> + VIRTCHNL2_CAP_RDMA |
> + VIRTCHNL2_CAP_SRIOV |
> + VIRTCHNL2_CAP_MACFILTER |
> + VIRTCHNL2_CAP_FLOW_DIRECTOR |
> + VIRTCHNL2_CAP_SPLITQ_QSCHED |
> + VIRTCHNL2_CAP_CRC |
> + VIRTCHNL2_CAP_WB_ON_ITR |
> + VIRTCHNL2_CAP_PROMISC |
> + VIRTCHNL2_CAP_LINK_SPEED |
> + VIRTCHNL2_CAP_VLAN;
> +
> + args.ops = VIRTCHNL2_OP_GET_CAPS;
> + args.in_args = (uint8_t *)&caps_msg;
> + args.in_args_size = sizeof(caps_msg);
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_execute_vc_cmd(adapter, &args);
> + if (err) {
> +		PMD_DRV_LOG(ERR,
> +			    "Failed to execute command of VIRTCHNL2_OP_GET_CAPS");
> + return err;
> + }
> +
> + rte_memcpy(adapter->caps, args.out_buffer, sizeof(caps_msg));
> +
> + return err;
> +}
> +
> +int
> +idpf_create_vport(struct rte_eth_dev *dev)
> +{
> + struct rte_pci_device *pci_dev = IDPF_DEV_TO_PCI(dev);
> + struct idpf_adapter *adapter = idpf_find_adapter(pci_dev);
> + uint16_t idx = adapter->next_vport_idx;
> +	struct virtchnl2_create_vport *vport_req_info =
> +		(struct virtchnl2_create_vport *)adapter->vport_req_info[idx];
> + struct virtchnl2_create_vport vport_msg;
> + struct idpf_cmd_info args;
> + int err = -1;
> +
> + memset(&vport_msg, 0, sizeof(struct virtchnl2_create_vport));
> + vport_msg.vport_type = vport_req_info->vport_type;
> + vport_msg.txq_model = vport_req_info->txq_model;
> + vport_msg.rxq_model = vport_req_info->rxq_model;
> + vport_msg.num_tx_q = vport_req_info->num_tx_q;
> + vport_msg.num_tx_complq = vport_req_info->num_tx_complq;
> + vport_msg.num_rx_q = vport_req_info->num_rx_q;
> + vport_msg.num_rx_bufq = vport_req_info->num_rx_bufq;
> +
> + memset(&args, 0, sizeof(args));
> + args.ops = VIRTCHNL2_OP_CREATE_VPORT;
> + args.in_args = (uint8_t *)&vport_msg;
> + args.in_args_size = sizeof(vport_msg);
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_execute_vc_cmd(adapter, &args);
> + if (err) {
> +		PMD_DRV_LOG(ERR,
> +			    "Failed to execute command of VIRTCHNL2_OP_CREATE_VPORT");
> + return err;
> + }
> +
> + if (!adapter->vport_recv_info[idx]) {
> +		adapter->vport_recv_info[idx] = rte_zmalloc(NULL,
> +						IDPF_DFLT_MBX_BUF_SIZE, 0);
> + if (!adapter->vport_recv_info[idx]) {
> +			PMD_INIT_LOG(ERR, "Failed to alloc vport_recv_info.");
> + return err;
> + }
> + }
> + rte_memcpy(adapter->vport_recv_info[idx], args.out_buffer,
> + IDPF_DFLT_MBX_BUF_SIZE);
> + return err;
> +}
> +
> +int
> +idpf_destroy_vport(struct idpf_vport *vport)
> +{
> + struct idpf_adapter *adapter = vport->adapter;
> + struct virtchnl2_vport vc_vport;
> + struct idpf_cmd_info args;
> + int err;
> +
> + vc_vport.vport_id = vport->vport_id;
> +
> + memset(&args, 0, sizeof(args));
> + args.ops = VIRTCHNL2_OP_DESTROY_VPORT;
> + args.in_args = (uint8_t *)&vc_vport;
> + args.in_args_size = sizeof(vc_vport);
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_execute_vc_cmd(adapter, &args);
> + if (err) {
> +		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_DESTROY_VPORT");
> + return err;
> + }
> +
> + return err;
> +}
> +
> +int
> +idpf_ena_dis_vport(struct idpf_vport *vport, bool enable)
> +{
> + struct idpf_adapter *adapter = vport->adapter;
> + struct virtchnl2_vport vc_vport;
> + struct idpf_cmd_info args;
> + int err;
> +
> + vc_vport.vport_id = vport->vport_id;
> + args.ops = enable ? VIRTCHNL2_OP_ENABLE_VPORT :
> + VIRTCHNL2_OP_DISABLE_VPORT;
> + args.in_args = (u8 *)&vc_vport;
> + args.in_args_size = sizeof(vc_vport);
> + args.out_buffer = adapter->mbx_resp;
> + args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
> +
> + err = idpf_execute_vc_cmd(adapter, &args);
> + if (err) {
> +		PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_VPORT",
> +			    enable ? "ENABLE" : "DISABLE");
> + }
> +
> + return err;
> +}
> diff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build
> new file mode 100644
> index 0000000000..7d776d3a15
> --- /dev/null
> +++ b/drivers/net/idpf/meson.build
> @@ -0,0 +1,18 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2022 Intel Corporation
> +
> +if is_windows
> + build = false
> + reason = 'not supported on Windows'
> + subdir_done()
We should use 4 spaces instead of a tab in meson files.
> +endif
> +
> +subdir('base')
> +objs = [base_objs]
> +
> +sources = files(
> + 'idpf_ethdev.c',
> + 'idpf_vchnl.c',
Ditto.
> +)
> +
> +includes += include_directories('base')
> \ No newline at end of file
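To make the indentation comments above concrete, the whole file with
4-space indents (and the missing trailing newline added) would be:

# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2022 Intel Corporation

if is_windows
    build = false
    reason = 'not supported on Windows'
    subdir_done()
endif

subdir('base')
objs = [base_objs]

sources = files(
    'idpf_ethdev.c',
    'idpf_vchnl.c',
)

includes += include_directories('base')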
> diff --git a/drivers/net/idpf/version.map b/drivers/net/idpf/version.map
> new file mode 100644
> index 0000000000..b7da224860
> --- /dev/null
> +++ b/drivers/net/idpf/version.map
> @@ -0,0 +1,3 @@
> +DPDK_22 {
> + local: *;
> +};
> \ No newline at end of file
> diff --git a/drivers/net/meson.build b/drivers/net/meson.build
> index e35652fe63..8faf8120c2 100644
> --- a/drivers/net/meson.build
> +++ b/drivers/net/meson.build
> @@ -28,6 +28,7 @@ drivers = [
> 'i40e',
> 'iavf',
> 'ice',
> + 'idpf',
Ditto.
> 'igc',
> 'ionic',
> 'ipn3ke',
> --
> 2.25.1