From: Beilei Xing <beilei.x...@intel.com>

Add idpf_rxq and idpf_txq structure in common module.
Move configure rxq and txq to common module.

Signed-off-by: Beilei Xing <beilei.x...@intel.com>
---
 drivers/common/idpf/idpf_common_device.h   |   2 +
 drivers/common/idpf/idpf_common_rxtx.h     | 112 +++++++++++++
 drivers/common/idpf/idpf_common_virtchnl.c | 160 ++++++++++++++++++
 drivers/common/idpf/idpf_common_virtchnl.h |   9 +-
 drivers/common/idpf/version.map            |   3 +-
 drivers/net/idpf/idpf_ethdev.h             |   2 -
 drivers/net/idpf/idpf_rxtx.h               |  97 +----------
 drivers/net/idpf/idpf_vchnl.c              | 184 ---------------------
 drivers/net/idpf/meson.build               |   1 -
 9 files changed, 283 insertions(+), 287 deletions(-)
 create mode 100644 drivers/common/idpf/idpf_common_rxtx.h
 delete mode 100644 drivers/net/idpf/idpf_vchnl.c

diff --git a/drivers/common/idpf/idpf_common_device.h b/drivers/common/idpf/idpf_common_device.h
index 78f41554eb..c007c0b705 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -18,8 +18,10 @@
 
 #define IDPF_DEFAULT_RXQ_NUM   16
 #define IDPF_RX_BUFQ_PER_GRP   2
+#define IDPF_RXQ_PER_GRP       1
 #define IDPF_DEFAULT_TXQ_NUM   16
 #define IDPF_TX_COMPLQ_PER_GRP 1
+#define IDPF_TXQ_PER_GRP       1
 
 #define IDPF_MAX_PKT_TYPE      1024
 
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
new file mode 100644
index 0000000000..a9ed31c08a
--- /dev/null
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#ifndef _IDPF_COMMON_RXTX_H_
+#define _IDPF_COMMON_RXTX_H_
+
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf_core.h>
+
+#include "idpf_common_device.h"
+
+struct idpf_rx_stats {
+       uint64_t mbuf_alloc_failed;
+};
+
+struct idpf_rx_queue {
+       struct idpf_adapter *adapter;   /* the adapter this queue belongs to */
+       struct rte_mempool *mp;         /* mbuf pool to populate Rx ring */
+       const struct rte_memzone *mz;   /* memzone for Rx ring */
+       volatile void *rx_ring;
+       struct rte_mbuf **sw_ring;      /* address of SW ring */
+       uint64_t rx_ring_phys_addr;     /* Rx ring DMA address */
+
+       uint16_t nb_rx_desc;            /* ring length */
+       uint16_t rx_tail;               /* current value of tail */
+       volatile uint8_t *qrx_tail;     /* register address of tail */
+       uint16_t rx_free_thresh;        /* max free RX desc to hold */
+       uint16_t nb_rx_hold;            /* number of held free RX desc */
+       struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
+       struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
+       struct rte_mbuf fake_mbuf;      /* dummy mbuf */
+
+       /* used for VPMD */
+       uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
+       uint16_t rxrearm_start;    /* the idx we start the re-arming from */
+       uint64_t mbuf_initializer; /* value to init mbufs */
+
+       uint16_t rx_nb_avail;
+       uint16_t rx_next_avail;
+
+       uint16_t port_id;       /* device port ID */
+       uint16_t queue_id;      /* Rx queue index */
+       uint16_t rx_buf_len;    /* The packet buffer size */
+       uint16_t rx_hdr_len;    /* The header buffer size */
+       uint16_t max_pkt_len;   /* Maximum packet length */
+       uint8_t rxdid;
+
+       bool q_set;             /* if rx queue has been configured */
+       bool q_started;         /* if rx queue has been started */
+       bool rx_deferred_start; /* don't start this queue in dev start */
+       const struct idpf_rxq_ops *ops;
+
+       struct idpf_rx_stats rx_stats;
+
+       /* only valid for split queue mode */
+       uint8_t expected_gen_id;
+       struct idpf_rx_queue *bufq1;
+       struct idpf_rx_queue *bufq2;
+
+       uint64_t offloads;
+       uint32_t hw_register_set;
+};
+
+struct idpf_tx_entry {
+       struct rte_mbuf *mbuf;
+       uint16_t next_id;
+       uint16_t last_id;
+};
+
+/* Structure associated with each TX queue. */
+struct idpf_tx_queue {
+       const struct rte_memzone *mz;           /* memzone for Tx ring */
+       volatile struct idpf_flex_tx_desc *tx_ring;     /* Tx ring virtual address */
+       volatile union {
+               struct idpf_flex_tx_sched_desc *desc_ring;
+               struct idpf_splitq_tx_compl_desc *compl_ring;
+       };
+       uint64_t tx_ring_phys_addr;             /* Tx ring DMA address */
+       struct idpf_tx_entry *sw_ring;          /* address array of SW ring */
+
+       uint16_t nb_tx_desc;            /* ring length */
+       uint16_t tx_tail;               /* current value of tail */
+       volatile uint8_t *qtx_tail;     /* register address of tail */
+       /* number of used desc since RS bit set */
+       uint16_t nb_used;
+       uint16_t nb_free;
+       uint16_t last_desc_cleaned;     /* last desc have been cleaned*/
+       uint16_t free_thresh;
+       uint16_t rs_thresh;
+
+       uint16_t port_id;
+       uint16_t queue_id;
+       uint64_t offloads;
+       uint16_t next_dd;       /* next to set RS, for VPMD */
+       uint16_t next_rs;       /* next to check DD,  for VPMD */
+
+       bool q_set;             /* if tx queue has been configured */
+       bool q_started;         /* if tx queue has been started */
+       bool tx_deferred_start; /* don't start this queue in dev start */
+       const struct idpf_txq_ops *ops;
+
+       /* only valid for split queue mode */
+       uint16_t sw_nb_desc;
+       uint16_t sw_tail;
+       void **txqs;
+       uint32_t tx_start_qid;
+       uint8_t expected_gen_id;
+       struct idpf_tx_queue *complq;
+};
+
+#endif /* _IDPF_COMMON_RXTX_H_ */
diff --git a/drivers/common/idpf/idpf_common_virtchnl.c b/drivers/common/idpf/idpf_common_virtchnl.c
index c5b68e8968..324214caa1 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -805,3 +805,163 @@ idpf_vc_query_ptype_info(struct idpf_adapter *adapter)
        rte_free(ptype_info);
        return err;
 }
+
+#define IDPF_RX_BUF_STRIDE             64
+int
+idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
+{
+       struct idpf_adapter *adapter = vport->adapter;
+       struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+       struct virtchnl2_rxq_info *rxq_info;
+       struct idpf_cmd_info args;
+       uint16_t num_qs;
+       int size, err, i;
+
+       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+               num_qs = IDPF_RXQ_PER_GRP;
+       else
+               num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
+
+       size = sizeof(*vc_rxqs) + (num_qs - 1) *
+               sizeof(struct virtchnl2_rxq_info);
+       vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+       if (vc_rxqs == NULL) {
+               DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+               err = -ENOMEM;
+               return err;
+       }
+       vc_rxqs->vport_id = vport->vport_id;
+       vc_rxqs->num_qinfo = num_qs;
+       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+               rxq_info = &vc_rxqs->qinfo[0];
+               rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
+               rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+               rxq_info->queue_id = rxq->queue_id;
+               rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+               rxq_info->data_buffer_size = rxq->rx_buf_len;
+               rxq_info->max_pkt_size = vport->max_pkt_len;
+
+               rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+               rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+               rxq_info->ring_len = rxq->nb_rx_desc;
+       }  else {
+               /* Rx queue */
+               rxq_info = &vc_rxqs->qinfo[0];
+               rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
+               rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+               rxq_info->queue_id = rxq->queue_id;
+               rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+               rxq_info->data_buffer_size = rxq->rx_buf_len;
+               rxq_info->max_pkt_size = vport->max_pkt_len;
+
+               rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+               rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+               rxq_info->ring_len = rxq->nb_rx_desc;
+               rxq_info->rx_bufq1_id = rxq->bufq1->queue_id;
+               rxq_info->rx_bufq2_id = rxq->bufq2->queue_id;
+               rxq_info->rx_buffer_low_watermark = 64;
+
+               /* Buffer queue */
+               for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {
+                       struct idpf_rx_queue *bufq = i == 1 ? rxq->bufq1 : rxq->bufq2;
+                       rxq_info = &vc_rxqs->qinfo[i];
+                       rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;
+                       rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+                       rxq_info->queue_id = bufq->queue_id;
+                       rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+                       rxq_info->data_buffer_size = bufq->rx_buf_len;
+                       rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+                       rxq_info->ring_len = bufq->nb_rx_desc;
+
+                       rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
+                       rxq_info->rx_buffer_low_watermark = 64;
+               }
+       }
+
+       memset(&args, 0, sizeof(args));
+       args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+       args.in_args = (uint8_t *)vc_rxqs;
+       args.in_args_size = size;
+       args.out_buffer = adapter->mbx_resp;
+       args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+       err = idpf_execute_vc_cmd(adapter, &args);
+       rte_free(vc_rxqs);
+       if (err != 0)
+               DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+       return err;
+}
+
+int
+idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq)
+{
+       struct idpf_adapter *adapter = vport->adapter;
+       struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+       struct virtchnl2_txq_info *txq_info;
+       struct idpf_cmd_info args;
+       uint16_t num_qs;
+       int size, err;
+
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
+               num_qs = IDPF_TXQ_PER_GRP;
+       else
+               num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
+
+       size = sizeof(*vc_txqs) + (num_qs - 1) *
+               sizeof(struct virtchnl2_txq_info);
+       vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+       if (vc_txqs == NULL) {
+               DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+               err = -ENOMEM;
+               return err;
+       }
+       vc_txqs->vport_id = vport->vport_id;
+       vc_txqs->num_qinfo = num_qs;
+
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+               txq_info = &vc_txqs->qinfo[0];
+               txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
+               txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+               txq_info->queue_id = txq->queue_id;
+               txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+               txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+               txq_info->ring_len = txq->nb_tx_desc;
+       } else {
+               /* txq info */
+               txq_info = &vc_txqs->qinfo[0];
+               txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
+               txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
+               txq_info->queue_id = txq->queue_id;
+               txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+               txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+               txq_info->ring_len = txq->nb_tx_desc;
+               txq_info->tx_compl_queue_id = txq->complq->queue_id;
+               txq_info->relative_queue_id = txq_info->queue_id;
+
+               /* tx completion queue info */
+               txq_info = &vc_txqs->qinfo[1];
+               txq_info->dma_ring_addr = txq->complq->tx_ring_phys_addr;
+               txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+               txq_info->queue_id = txq->complq->queue_id;
+               txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+               txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+               txq_info->ring_len = txq->complq->nb_tx_desc;
+       }
+
+       memset(&args, 0, sizeof(args));
+       args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+       args.in_args = (uint8_t *)vc_txqs;
+       args.in_args_size = size;
+       args.out_buffer = adapter->mbx_resp;
+       args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+       err = idpf_execute_vc_cmd(adapter, &args);
+       rte_free(vc_txqs);
+       if (err != 0)
+               DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+       return err;
+}
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h b/drivers/common/idpf/idpf_common_virtchnl.h
index e3e0825d75..d16b6b66f4 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -6,6 +6,7 @@
 #define _IDPF_COMMON_VIRTCHNL_H_
 
 #include <idpf_common_device.h>
+#include <idpf_common_rxtx.h>
 
 int idpf_vc_check_api_version(struct idpf_adapter *adapter);
 int idpf_vc_get_caps(struct idpf_adapter *adapter);
@@ -20,6 +21,8 @@ int idpf_vc_config_irq_map_unmap(struct idpf_vport *vport,
 int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
 int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
                      uint16_t buf_len, uint8_t *buf);
+int idpf_execute_vc_cmd(struct idpf_adapter *adapter,
+                       struct idpf_cmd_info *args);
 __rte_internal
 int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
                      bool rx, bool on);
@@ -32,7 +35,7 @@ int idpf_vc_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);
 __rte_internal
 int idpf_vc_dealloc_vectors(struct idpf_vport *vport);
 __rte_internal
-int idpf_execute_vc_cmd(struct idpf_adapter *adapter,
-                       struct idpf_cmd_info *args);
-
+int idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq);
+__rte_internal
+int idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq);
 #endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index ca94196248..0e3ed57b88 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -12,7 +12,8 @@ INTERNAL {
        idpf_vc_ena_dis_vport;
        idpf_vc_alloc_vectors;
        idpf_vc_dealloc_vectors;
-       idpf_execute_vc_cmd;
+       idpf_vc_config_rxq;
+       idpf_vc_config_txq;
        idpf_adapter_init;
        idpf_adapter_deinit;
        idpf_vport_init;
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 130c02030b..6d4738f6fe 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -23,8 +23,6 @@
 #define IDPF_MAX_VPORT_NUM     8
 
 #define IDPF_INVALID_VPORT_IDX 0xffff
-#define IDPF_TXQ_PER_GRP       1
-#define IDPF_RXQ_PER_GRP       1
 
 #define IDPF_DFLT_Q_VEC_NUM    1
 
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index cac6040943..b8325f9b96 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -5,6 +5,7 @@
 #ifndef _IDPF_RXTX_H_
 #define _IDPF_RXTX_H_
 
+#include <idpf_common_rxtx.h>
 #include "idpf_ethdev.h"
 
 /* MTS */
@@ -84,103 +85,10 @@
 
 extern uint64_t idpf_timestamp_dynflag;
 
-struct idpf_rx_queue {
-       struct idpf_adapter *adapter;   /* the adapter this queue belongs to */
-       struct rte_mempool *mp;         /* mbuf pool to populate Rx ring */
-       const struct rte_memzone *mz;   /* memzone for Rx ring */
-       volatile void *rx_ring;
-       struct rte_mbuf **sw_ring;      /* address of SW ring */
-       uint64_t rx_ring_phys_addr;     /* Rx ring DMA address */
-
-       uint16_t nb_rx_desc;            /* ring length */
-       uint16_t rx_tail;               /* current value of tail */
-       volatile uint8_t *qrx_tail;     /* register address of tail */
-       uint16_t rx_free_thresh;        /* max free RX desc to hold */
-       uint16_t nb_rx_hold;            /* number of held free RX desc */
-       struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
-       struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
-       struct rte_mbuf fake_mbuf;      /* dummy mbuf */
-
-       /* used for VPMD */
-       uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
-       uint16_t rxrearm_start;    /* the idx we start the re-arming from */
-       uint64_t mbuf_initializer; /* value to init mbufs */
-
-       uint16_t rx_nb_avail;
-       uint16_t rx_next_avail;
-
-       uint16_t port_id;       /* device port ID */
-       uint16_t queue_id;      /* Rx queue index */
-       uint16_t rx_buf_len;    /* The packet buffer size */
-       uint16_t rx_hdr_len;    /* The header buffer size */
-       uint16_t max_pkt_len;   /* Maximum packet length */
-       uint8_t rxdid;
-
-       bool q_set;             /* if rx queue has been configured */
-       bool q_started;         /* if rx queue has been started */
-       bool rx_deferred_start; /* don't start this queue in dev start */
-       const struct idpf_rxq_ops *ops;
-
-       /* only valid for split queue mode */
-       uint8_t expected_gen_id;
-       struct idpf_rx_queue *bufq1;
-       struct idpf_rx_queue *bufq2;
-
-       uint64_t offloads;
-       uint32_t hw_register_set;
-};
-
-struct idpf_tx_entry {
-       struct rte_mbuf *mbuf;
-       uint16_t next_id;
-       uint16_t last_id;
-};
-
 struct idpf_tx_vec_entry {
        struct rte_mbuf *mbuf;
 };
 
-/* Structure associated with each TX queue. */
-struct idpf_tx_queue {
-       const struct rte_memzone *mz;           /* memzone for Tx ring */
-       volatile struct idpf_flex_tx_desc *tx_ring;     /* Tx ring virtual address */
-       volatile union {
-               struct idpf_flex_tx_sched_desc *desc_ring;
-               struct idpf_splitq_tx_compl_desc *compl_ring;
-       };
-       uint64_t tx_ring_phys_addr;             /* Tx ring DMA address */
-       struct idpf_tx_entry *sw_ring;          /* address array of SW ring */
-
-       uint16_t nb_tx_desc;            /* ring length */
-       uint16_t tx_tail;               /* current value of tail */
-       volatile uint8_t *qtx_tail;     /* register address of tail */
-       /* number of used desc since RS bit set */
-       uint16_t nb_used;
-       uint16_t nb_free;
-       uint16_t last_desc_cleaned;     /* last desc have been cleaned*/
-       uint16_t free_thresh;
-       uint16_t rs_thresh;
-
-       uint16_t port_id;
-       uint16_t queue_id;
-       uint64_t offloads;
-       uint16_t next_dd;       /* next to set RS, for VPMD */
-       uint16_t next_rs;       /* next to check DD,  for VPMD */
-
-       bool q_set;             /* if tx queue has been configured */
-       bool q_started;         /* if tx queue has been started */
-       bool tx_deferred_start; /* don't start this queue in dev start */
-       const struct idpf_txq_ops *ops;
-
-       /* only valid for split queue mode */
-       uint16_t sw_nb_desc;
-       uint16_t sw_tail;
-       void **txqs;
-       uint32_t tx_start_qid;
-       uint8_t expected_gen_id;
-       struct idpf_tx_queue *complq;
-};
-
 /* Offload features */
 union idpf_tx_offload {
        uint64_t data;
@@ -239,9 +147,6 @@ void idpf_stop_queues(struct rte_eth_dev *dev);
 void idpf_set_rx_function(struct rte_eth_dev *dev);
 void idpf_set_tx_function(struct rte_eth_dev *dev);
 
-int idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq);
-int idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq);
-
 #define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  10000
 /* Helper function to convert a 32b nanoseconds timestamp to 64b. */
 static inline uint64_t
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
deleted file mode 100644
index 45d05ed108..0000000000
--- a/drivers/net/idpf/idpf_vchnl.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2022 Intel Corporation
- */
-
-#include <stdio.h>
-#include <errno.h>
-#include <stdint.h>
-#include <string.h>
-#include <unistd.h>
-#include <stdarg.h>
-#include <inttypes.h>
-#include <rte_byteorder.h>
-#include <rte_common.h>
-
-#include <rte_debug.h>
-#include <rte_atomic.h>
-#include <rte_eal.h>
-#include <rte_ether.h>
-#include <ethdev_driver.h>
-#include <ethdev_pci.h>
-#include <rte_dev.h>
-
-#include "idpf_ethdev.h"
-#include "idpf_rxtx.h"
-
-#define IDPF_RX_BUF_STRIDE             64
-int
-idpf_vc_config_rxq(struct idpf_vport *vport, struct idpf_rx_queue *rxq)
-{
-       struct idpf_adapter *adapter = vport->adapter;
-       struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
-       struct virtchnl2_rxq_info *rxq_info;
-       struct idpf_cmd_info args;
-       uint16_t num_qs;
-       int size, err, i;
-
-       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
-               num_qs = IDPF_RXQ_PER_GRP;
-       else
-               num_qs = IDPF_RXQ_PER_GRP + IDPF_RX_BUFQ_PER_GRP;
-
-       size = sizeof(*vc_rxqs) + (num_qs - 1) *
-               sizeof(struct virtchnl2_rxq_info);
-       vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
-       if (vc_rxqs == NULL) {
-               PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
-               err = -ENOMEM;
-               return err;
-       }
-       vc_rxqs->vport_id = vport->vport_id;
-       vc_rxqs->num_qinfo = num_qs;
-       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
-               rxq_info = &vc_rxqs->qinfo[0];
-               rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
-               rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
-               rxq_info->queue_id = rxq->queue_id;
-               rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
-               rxq_info->data_buffer_size = rxq->rx_buf_len;
-               rxq_info->max_pkt_size = vport->max_pkt_len;
-
-               rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
-               rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
-
-               rxq_info->ring_len = rxq->nb_rx_desc;
-       }  else {
-               /* Rx queue */
-               rxq_info = &vc_rxqs->qinfo[0];
-               rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
-               rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
-               rxq_info->queue_id = rxq->queue_id;
-               rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-               rxq_info->data_buffer_size = rxq->rx_buf_len;
-               rxq_info->max_pkt_size = vport->max_pkt_len;
-
-               rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
-               rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
-
-               rxq_info->ring_len = rxq->nb_rx_desc;
-               rxq_info->rx_bufq1_id = rxq->bufq1->queue_id;
-               rxq_info->rx_bufq2_id = rxq->bufq2->queue_id;
-               rxq_info->rx_buffer_low_watermark = 64;
-
-               /* Buffer queue */
-               for (i = 1; i <= IDPF_RX_BUFQ_PER_GRP; i++) {
-                       struct idpf_rx_queue *bufq = i == 1 ? rxq->bufq1 : rxq->bufq2;
-                       rxq_info = &vc_rxqs->qinfo[i];
-                       rxq_info->dma_ring_addr = bufq->rx_ring_phys_addr;
-                       rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
-                       rxq_info->queue_id = bufq->queue_id;
-                       rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-                       rxq_info->data_buffer_size = bufq->rx_buf_len;
-                       rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
-                       rxq_info->ring_len = bufq->nb_rx_desc;
-
-                       rxq_info->buffer_notif_stride = IDPF_RX_BUF_STRIDE;
-                       rxq_info->rx_buffer_low_watermark = 64;
-               }
-       }
-
-       memset(&args, 0, sizeof(args));
-       args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
-       args.in_args = (uint8_t *)vc_rxqs;
-       args.in_args_size = size;
-       args.out_buffer = adapter->mbx_resp;
-       args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-
-       err = idpf_execute_vc_cmd(adapter, &args);
-       rte_free(vc_rxqs);
-       if (err != 0)
-               PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_RX_QUEUES");
-
-       return err;
-}
-
-int
-idpf_vc_config_txq(struct idpf_vport *vport, struct idpf_tx_queue *txq)
-{
-       struct idpf_adapter *adapter = vport->adapter;
-       struct virtchnl2_config_tx_queues *vc_txqs = NULL;
-       struct virtchnl2_txq_info *txq_info;
-       struct idpf_cmd_info args;
-       uint16_t num_qs;
-       int size, err;
-
-       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
-               num_qs = IDPF_TXQ_PER_GRP;
-       else
-               num_qs = IDPF_TXQ_PER_GRP + IDPF_TX_COMPLQ_PER_GRP;
-
-       size = sizeof(*vc_txqs) + (num_qs - 1) *
-               sizeof(struct virtchnl2_txq_info);
-       vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
-       if (vc_txqs == NULL) {
-               PMD_DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
-               err = -ENOMEM;
-               return err;
-       }
-       vc_txqs->vport_id = vport->vport_id;
-       vc_txqs->num_qinfo = num_qs;
-
-       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
-               txq_info = &vc_txqs->qinfo[0];
-               txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
-               txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
-               txq_info->queue_id = txq->queue_id;
-               txq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
-               txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
-               txq_info->ring_len = txq->nb_tx_desc;
-       } else {
-               /* txq info */
-               txq_info = &vc_txqs->qinfo[0];
-               txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
-               txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX;
-               txq_info->queue_id = txq->queue_id;
-               txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-               txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
-               txq_info->ring_len = txq->nb_tx_desc;
-               txq_info->tx_compl_queue_id = txq->complq->queue_id;
-               txq_info->relative_queue_id = txq_info->queue_id;
-
-               /* tx completion queue info */
-               txq_info = &vc_txqs->qinfo[1];
-               txq_info->dma_ring_addr = txq->complq->tx_ring_phys_addr;
-               txq_info->type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
-               txq_info->queue_id = txq->complq->queue_id;
-               txq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
-               txq_info->sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
-               txq_info->ring_len = txq->complq->nb_tx_desc;
-       }
-
-       memset(&args, 0, sizeof(args));
-       args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
-       args.in_args = (uint8_t *)vc_txqs;
-       args.in_args_size = size;
-       args.out_buffer = adapter->mbx_resp;
-       args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
-
-       err = idpf_execute_vc_cmd(adapter, &args);
-       rte_free(vc_txqs);
-       if (err != 0)
-               PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_CONFIG_TX_QUEUES");
-
-       return err;
-}
diff --git a/drivers/net/idpf/meson.build b/drivers/net/idpf/meson.build
index 998afd21fe..6d98b80ad3 100644
--- a/drivers/net/idpf/meson.build
+++ b/drivers/net/idpf/meson.build
@@ -12,7 +12,6 @@ deps += ['common_idpf']
 sources = files(
         'idpf_ethdev.c',
         'idpf_rxtx.c',
-        'idpf_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.26.2

Reply via email to