The queue structures of the i40e and ice drivers are virtually identical,
so merge them into a common struct. This should allow easier merging of
functions that operate on these queues in the future, using that common
struct.

Signed-off-by: Bruce Richardson <bruce.richard...@intel.com>
---
 drivers/net/_common_intel/tx.h                | 55 +++++++++++++++++
 drivers/net/i40e/i40e_ethdev.c                |  4 +-
 drivers/net/i40e/i40e_ethdev.h                |  4 +-
 drivers/net/i40e/i40e_fdir.c                  |  4 +-
 .../net/i40e/i40e_recycle_mbufs_vec_common.c  |  2 +-
 drivers/net/i40e/i40e_rxtx.c                  | 58 +++++++++---------
 drivers/net/i40e/i40e_rxtx.h                  | 50 ++--------------
 drivers/net/i40e/i40e_rxtx_vec_altivec.c      |  4 +-
 drivers/net/i40e/i40e_rxtx_vec_avx2.c         |  4 +-
 drivers/net/i40e/i40e_rxtx_vec_avx512.c       |  6 +-
 drivers/net/i40e/i40e_rxtx_vec_common.h       |  2 +-
 drivers/net/i40e/i40e_rxtx_vec_neon.c         |  4 +-
 drivers/net/i40e/i40e_rxtx_vec_sse.c          |  4 +-
 drivers/net/ice/ice_dcf.c                     |  4 +-
 drivers/net/ice/ice_dcf_ethdev.c              | 10 ++--
 drivers/net/ice/ice_diagnose.c                |  2 +-
 drivers/net/ice/ice_ethdev.c                  |  2 +-
 drivers/net/ice/ice_ethdev.h                  |  4 +-
 drivers/net/ice/ice_rxtx.c                    | 60 +++++++++----------
 drivers/net/ice/ice_rxtx.h                    | 41 +------------
 drivers/net/ice/ice_rxtx_vec_avx2.c           |  4 +-
 drivers/net/ice/ice_rxtx_vec_avx512.c         |  8 +--
 drivers/net/ice/ice_rxtx_vec_common.h         |  8 +--
 drivers/net/ice/ice_rxtx_vec_sse.c            |  6 +-
 24 files changed, 165 insertions(+), 185 deletions(-)

diff --git a/drivers/net/_common_intel/tx.h b/drivers/net/_common_intel/tx.h
index 5397007411..c965f5ee6c 100644
--- a/drivers/net/_common_intel/tx.h
+++ b/drivers/net/_common_intel/tx.h
@@ -8,6 +8,9 @@
 #include <stdint.h>
 #include <rte_mbuf.h>
 
+/* forward declaration of the common intel (ci) queue structure */
+struct ci_tx_queue;
+
 /**
  * Structure associated with each descriptor of the TX ring of a TX queue.
  */
@@ -24,6 +27,58 @@ struct ci_tx_entry_vec {
        struct rte_mbuf *mbuf; /* mbuf associated with TX desc, if any. */
 };
 
+typedef void (*ice_tx_release_mbufs_t)(struct ci_tx_queue *txq);
+
+struct ci_tx_queue {
+       union { /* TX ring virtual address */
+               volatile struct ice_tx_desc *ice_tx_ring;
+               volatile struct i40e_tx_desc *i40e_tx_ring;
+       };
+       volatile uint8_t *qtx_tail;               /* register address of tail */
+       struct ci_tx_entry *sw_ring; /* virtual address of SW ring */
+       rte_iova_t tx_ring_dma;        /* TX ring DMA address */
+       uint16_t nb_tx_desc;           /* number of TX descriptors */
+       uint16_t tx_tail; /* current value of tail register */
+       uint16_t nb_tx_used; /* number of TX desc used since RS bit set */
+       /* index to last TX descriptor to have been cleaned */
+       uint16_t last_desc_cleaned;
+       /* Total number of TX descriptors ready to be allocated. */
+       uint16_t nb_tx_free;
+       /* Start freeing TX buffers if there are less free descriptors than
+        * this value.
+        */
+       uint16_t tx_free_thresh;
+       /* Number of TX descriptors to use before RS bit is set. */
+       uint16_t tx_rs_thresh;
+       uint8_t pthresh;   /**< Prefetch threshold register. */
+       uint8_t hthresh;   /**< Host threshold register. */
+       uint8_t wthresh;   /**< Write-back threshold reg. */
+       uint16_t port_id;  /* Device port identifier. */
+       uint16_t queue_id; /* TX queue index. */
+       uint16_t reg_idx;
+       uint64_t offloads;
+       uint16_t tx_next_dd;
+       uint16_t tx_next_rs;
+       uint64_t mbuf_errors;
+       bool tx_deferred_start; /* don't start this queue in dev start */
+       bool q_set;             /* indicate if tx queue has been configured */
+       union {                  /* the VSI this queue belongs to */
+               struct ice_vsi *ice_vsi;
+               struct i40e_vsi *i40e_vsi;
+       };
+       const struct rte_memzone *mz;
+
+       union {
+               struct { /* ICE driver specific values */
+                       ice_tx_release_mbufs_t tx_rel_mbufs;
+                       uint32_t q_teid; /* TX schedule node id. */
+               };
+               struct { /* I40E driver specific values */
+                       uint8_t dcb_tc;
+               };
+       };
+};
+
 static __rte_always_inline void
 ci_tx_backlog_entry(struct ci_tx_entry *txep, struct rte_mbuf **tx_pkts, 
uint16_t nb_pkts)
 {
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 30dcdc68a8..bf5560ccc8 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -3685,7 +3685,7 @@ i40e_dev_update_mbuf_stats(struct rte_eth_dev *ethdev,
                struct i40e_mbuf_stats *mbuf_stats)
 {
        uint16_t idx;
-       struct i40e_tx_queue *txq;
+       struct ci_tx_queue *txq;
 
        for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
                txq = ethdev->data->tx_queues[idx];
@@ -6585,7 +6585,7 @@ i40e_dev_tx_init(struct i40e_pf *pf)
        struct rte_eth_dev_data *data = pf->dev_data;
        uint16_t i;
        uint32_t ret = I40E_SUCCESS;
-       struct i40e_tx_queue *txq;
+       struct ci_tx_queue *txq;
 
        for (i = 0; i < data->nb_tx_queues; i++) {
                txq = data->tx_queues[i];
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 98213948b4..d351193ed9 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -334,7 +334,7 @@ struct i40e_vsi_list {
 };
 
 struct i40e_rx_queue;
-struct i40e_tx_queue;
+struct ci_tx_queue;
 
 /* Bandwidth limit information */
 struct i40e_bw_info {
@@ -738,7 +738,7 @@ TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
 struct i40e_fdir_info {
        struct i40e_vsi *fdir_vsi;     /* pointer to fdir VSI structure */
        uint16_t match_counter_index;  /* Statistic counter index used for 
fdir*/
-       struct i40e_tx_queue *txq;
+       struct ci_tx_queue *txq;
        struct i40e_rx_queue *rxq;
        void *prg_pkt[I40E_FDIR_PRG_PKT_CNT];     /* memory for fdir program 
packet */
        uint64_t dma_addr[I40E_FDIR_PRG_PKT_CNT]; /* physic address of packet 
memory*/
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index c600167634..349627a2ed 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -1372,7 +1372,7 @@ i40e_find_available_buffer(struct rte_eth_dev *dev)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_fdir_info *fdir_info = &pf->fdir;
-       struct i40e_tx_queue *txq = pf->fdir.txq;
+       struct ci_tx_queue *txq = pf->fdir.txq;
 
        /* no available buffer
         * search for more available buffers from the current
@@ -1628,7 +1628,7 @@ i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
                                  const struct i40e_fdir_filter_conf *filter,
                                  bool add, bool wait_status)
 {
-       struct i40e_tx_queue *txq = pf->fdir.txq;
+       struct ci_tx_queue *txq = pf->fdir.txq;
        struct i40e_rx_queue *rxq = pf->fdir.rxq;
        const struct i40e_fdir_action *fdir_action = &filter->action;
        volatile struct i40e_tx_desc *txdp;
diff --git a/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c 
b/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
index 8679e5c1fd..5a65c80d90 100644
--- a/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
+++ b/drivers/net/i40e/i40e_recycle_mbufs_vec_common.c
@@ -55,7 +55,7 @@ uint16_t
 i40e_recycle_tx_mbufs_reuse_vec(void *tx_queue,
        struct rte_eth_recycle_rxq_info *recycle_rxq_info)
 {
-       struct i40e_tx_queue *txq = tx_queue;
+       struct ci_tx_queue *txq = tx_queue;
        struct ci_tx_entry *txep;
        struct rte_mbuf **rxep;
        int i, n;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 34ef931859..305bc53480 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -376,7 +376,7 @@ i40e_build_ctob(uint32_t td_cmd,
 }
 
 static inline int
-i40e_xmit_cleanup(struct i40e_tx_queue *txq)
+i40e_xmit_cleanup(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry *sw_ring = txq->sw_ring;
        volatile struct i40e_tx_desc *txd = txq->i40e_tx_ring;
@@ -1080,7 +1080,7 @@ i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt)
 uint16_t
 i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq;
+       struct ci_tx_queue *txq;
        struct ci_tx_entry *sw_ring;
        struct ci_tx_entry *txe, *txn;
        volatile struct i40e_tx_desc *txd;
@@ -1329,7 +1329,7 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, 
uint16_t nb_pkts)
 }
 
 static __rte_always_inline int
-i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+i40e_tx_free_bufs(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry *txep;
        uint16_t tx_rs_thresh = txq->tx_rs_thresh;
@@ -1413,7 +1413,7 @@ tx1(volatile struct i40e_tx_desc *txdp, struct rte_mbuf 
**pkts)
 
 /* Fill hardware descriptor ring with mbuf data */
 static inline void
-i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
+i40e_tx_fill_hw_ring(struct ci_tx_queue *txq,
                     struct rte_mbuf **pkts,
                     uint16_t nb_pkts)
 {
@@ -1441,7 +1441,7 @@ i40e_tx_fill_hw_ring(struct i40e_tx_queue *txq,
 }
 
 static inline uint16_t
-tx_xmit_pkts(struct i40e_tx_queue *txq,
+tx_xmit_pkts(struct ci_tx_queue *txq,
             struct rte_mbuf **tx_pkts,
             uint16_t nb_pkts)
 {
@@ -1504,14 +1504,14 @@ i40e_xmit_pkts_simple(void *tx_queue,
        uint16_t nb_tx = 0;
 
        if (likely(nb_pkts <= I40E_TX_MAX_BURST))
-               return tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
+               return tx_xmit_pkts((struct ci_tx_queue *)tx_queue,
                                                tx_pkts, nb_pkts);
 
        while (nb_pkts) {
                uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
                                                I40E_TX_MAX_BURST);
 
-               ret = tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
+               ret = tx_xmit_pkts((struct ci_tx_queue *)tx_queue,
                                                &tx_pkts[nb_tx], num);
                nb_tx = (uint16_t)(nb_tx + ret);
                nb_pkts = (uint16_t)(nb_pkts - ret);
@@ -1527,7 +1527,7 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf 
**tx_pkts,
                   uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 
        while (nb_pkts) {
                uint16_t ret, num;
@@ -1549,7 +1549,7 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf 
**tx_pkts,
 static uint16_t
 i40e_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t 
nb_pkts)
 {
-       struct i40e_tx_queue *txq = tx_queue;
+       struct ci_tx_queue *txq = tx_queue;
        uint16_t idx;
        uint64_t ol_flags;
        struct rte_mbuf *mb;
@@ -1611,7 +1611,7 @@ i40e_xmit_pkts_check(void *tx_queue, struct rte_mbuf 
**tx_pkts, uint16_t nb_pkts
                                        pkt_error = true;
                                        break;
                                }
-                               if (mb->nb_segs > ((struct i40e_tx_queue 
*)tx_queue)->nb_tx_desc) {
+                               if (mb->nb_segs > ((struct ci_tx_queue 
*)tx_queue)->nb_tx_desc) {
                                        PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs 
out of ring length");
                                        pkt_error = true;
                                        break;
@@ -1873,7 +1873,7 @@ int
 i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        int err;
-       struct i40e_tx_queue *txq;
+       struct ci_tx_queue *txq;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        PMD_INIT_FUNC_TRACE();
@@ -1907,7 +1907,7 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
 int
 i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct i40e_tx_queue *txq;
+       struct ci_tx_queue *txq;
        int err;
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -2311,7 +2311,7 @@ i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t 
offset)
 int
 i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 {
-       struct i40e_tx_queue *txq = tx_queue;
+       struct ci_tx_queue *txq = tx_queue;
        volatile uint64_t *status;
        uint64_t mask, expect;
        uint32_t desc;
@@ -2341,7 +2341,7 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t 
offset)
 
 static int
 i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
-                               struct i40e_tx_queue *txq)
+                               struct ci_tx_queue *txq)
 {
        struct i40e_adapter *ad =
                I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -2394,7 +2394,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 {
        struct i40e_vsi *vsi;
        struct i40e_pf *pf = NULL;
-       struct i40e_tx_queue *txq;
+       struct ci_tx_queue *txq;
        const struct rte_memzone *tz;
        uint32_t ring_size;
        uint16_t tx_rs_thresh, tx_free_thresh;
@@ -2515,7 +2515,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket("i40e tx queue",
-                                 sizeof(struct i40e_tx_queue),
+                                 sizeof(struct ci_tx_queue),
                                  RTE_CACHE_LINE_SIZE,
                                  socket_id);
        if (!txq) {
@@ -2600,7 +2600,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
 void
 i40e_tx_queue_release(void *txq)
 {
-       struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
+       struct ci_tx_queue *q = (struct ci_tx_queue *)txq;
 
        if (!q) {
                PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
@@ -2705,7 +2705,7 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
 }
 
 void
-i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
+i40e_tx_queue_release_mbufs(struct ci_tx_queue *txq)
 {
        struct rte_eth_dev *dev;
        uint16_t i;
@@ -2765,7 +2765,7 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
 }
 
 static int
-i40e_tx_done_cleanup_full(struct i40e_tx_queue *txq,
+i40e_tx_done_cleanup_full(struct ci_tx_queue *txq,
                        uint32_t free_cnt)
 {
        struct ci_tx_entry *swr_ring = txq->sw_ring;
@@ -2824,7 +2824,7 @@ i40e_tx_done_cleanup_full(struct i40e_tx_queue *txq,
 }
 
 static int
-i40e_tx_done_cleanup_simple(struct i40e_tx_queue *txq,
+i40e_tx_done_cleanup_simple(struct ci_tx_queue *txq,
                        uint32_t free_cnt)
 {
        int i, n, cnt;
@@ -2848,7 +2848,7 @@ i40e_tx_done_cleanup_simple(struct i40e_tx_queue *txq,
 }
 
 static int
-i40e_tx_done_cleanup_vec(struct i40e_tx_queue *txq __rte_unused,
+i40e_tx_done_cleanup_vec(struct ci_tx_queue *txq __rte_unused,
                        uint32_t free_cnt __rte_unused)
 {
        return -ENOTSUP;
@@ -2856,7 +2856,7 @@ i40e_tx_done_cleanup_vec(struct i40e_tx_queue *txq 
__rte_unused,
 int
 i40e_tx_done_cleanup(void *txq, uint32_t free_cnt)
 {
-       struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
+       struct ci_tx_queue *q = (struct ci_tx_queue *)txq;
        struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
        struct i40e_adapter *ad =
                I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -2872,7 +2872,7 @@ i40e_tx_done_cleanup(void *txq, uint32_t free_cnt)
 }
 
 void
-i40e_reset_tx_queue(struct i40e_tx_queue *txq)
+i40e_reset_tx_queue(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry *txe;
        uint16_t i, prev, size;
@@ -2911,7 +2911,7 @@ i40e_reset_tx_queue(struct i40e_tx_queue *txq)
 
 /* Init the TX queue in hardware */
 int
-i40e_tx_queue_init(struct i40e_tx_queue *txq)
+i40e_tx_queue_init(struct ci_tx_queue *txq)
 {
        enum i40e_status_code err = I40E_SUCCESS;
        struct i40e_vsi *vsi = txq->i40e_vsi;
@@ -3167,7 +3167,7 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
 enum i40e_status_code
 i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
 {
-       struct i40e_tx_queue *txq;
+       struct ci_tx_queue *txq;
        const struct rte_memzone *tz = NULL;
        struct rte_eth_dev *dev;
        uint32_t ring_size;
@@ -3181,7 +3181,7 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
 
        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket("i40e fdir tx queue",
-                                 sizeof(struct i40e_tx_queue),
+                                 sizeof(struct ci_tx_queue),
                                  RTE_CACHE_LINE_SIZE,
                                  SOCKET_ID_ANY);
        if (!txq) {
@@ -3304,7 +3304,7 @@ void
 i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo)
 {
-       struct i40e_tx_queue *txq;
+       struct ci_tx_queue *txq;
 
        txq = dev->data->tx_queues[queue_id];
 
@@ -3552,7 +3552,7 @@ i40e_rx_burst_mode_get(struct rte_eth_dev *dev, 
__rte_unused uint16_t queue_id,
 }
 
 void __rte_cold
-i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct ci_tx_queue *txq)
 {
        struct i40e_adapter *ad =
                I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -3592,7 +3592,7 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
 #endif
                if (ad->tx_vec_allowed) {
                        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-                               struct i40e_tx_queue *txq =
+                               struct ci_tx_queue *txq =
                                        dev->data->tx_queues[i];
 
                                if (txq && i40e_txq_vec_setup(txq)) {
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 8315ee2f59..043d1df912 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -124,44 +124,6 @@ struct i40e_rx_queue {
        const struct rte_memzone *mz;
 };
 
-/*
- * Structure associated with each TX queue.
- */
-struct i40e_tx_queue {
-       uint16_t nb_tx_desc; /**< number of TX descriptors */
-       rte_iova_t tx_ring_dma; /**< TX ring DMA address */
-       volatile struct i40e_tx_desc *i40e_tx_ring; /**< TX ring virtual 
address */
-       struct ci_tx_entry *sw_ring; /**< virtual address of SW ring */
-       uint16_t tx_tail; /**< current value of tail register */
-       volatile uint8_t *qtx_tail; /**< register address of tail */
-       uint16_t nb_tx_used; /**< number of TX desc used since RS bit set */
-       /**< index to last TX descriptor to have been cleaned */
-       uint16_t last_desc_cleaned;
-       /**< Total number of TX descriptors ready to be allocated. */
-       uint16_t nb_tx_free;
-       /**< Start freeing TX buffers if there are less free descriptors than
-            this value. */
-       uint16_t tx_free_thresh;
-       /** Number of TX descriptors to use before RS bit is set. */
-       uint16_t tx_rs_thresh;
-       uint8_t pthresh; /**< Prefetch threshold register. */
-       uint8_t hthresh; /**< Host threshold register. */
-       uint8_t wthresh; /**< Write-back threshold reg. */
-       uint16_t port_id; /**< Device port identifier. */
-       uint16_t queue_id; /**< TX queue index. */
-       uint16_t reg_idx;
-       struct i40e_vsi *i40e_vsi; /**< the VSI this queue belongs to */
-       uint16_t tx_next_dd;
-       uint16_t tx_next_rs;
-       bool q_set; /**< indicate if tx queue has been configured */
-       uint64_t mbuf_errors;
-
-       bool tx_deferred_start; /**< don't start this queue in dev start */
-       uint8_t dcb_tc;         /**< Traffic class of tx queue */
-       uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
-       const struct rte_memzone *mz;
-};
-
 /** Offload features */
 union i40e_tx_offload {
        uint64_t data;
@@ -209,15 +171,15 @@ uint16_t i40e_simple_prep_pkts(void *tx_queue, struct 
rte_mbuf **tx_pkts,
                               uint16_t nb_pkts);
 uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);
-int i40e_tx_queue_init(struct i40e_tx_queue *txq);
+int i40e_tx_queue_init(struct ci_tx_queue *txq);
 int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
-void i40e_free_tx_resources(struct i40e_tx_queue *txq);
+void i40e_free_tx_resources(struct ci_tx_queue *txq);
 void i40e_free_rx_resources(struct i40e_rx_queue *rxq);
 void i40e_dev_clear_queues(struct rte_eth_dev *dev);
 void i40e_dev_free_queues(struct rte_eth_dev *dev);
 void i40e_reset_rx_queue(struct i40e_rx_queue *rxq);
-void i40e_reset_tx_queue(struct i40e_tx_queue *txq);
-void i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq);
+void i40e_reset_tx_queue(struct ci_tx_queue *txq);
+void i40e_tx_queue_release_mbufs(struct ci_tx_queue *txq);
 int i40e_tx_done_cleanup(void *txq, uint32_t free_cnt);
 int i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq);
 void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
@@ -237,13 +199,13 @@ uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
                                      uint16_t nb_pkts);
 int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
-int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
+int i40e_txq_vec_setup(struct ci_tx_queue *txq);
 void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
 uint16_t i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                                   uint16_t nb_pkts);
 void i40e_set_rx_function(struct rte_eth_dev *dev);
 void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
-                              struct i40e_tx_queue *txq);
+                              struct ci_tx_queue *txq);
 void i40e_set_tx_function(struct rte_eth_dev *dev);
 void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
 void i40e_set_default_pctype_table(struct rte_eth_dev *dev);
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c 
b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
index bf0e9ebd71..500bba2cef 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -551,7 +551,7 @@ uint16_t
 i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                          uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
        volatile struct i40e_tx_desc *txdp;
        struct ci_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
@@ -625,7 +625,7 @@ i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
 }
 
 int __rte_cold
-i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
+i40e_txq_vec_setup(struct ci_tx_queue __rte_unused * txq)
 {
        return 0;
 }
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c 
b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index 5042e348db..29bef64287 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -743,7 +743,7 @@ static inline uint16_t
 i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                          uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
        volatile struct i40e_tx_desc *txdp;
        struct ci_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
@@ -808,7 +808,7 @@ i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf 
**tx_pkts,
                   uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 
        while (nb_pkts) {
                uint16_t ret, num;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx512.c 
b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
index 04fbe3b2e3..a3f6d1667f 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx512.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx512.c
@@ -755,7 +755,7 @@ i40e_recv_scattered_pkts_vec_avx512(void *rx_queue,
 }
 
 static __rte_always_inline int
-i40e_tx_free_bufs_avx512(struct i40e_tx_queue *txq)
+i40e_tx_free_bufs_avx512(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry_vec *txep;
        uint32_t n;
@@ -933,7 +933,7 @@ static inline uint16_t
 i40e_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
                                 uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
        volatile struct i40e_tx_desc *txdp;
        struct ci_tx_entry_vec *txep;
        uint16_t n, nb_commit, tx_id;
@@ -999,7 +999,7 @@ i40e_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf 
**tx_pkts,
                          uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 
        while (nb_pkts) {
                uint16_t ret, num;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h 
b/drivers/net/i40e/i40e_rxtx_vec_common.h
index e81f958361..57d6263ccf 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -17,7 +17,7 @@
 #endif
 
 static __rte_always_inline int
-i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+i40e_tx_free_bufs(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry *txep;
        uint32_t n;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c 
b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 05191e4884..4006538ba5 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -679,7 +679,7 @@ uint16_t
 i40e_xmit_fixed_burst_vec(void *__rte_restrict tx_queue,
        struct rte_mbuf **__rte_restrict tx_pkts, uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
        volatile struct i40e_tx_desc *txdp;
        struct ci_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
@@ -753,7 +753,7 @@ i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
 }
 
 int __rte_cold
-i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+i40e_txq_vec_setup(struct ci_tx_queue __rte_unused *txq)
 {
        return 0;
 }
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c 
b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index d81b553842..e9a5715515 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -698,7 +698,7 @@ uint16_t
 i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                          uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
        volatile struct i40e_tx_desc *txdp;
        struct ci_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
@@ -771,7 +771,7 @@ i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
 }
 
 int __rte_cold
-i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+i40e_txq_vec_setup(struct ci_tx_queue __rte_unused *txq)
 {
        return 0;
 }
diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c
index 204d4eadbb..65c18921f4 100644
--- a/drivers/net/ice/ice_dcf.c
+++ b/drivers/net/ice/ice_dcf.c
@@ -1177,8 +1177,8 @@ ice_dcf_configure_queues(struct ice_dcf_hw *hw)
 {
        struct ice_rx_queue **rxq =
                (struct ice_rx_queue **)hw->eth_dev->data->rx_queues;
-       struct ice_tx_queue **txq =
-               (struct ice_tx_queue **)hw->eth_dev->data->tx_queues;
+       struct ci_tx_queue **txq =
+               (struct ci_tx_queue **)hw->eth_dev->data->tx_queues;
        struct virtchnl_vsi_queue_config_info *vc_config;
        struct virtchnl_queue_pair_info *vc_qp;
        struct dcf_virtchnl_cmd args;
diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 4ffd1f5567..a0c065d78c 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -387,7 +387,7 @@ reset_rx_queue(struct ice_rx_queue *rxq)
 }
 
 static inline void
-reset_tx_queue(struct ice_tx_queue *txq)
+reset_tx_queue(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry *txe;
        uint32_t i, size;
@@ -454,7 +454,7 @@ ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
 {
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct iavf_hw *hw = &ad->real_hw.avf;
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        int err = 0;
 
        if (tx_queue_id >= dev->data->nb_tx_queues)
@@ -486,7 +486,7 @@ ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
 {
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        int err;
 
        if (tx_queue_id >= dev->data->nb_tx_queues)
@@ -511,7 +511,7 @@ static int
 ice_dcf_start_queues(struct rte_eth_dev *dev)
 {
        struct ice_rx_queue *rxq;
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        int nb_rxq = 0;
        int nb_txq, i;
 
@@ -638,7 +638,7 @@ ice_dcf_stop_queues(struct rte_eth_dev *dev)
        struct ice_dcf_adapter *ad = dev->data->dev_private;
        struct ice_dcf_hw *hw = &ad->real_hw;
        struct ice_rx_queue *rxq;
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        int ret, i;
 
        /* Stop All queues */
diff --git a/drivers/net/ice/ice_diagnose.c b/drivers/net/ice/ice_diagnose.c
index 5bec9d00ad..a50068441a 100644
--- a/drivers/net/ice/ice_diagnose.c
+++ b/drivers/net/ice/ice_diagnose.c
@@ -605,7 +605,7 @@ void print_node(const struct rte_eth_dev_data *ethdata,
                        get_elem_type(data->data.elem_type));
        if (data->data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
                for (uint16_t i = 0; i < ethdata->nb_tx_queues; i++) {
-                       struct ice_tx_queue *q = ethdata->tx_queues[i];
+                       struct ci_tx_queue *q = ethdata->tx_queues[i];
                        if (q->q_teid == data->node_teid) {
                                fprintf(stream, 
"\t\t\t\t<tr><td>TXQ</td><td>%u</td></tr>\n", i);
                                break;
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 93a6308a86..80eee03204 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -6448,7 +6448,7 @@ ice_update_mbuf_stats(struct rte_eth_dev *ethdev,
                struct ice_mbuf_stats *mbuf_stats)
 {
        uint16_t idx;
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
 
        for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
                txq = ethdev->data->tx_queues[idx];
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index a5b27fabd2..ba54655499 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -258,7 +258,7 @@ struct ice_vsi_list {
 };
 
 struct ice_rx_queue;
-struct ice_tx_queue;
+struct ci_tx_queue;
 
 /**
  * Structure that defines a VSI, associated with a adapter.
@@ -408,7 +408,7 @@ struct ice_fdir_counter_pool_container {
  */
 struct ice_fdir_info {
        struct ice_vsi *fdir_vsi;     /* pointer to fdir VSI structure */
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        struct ice_rx_queue *rxq;
        void *prg_pkt;                 /* memory for fdir program packet */
        uint64_t dma_addr;             /* physic address of packet memory*/
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 5ec92f6d0c..bcc7c7a016 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -743,7 +743,7 @@ ice_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 int
 ice_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        int err;
        struct ice_vsi *vsi;
        struct ice_hw *hw;
@@ -944,7 +944,7 @@ int
 ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        int err;
        struct ice_vsi *vsi;
        struct ice_hw *hw;
@@ -1008,7 +1008,7 @@ ice_fdir_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 
 /* Free all mbufs for descriptors in tx queue */
 static void
-_ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
+_ice_tx_queue_release_mbufs(struct ci_tx_queue *txq)
 {
        uint16_t i;
 
@@ -1026,7 +1026,7 @@ _ice_tx_queue_release_mbufs(struct ice_tx_queue *txq)
 }
 
 static void
-ice_reset_tx_queue(struct ice_tx_queue *txq)
+ice_reset_tx_queue(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry *txe;
        uint16_t i, prev, size;
@@ -1066,7 +1066,7 @@ ice_reset_tx_queue(struct ice_tx_queue *txq)
 int
 ice_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
@@ -1134,7 +1134,7 @@ ice_fdir_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 int
 ice_fdir_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
@@ -1354,7 +1354,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 {
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct ice_vsi *vsi = pf->main_vsi;
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        const struct rte_memzone *tz;
        uint32_t ring_size;
        uint16_t tx_rs_thresh, tx_free_thresh;
@@ -1467,7 +1467,7 @@ ice_tx_queue_setup(struct rte_eth_dev *dev,
 
        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket(NULL,
-                                sizeof(struct ice_tx_queue),
+                                sizeof(struct ci_tx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (!txq) {
@@ -1542,7 +1542,7 @@ ice_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 void
 ice_tx_queue_release(void *txq)
 {
-       struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
+       struct ci_tx_queue *q = (struct ci_tx_queue *)txq;
 
        if (!q) {
                PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
@@ -1577,7 +1577,7 @@ void
 ice_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                 struct rte_eth_txq_info *qinfo)
 {
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
 
        txq = dev->data->tx_queues[queue_id];
 
@@ -2354,7 +2354,7 @@ ice_rx_descriptor_status(void *rx_queue, uint16_t offset)
 int
 ice_tx_descriptor_status(void *tx_queue, uint16_t offset)
 {
-       struct ice_tx_queue *txq = tx_queue;
+       struct ci_tx_queue *txq = tx_queue;
        volatile uint64_t *status;
        uint64_t mask, expect;
        uint32_t desc;
@@ -2412,7 +2412,7 @@ ice_free_queues(struct rte_eth_dev *dev)
 int
 ice_fdir_setup_tx_resources(struct ice_pf *pf)
 {
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        const struct rte_memzone *tz = NULL;
        uint32_t ring_size;
        struct rte_eth_dev *dev;
@@ -2426,7 +2426,7 @@ ice_fdir_setup_tx_resources(struct ice_pf *pf)
 
        /* Allocate the TX queue data structure. */
        txq = rte_zmalloc_socket("ice fdir tx queue",
-                                sizeof(struct ice_tx_queue),
+                                sizeof(struct ci_tx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 SOCKET_ID_ANY);
        if (!txq) {
@@ -2835,7 +2835,7 @@ ice_txd_enable_checksum(uint64_t ol_flags,
 }
 
 static inline int
-ice_xmit_cleanup(struct ice_tx_queue *txq)
+ice_xmit_cleanup(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry *sw_ring = txq->sw_ring;
        volatile struct ice_tx_desc *txd = txq->ice_tx_ring;
@@ -2958,7 +2958,7 @@ ice_calc_pkt_desc(struct rte_mbuf *tx_pkt)
 uint16_t
 ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        volatile struct ice_tx_desc *ice_tx_ring;
        volatile struct ice_tx_desc *txd;
        struct ci_tx_entry *sw_ring;
@@ -3182,7 +3182,7 @@ ice_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 }
 
 static __rte_always_inline int
-ice_tx_free_bufs(struct ice_tx_queue *txq)
+ice_tx_free_bufs(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry *txep;
        uint16_t i;
@@ -3218,7 +3218,7 @@ ice_tx_free_bufs(struct ice_tx_queue *txq)
 }
 
 static int
-ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
+ice_tx_done_cleanup_full(struct ci_tx_queue *txq,
                        uint32_t free_cnt)
 {
        struct ci_tx_entry *swr_ring = txq->sw_ring;
@@ -3278,7 +3278,7 @@ ice_tx_done_cleanup_full(struct ice_tx_queue *txq,
 
 #ifdef RTE_ARCH_X86
 static int
-ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
+ice_tx_done_cleanup_vec(struct ci_tx_queue *txq __rte_unused,
                        uint32_t free_cnt __rte_unused)
 {
        return -ENOTSUP;
@@ -3286,7 +3286,7 @@ ice_tx_done_cleanup_vec(struct ice_tx_queue *txq __rte_unused,
 #endif
 
 static int
-ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
+ice_tx_done_cleanup_simple(struct ci_tx_queue *txq,
                        uint32_t free_cnt)
 {
        int i, n, cnt;
@@ -3312,7 +3312,7 @@ ice_tx_done_cleanup_simple(struct ice_tx_queue *txq,
 int
 ice_tx_done_cleanup(void *txq, uint32_t free_cnt)
 {
-       struct ice_tx_queue *q = (struct ice_tx_queue *)txq;
+       struct ci_tx_queue *q = (struct ci_tx_queue *)txq;
        struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -3357,7 +3357,7 @@ tx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf **pkts)
 }
 
 static inline void
-ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
+ice_tx_fill_hw_ring(struct ci_tx_queue *txq, struct rte_mbuf **pkts,
                    uint16_t nb_pkts)
 {
        volatile struct ice_tx_desc *txdp = &txq->ice_tx_ring[txq->tx_tail];
@@ -3389,7 +3389,7 @@ ice_tx_fill_hw_ring(struct ice_tx_queue *txq, struct rte_mbuf **pkts,
 }
 
 static inline uint16_t
-tx_xmit_pkts(struct ice_tx_queue *txq,
+tx_xmit_pkts(struct ci_tx_queue *txq,
             struct rte_mbuf **tx_pkts,
             uint16_t nb_pkts)
 {
@@ -3452,14 +3452,14 @@ ice_xmit_pkts_simple(void *tx_queue,
        uint16_t nb_tx = 0;
 
        if (likely(nb_pkts <= ICE_TX_MAX_BURST))
-               return tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
+               return tx_xmit_pkts((struct ci_tx_queue *)tx_queue,
                                    tx_pkts, nb_pkts);
 
        while (nb_pkts) {
                uint16_t ret, num = (uint16_t)RTE_MIN(nb_pkts,
                                                      ICE_TX_MAX_BURST);
 
-               ret = tx_xmit_pkts((struct ice_tx_queue *)tx_queue,
+               ret = tx_xmit_pkts((struct ci_tx_queue *)tx_queue,
                                   &tx_pkts[nb_tx], num);
                nb_tx = (uint16_t)(nb_tx + ret);
                nb_pkts = (uint16_t)(nb_pkts - ret);
@@ -3667,7 +3667,7 @@ ice_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 }
 
 void __rte_cold
-ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
+ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ci_tx_queue *txq)
 {
        struct ice_adapter *ad =
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
@@ -3716,7 +3716,7 @@ ice_check_empty_mbuf(struct rte_mbuf *tx_pkt)
 static uint16_t
 ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-       struct ice_tx_queue *txq = tx_queue;
+       struct ci_tx_queue *txq = tx_queue;
        uint16_t idx;
        struct rte_mbuf *mb;
        bool pkt_error = false;
@@ -3778,7 +3778,7 @@ ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                        pkt_error = true;
                                        break;
                                }
-                               if (mb->nb_segs > ((struct ice_tx_queue *)tx_queue)->nb_tx_desc) {
+                               if (mb->nb_segs > ((struct ci_tx_queue *)tx_queue)->nb_tx_desc) {
                                        PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs out of ring length");
                                        pkt_error = true;
                                        break;
@@ -3839,7 +3839,7 @@ ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                    (m->tso_segsz < ICE_MIN_TSO_MSS ||
                     m->tso_segsz > ICE_MAX_TSO_MSS ||
                     m->nb_segs >
-                       ((struct ice_tx_queue *)tx_queue)->nb_tx_desc ||
+                       ((struct ci_tx_queue *)tx_queue)->nb_tx_desc ||
                     m->pkt_len > ICE_MAX_TSO_FRAME_SIZE)) {
                        /**
                         * MSS outside the range are considered malicious
@@ -3881,7 +3881,7 @@ ice_set_tx_function(struct rte_eth_dev *dev)
                ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
        int mbuf_check = ad->devargs.mbuf_check;
 #ifdef RTE_ARCH_X86
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        int i;
        int tx_check_ret = -1;
 
@@ -4693,7 +4693,7 @@ ice_check_fdir_programming_status(struct ice_rx_queue *rxq)
 int
 ice_fdir_programming(struct ice_pf *pf, struct ice_fltr_desc *fdir_desc)
 {
-       struct ice_tx_queue *txq = pf->fdir.txq;
+       struct ci_tx_queue *txq = pf->fdir.txq;
        struct ice_rx_queue *rxq = pf->fdir.rxq;
        volatile struct ice_fltr_desc *fdirdp;
        volatile struct ice_tx_desc *txdp;
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index 3257f449f5..1cae8a9b50 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -79,7 +79,6 @@ extern int ice_timestamp_dynfield_offset;
 #define ICE_TX_MTU_SEG_MAX     8
 
 typedef void (*ice_rx_release_mbufs_t)(struct ice_rx_queue *rxq);
-typedef void (*ice_tx_release_mbufs_t)(struct ice_tx_queue *txq);
 typedef void (*ice_rxd_to_pkt_fields_t)(struct ice_rx_queue *rxq,
                                        struct rte_mbuf *mb,
                                        volatile union ice_rx_flex_desc *rxdp);
@@ -145,42 +144,6 @@ struct ice_rx_queue {
        bool ts_enable; /* if rxq timestamp is enabled */
 };
 
-struct ice_tx_queue {
-       uint16_t nb_tx_desc; /* number of TX descriptors */
-       rte_iova_t tx_ring_dma; /* TX ring DMA address */
-       volatile struct ice_tx_desc *ice_tx_ring; /* TX ring virtual address */
-       struct ci_tx_entry *sw_ring; /* virtual address of SW ring */
-       uint16_t tx_tail; /* current value of tail register */
-       volatile uint8_t *qtx_tail; /* register address of tail */
-       uint16_t nb_tx_used; /* number of TX desc used since RS bit set */
-       /* index to last TX descriptor to have been cleaned */
-       uint16_t last_desc_cleaned;
-       /* Total number of TX descriptors ready to be allocated. */
-       uint16_t nb_tx_free;
-       /* Start freeing TX buffers if there are less free descriptors than
-        * this value.
-        */
-       uint16_t tx_free_thresh;
-       /* Number of TX descriptors to use before RS bit is set. */
-       uint16_t tx_rs_thresh;
-       uint8_t pthresh; /**< Prefetch threshold register. */
-       uint8_t hthresh; /**< Host threshold register. */
-       uint8_t wthresh; /**< Write-back threshold reg. */
-       uint16_t port_id; /* Device port identifier. */
-       uint16_t queue_id; /* TX queue index. */
-       uint32_t q_teid; /* TX schedule node id. */
-       uint16_t reg_idx;
-       uint64_t offloads;
-       struct ice_vsi *ice_vsi; /* the VSI this queue belongs to */
-       uint16_t tx_next_dd;
-       uint16_t tx_next_rs;
-       uint64_t mbuf_errors;
-       bool tx_deferred_start; /* don't start this queue in dev start */
-       bool q_set; /* indicate if tx queue has been configured */
-       ice_tx_release_mbufs_t tx_rel_mbufs;
-       const struct rte_memzone *mz;
-};
-
 /* Offload features */
 union ice_tx_offload {
        uint64_t data;
@@ -268,7 +231,7 @@ void ice_set_rx_function(struct rte_eth_dev *dev);
 uint16_t ice_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts);
 void ice_set_tx_function_flag(struct rte_eth_dev *dev,
-                             struct ice_tx_queue *txq);
+                             struct ci_tx_queue *txq);
 void ice_set_tx_function(struct rte_eth_dev *dev);
 uint32_t ice_rx_queue_count(void *rx_queue);
 void ice_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
@@ -290,7 +253,7 @@ void ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq,
 int ice_rx_vec_dev_check(struct rte_eth_dev *dev);
 int ice_tx_vec_dev_check(struct rte_eth_dev *dev);
 int ice_rxq_vec_setup(struct ice_rx_queue *rxq);
-int ice_txq_vec_setup(struct ice_tx_queue *txq);
+int ice_txq_vec_setup(struct ci_tx_queue *txq);
 uint16_t ice_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                           uint16_t nb_pkts);
 uint16_t ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index dde07ac99e..12ffa0fa9a 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -856,7 +856,7 @@ static __rte_always_inline uint16_t
 ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                              uint16_t nb_pkts, bool offload)
 {
-       struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
        volatile struct ice_tx_desc *txdp;
        struct ci_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
@@ -924,7 +924,7 @@ ice_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts,
                              uint16_t nb_pkts, bool offload)
 {
        uint16_t nb_tx = 0;
-       struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 
        while (nb_pkts) {
                uint16_t ret, num;
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index e4d0270176..eabd8b04a0 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -860,7 +860,7 @@ ice_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
 }
 
 static __rte_always_inline int
-ice_tx_free_bufs_avx512(struct ice_tx_queue *txq)
+ice_tx_free_bufs_avx512(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry_vec *txep;
        uint32_t n;
@@ -1053,7 +1053,7 @@ static __rte_always_inline uint16_t
 ice_xmit_fixed_burst_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
                                uint16_t nb_pkts, bool do_offload)
 {
-       struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
        volatile struct ice_tx_desc *txdp;
        struct ci_tx_entry_vec *txep;
        uint16_t n, nb_commit, tx_id;
@@ -1122,7 +1122,7 @@ ice_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
-       struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 
        while (nb_pkts) {
                uint16_t ret, num;
@@ -1144,7 +1144,7 @@ ice_xmit_pkts_vec_avx512_offload(void *tx_queue, struct rte_mbuf **tx_pkts,
                                 uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
-       struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 
        while (nb_pkts) {
                uint16_t ret, num;
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 7b865b53ad..b39289ceb5 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -13,7 +13,7 @@
 #endif
 
 static __rte_always_inline int
-ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
+ice_tx_free_bufs_vec(struct ci_tx_queue *txq)
 {
        struct ci_tx_entry *txep;
        uint32_t n;
@@ -105,7 +105,7 @@ _ice_rx_queue_release_mbufs_vec(struct ice_rx_queue *rxq)
 }
 
 static inline void
-_ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
+_ice_tx_queue_release_mbufs_vec(struct ci_tx_queue *txq)
 {
        uint16_t i;
 
@@ -231,7 +231,7 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 }
 
 static inline int
-ice_tx_vec_queue_default(struct ice_tx_queue *txq)
+ice_tx_vec_queue_default(struct ci_tx_queue *txq)
 {
        if (!txq)
                return -1;
@@ -273,7 +273,7 @@ static inline int
 ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
        int i;
-       struct ice_tx_queue *txq;
+       struct ci_tx_queue *txq;
        int ret = 0;
        int result = 0;
 
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 364207e8a8..a62a32a552 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -697,7 +697,7 @@ static uint16_t
 ice_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                         uint16_t nb_pkts)
 {
-       struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
        volatile struct ice_tx_desc *txdp;
        struct ci_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
@@ -766,7 +766,7 @@ ice_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                  uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
-       struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue;
+       struct ci_tx_queue *txq = (struct ci_tx_queue *)tx_queue;
 
        while (nb_pkts) {
                uint16_t ret, num;
@@ -793,7 +793,7 @@ ice_rxq_vec_setup(struct ice_rx_queue *rxq)
 }
 
 int __rte_cold
-ice_txq_vec_setup(struct ice_tx_queue __rte_unused *txq)
+ice_txq_vec_setup(struct ci_tx_queue __rte_unused *txq)
 {
        if (!txq)
                return -1;
-- 
2.43.0


Reply via email to