Merge in the additional fields used by the ixgbe driver and then convert
the driver over to using the common Tx queue structure.

Signed-off-by: Bruce Richardson <bruce.richard...@intel.com>
---
 drivers/common/intel_eth/ieth_rxtx.h          | 14 +++-
 drivers/net/ixgbe/ixgbe_ethdev.c              |  4 +-
 .../ixgbe/ixgbe_recycle_mbufs_vec_common.c    |  2 +-
 drivers/net/ixgbe/ixgbe_rxtx.c                | 64 +++++++++----------
 drivers/net/ixgbe/ixgbe_rxtx.h                | 56 ++--------------
 drivers/net/ixgbe/ixgbe_rxtx_vec_common.h     | 10 +--
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c       | 10 +--
 drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c        | 10 +--
 8 files changed, 68 insertions(+), 102 deletions(-)
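
The conversion relies on two details of the common structure that are easy
to miss when reading the diff: the descriptor ring and SW ring are anonymous
unions, so each driver keeps its own typed pointer without growing the
structure, and ixgbe co-allocates its context cache directly behind the
queue structure. A minimal sketch of both, condensed and renamed for
illustration (the real definition lives in
drivers/common/intel_eth/ieth_rxtx.h):

	/* Illustrative stand-in for ieth_tx_queue; most fields elided */
	struct example_tx_queue {
		union { /* descriptor ring: one member per driver */
			volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
			/* i40e/iavf/ice ring pointers elided */
		};
		union { /* scalar and vector PMDs overlay one slot */
			struct ieth_tx_entry *sw_ring;
			struct ieth_vec_tx_entry *sw_ring_v;
		};
		struct ixgbe_advctx_info *ctx_cache; /* ixgbe-specific */
	};

	/* Context cache co-allocated behind the queue structure, as the
	 * patched ixgbe_dev_tx_queue_setup() below does it: */
	struct example_tx_queue *txq;

	txq = rte_zmalloc_socket("ethdev TX queue",
			sizeof(*txq) +
			sizeof(struct ixgbe_advctx_info) * IXGBE_CTX_NUM,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL)
		return -ENOMEM;
	txq->ctx_cache = RTE_PTR_ADD(txq, sizeof(*txq));

Since the unions overlay mutually exclusive per-driver members, the shared
structure only pays for the largest driver-specific block rather than the
sum of all of them.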

diff --git a/drivers/common/intel_eth/ieth_rxtx.h b/drivers/common/intel_eth/ieth_rxtx.h
index 986e0a6d42..9f8a1d7141 100644
--- a/drivers/common/intel_eth/ieth_rxtx.h
+++ b/drivers/common/intel_eth/ieth_rxtx.h
@@ -35,9 +35,13 @@ struct ieth_tx_queue {
                volatile struct i40e_tx_desc *i40e_tx_ring;
                volatile struct iavf_tx_desc *iavf_tx_ring;
                volatile struct ice_tx_desc *ice_tx_ring;
+               volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
        };
        volatile uint8_t *qtx_tail;               /* register address of tail */
-       struct ieth_tx_entry *sw_ring; /* virtual address of SW ring */
+       union {
+               struct ieth_tx_entry *sw_ring; /* virtual address of SW ring */
+               struct ieth_vec_tx_entry *sw_ring_v;
+       };
        rte_iova_t tx_ring_dma;        /* TX ring DMA address */
        uint16_t nb_tx_desc;           /* number of TX descriptors */
        uint16_t tx_tail; /* current value of tail register */
@@ -89,6 +93,14 @@ struct ieth_tx_queue {
                        uint8_t use_ctx : 1; /* if use the ctx desc, a packet needs
                                          two descriptors */
                };
+               struct { /* ixgbe specific values */
+                       const struct ixgbe_txq_ops *ops;
+                       struct ixgbe_advctx_info *ctx_cache;
+                       uint32_t ctx_curr;
+#ifdef RTE_LIB_SECURITY
+                       uint8_t using_ipsec;  /**< indicates that IPsec TX feature is in use */
+#endif
+               };
        };
 };
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index eb431889c3..e774c51f67 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1116,7 +1116,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
         * RX and TX function.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
-               struct ixgbe_tx_queue *txq;
+               struct ieth_tx_queue *txq;
                /* TX queue function in primary, set by last queue initialized
                 * Tx queue may not initialized by primary process
                 */
@@ -1621,7 +1621,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
         * RX function
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
-               struct ixgbe_tx_queue *txq;
+               struct ieth_tx_queue *txq;
                /* TX queue function in primary, set by last queue initialized
                 * Tx queue may not initialized by primary process
                 */
diff --git a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
index 546825f334..d6edc9d0aa 100644
--- a/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
+++ b/drivers/net/ixgbe/ixgbe_recycle_mbufs_vec_common.c
@@ -51,7 +51,7 @@ uint16_t
 ixgbe_recycle_tx_mbufs_reuse_vec(void *tx_queue,
                struct rte_eth_recycle_rxq_info *recycle_rxq_info)
 {
-       struct ixgbe_tx_queue *txq = tx_queue;
+       struct ieth_tx_queue *txq = tx_queue;
        struct ieth_tx_entry *txep;
        struct rte_mbuf **rxep;
        int i, n;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 96eafd52a0..e80bd6fccc 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -98,7 +98,7 @@
  * Return the total number of buffers freed.
  */
 static __rte_always_inline int
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ieth_tx_queue *txq)
 {
        struct ieth_tx_entry *txep;
        uint32_t status;
@@ -195,7 +195,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
  * Copy mbuf pointers to the S/W ring.
  */
 static inline void
-ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
+ixgbe_tx_fill_hw_ring(struct ieth_tx_queue *txq, struct rte_mbuf **pkts,
                      uint16_t nb_pkts)
 {
        volatile union ixgbe_adv_tx_desc *txdp = &(txq->ixgbe_tx_ring[txq->tx_tail]);
@@ -231,7 +231,7 @@ static inline uint16_t
 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
             uint16_t nb_pkts)
 {
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
        volatile union ixgbe_adv_tx_desc *tx_r = txq->ixgbe_tx_ring;
        uint16_t n = 0;
 
@@ -344,7 +344,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                    uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
 
        while (nb_pkts) {
                uint16_t ret, num;
@@ -362,7 +362,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static inline void
-ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
+ixgbe_set_xmit_ctx(struct ieth_tx_queue *txq,
                volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
                uint64_t ol_flags, union ixgbe_tx_offload tx_offload,
                __rte_unused uint64_t *mdata)
@@ -493,7 +493,7 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
  * or create a new context descriptor.
  */
 static inline uint32_t
-what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags,
+what_advctx_update(struct ieth_tx_queue *txq, uint64_t flags,
                   union ixgbe_tx_offload tx_offload)
 {
        /* If match with the current used context */
@@ -561,7 +561,7 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
 
 /* Reset transmit descriptors after they have been used */
 static inline int
-ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
+ixgbe_xmit_cleanup(struct ieth_tx_queue *txq)
 {
        struct ieth_tx_entry *sw_ring = txq->sw_ring;
        volatile union ixgbe_adv_tx_desc *txr = txq->ixgbe_tx_ring;
@@ -623,7 +623,7 @@ uint16_t
 ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
 {
-       struct ixgbe_tx_queue *txq;
+       struct ieth_tx_queue *txq;
        struct ieth_tx_entry *sw_ring;
        struct ieth_tx_entry *txe, *txn;
        volatile union ixgbe_adv_tx_desc *txr;
@@ -963,7 +963,7 @@ ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        int i, ret;
        uint64_t ol_flags;
        struct rte_mbuf *m;
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
 
        for (i = 0; i < nb_pkts; i++) {
                m = tx_pkts[i];
@@ -2335,7 +2335,7 @@ ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
  **********************************************************************/
 
 static void __rte_cold
-ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs(struct ieth_tx_queue *txq)
 {
        unsigned i;
 
@@ -2350,7 +2350,7 @@ ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
 }
 
 static int
-ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt)
+ixgbe_tx_done_cleanup_full(struct ieth_tx_queue *txq, uint32_t free_cnt)
 {
        struct ieth_tx_entry *swr_ring = txq->sw_ring;
        uint16_t i, tx_last, tx_id;
@@ -2408,7 +2408,7 @@ ixgbe_tx_done_cleanup_full(struct ixgbe_tx_queue *txq, uint32_t free_cnt)
 }
 
 static int
-ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq,
+ixgbe_tx_done_cleanup_simple(struct ieth_tx_queue *txq,
                        uint32_t free_cnt)
 {
        int i, n, cnt;
@@ -2432,7 +2432,7 @@ ixgbe_tx_done_cleanup_simple(struct ixgbe_tx_queue *txq,
 }
 
 static int
-ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused,
+ixgbe_tx_done_cleanup_vec(struct ieth_tx_queue *txq __rte_unused,
                        uint32_t free_cnt __rte_unused)
 {
        return -ENOTSUP;
@@ -2441,7 +2441,7 @@ ixgbe_tx_done_cleanup_vec(struct ixgbe_tx_queue *txq __rte_unused,
 int
 ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
 {
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
        if (txq->offloads == 0 &&
 #ifdef RTE_LIB_SECURITY
                        !(txq->using_ipsec) &&
@@ -2461,7 +2461,7 @@ ixgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
 }
 
 static void __rte_cold
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_swring(struct ieth_tx_queue *txq)
 {
        if (txq != NULL &&
            txq->sw_ring != NULL)
@@ -2469,7 +2469,7 @@ ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
 }
 
 static void __rte_cold
-ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release(struct ieth_tx_queue *txq)
 {
        if (txq != NULL && txq->ops != NULL) {
                txq->ops->release_mbufs(txq);
@@ -2487,7 +2487,7 @@ ixgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 
 /* (Re)set dynamic ixgbe_tx_queue fields to defaults */
 static void __rte_cold
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ieth_tx_queue *txq)
 {
        static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
        struct ieth_tx_entry *txe = txq->sw_ring;
@@ -2536,7 +2536,7 @@ static const struct ixgbe_txq_ops def_txq_ops = {
  * in dev_init by secondary process when attaching to an existing ethdev.
  */
 void __rte_cold
-ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
+ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ieth_tx_queue *txq)
 {
        /* Use a simple Tx queue (no offloads, no multi segs) if possible */
        if ((txq->offloads == 0) &&
@@ -2618,7 +2618,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                         const struct rte_eth_txconf *tx_conf)
 {
        const struct rte_memzone *tz;
-       struct ixgbe_tx_queue *txq;
+       struct ieth_tx_queue *txq;
        struct ixgbe_hw     *hw;
        uint16_t tx_rs_thresh, tx_free_thresh;
        uint64_t offloads;
@@ -2740,12 +2740,12 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        }
 
        /* First allocate the tx queue data structure */
-       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue) +
-                                       sizeof(struct ixgbe_advctx_info) * IXGBE_CTX_NUM,
+       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ieth_tx_queue) +
+                                       sizeof(struct ixgbe_advctx_info) * IXGBE_CTX_NUM,
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL)
                return -ENOMEM;
-       txq->ctx_cache = RTE_PTR_ADD(txq, sizeof(struct ixgbe_tx_queue));
+       txq->ctx_cache = RTE_PTR_ADD(txq, sizeof(struct ieth_tx_queue));
 
        /*
         * Allocate TX ring hardware descriptors. A memzone large enough to
@@ -3312,7 +3312,7 @@ ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
 int
 ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 {
-       struct ixgbe_tx_queue *txq = tx_queue;
+       struct ieth_tx_queue *txq = tx_queue;
        volatile uint32_t *status;
        uint32_t desc;
 
@@ -3377,7 +3377,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
+               struct ieth_tx_queue *txq = dev->data->tx_queues[i];
 
                if (txq != NULL) {
                        txq->ops->release_mbufs(txq);
@@ -5284,7 +5284,7 @@ void __rte_cold
 ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct ixgbe_tx_queue *txq;
+       struct ieth_tx_queue *txq;
        uint64_t bus_addr;
        uint32_t hlreg0;
        uint32_t txctrl;
@@ -5401,7 +5401,7 @@ int __rte_cold
 ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct ixgbe_tx_queue *txq;
+       struct ieth_tx_queue *txq;
        struct ixgbe_rx_queue *rxq;
        uint32_t txdctl;
        uint32_t dmatxctl;
@@ -5571,7 +5571,7 @@ int __rte_cold
 ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        struct ixgbe_hw     *hw;
-       struct ixgbe_tx_queue *txq;
+       struct ieth_tx_queue *txq;
        uint32_t txdctl;
        int poll_ms;
 
@@ -5610,7 +5610,7 @@ int __rte_cold
 ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        struct ixgbe_hw     *hw;
-       struct ixgbe_tx_queue *txq;
+       struct ieth_tx_queue *txq;
        uint32_t txdctl;
        uint32_t txtdh, txtdt;
        int poll_ms;
@@ -5684,7 +5684,7 @@ void
 ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        struct rte_eth_txq_info *qinfo)
 {
-       struct ixgbe_tx_queue *txq;
+       struct ieth_tx_queue *txq;
 
        txq = dev->data->tx_queues[queue_id];
 
@@ -5876,7 +5876,7 @@ void __rte_cold
 ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct ixgbe_tx_queue *txq;
+       struct ieth_tx_queue *txq;
        uint64_t bus_addr;
        uint32_t txctrl;
        uint16_t i;
@@ -5917,7 +5917,7 @@ void __rte_cold
 ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw     *hw;
-       struct ixgbe_tx_queue *txq;
+       struct ieth_tx_queue *txq;
        struct ixgbe_rx_queue *rxq;
        uint32_t txdctl;
        uint32_t rxdctl;
@@ -6126,7 +6126,7 @@ ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
 }
 
 int
-ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq)
+ixgbe_txq_vec_setup(struct ieth_tx_queue __rte_unused *txq)
 {
        return -1;
 }
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 8efb46e07a..5b56e48498 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -180,56 +180,10 @@ struct ixgbe_advctx_info {
        union ixgbe_tx_offload tx_offload_mask;
 };
 
-/**
- * Structure associated with each TX queue.
- */
-struct ixgbe_tx_queue {
-       /** TX ring virtual address. */
-       volatile union ixgbe_adv_tx_desc *ixgbe_tx_ring;
-       rte_iova_t tx_ring_dma; /**< TX ring DMA address. */
-       union {
-               struct ieth_tx_entry *sw_ring; /**< address of SW ring for scalar PMD. */
-               struct ieth_vec_tx_entry *sw_ring_v; /**< address of SW ring for vector PMD */
-       };
-       volatile uint8_t *qtx_tail; /**< Address of TDT register. */
-       uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
-       uint16_t            tx_tail;       /**< current value of TDT reg. */
-       /**< Start freeing TX buffers if there are less free descriptors than
-            this value. */
-       uint16_t            tx_free_thresh;
-       /** Number of TX descriptors to use before RS bit is set. */
-       uint16_t            tx_rs_thresh;
-       /** Number of TX descriptors used since RS bit was set. */
-       uint16_t            nb_tx_used;
-       /** Index to last TX descriptor to have been cleaned. */
-       uint16_t            last_desc_cleaned;
-       /** Total number of TX descriptors ready to be allocated. */
-       uint16_t            nb_tx_free;
-       uint16_t tx_next_dd; /**< next desc to scan for DD bit */
-       uint16_t tx_next_rs; /**< next desc to set RS bit */
-       uint16_t            queue_id;      /**< TX queue index. */
-       uint16_t            reg_idx;       /**< TX queue register index. */
-       uint16_t            port_id;       /**< Device port identifier. */
-       uint8_t             pthresh;       /**< Prefetch threshold register. */
-       uint8_t             hthresh;       /**< Host threshold register. */
-       uint8_t             wthresh;       /**< Write-back threshold reg. */
-       uint64_t offloads; /**< Tx offload flags of RTE_ETH_TX_OFFLOAD_* */
-       uint32_t            ctx_curr;      /**< Hardware context states. */
-       /** Hardware context history. */
-       struct ixgbe_advctx_info *ctx_cache;
-       const struct ixgbe_txq_ops *ops;       /**< txq ops */
-       _Bool             tx_deferred_start; /**< not in global dev start. */
-#ifdef RTE_LIB_SECURITY
-       uint8_t             using_ipsec;
-       /**< indicates that IPsec TX feature is in use */
-#endif
-       const struct rte_memzone *mz;
-};
-
 struct ixgbe_txq_ops {
-       void (*release_mbufs)(struct ixgbe_tx_queue *txq);
-       void (*free_swring)(struct ixgbe_tx_queue *txq);
-       void (*reset)(struct ixgbe_tx_queue *txq);
+       void (*release_mbufs)(struct ieth_tx_queue *txq);
+       void (*free_swring)(struct ieth_tx_queue *txq);
+       void (*reset)(struct ieth_tx_queue *txq);
 };
 
 /*
@@ -250,7 +204,7 @@ struct ixgbe_txq_ops {
  * the queue parameters. Used in tx_queue_setup by primary process and then
  * in dev_init by secondary process when attaching to an existing ethdev.
  */
-void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
+void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ieth_tx_queue *txq);
 
 /**
  * Sets the rx_pkt_burst callback in the ixgbe rte_eth_dev instance.
@@ -287,7 +241,7 @@ void ixgbe_recycle_rx_descriptors_refill_vec(void *rx_queue, uint16_t nb_mbufs);
 
 uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                                    uint16_t nb_pkts);
-int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
+int ixgbe_txq_vec_setup(struct ieth_tx_queue *txq);
 
 uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index fc254ef3d3..c2fcc51610 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -12,7 +12,7 @@
 #include "ixgbe_rxtx.h"
 
 static __rte_always_inline int
-ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_bufs(struct ieth_tx_queue *txq)
 {
        struct ieth_vec_tx_entry *txep;
        uint32_t status;
@@ -79,7 +79,7 @@ tx_backlog_entry(struct ieth_vec_tx_entry *txep,
 }
 
 static inline void
-_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+_ixgbe_tx_queue_release_mbufs_vec(struct ieth_tx_queue *txq)
 {
        unsigned int i;
        struct ieth_vec_tx_entry *txe;
@@ -134,7 +134,7 @@ _ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
 }
 
 static inline void
-_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
+_ixgbe_tx_free_swring_vec(struct ieth_tx_queue *txq)
 {
        if (txq == NULL)
                return;
@@ -146,7 +146,7 @@ _ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq)
 }
 
 static inline void
-_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq)
+_ixgbe_reset_tx_queue_vec(struct ieth_tx_queue *txq)
 {
        static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } };
        struct ieth_vec_tx_entry *txe = txq->sw_ring_v;
@@ -199,7 +199,7 @@ ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq)
 }
 
 static inline int
-ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq,
+ixgbe_txq_vec_setup_default(struct ieth_tx_queue *txq,
                            const struct ixgbe_txq_ops *txq_ops)
 {
        if (txq->sw_ring_v == NULL)
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e4381802c8..b51072b294 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -571,7 +571,7 @@ uint16_t
 ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                           uint16_t nb_pkts)
 {
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
        volatile union ixgbe_adv_tx_desc *txdp;
        struct ieth_vec_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
@@ -634,7 +634,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static void __rte_cold
-ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs_vec(struct ieth_tx_queue *txq)
 {
        _ixgbe_tx_queue_release_mbufs_vec(txq);
 }
@@ -646,13 +646,13 @@ ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
 }
 
 static void __rte_cold
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_swring(struct ieth_tx_queue *txq)
 {
        _ixgbe_tx_free_swring_vec(txq);
 }
 
 static void __rte_cold
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ieth_tx_queue *txq)
 {
        _ixgbe_reset_tx_queue_vec(txq);
 }
@@ -670,7 +670,7 @@ ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
 }
 
 int __rte_cold
-ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+ixgbe_txq_vec_setup(struct ieth_tx_queue *txq)
 {
        return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
 }
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index 4c8cc22f59..ddba15ad52 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -693,7 +693,7 @@ uint16_t
 ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                           uint16_t nb_pkts)
 {
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct ieth_tx_queue *txq = (struct ieth_tx_queue *)tx_queue;
        volatile union ixgbe_adv_tx_desc *txdp;
        struct ieth_vec_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
@@ -757,7 +757,7 @@ ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static void __rte_cold
-ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs_vec(struct ieth_tx_queue *txq)
 {
        _ixgbe_tx_queue_release_mbufs_vec(txq);
 }
@@ -769,13 +769,13 @@ ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
 }
 
 static void __rte_cold
-ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
+ixgbe_tx_free_swring(struct ieth_tx_queue *txq)
 {
        _ixgbe_tx_free_swring_vec(txq);
 }
 
 static void __rte_cold
-ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
+ixgbe_reset_tx_queue(struct ieth_tx_queue *txq)
 {
        _ixgbe_reset_tx_queue_vec(txq);
 }
@@ -793,7 +793,7 @@ ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
 }
 
 int __rte_cold
-ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
+ixgbe_txq_vec_setup(struct ieth_tx_queue *txq)
 {
        return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
 }
-- 
2.43.0
