Modify ixgbe to be compatible with the new rte_eth_dev structure layout.
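
The burst handlers move out of struct rte_eth_dev itself into an embedded
function table ("fcns") and are now passed the device handle plus a queue
index instead of a raw queue pointer, so each handler resolves its queue
through dev->data. A minimal sketch of the assumed table layout -- the
struct tag below is illustrative; the authoritative definition comes from
the ethdev patch earlier in this series:

    /* Illustrative only: just the "fcns" member name and the callback
     * signatures below are relied upon by this patch.
     */
    struct rte_eth_dev_fcns {
            uint16_t (*rx_pkt_burst)(void *eth_dev, uint16_t rx_queue_id,
                                     struct rte_mbuf **rx_pkts,
                                     uint16_t nb_pkts);
            uint16_t (*tx_pkt_burst)(void *eth_dev, uint16_t tx_queue_id,
                                     struct rte_mbuf **tx_pkts,
                                     uint16_t nb_pkts);
            uint16_t (*tx_pkt_prepare)(void *eth_dev, uint16_t tx_queue_id,
                                       struct rte_mbuf **tx_pkts,
                                       uint16_t nb_pkts);
    }; /* embedded in struct rte_eth_dev as the "fcns" member */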

Signed-off-by: Marcin Zapolski <marcinx.a.zapol...@intel.com>
---
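Note for reviewers: with the queue-id to queue-pointer lookup moved into
the PMD, the public burst API is expected to dispatch roughly as sketched
below. This is illustrative only (it assumes the ethdev patch of this
series wires it up this way) and is not part of this patch:

    static inline uint16_t
    rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
                     struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
    {
            struct rte_eth_dev *dev = &rte_eth_devices[port_id];

            /* The queue lookup now happens inside the PMD callback,
             * e.g. ixgbe_recv_pkts() below reads
             * dev->data->rx_queues[queue_id] itself.
             */
            return dev->fcns.rx_pkt_burst(dev, queue_id, rx_pkts, nb_pkts);
    }
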
 drivers/net/ixgbe/ixgbe_ethdev.c         |  30 +++---
 drivers/net/ixgbe/ixgbe_ethdev.h         |  23 ++---
 drivers/net/ixgbe/ixgbe_rxtx.c           | 111 +++++++++++++----------
 drivers/net/ixgbe/ixgbe_rxtx.h           |   9 +-
 drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c  |  22 +++--
 drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c   |  23 +++--
 drivers/net/ixgbe/ixgbe_vf_representor.c |  10 +-
 7 files changed, 127 insertions(+), 101 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 03fc1f717..32b0bee12 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1086,9 +1086,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        PMD_INIT_FUNC_TRACE();
 
        eth_dev->dev_ops = &ixgbe_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
-       eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
-       eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
+       eth_dev->fcns.rx_pkt_burst = &ixgbe_recv_pkts;
+       eth_dev->fcns.tx_pkt_burst = &ixgbe_xmit_pkts;
+       eth_dev->fcns.tx_pkt_prepare = &ixgbe_prep_pkts;
 
        /*
         * For secondary processes, we don't initialise any further as primary
@@ -1328,8 +1328,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
                ixgbe_dev_close(eth_dev);
 
        eth_dev->dev_ops = NULL;
-       eth_dev->rx_pkt_burst = NULL;
-       eth_dev->tx_pkt_burst = NULL;
+       eth_dev->fcns.rx_pkt_burst = NULL;
+       eth_dev->fcns.tx_pkt_burst = NULL;
 
        /* Unlock any pending hardware semaphore */
        ixgbe_swfw_lock_reset(hw);
@@ -1619,8 +1619,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        PMD_INIT_FUNC_TRACE();
 
        eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
-       eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+       eth_dev->fcns.rx_pkt_burst = &ixgbe_recv_pkts;
+       eth_dev->fcns.tx_pkt_burst = &ixgbe_xmit_pkts;
 
        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
@@ -1777,8 +1777,8 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
                ixgbevf_dev_close(eth_dev);
 
        eth_dev->dev_ops = NULL;
-       eth_dev->rx_pkt_burst = NULL;
-       eth_dev->tx_pkt_burst = NULL;
+       eth_dev->fcns.rx_pkt_burst = NULL;
+       eth_dev->fcns.tx_pkt_burst = NULL;
 
        /* Disable the interrupts for VF */
        ixgbevf_intr_disable(eth_dev);
@@ -3888,15 +3888,15 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
                RTE_PTYPE_UNKNOWN
        };
 
-       if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
-           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
-           dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
-           dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
+       if (dev->fcns.rx_pkt_burst == ixgbe_recv_pkts ||
+           dev->fcns.rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
+           dev->fcns.rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
+           dev->fcns.rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
                return ptypes;
 
 #if defined(RTE_ARCH_X86)
-       if (dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
-           dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
+       if (dev->fcns.rx_pkt_burst == ixgbe_recv_pkts_vec ||
+           dev->fcns.rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
                return ptypes;
 #endif
        return NULL;
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 6e9ed2e10..d3010b99d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -619,25 +619,26 @@ void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
 
 void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
 
-uint16_t ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-               uint16_t nb_pkts);
+uint16_t ixgbe_recv_pkts(void *eth_dev, uint16_t rx_queue_id,
+               struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
-uint16_t ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+uint16_t ixgbe_recv_pkts_bulk_alloc(void *eth_dev, uint16_t rx_queue_id,
+                                   struct rte_mbuf **rx_pkts,
                                    uint16_t nb_pkts);
 
-uint16_t ixgbe_recv_pkts_lro_single_alloc(void *rx_queue,
+uint16_t ixgbe_recv_pkts_lro_single_alloc(void *eth_dev, uint16_t rx_queue_id,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue,
+uint16_t ixgbe_recv_pkts_lro_bulk_alloc(void *eth_dev, uint16_t rx_queue_id,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
-uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-               uint16_t nb_pkts);
+uint16_t ixgbe_xmit_pkts(void *eth_dev, uint16_t tx_queue_id,
+               struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
-uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
-               uint16_t nb_pkts);
+uint16_t ixgbe_xmit_pkts_simple(void *eth_dev, uint16_t tx_queue_id,
+                struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
-uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-               uint16_t nb_pkts);
+uint16_t ixgbe_prep_pkts(void *eth_dev, uint16_t tx_queue_id,
+               struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
 int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
                              struct rte_eth_rss_conf *rss_conf);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index edcfa60ce..8d8d2a912 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -88,7 +88,8 @@
 #endif
 
 #ifdef RTE_IXGBE_INC_VECTOR
-uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+uint16_t ixgbe_xmit_fixed_burst_vec(void *eth_dev, uint16_t tx_queue_id,
+                                   struct rte_mbuf **tx_pkts,
                                    uint16_t nb_pkts);
 #endif
 
@@ -233,10 +234,11 @@ ixgbe_tx_fill_hw_ring(struct ixgbe_tx_queue *txq, struct rte_mbuf **pkts,
 }
 
 static inline uint16_t
-tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+tx_xmit_pkts(void *eth_dev, uint16_t tx_queue_id, struct rte_mbuf **tx_pkts,
             uint16_t nb_pkts)
 {
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
        volatile union ixgbe_adv_tx_desc *tx_r = txq->tx_ring;
        uint16_t n = 0;
 
@@ -319,14 +321,14 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 uint16_t
-ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+ixgbe_xmit_pkts_simple(void *eth_dev, uint16_t tx_queue_id, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts)
 {
        uint16_t nb_tx;
 
        /* Try to transmit at least chunks of TX_MAX_BURST pkts */
        if (likely(nb_pkts <= RTE_PMD_IXGBE_TX_MAX_BURST))
-               return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
+               return tx_xmit_pkts(eth_dev, tx_queue_id, tx_pkts, nb_pkts);
 
        /* transmit more than the max burst, in chunks of TX_MAX_BURST */
        nb_tx = 0;
@@ -334,7 +336,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t ret, n;
 
                n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST);
-               ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n);
+               ret = tx_xmit_pkts(eth_dev, tx_queue_id, &(tx_pkts[nb_tx]), n);
                nb_tx = (uint16_t)(nb_tx + ret);
                nb_pkts = (uint16_t)(nb_pkts - ret);
                if (ret < n)
@@ -346,18 +348,19 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 #ifdef RTE_IXGBE_INC_VECTOR
 static uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-                   uint16_t nb_pkts)
+ixgbe_xmit_pkts_vec(void *eth_dev, uint16_t tx_queue_id,
+                   struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 
        while (nb_pkts) {
                uint16_t ret, num;
 
                num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
-               ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
-                                                num);
+               ret = ixgbe_xmit_fixed_burst_vec(eth_dev, tx_queue_id,
+                                                &tx_pkts[nb_tx], num);
                nb_tx += ret;
                nb_pkts -= ret;
                if (ret < num)
@@ -628,10 +631,11 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
 }
 
 uint16_t
-ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ixgbe_xmit_pkts(void *eth_dev, uint16_t tx_queue_id, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
 {
-       struct ixgbe_tx_queue *txq;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
        struct ixgbe_tx_entry *sw_ring;
        struct ixgbe_tx_entry *txe, *txn;
        volatile union ixgbe_adv_tx_desc *txr;
@@ -658,7 +662,6 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
        tx_offload.data[0] = 0;
        tx_offload.data[1] = 0;
-       txq = tx_queue;
        sw_ring = txq->sw_ring;
        txr     = txq->tx_ring;
        tx_id   = txq->tx_tail;
@@ -965,12 +968,14 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
  *
  **********************************************************************/
 uint16_t
-ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ixgbe_prep_pkts(void *eth_dev, uint16_t tx_queue_id, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts)
 {
        int i, ret;
        uint64_t ol_flags;
        struct rte_mbuf *m;
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 
        for (i = 0; i < nb_pkts; i++) {
                m = tx_pkts[i];
@@ -1647,10 +1652,11 @@ ixgbe_rx_fill_from_stage(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 }
 
 static inline uint16_t
-rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+rx_recv_pkts(void *eth_dev, uint16_t rx_queue_id, struct rte_mbuf **rx_pkts,
             uint16_t nb_pkts)
 {
-       struct ixgbe_rx_queue *rxq = (struct ixgbe_rx_queue *)rx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        uint16_t nb_rx = 0;
 
        /* Any previously recv'd pkts will be returned from the Rx stage */
@@ -1709,8 +1715,8 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 /* split requests into chunks of size RTE_PMD_IXGBE_RX_MAX_BURST */
 uint16_t
-ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
-                          uint16_t nb_pkts)
+ixgbe_recv_pkts_bulk_alloc(void *eth_dev, uint16_t rx_queue_id,
+                          struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
        uint16_t nb_rx;
 
@@ -1718,7 +1724,7 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
                return 0;
 
        if (likely(nb_pkts <= RTE_PMD_IXGBE_RX_MAX_BURST))
-               return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+               return rx_recv_pkts(eth_dev, rx_queue_id, rx_pkts, nb_pkts);
 
        /* request is relatively large, chunk it up */
        nb_rx = 0;
@@ -1726,7 +1732,7 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t ret, n;
 
                n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST);
-               ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+               ret = rx_recv_pkts(eth_dev, rx_queue_id, &rx_pkts[nb_rx], n);
                nb_rx = (uint16_t)(nb_rx + ret);
                nb_pkts = (uint16_t)(nb_pkts - ret);
                if (ret < n)
@@ -1737,10 +1743,11 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
 }
 
 uint16_t
-ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ixgbe_recv_pkts(void *eth_dev, uint16_t rx_queue_id, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
-       struct ixgbe_rx_queue *rxq;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        volatile union ixgbe_adv_rx_desc *rx_ring;
        volatile union ixgbe_adv_rx_desc *rxdp;
        struct ixgbe_rx_entry *sw_ring;
@@ -1760,7 +1767,6 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
        nb_rx = 0;
        nb_hold = 0;
-       rxq = rx_queue;
        rx_id = rxq->rx_tail;
        rx_ring = rxq->rx_ring;
        sw_ring = rxq->sw_ring;
@@ -2012,10 +2018,12 @@ ixgbe_fill_cluster_head_buf(
  * receive" interface).
  */
 static inline uint16_t
-ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ixgbe_recv_pkts_lro(void *eth_dev, uint16_t rx_queue_id,
+                   struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
                    bool bulk_alloc)
 {
-       struct ixgbe_rx_queue *rxq = rx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        volatile union ixgbe_adv_rx_desc *rx_ring = rxq->rx_ring;
        struct ixgbe_rx_entry *sw_ring = rxq->sw_ring;
        struct ixgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
@@ -2272,17 +2280,18 @@ ixgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
 }
 
 uint16_t
-ixgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
-                                uint16_t nb_pkts)
+ixgbe_recv_pkts_lro_single_alloc(void *eth_dev, uint16_t rx_queue_id,
+                                struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
+       return ixgbe_recv_pkts_lro(eth_dev, rx_queue_id, rx_pkts, nb_pkts,
+                                  false);
 }
 
 uint16_t
-ixgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
-                              uint16_t nb_pkts)
+ixgbe_recv_pkts_lro_bulk_alloc(void *eth_dev, uint16_t rx_queue_id,
+                              struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       return ixgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
+       return ixgbe_recv_pkts_lro(eth_dev, rx_queue_id, rx_pkts, nb_pkts, true);
 }
 
 /*********************************************************************
@@ -2391,16 +2400,16 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
 #endif
                        (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
                PMD_INIT_LOG(DEBUG, "Using simple tx code path");
-               dev->tx_pkt_prepare = NULL;
+               dev->fcns.tx_pkt_prepare = NULL;
 #ifdef RTE_IXGBE_INC_VECTOR
                if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
                                (rte_eal_process_type() != RTE_PROC_PRIMARY ||
                                        ixgbe_txq_vec_setup(txq) == 0)) {
                        PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
-                       dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
+                       dev->fcns.tx_pkt_burst = ixgbe_xmit_pkts_vec;
                } else
 #endif
-               dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
+               dev->fcns.tx_pkt_burst = ixgbe_xmit_pkts_simple;
        } else {
                PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
                PMD_INIT_LOG(DEBUG,
@@ -2410,8 +2419,8 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
                                " - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
                                (unsigned long)txq->tx_rs_thresh,
                                (unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
-               dev->tx_pkt_burst = ixgbe_xmit_pkts;
-               dev->tx_pkt_prepare = ixgbe_prep_pkts;
+               dev->fcns.tx_pkt_burst = ixgbe_xmit_pkts;
+               dev->fcns.tx_pkt_prepare = ixgbe_prep_pkts;
        }
 }
 
@@ -4655,11 +4664,11 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
                if (adapter->rx_bulk_alloc_allowed) {
                        PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
                                           "allocation version");
-                       dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+                       dev->fcns.rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
                } else {
                        PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
                                           "allocation version");
-                       dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+                       dev->fcns.rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
                }
        } else if (dev->data->scattered_rx) {
                /*
@@ -4671,12 +4680,12 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
                                            "callback (port=%d).",
                                     dev->data->port_id);
 
-                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
+                       dev->fcns.rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
                } else if (adapter->rx_bulk_alloc_allowed) {
                        PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
                                           "allocation callback (port=%d).",
                                     dev->data->port_id);
-                       dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
+                       dev->fcns.rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
                } else {
                        PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
                                            "single allocation) "
@@ -4684,7 +4693,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
                                            "(port=%d).",
                                     dev->data->port_id);
 
-                       dev->rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
+                       dev->fcns.rx_pkt_burst = ixgbe_recv_pkts_lro_single_alloc;
                }
        /*
         * Below we set "simple" callbacks according to port/queues parameters.
@@ -4700,28 +4709,28 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev)
                             RTE_IXGBE_DESCS_PER_LOOP,
                             dev->data->port_id);
 
-               dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
+               dev->fcns.rx_pkt_burst = ixgbe_recv_pkts_vec;
        } else if (adapter->rx_bulk_alloc_allowed) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
                                    "satisfied. Rx Burst Bulk Alloc function "
                                    "will be used on port=%d.",
                             dev->data->port_id);
 
-               dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
+               dev->fcns.rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
        } else {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
                                    "satisfied, or Scattered Rx is requested "
                                    "(port=%d).",
                             dev->data->port_id);
 
-               dev->rx_pkt_burst = ixgbe_recv_pkts;
+               dev->fcns.rx_pkt_burst = ixgbe_recv_pkts;
        }
 
        /* Propagate information about RX function choice through all queues. */
 
        rx_using_sse =
-               (dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
-               dev->rx_pkt_burst == ixgbe_recv_pkts_vec);
+               (dev->fcns.rx_pkt_burst == ixgbe_recv_scattered_pkts_vec ||
+               dev->fcns.rx_pkt_burst == ixgbe_recv_pkts_vec);
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
@@ -5817,7 +5826,8 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
 
 __rte_weak uint16_t
 ixgbe_recv_pkts_vec(
-       void __rte_unused *rx_queue,
+       void __rte_unused *eth_dev,
+       uint16_t __rte_unused rx_queue_id,
        struct rte_mbuf __rte_unused **rx_pkts,
        uint16_t __rte_unused nb_pkts)
 {
@@ -5826,7 +5836,8 @@ ixgbe_recv_pkts_vec(
 
 __rte_weak uint16_t
 ixgbe_recv_scattered_pkts_vec(
-       void __rte_unused *rx_queue,
+       void __rte_unused *eth_dev,
+       uint16_t __rte_unused rx_queue_id,
        struct rte_mbuf __rte_unused **rx_pkts,
        uint16_t __rte_unused nb_pkts)
 {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 505d344b9..0f11a2bf2 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -277,9 +277,9 @@ void ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq);
 void ixgbe_set_rx_function(struct rte_eth_dev *dev);
 
 int ixgbe_check_supported_loopback_mode(struct rte_eth_dev *dev);
-uint16_t ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-               uint16_t nb_pkts);
-uint16_t ixgbe_recv_scattered_pkts_vec(void *rx_queue,
+uint16_t ixgbe_recv_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+               struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ixgbe_recv_scattered_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq);
@@ -290,7 +290,8 @@ extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
 
 #ifdef RTE_IXGBE_INC_VECTOR
 
-uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+uint16_t ixgbe_xmit_fixed_burst_vec(void *eth_dev, uint16_t tx_queue_id,
+                                   struct rte_mbuf **tx_pkts,
                                    uint16_t nb_pkts);
 int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
 #endif /* RTE_IXGBE_INC_VECTOR */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index edb138354..59045035a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -331,10 +331,12 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  * - don't support ol_flags for rss and csum err
  */
 uint16_t
-ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-               uint16_t nb_pkts)
+ixgbe_recv_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                   struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+       return _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, NULL);
 }
 
 /*
@@ -348,10 +350,11 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
  * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
  */
 uint16_t
-ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-               uint16_t nb_pkts)
+ixgbe_recv_scattered_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                             struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       struct ixgbe_rx_queue *rxq = rx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
 
        /* get some new buffers */
@@ -402,10 +405,11 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
 }
 
 uint16_t
-ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-                          uint16_t nb_pkts)
+ixgbe_xmit_fixed_burst_vec(void *eth_dev, uint16_t tx_queue_id,
+                          struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
        volatile union ixgbe_adv_tx_desc *txdp;
        struct ixgbe_tx_entry_v *txep;
        uint16_t n, nb_commit, tx_id;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index c9ba48246..697561298 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -566,10 +566,14 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
  */
 uint16_t
-ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-               uint16_t nb_pkts)
+ixgbe_recv_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                   struct rte_mbuf **rx_pkts,
+                   uint16_t nb_pkts)
 {
-       return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+
+       return _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, NULL);
 }
 
 /*
@@ -582,10 +586,12 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
  * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
  */
 uint16_t
-ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-               uint16_t nb_pkts)
+ixgbe_recv_scattered_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                             struct rte_mbuf **rx_pkts,
+                             uint16_t nb_pkts)
 {
-       struct ixgbe_rx_queue *rxq = rx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
 
        /* get some new buffers */
@@ -635,10 +641,11 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
 }
 
 uint16_t
-ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ixgbe_xmit_fixed_burst_vec(void *eth_dev, uint16_t tx_queue_id, struct rte_mbuf **tx_pkts,
                           uint16_t nb_pkts)
 {
-       struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct ixgbe_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
        volatile union ixgbe_adv_tx_desc *txdp;
        struct ixgbe_tx_entry_v *txep;
        uint16_t n, nb_commit, tx_id;
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
index 2c01f6e33..1a575398a 100644
--- a/drivers/net/ixgbe/ixgbe_vf_representor.c
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -154,14 +154,16 @@ static const struct eth_dev_ops ixgbe_vf_representor_dev_ops = {
 };
 
 static uint16_t
-ixgbe_vf_representor_rx_burst(__rte_unused void *rx_queue,
+ixgbe_vf_representor_rx_burst(__rte_unused void *eth_dev,
+       __rte_unused uint16_t rx_queue_id,
        __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
 {
        return 0;
 }
 
 static uint16_t
-ixgbe_vf_representor_tx_burst(__rte_unused void *tx_queue,
+ixgbe_vf_representor_tx_burst(__rte_unused void *eth_dev,
+       __rte_unused uint16_t tx_queue_id,
        __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
 {
        return 0;
@@ -200,8 +202,8 @@ ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
        /* No data-path, but need stub Rx/Tx functions to avoid crash
         * when testing with the likes of testpmd.
         */
-       ethdev->rx_pkt_burst = ixgbe_vf_representor_rx_burst;
-       ethdev->tx_pkt_burst = ixgbe_vf_representor_tx_burst;
+       ethdev->fcns.rx_pkt_burst = ixgbe_vf_representor_rx_burst;
+       ethdev->fcns.tx_pkt_burst = ixgbe_vf_representor_tx_burst;
 
        /* Setting the number queues allocated to the VF */
        ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
-- 
2.17.1
