Modify the i40e driver to be compatible with the new rte_eth_dev
structure layout: the Rx/Tx burst and Tx prepare handlers now live in
the dev->fcns sub-structure, and the datapath functions take the
rte_eth_dev pointer plus a queue index instead of a queue pointer.

Signed-off-by: Marcin Zapolski <marcinx.a.zapol...@intel.com>
---
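Note: this driver change depends on the ethdev rework earlier in this
series, where the datapath function pointers are grouped into a "fcns"
sub-structure of struct rte_eth_dev and the burst prototypes take the
device pointer plus a queue index instead of a queue pointer. The real
definition lives in the ethdev patch of the series; a minimal sketch of
the layout assumed here (the anonymous grouping and member set are
illustrative, only the member names and prototypes are taken from this
diff):

	/* sketch only -- not the authoritative rte_ethdev_core.h change */
	struct rte_eth_dev {
		struct {
			uint16_t (*rx_pkt_burst)(void *eth_dev, uint16_t rx_queue_id,
						 struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
			uint16_t (*tx_pkt_burst)(void *eth_dev, uint16_t tx_queue_id,
						 struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
			uint16_t (*tx_pkt_prepare)(void *eth_dev, uint16_t tx_queue_id,
						   struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
		} fcns;
		/* ... remaining rte_eth_dev members unchanged ... */
	};
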
 drivers/net/i40e/i40e_ethdev.c           |  10 +-
 drivers/net/i40e/i40e_ethdev.h           |   1 +
 drivers/net/i40e/i40e_ethdev_vf.c        |   8 +-
 drivers/net/i40e/i40e_rxtx.c             | 119 ++++++++++++-----------
 drivers/net/i40e/i40e_rxtx.h             |  33 ++++---
 drivers/net/i40e/i40e_rxtx_vec_altivec.c |  23 +++--
 drivers/net/i40e/i40e_rxtx_vec_avx2.c    |  45 +++++----
 drivers/net/i40e/i40e_rxtx_vec_neon.c    |  23 +++--
 drivers/net/i40e/i40e_rxtx_vec_sse.c     |  23 +++--
 drivers/net/i40e/i40e_vf_representor.c   |  12 ++-
 10 files changed, 166 insertions(+), 131 deletions(-)
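
With that layout, the inline rte_eth_rx_burst()/rte_eth_tx_burst()
wrappers in rte_ethdev.h presumably dispatch on the device pointer and
queue index directly, roughly as below (illustrative only; the actual
wrapper change is in the ethdev patch, not in this one):

	static inline uint16_t
	rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
			 struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
	{
		struct rte_eth_dev *dev = &rte_eth_devices[port_id];

		/* no per-queue handle passed here: the PMD resolves
		 * dev->data->rx_queues[queue_id] itself, as the i40e
		 * changes below do
		 */
		return dev->fcns.rx_pkt_burst(dev, queue_id, rx_pkts, nb_pkts);
	}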

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4e40b7ab5..08c3a7cc3 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1273,9 +1273,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
        PMD_INIT_FUNC_TRACE();
 
        dev->dev_ops = &i40e_eth_dev_ops;
-       dev->rx_pkt_burst = i40e_recv_pkts;
-       dev->tx_pkt_burst = i40e_xmit_pkts;
-       dev->tx_pkt_prepare = i40e_prep_pkts;
+       dev->fcns.rx_pkt_burst = i40e_recv_pkts;
+       dev->fcns.tx_pkt_burst = i40e_xmit_pkts;
+       dev->fcns.tx_pkt_prepare = i40e_prep_pkts;
 
        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
@@ -1717,8 +1717,8 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
                i40e_dev_close(dev);
 
        dev->dev_ops = NULL;
-       dev->rx_pkt_burst = NULL;
-       dev->tx_pkt_burst = NULL;
+       dev->fcns.rx_pkt_burst = NULL;
+       dev->fcns.tx_pkt_burst = NULL;
 
        /* Clear PXE mode */
        i40e_clear_pxe_mode(hw);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 38ac3ead6..a64857dab 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -8,6 +8,7 @@
 #include <stdint.h>
 
 #include <rte_time.h>
+#include <rte_ethdev_driver.h>
 #include <rte_kvargs.h>
 #include <rte_hash.h>
 #include <rte_flow.h>
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 308fb9835..c0db43444 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1473,8 +1473,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 
        /* assign ops func pointer */
        eth_dev->dev_ops = &i40evf_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &i40e_recv_pkts;
-       eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
+       eth_dev->fcns.rx_pkt_burst = &i40e_recv_pkts;
+       eth_dev->fcns.tx_pkt_burst = &i40e_xmit_pkts;
 
        /*
         * For secondary processes, we don't initialise any further as primary
@@ -1535,8 +1535,8 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
                return -EPERM;
 
        eth_dev->dev_ops = NULL;
-       eth_dev->rx_pkt_burst = NULL;
-       eth_dev->tx_pkt_burst = NULL;
+       eth_dev->fcns.rx_pkt_burst = NULL;
+       eth_dev->fcns.tx_pkt_burst = NULL;
 
        if (i40evf_uninit_vf(eth_dev) != 0) {
                PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 692c3bab4..4181d4fc8 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -576,10 +576,11 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
 }
 
 static inline uint16_t
-rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+rx_recv_pkts(void *eth_dev, uint16_t rx_queue_id, struct rte_mbuf **rx_pkts,
+            uint16_t nb_pkts)
 {
-       struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
-       struct rte_eth_dev *dev;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        uint16_t nb_rx = 0;
 
        if (!nb_pkts)
@@ -597,7 +598,6 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if (i40e_rx_alloc_bufs(rxq) != 0) {
                        uint16_t i, j;
 
-                       dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
                        dev->data->rx_mbuf_alloc_failed +=
                                rxq->rx_free_thresh;
 
@@ -620,7 +620,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 }
 
 static uint16_t
-i40e_recv_pkts_bulk_alloc(void *rx_queue,
+i40e_recv_pkts_bulk_alloc(void *eth_dev, uint16_t rx_queue_id,
                          struct rte_mbuf **rx_pkts,
                          uint16_t nb_pkts)
 {
@@ -630,11 +630,11 @@ i40e_recv_pkts_bulk_alloc(void *rx_queue,
                return 0;
 
        if (likely(nb_pkts <= RTE_PMD_I40E_RX_MAX_BURST))
-               return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
+               return rx_recv_pkts(eth_dev, rx_queue_id, rx_pkts, nb_pkts);
 
        while (nb_pkts) {
                n = RTE_MIN(nb_pkts, RTE_PMD_I40E_RX_MAX_BURST);
-               count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
+               count = rx_recv_pkts(eth_dev, rx_queue_id, &rx_pkts[nb_rx], n);
                nb_rx = (uint16_t)(nb_rx + count);
                nb_pkts = (uint16_t)(nb_pkts - count);
                if (count < n)
@@ -645,7 +645,8 @@ i40e_recv_pkts_bulk_alloc(void *rx_queue,
 }
 #else
 static uint16_t
-i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
+i40e_recv_pkts_bulk_alloc(void __rte_unused *eth_dev,
+                         uint16_t __rte_unused rx_queue_id,
                          struct rte_mbuf __rte_unused **rx_pkts,
                          uint16_t __rte_unused nb_pkts)
 {
@@ -654,15 +655,16 @@ i40e_recv_pkts_bulk_alloc(void __rte_unused *rx_queue,
 #endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
 
 uint16_t
-i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+i40e_recv_pkts(void *eth_dev, uint16_t rx_queue_id, struct rte_mbuf **rx_pkts,
+              uint16_t nb_pkts)
 {
-       struct i40e_rx_queue *rxq;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        volatile union i40e_rx_desc *rx_ring;
        volatile union i40e_rx_desc *rxdp;
        union i40e_rx_desc rxd;
        struct i40e_rx_entry *sw_ring;
        struct i40e_rx_entry *rxe;
-       struct rte_eth_dev *dev;
        struct rte_mbuf *rxm;
        struct rte_mbuf *nmb;
        uint16_t nb_rx;
@@ -676,7 +678,6 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
        nb_rx = 0;
        nb_hold = 0;
-       rxq = rx_queue;
        rx_id = rxq->rx_tail;
        rx_ring = rxq->rx_ring;
        sw_ring = rxq->sw_ring;
@@ -694,7 +695,6 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                nmb = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(!nmb)) {
-                       dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
                        dev->data->rx_mbuf_alloc_failed++;
                        break;
                }
@@ -776,11 +776,12 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 }
 
 uint16_t
-i40e_recv_scattered_pkts(void *rx_queue,
+i40e_recv_scattered_pkts(void *eth_dev, uint16_t rx_queue_id,
                         struct rte_mbuf **rx_pkts,
                         uint16_t nb_pkts)
 {
-       struct i40e_rx_queue *rxq = rx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        volatile union i40e_rx_desc *rx_ring = rxq->rx_ring;
        volatile union i40e_rx_desc *rxdp;
        union i40e_rx_desc rxd;
@@ -791,7 +792,6 @@ i40e_recv_scattered_pkts(void *rx_queue,
        struct rte_mbuf *nmb, *rxm;
        uint16_t rx_id = rxq->rx_tail;
        uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
-       struct rte_eth_dev *dev;
        uint32_t rx_status;
        uint64_t qword1;
        uint64_t dma_addr;
@@ -810,7 +810,6 @@ i40e_recv_scattered_pkts(void *rx_queue,
 
                nmb = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(!nmb)) {
-                       dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
                        dev->data->rx_mbuf_alloc_failed++;
                        break;
                }
@@ -997,9 +996,11 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
 }
 
 uint16_t
-i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+i40e_xmit_pkts(void *eth_dev, uint16_t tx_queue_id, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
        struct i40e_tx_entry *sw_ring;
        struct i40e_tx_entry *txe, *txn;
        volatile struct i40e_tx_desc *txd;
@@ -1020,7 +1021,6 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        uint64_t buf_dma_addr;
        union i40e_tx_offload tx_offload = {0};
 
-       txq = tx_queue;
        sw_ring = txq->sw_ring;
        txr = txq->tx_ring;
        tx_id = txq->tx_tail;
@@ -1372,11 +1372,14 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
 }
 
 static uint16_t
-i40e_xmit_pkts_simple(void *tx_queue,
+i40e_xmit_pkts_simple(void *eth_dev,
+                     uint16_t tx_queue_id,
                      struct rte_mbuf **tx_pkts,
                      uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
+       struct rte_eth_dev *dev = eth_dev;
+       void *tx_queue = dev->data->tx_queues[tx_queue_id];
 
        if (likely(nb_pkts <= I40E_TX_MAX_BURST))
                return tx_xmit_pkts((struct i40e_tx_queue *)tx_queue,
@@ -1398,17 +1401,18 @@ i40e_xmit_pkts_simple(void *tx_queue,
 }
 
 static uint16_t
-i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-                  uint16_t nb_pkts)
+i40e_xmit_pkts_vec(void *eth_dev, uint16_t tx_queue_id,
+                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 
        while (nb_pkts) {
                uint16_t ret, num;
 
                num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
-               ret = i40e_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+               ret = i40e_xmit_fixed_burst_vec(eth_dev, tx_queue_id, &tx_pkts[nb_tx],
                                                num);
                nb_tx += ret;
                nb_pkts -= ret;
@@ -1425,8 +1429,8 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
  *
  **********************************************************************/
 uint16_t
-i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
-               uint16_t nb_pkts)
+i40e_prep_pkts(__rte_unused void *eth_dev, __rte_unused uint16_t tx_queue_id,
+               struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
        int i, ret;
        uint64_t ol_flags;
@@ -1674,15 +1678,15 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
                RTE_PTYPE_UNKNOWN
        };
 
-       if (dev->rx_pkt_burst == i40e_recv_pkts ||
+       if (dev->fcns.rx_pkt_burst == i40e_recv_pkts ||
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
-           dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
+           dev->fcns.rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
 #endif
-           dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
-           dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
-           dev->rx_pkt_burst == i40e_recv_pkts_vec ||
-           dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
-           dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2)
+           dev->fcns.rx_pkt_burst == i40e_recv_scattered_pkts ||
+           dev->fcns.rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+           dev->fcns.rx_pkt_burst == i40e_recv_pkts_vec ||
+           dev->fcns.rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+           dev->fcns.rx_pkt_burst == i40e_recv_pkts_vec_avx2)
                return ptypes;
        return NULL;
 }
@@ -2443,8 +2447,8 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq)
         *  vPMD tx will not set sw_ring's mbuf to NULL after free,
         *  so need to free remains more carefully.
         */
-       if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 ||
-                       dev->tx_pkt_burst == i40e_xmit_pkts_vec) {
+       if (dev->fcns.tx_pkt_burst == i40e_xmit_pkts_vec_avx2 ||
+                       dev->fcns.tx_pkt_burst == i40e_xmit_pkts_vec) {
                i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
                if (txq->tx_tail < i) {
                        for (; i < txq->nb_tx_desc; i++) {
@@ -2990,10 +2994,10 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
                PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.",
                                dev->data->port_id);
                if (ad->use_latest_vec)
-                       dev->rx_pkt_burst =
+                       dev->fcns.rx_pkt_burst =
                        i40e_get_latest_rx_vec(dev->data->scattered_rx);
                else
-                       dev->rx_pkt_burst =
+                       dev->fcns.rx_pkt_burst =
                        i40e_get_recommend_rx_vec(dev->data->scattered_rx);
        } else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -3001,12 +3005,12 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
                                    "will be used on port=%d.",
                             dev->data->port_id);
 
-               dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+               dev->fcns.rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
        } else {
                /* Simple Rx Path. */
                PMD_INIT_LOG(DEBUG, "Simple Rx path will be used on port=%d.",
                             dev->data->port_id);
-               dev->rx_pkt_burst = dev->data->scattered_rx ?
+               dev->fcns.rx_pkt_burst = dev->data->scattered_rx ?
                                        i40e_recv_scattered_pkts :
                                        i40e_recv_pkts;
        }
@@ -3014,10 +3018,10 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
        /* Propagate information about RX function choice through all queues. */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rx_using_sse =
-                       (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
-                        dev->rx_pkt_burst == i40e_recv_pkts_vec ||
-                        dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
-                        dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2);
+                       (dev->fcns.rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+                        dev->fcns.rx_pkt_burst == i40e_recv_pkts_vec ||
+                        dev->fcns.rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+                        dev->fcns.rx_pkt_burst == i40e_recv_pkts_vec_avx2);
 
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
@@ -3104,20 +3108,20 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
                if (ad->tx_vec_allowed) {
                        PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
                        if (ad->use_latest_vec)
-                               dev->tx_pkt_burst =
+                               dev->fcns.tx_pkt_burst =
                                        i40e_get_latest_tx_vec();
                        else
-                               dev->tx_pkt_burst =
+                               dev->fcns.tx_pkt_burst =
                                        i40e_get_recommend_tx_vec();
                } else {
                        PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
-                       dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+                       dev->fcns.tx_pkt_burst = i40e_xmit_pkts_simple;
                }
-               dev->tx_pkt_prepare = NULL;
+               dev->fcns.tx_pkt_prepare = NULL;
        } else {
                PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
-               dev->tx_pkt_burst = i40e_xmit_pkts;
-               dev->tx_pkt_prepare = i40e_prep_pkts;
+               dev->fcns.tx_pkt_burst = i40e_xmit_pkts;
+               dev->fcns.tx_pkt_prepare = i40e_prep_pkts;
        }
 }
 
@@ -3201,7 +3205,8 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
 
 uint16_t
 i40e_recv_pkts_vec(
-       void __rte_unused *rx_queue,
+       void __rte_unused *eth_dev,
+       uint16_t __rte_unused rx_queue_id,
        struct rte_mbuf __rte_unused **rx_pkts,
        uint16_t __rte_unused nb_pkts)
 {
@@ -3210,7 +3215,8 @@ i40e_recv_pkts_vec(
 
 uint16_t
 i40e_recv_scattered_pkts_vec(
-       void __rte_unused *rx_queue,
+       void __rte_unused *eth_dev,
+       uint16_t __rte_unused rx_queue_id,
        struct rte_mbuf __rte_unused **rx_pkts,
        uint16_t __rte_unused nb_pkts)
 {
@@ -3236,7 +3242,8 @@ i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
 }
 
 uint16_t
-i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
+i40e_xmit_fixed_burst_vec(void __rte_unused * eth_dev,
+                         uint16_t __rte_unused tx_queue_id,
                          struct rte_mbuf __rte_unused **tx_pkts,
                          uint16_t __rte_unused nb_pkts)
 {
@@ -3246,7 +3253,8 @@ i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
 
 #ifndef CC_AVX2_SUPPORT
 uint16_t
-i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
+i40e_recv_pkts_vec_avx2(void __rte_unused *eth_dev,
+                       uint16_t __rte_unused rx_queue_id,
                        struct rte_mbuf __rte_unused **rx_pkts,
                        uint16_t __rte_unused nb_pkts)
 {
@@ -3254,7 +3262,8 @@ i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
 }
 
 uint16_t
-i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
+i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *eth_dev,
+                       uint16_t __rte_unused rx_queue_id,
                        struct rte_mbuf __rte_unused **rx_pkts,
                        uint16_t __rte_unused nb_pkts)
 {
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 3fc619af9..35a0196be 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -184,17 +184,20 @@ int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                            const struct rte_eth_txconf *tx_conf);
 void i40e_dev_rx_queue_release(void *rxq);
 void i40e_dev_tx_queue_release(void *txq);
-uint16_t i40e_recv_pkts(void *rx_queue,
+uint16_t i40e_recv_pkts(void *eth_dev,
+                       uint16_t rx_queue_id,
                        struct rte_mbuf **rx_pkts,
                        uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts(void *rx_queue,
+uint16_t i40e_recv_scattered_pkts(void *eth_dev,
+                                 uint16_t rx_queue_id,
                                  struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts);
-uint16_t i40e_xmit_pkts(void *tx_queue,
+uint16_t i40e_xmit_pkts(void *eth_dev,
+                       uint16_t tx_queue_id,
                        struct rte_mbuf **tx_pkts,
                        uint16_t nb_pkts);
-uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-               uint16_t nb_pkts);
+uint16_t i40e_prep_pkts(void *eth_dev, uint16_t tx_queue_id,
+               struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 int i40e_tx_queue_init(struct i40e_tx_queue *txq);
 int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
 void i40e_free_tx_resources(struct i40e_tx_queue *txq);
@@ -213,29 +216,29 @@ int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
-uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                           uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts_vec(void *rx_queue,
+uint16_t i40e_recv_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                           struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
                                      struct rte_mbuf **rx_pkts,
                                      uint16_t nb_pkts);
 int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
 int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
 int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
 void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
-uint16_t i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-                                  uint16_t nb_pkts);
+uint16_t i40e_xmit_fixed_burst_vec(void *eth_dev, uint16_t tx_queue_id,
+                                  struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 void i40e_set_rx_function(struct rte_eth_dev *dev);
 void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
                               struct i40e_tx_queue *txq);
 void i40e_set_tx_function(struct rte_eth_dev *dev);
 void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
 void i40e_set_default_pctype_table(struct rte_eth_dev *dev);
-uint16_t i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
-       uint16_t nb_pkts);
-uint16_t i40e_recv_scattered_pkts_vec_avx2(void *rx_queue,
+uint16_t i40e_recv_pkts_vec_avx2(void *eth_dev, uint16_t rx_queue_id,
        struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
-uint16_t i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
-       uint16_t nb_pkts);
+uint16_t i40e_recv_scattered_pkts_vec_avx2(void *eth_dev, uint16_t rx_queue_id,
+       struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t i40e_xmit_pkts_vec_avx2(void *eth_dev, uint16_t tx_queue_id,
+       struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
 
 /* For each value it means, datasheet of hardware can tell more details
  *
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
index 310ce1ee2..787314475 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -453,10 +453,12 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
   *   numbers of DD bits
   */
 uint16_t
-i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                  uint16_t nb_pkts)
+i40e_recv_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+       return _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, NULL);
 }
 
  /* vPMD receive routine that reassembles scattered packets
@@ -466,10 +468,12 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
   *   numbers of DD bits
   */
 uint16_t
-i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                            uint16_t nb_pkts)
+i40e_recv_scattered_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                            struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       struct i40e_rx_queue *rxq = rx_queue;
+
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
 
        /* get some new buffers */
@@ -524,10 +528,11 @@ vtx(volatile struct i40e_tx_desc *txdp,
 }
 
 uint16_t
-i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-                         uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec(void *eth_dev, uint16_t tx_queue_id,
+                         struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
        volatile struct i40e_tx_desc *txdp;
        struct i40e_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index c4dc990e0..599c21a0c 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -549,10 +549,12 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
  */
 uint16_t
-i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
-                  uint16_t nb_pkts)
+i40e_recv_pkts_vec_avx2(void *eth_dev, uint16_t rx_queue_id,
+                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL);
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+       return _recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts, NULL);
 }
 
 /*
@@ -561,10 +563,11 @@ i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
  */
 static uint16_t
-i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
-                            uint16_t nb_pkts)
+i40e_recv_scattered_burst_vec_avx2(void *eth_dev, uint16_t rx_queue_id,
+                            struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       struct i40e_rx_queue *rxq = rx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
 
        /* get some new buffers */
@@ -602,20 +605,21 @@ i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
  */
 uint16_t
-i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
-                            uint16_t nb_pkts)
+i40e_recv_scattered_pkts_vec_avx2(void *eth_dev, uint16_t rx_queue_id,
+                            struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
        uint16_t retval = 0;
        while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
-               uint16_t burst = i40e_recv_scattered_burst_vec_avx2(rx_queue,
-                               rx_pkts + retval, RTE_I40E_VPMD_RX_BURST);
+               uint16_t burst = i40e_recv_scattered_burst_vec_avx2(eth_dev,
+                               rx_queue_id, rx_pkts + retval,
+                               RTE_I40E_VPMD_RX_BURST);
                retval += burst;
                nb_pkts -= burst;
                if (burst < RTE_I40E_VPMD_RX_BURST)
                        return retval;
        }
-       return retval + i40e_recv_scattered_burst_vec_avx2(rx_queue,
-                               rx_pkts + retval, nb_pkts);
+       return retval + i40e_recv_scattered_burst_vec_avx2(eth_dev,
+                               rx_queue_id, rx_pkts + retval, nb_pkts);
 }
 
 
@@ -674,10 +678,12 @@ vtx(volatile struct i40e_tx_desc *txdp,
 }
 
 static inline uint16_t
-i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
-                         uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec_avx2(void *eth_dev, uint16_t tx_queue_id,
+                         struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
        volatile struct i40e_tx_desc *txdp;
        struct i40e_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
@@ -741,18 +747,19 @@ i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 uint16_t
-i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
+i40e_xmit_pkts_vec_avx2(void *eth_dev, uint16_t tx_queue_id, struct rte_mbuf **tx_pkts,
                   uint16_t nb_pkts)
 {
        uint16_t nb_tx = 0;
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
 
        while (nb_pkts) {
                uint16_t ret, num;
 
                num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
-               ret = i40e_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx],
-                                               num);
+               ret = i40e_xmit_fixed_burst_vec_avx2(eth_dev, tx_queue_id,
+                                               &tx_pkts[nb_tx], num);
                nb_tx += ret;
                nb_pkts -= ret;
                if (ret < num)
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 83572ef84..eb91db503 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -435,12 +435,15 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  *   numbers of DD bits
  */
 uint16_t
-i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                  uint16_t nb_pkts)
+i40e_recv_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+       return _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, NULL);
 }
 
+
  /* vPMD receive routine that reassembles scattered packets
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
@@ -448,11 +451,12 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
  *   numbers of DD bits
  */
 uint16_t
-i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                            uint16_t nb_pkts)
+i40e_recv_scattered_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                            struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 
-       struct i40e_rx_queue *rxq = rx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
 
        /* get some new buffers */
@@ -506,10 +510,11 @@ vtx(volatile struct i40e_tx_desc *txdp,
 }
 
 uint16_t
-i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-                         uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec(void *eth_dev, uint16_t tx_queue_id,
+                         struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
        volatile struct i40e_tx_desc *txdp;
        struct i40e_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index 3b22588c5..3f594aae3 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -463,10 +463,12 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
  *   numbers of DD bits
  */
 uint16_t
-i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                  uint16_t nb_pkts)
+i40e_recv_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                  struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-       return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
+       return _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, NULL);
 }
 
  /* vPMD receive routine that reassembles scattered packets
@@ -476,11 +478,11 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
  *   numbers of DD bits
  */
 uint16_t
-i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
-                            uint16_t nb_pkts)
+i40e_recv_scattered_pkts_vec(void *eth_dev, uint16_t rx_queue_id,
+                            struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-
-       struct i40e_rx_queue *rxq = rx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
        uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
 
        /* get some new buffers */
@@ -535,10 +537,11 @@ vtx(volatile struct i40e_tx_desc *txdp,
 }
 
 uint16_t
-i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
-                         uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec(void *eth_dev, uint16_t tx_queue_id,
+                         struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
-       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       struct rte_eth_dev *dev = eth_dev;
+       struct i40e_tx_queue *txq = dev->data->tx_queues[tx_queue_id];
        volatile struct i40e_tx_desc *txdp;
        struct i40e_tx_entry *txep;
        uint16_t n, nb_commit, tx_id;
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
index 633dca6c3..ae7c3af05 100644
--- a/drivers/net/i40e/i40e_vf_representor.c
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -3,7 +3,7 @@
  */
 
 #include <rte_bus_pci.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_pci.h>
 #include <rte_malloc.h>
 
@@ -452,14 +452,16 @@ static const struct eth_dev_ops i40e_representor_dev_ops = {
 };
 
 static uint16_t
-i40e_vf_representor_rx_burst(__rte_unused void *rx_queue,
+i40e_vf_representor_rx_burst(__rte_unused void *eth_dev,
+       __rte_unused uint16_t rx_queue_id,
        __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
 {
        return 0;
 }
 
 static uint16_t
-i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
+i40e_vf_representor_tx_burst(__rte_unused void *eth_dev,
+       __rte_unused uint16_t tx_queue_id,
        __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
 {
        return 0;
@@ -493,8 +495,8 @@ i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
        /* No data-path, but need stub Rx/Tx functions to avoid crash
         * when testing with the likes of testpmd.
         */
-       ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
-       ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
+       ethdev->fcns.rx_pkt_burst = i40e_vf_representor_rx_burst;
+       ethdev->fcns.tx_pkt_burst = i40e_vf_representor_tx_burst;
 
        vf = &pf->vfs[representor->vf_id];
 
-- 
2.17.1
