Signed-off-by: Keith Wiles <keith.wiles@intel.com>
---
 lib/librte_pmd_af_packet/rte_eth_af_packet.c |  38 +--
 lib/librte_pmd_bond/rte_eth_bond_8023ad.c    |  18 +-
 lib/librte_pmd_bond/rte_eth_bond_alb.c       |  10 +-
 lib/librte_pmd_bond/rte_eth_bond_api.c       | 142 +++++-----
 lib/librte_pmd_bond/rte_eth_bond_args.c      |   2 +-
 lib/librte_pmd_bond/rte_eth_bond_pmd.c       | 164 +++++------
 lib/librte_pmd_bond/rte_eth_bond_private.h   |   2 +-
 lib/librte_pmd_e1000/em_ethdev.c             | 156 +++++------
 lib/librte_pmd_e1000/em_rxtx.c               |  94 +++----
 lib/librte_pmd_e1000/igb_ethdev.c            | 302 ++++++++++-----------
 lib/librte_pmd_e1000/igb_pf.c                |  86 +++---
 lib/librte_pmd_e1000/igb_rxtx.c              | 168 ++++++------
 lib/librte_pmd_enic/enic.h                   |   4 +-
 lib/librte_pmd_enic/enic_ethdev.c            | 140 +++++-----
 lib/librte_pmd_enic/enic_main.c              |  30 +-
 lib/librte_pmd_fm10k/fm10k_ethdev.c          | 154 +++++------
 lib/librte_pmd_fm10k/fm10k_rxtx.c            |   4 +-
 lib/librte_pmd_i40e/i40e_ethdev.c            | 172 ++++++------
 lib/librte_pmd_i40e/i40e_ethdev_vf.c         | 194 ++++++-------
 lib/librte_pmd_i40e/i40e_fdir.c              |  28 +-
 lib/librte_pmd_i40e/i40e_pf.c                |   8 +-
 lib/librte_pmd_i40e/i40e_rxtx.c              |  88 +++---
 lib/librte_pmd_ixgbe/ixgbe_ethdev.c          | 392 +++++++++++++--------------
 lib/librte_pmd_ixgbe/ixgbe_fdir.c            |  50 ++--
 lib/librte_pmd_ixgbe/ixgbe_pf.c              | 114 ++++----
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c            | 276 +++++++++----------
 lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c        |   6 +-
 lib/librte_pmd_mlx4/mlx4.c                   |   2 +-
 lib/librte_pmd_null/rte_eth_null.c           |  36 +--
 lib/librte_pmd_pcap/rte_eth_pcap.c           |   2 +-
 lib/librte_pmd_ring/rte_eth_ring.c           |  36 +--
 lib/librte_pmd_virtio/virtio_ethdev.c        | 120 ++++----
 lib/librte_pmd_virtio/virtio_rxtx.c          |  20 +-
 lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c      |  64 ++---
 lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c        |  40 +--
 lib/librte_pmd_xenvirt/rte_eth_xenvirt.c     |   2 +-
 36 files changed, 1580 insertions(+), 1584 deletions(-)
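
Note for reviewers: the ETH_DATA()/_DD()/_DD_PRIVATE()/ETH_OPS() accessor
macros used throughout the hunks below come from the rte_ethdev changes
elsewhere in this series and are not shown here. As a rough sketch only,
reconstructed from how the conversions below use them (the 'dd' and 'di'
member names are assumptions, not the authoritative definitions):

	/* Hypothetical reconstruction, inferred from usage in this patch. */
	#define ETH_DATA(dev)     ((dev)->data)               /* ethdev data block */
	#define _DD(dev, field)   (ETH_DATA(dev)->dd.field)   /* shared dev_data field */
	#define _DD_PRIVATE(dev)  _DD(dev, dev_private)       /* driver private data */
	#define ETH_OPS(dev, op)  (*(dev)->dev_ops->op)       /* dev_ops callback */

So e.g. dev->data->dev_private becomes _DD_PRIVATE(dev), and the
driver_name/if_index/pci_dev fields of rte_eth_dev_info move under a
'di' sub-struct.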

diff --git a/lib/librte_pmd_af_packet/rte_eth_af_packet.c b/lib/librte_pmd_af_packet/rte_eth_af_packet.c
index 2ac50ba..4735fb3 100644
--- a/lib/librte_pmd_af_packet/rte_eth_af_packet.c
+++ b/lib/librte_pmd_af_packet/rte_eth_af_packet.c
@@ -231,7 +231,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-       dev->data->dev_link.link_status = 1;
+       ETH_DATA(dev)->dev_link.link_status = 1;
        return 0;
 }

@@ -243,7 +243,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 {
        unsigned i;
        int sockfd;
-       struct pmd_internals *internals = dev->data->dev_private;
+       struct pmd_internals *internals = _DD_PRIVATE(dev);

        for (i = 0; i < internals->nb_queues; i++) {
                sockfd = internals->rx_queue[i].sockfd;
@@ -254,7 +254,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
                        close(sockfd);
        }

-       dev->data->dev_link.link_status = 0;
+       ETH_DATA(dev)->dev_link.link_status = 0;
 }

 static int
@@ -266,16 +266,16 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 static void
 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       struct pmd_internals *internals = dev->data->dev_private;
+       struct pmd_internals *internals = _DD_PRIVATE(dev);

-       dev_info->driver_name = drivername;
-       dev_info->if_index = internals->if_index;
+       dev_info->di.driver_name = drivername;
+       dev_info->di.if_index = internals->if_index;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
        dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
        dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
        dev_info->min_rx_bufsize = 0;
-       dev_info->pci_dev = NULL;
+       dev_info->di.pci_dev = NULL;
 }

 static void
@@ -283,7 +283,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
 {
        unsigned i, imax;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
-       const struct pmd_internals *internal = dev->data->dev_private;
+       const struct pmd_internals *internal = _DD_PRIVATE(dev);

        imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
                internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
@@ -310,7 +310,7 @@ static void
 eth_stats_reset(struct rte_eth_dev *dev)
 {
        unsigned i;
-       struct pmd_internals *internal = dev->data->dev_private;
+       struct pmd_internals *internal = _DD_PRIVATE(dev);

        for (i = 0; i < internal->nb_queues; i++)
                internal->rx_queue[i].rx_pkts = 0;
@@ -346,7 +346,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
                    const struct rte_eth_rxconf *rx_conf __rte_unused,
                    struct rte_mempool *mb_pool)
 {
-       struct pmd_internals *internals = dev->data->dev_private;
+       struct pmd_internals *internals = _DD_PRIVATE(dev);
        struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
        struct rte_pktmbuf_pool_private *mbp_priv;
        uint16_t buf_size;
@@ -361,11 +361,11 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
        if (ETH_FRAME_LEN > buf_size) {
                RTE_LOG(ERR, PMD,
                        "%s: %d bytes will not fit in mbuf (%d bytes)\n",
-                       dev->data->name, ETH_FRAME_LEN, buf_size);
+                       _DD(dev, name), ETH_FRAME_LEN, buf_size);
                return -ENOMEM;
        }

-       dev->data->rx_queues[rx_queue_id] = pkt_q;
+       _DD(dev, rx_queues[rx_queue_id]) = pkt_q;

        return 0;
 }
@@ -378,9 +378,9 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
                    const struct rte_eth_txconf *tx_conf __rte_unused)
 {

-       struct pmd_internals *internals = dev->data->dev_private;
+       struct pmd_internals *internals = _DD_PRIVATE(dev);

-       dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
+       _DD(dev, tx_queues[tx_queue_id]) = &internals->tx_queue[tx_queue_id];
        return 0;
 }

@@ -650,7 +650,7 @@ rte_pmd_init_internals(const char *name,
        }

        /* reserve an ethdev entry */
-       *eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+       *eth_dev = rte_eth_dev_allocate(name, RTE_DEV_VIRTUAL);
        if (*eth_dev == NULL)
                goto error;

@@ -664,10 +664,10 @@ rte_pmd_init_internals(const char *name,

        (*internals)->nb_queues = nb_queues;

-       data->dev_private = *internals;
-       data->port_id = (*eth_dev)->data->port_id;
-       data->nb_rx_queues = (uint16_t)nb_queues;
-       data->nb_tx_queues = (uint16_t)nb_queues;
+       data->dd.dev_private = *internals;
+       data->dd.port_id = _DD((*eth_dev), port_id);
+       data->dd.nb_rx_queues = (uint16_t)nb_queues;
+       data->dd.nb_tx_queues = (uint16_t)nb_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &(*internals)->eth_addr;

diff --git a/lib/librte_pmd_bond/rte_eth_bond_8023ad.c b/lib/librte_pmd_bond/rte_eth_bond_8023ad.c
index 97a828e..3b6424e 100644
--- a/lib/librte_pmd_bond/rte_eth_bond_8023ad.c
+++ b/lib/librte_pmd_bond/rte_eth_bond_8023ad.c
@@ -741,7 +741,7 @@ static void
 bond_mode_8023ad_periodic_cb(void *arg)
 {
        struct rte_eth_dev *bond_dev = arg;
-       struct bond_dev_private *internals = bond_dev->data->dev_private;
+       struct bond_dev_private *internals = _DD(bond_dev, dev_private);
        struct port *port;
        struct rte_eth_link link_info;
        struct ether_addr slave_addr;
@@ -837,7 +837,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
 void
 bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
 {
-       struct bond_dev_private *internals = bond_dev->data->dev_private;
+       struct bond_dev_private *internals = _DD(bond_dev, dev_private);

        struct port *port = &mode_8023ad_ports[slave_id];
        struct port_params initial = {
@@ -927,7 +927,7 @@ int
 bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
                uint8_t slave_id)
 {
-       struct bond_dev_private *internals = bond_dev->data->dev_private;
+       struct bond_dev_private *internals = _DD(bond_dev, dev_private);
        void *pkt = NULL;
        struct port *port;
        uint8_t i;
@@ -966,7 +966,7 @@ bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev,
 void
 bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
 {
-       struct bond_dev_private *internals = bond_dev->data->dev_private;
+       struct bond_dev_private *internals = _DD(bond_dev, dev_private);
        struct ether_addr slave_addr;
        struct port *slave, *agg_slave;
        uint8_t slave_id, i, j;
@@ -994,7 +994,7 @@ bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev)
                }
        }

-       if (bond_dev->data->dev_started)
+       if (ETH_DATA(bond_dev)->dev_started)
                bond_mode_8023ad_start(bond_dev);
 }

@@ -1002,7 +1002,7 @@ void
 bond_mode_8023ad_conf_get(struct rte_eth_dev *dev,
                struct rte_eth_bond_8023ad_conf *conf)
 {
-       struct bond_dev_private *internals = dev->data->dev_private;
+       struct bond_dev_private *internals = _DD_PRIVATE(dev);
        struct mode8023ad_private *mode4 = &internals->mode4;
        uint64_t ms_ticks = rte_get_tsc_hz() / 1000;

@@ -1020,7 +1020,7 @@ bond_mode_8023ad_setup(struct rte_eth_dev *dev,
                struct rte_eth_bond_8023ad_conf *conf)
 {
        struct rte_eth_bond_8023ad_conf def_conf;
-       struct bond_dev_private *internals = dev->data->dev_private;
+       struct bond_dev_private *internals = _DD_PRIVATE(dev);
        struct mode8023ad_private *mode4 = &internals->mode4;
        uint64_t ms_ticks = rte_get_tsc_hz() / 1000;

@@ -1049,7 +1049,7 @@ bond_mode_8023ad_setup(struct rte_eth_dev *dev,
 int
 bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev)
 {
-       struct bond_dev_private *internals = bond_dev->data->dev_private;
+       struct bond_dev_private *internals = _DD(bond_dev, dev_private);
        uint8_t i;

        for (i = 0; i < internals->active_slave_count; i++)
@@ -1196,7 +1196,7 @@ rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id,

        bond_dev = &rte_eth_devices[port_id];

-       internals = bond_dev->data->dev_private;
+       internals = _DD(bond_dev, dev_private);
        if (find_slave_by_id(internals->active_slaves,
                        internals->active_slave_count, slave_id) ==
                                internals->active_slave_count)
diff --git a/lib/librte_pmd_bond/rte_eth_bond_alb.c b/lib/librte_pmd_bond/rte_eth_bond_alb.c
index 5778b25..476231f 100644
--- a/lib/librte_pmd_bond/rte_eth_bond_alb.c
+++ b/lib/librte_pmd_bond/rte_eth_bond_alb.c
@@ -60,7 +60,7 @@ calculate_slave(struct bond_dev_private *internals)
 int
 bond_mode_alb_enable(struct rte_eth_dev *bond_dev)
 {
-       struct bond_dev_private *internals = bond_dev->data->dev_private;
+       struct bond_dev_private *internals = _DD(bond_dev, dev_private);
        struct client_data *hash_table = internals->mode6.client_table;

        uint16_t element_size;
@@ -80,7 +80,7 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev)
                 * The value is chosen to be cache aligned.
                 */
                element_size = 256 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
-               snprintf(mem_name, sizeof(mem_name), "%s_MODE6", bond_dev->data->name);
+               snprintf(mem_name, sizeof(mem_name), "%s_MODE6", _DD(bond_dev, name));
                internals->mode6.mempool = rte_mempool_create(mem_name,
                                512 * RTE_MAX_ETHPORTS,
                                element_size,
@@ -91,11 +91,11 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev)

                if (internals->mode6.mempool == NULL) {
                        RTE_LOG(ERR, PMD, "%s: Failed to initialize ALB 
mempool.\n",
-                                       bond_dev->data->name);
+                                       _DD(bond_dev, name));
                        rte_panic(
                                        "Failed to allocate memory pool 
('%s')\n"
                                        "for bond device '%s'\n",
-                                       mem_name, bond_dev->data->name);
+                                       mem_name, _DD(bond_dev, name));
                }
        }

@@ -265,7 +265,7 @@ bond_mode_alb_arp_upd(struct client_data *client_info,
 void
 bond_mode_alb_client_list_upd(struct rte_eth_dev *bond_dev)
 {
-       struct bond_dev_private *internals = bond_dev->data->dev_private;
+       struct bond_dev_private *internals = _DD(bond_dev, dev_private);
        struct client_data *client_info;

        int i;
diff --git a/lib/librte_pmd_bond/rte_eth_bond_api.c b/lib/librte_pmd_bond/rte_eth_bond_api.c
index 13f3941..07eebd7 100644
--- a/lib/librte_pmd_bond/rte_eth_bond_api.c
+++ b/lib/librte_pmd_bond/rte_eth_bond_api.c
@@ -45,21 +45,21 @@
 #define DEFAULT_POLLING_INTERVAL_10_MS (10)

 int
-valid_bonded_ethdev(struct rte_eth_dev *eth_dev)
+valid_bonded_ethdev(struct rte_eth_dev *dev)
 {
        size_t len;

        /* Check valid pointer */
-       if (eth_dev->driver->pci_drv.name == NULL || driver_name == NULL)
+       if (dev->driver->pci_drv.name == NULL || driver_name == NULL)
                return -1;

        /* Check string lengths are equal */
        len = strlen(driver_name);
-       if (strlen(eth_dev->driver->pci_drv.name) != len)
+       if (strlen(dev->driver->pci_drv.name) != len)
                return -1;

        /* Compare strings */
-       return strncmp(eth_dev->driver->pci_drv.name, driver_name, len);
+       return strncmp(dev->driver->pci_drv.name, driver_name, len);
 }

 int
@@ -85,7 +85,7 @@ valid_bonded_port_id(uint8_t port_id)

        /* Verify that bonded_port_id refers to a bonded port */
        if (valid_bonded_ethdev(&rte_eth_devices[port_id])) {
-               RTE_BOND_LOG(ERR, "Specified port Id %d is not a bonded eth_dev device",
+               RTE_BOND_LOG(ERR, "Specified port Id %d is not a bonded dev device",
                                port_id);
                return -1;
        }
@@ -108,13 +108,13 @@ valid_slave_port_id(uint8_t port_id)
 }

 void
-activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
+activate_slave(struct rte_eth_dev *dev, uint8_t port_id)
 {
-       struct bond_dev_private *internals = eth_dev->data->dev_private;
+       struct bond_dev_private *internals = ETH_DATA(dev)->dd.dev_private;
        uint8_t active_count = internals->active_slave_count;

        if (internals->mode == BONDING_MODE_8023AD)
-               bond_mode_8023ad_activate_slave(eth_dev, port_id);
+               bond_mode_8023ad_activate_slave(dev, port_id);

        if (internals->mode == BONDING_MODE_TLB
                        || internals->mode == BONDING_MODE_ALB) {
@@ -131,19 +131,19 @@ activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
        if (internals->mode == BONDING_MODE_TLB)
                bond_tlb_activate_slave(internals);
        if (internals->mode == BONDING_MODE_ALB)
-               bond_mode_alb_client_list_upd(eth_dev);
+               bond_mode_alb_client_list_upd(dev);
 }

 void
-deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
+deactivate_slave(struct rte_eth_dev *dev, uint8_t port_id)
 {
        uint8_t slave_pos;
-       struct bond_dev_private *internals = eth_dev->data->dev_private;
+       struct bond_dev_private *internals = ETH_DATA(dev)->dd.dev_private;
        uint8_t active_count = internals->active_slave_count;

        if (internals->mode == BONDING_MODE_8023AD) {
-               bond_mode_8023ad_stop(eth_dev);
-               bond_mode_8023ad_deactivate_slave(eth_dev, port_id);
+               bond_mode_8023ad_stop(dev);
+               bond_mode_8023ad_deactivate_slave(dev, port_id);
        } else if (internals->mode == BONDING_MODE_TLB
                        || internals->mode == BONDING_MODE_ALB)
                bond_tlb_disable(internals);
@@ -164,14 +164,14 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id)
        RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves));
        internals->active_slave_count = active_count;

-       if (eth_dev->data->dev_started) {
+       if (ETH_DATA(dev)->dev_started) {
                if (internals->mode == BONDING_MODE_8023AD) {
-                       bond_mode_8023ad_start(eth_dev);
+                       bond_mode_8023ad_start(dev);
                } else if (internals->mode == BONDING_MODE_TLB) {
                        bond_tlb_enable(internals);
                } else if (internals->mode == BONDING_MODE_ALB) {
                        bond_tlb_enable(internals);
-                       bond_mode_alb_client_list_upd(eth_dev);
+                       bond_mode_alb_client_list_upd(dev);
                }
        }
 }
@@ -199,11 +199,11 @@ rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
 {
        struct rte_pci_device *pci_dev = NULL;
        struct bond_dev_private *internals = NULL;
-       struct rte_eth_dev *eth_dev = NULL;
+       struct rte_eth_dev *dev = NULL;
        struct eth_driver *eth_drv = NULL;
        struct rte_pci_driver *pci_drv = NULL;
        struct rte_pci_id *pci_id_table = NULL;
-       /* now do all data allocation - for eth_dev structure, dummy pci driver
+       /* now do all data allocation - for dev structure, dummy pci driver
         * and internal (private) data
         */

@@ -254,8 +254,8 @@ rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
        }

        /* reserve an ethdev entry */
-       eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
-       if (eth_dev == NULL) {
+       dev = rte_eth_dev_allocate(name, RTE_DEV_VIRTUAL);
+       if (dev == NULL) {
                RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
                goto err;
        }
@@ -263,29 +263,29 @@ rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
        pci_dev->numa_node = socket_id;
        pci_drv->name = driver_name;

-       eth_dev->driver = eth_drv;
-       eth_dev->data->dev_private = internals;
-       eth_dev->data->nb_rx_queues = (uint16_t)1;
-       eth_dev->data->nb_tx_queues = (uint16_t)1;
+       dev->driver = eth_drv;
+       ETH_DATA(dev)->dd.dev_private = internals;
+       ETH_DATA(dev)->dd.nb_rx_queues = (uint16_t)1;
+       ETH_DATA(dev)->dd.nb_tx_queues = (uint16_t)1;

-       TAILQ_INIT(&(eth_dev->link_intr_cbs));
+       TAILQ_INIT(&(dev->link_intr_cbs));

-       eth_dev->data->dev_link.link_status = 0;
+       ETH_DATA(dev)->dev_link.link_status = 0;

-       eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
+       ETH_DATA(dev)->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
                        socket_id);

-       eth_dev->data->dev_started = 0;
-       eth_dev->data->promiscuous = 0;
-       eth_dev->data->scattered_rx = 0;
-       eth_dev->data->all_multicast = 0;
+       ETH_DATA(dev)->dev_started = 0;
+       ETH_DATA(dev)->promiscuous = 0;
+       ETH_DATA(dev)->scattered_rx = 0;
+       ETH_DATA(dev)->all_multicast = 0;

-       eth_dev->dev_ops = &default_dev_ops;
-       eth_dev->pci_dev = pci_dev;
+       dev->dev_ops = &default_dev_ops;
+       dev->pci_dev = pci_dev;

        rte_spinlock_init(&internals->lock);

-       internals->port_id = eth_dev->data->port_id;
+       internals->port_id = ETH_DATA(dev)->dd.port_id;
        internals->mode = BONDING_MODE_INVALID;
        internals->current_primary_port = 0;
        internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
@@ -308,14 +308,14 @@ rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
        memset(internals->slaves, 0, sizeof(internals->slaves));

        /* Set mode 4 default configuration */
-       bond_mode_8023ad_setup(eth_dev, NULL);
-       if (bond_ethdev_mode_set(eth_dev, mode)) {
+       bond_mode_8023ad_setup(dev, NULL);
+       if (bond_ethdev_mode_set(dev, mode)) {
                RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
-                                eth_dev->data->port_id, mode);
+                                ETH_DATA(dev)->dd.port_id, mode);
                goto err;
        }

-       return eth_dev->data->port_id;
+       return ETH_DATA(dev)->dd.port_id;

 err:
        if (pci_dev)
@@ -344,13 +344,13 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
                return -1;

        bonded_eth_dev = &rte_eth_devices[bonded_port_id];
-       internals = bonded_eth_dev->data->dev_private;
+       internals = ETH_DATA(bonded_eth_dev)->dd.dev_private;

        /* Verify that new slave device is not already a slave of another
         * bonded device */
        for (i = rte_eth_dev_count()-1; i >= 0; i--) {
                if (valid_bonded_ethdev(&rte_eth_devices[i]) == 0) {
-                       temp_internals = rte_eth_devices[i].data->dev_private;
+                       temp_internals = ETH_DATA(&rte_eth_devices[i])->dd.dev_private;

                        for (j = 0; j < temp_internals->slave_count; j++) {
                                /* Device already a slave of a bonded device */
@@ -375,11 +375,11 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
                /* if MAC is not user defined then use MAC of first slave add to
                 * bonded device */
                if (!internals->user_defined_mac)
-                       mac_address_set(bonded_eth_dev, slave_eth_dev->data->mac_addrs);
+                       mac_address_set(bonded_eth_dev, ETH_DATA(slave_eth_dev)->mac_addrs);

                /* Inherit eth dev link properties from first slave */
                link_properties_set(bonded_eth_dev,
-                               &(slave_eth_dev->data->dev_link));
+                               &(ETH_DATA(slave_eth_dev)->dev_link));

                /* Make primary slave */
                internals->primary_port = slave_port_id;
@@ -392,8 +392,8 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
                /* Check slave link properties are supported if props are set,
                 * all slaves must be the same */
                if (internals->link_props_set) {
-                       if (link_properties_valid(&(bonded_eth_dev->data->dev_link),
-                                                                         &(slave_eth_dev->data->dev_link))) {
+                       if (link_properties_valid(&(ETH_DATA(bonded_eth_dev)->dev_link),
+                                                                         &(ETH_DATA(slave_eth_dev)->dev_link))) {
                                RTE_BOND_LOG(ERR,
                                                "Slave port %d link 
speed/duplex not supported",
                                                slave_port_id);
@@ -401,7 +401,7 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
                        }
                } else {
                        link_properties_set(bonded_eth_dev,
-                                       &(slave_eth_dev->data->dev_link));
+                                       &(ETH_DATA(slave_eth_dev)->dev_link));
                }
                internals->rx_offload_capa &= dev_info.rx_offload_capa;
                internals->tx_offload_capa &= dev_info.tx_offload_capa;
@@ -412,7 +412,7 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
        /* Update all slave devices MACs*/
        mac_address_slaves_update(bonded_eth_dev);

-       if (bonded_eth_dev->data->dev_started) {
+       if (ETH_DATA(bonded_eth_dev)->dev_started) {
                if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
                        RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
                                        slave_port_id);
@@ -422,12 +422,12 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)

        /* Register link status change callback with bonded device pointer as
         * argument*/
-       rte_eth_dev_callback_register(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
-                       bond_ethdev_lsc_event_callback, &bonded_eth_dev->data->port_id);
+       rte_eth_dev_callback_register(slave_port_id, RTE_DEV_EVENT_INTR_LSC,
+                       bond_ethdev_lsc_event_callback, &ETH_DATA(bonded_eth_dev)->dd.port_id);

        /* If bonded device is started then we can add the slave to our active
         * slave array */
-       if (bonded_eth_dev->data->dev_started) {
+       if (ETH_DATA(bonded_eth_dev)->dev_started) {
                rte_eth_link_get_nowait(slave_port_id, &link_props);

                 if (link_props.link_status == 1)
@@ -450,7 +450,7 @@ rte_eth_bond_slave_add(uint8_t bonded_port_id, uint8_t slave_port_id)
                return -1;

        bonded_eth_dev = &rte_eth_devices[bonded_port_id];
-       internals = bonded_eth_dev->data->dev_private;
+       internals = ETH_DATA(bonded_eth_dev)->dd.dev_private;

        rte_spinlock_lock(&internals->lock);

@@ -473,7 +473,7 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
                return -1;

        bonded_eth_dev = &rte_eth_devices[bonded_port_id];
-       internals = bonded_eth_dev->data->dev_private;
+       internals = ETH_DATA(bonded_eth_dev)->dd.dev_private;

        /* first remove from active slave list */
        slave_idx = find_slave_by_id(internals->active_slaves,
@@ -498,9 +498,9 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)

        /* Un-register link status change callback with bonded device pointer as
         * argument*/
-       rte_eth_dev_callback_unregister(slave_port_id, RTE_ETH_EVENT_INTR_LSC,
+       rte_eth_dev_callback_unregister(slave_port_id, RTE_DEV_EVENT_INTR_LSC,
                        bond_ethdev_lsc_event_callback,
-                       &rte_eth_devices[bonded_port_id].data->port_id);
+                       &(ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.port_id));

        /* Restore original MAC address of slave device */
        mac_address_set(&rte_eth_devices[slave_port_id],
@@ -527,8 +527,8 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
                 * user defined then clear MAC of bonded device as it will be reset
                 * when a new slave is added */
                if (internals->slave_count < 1 && !internals->user_defined_mac)
-                       memset(rte_eth_devices[bonded_port_id].data->mac_addrs, 0,
-                                       sizeof(*(rte_eth_devices[bonded_port_id].data->mac_addrs)));
+                       memset(ETH_DATA(&rte_eth_devices[bonded_port_id])->mac_addrs, 0,
+                                       sizeof(*(ETH_DATA(&rte_eth_devices[bonded_port_id])->mac_addrs)));
        }
        if (internals->slave_count == 0) {
                internals->rx_offload_capa = 0;
@@ -548,7 +548,7 @@ rte_eth_bond_slave_remove(uint8_t bonded_port_id, uint8_t slave_port_id)
                return -1;

        bonded_eth_dev = &rte_eth_devices[bonded_port_id];
-       internals = bonded_eth_dev->data->dev_private;
+       internals = ETH_DATA(bonded_eth_dev)->dd.dev_private;

        rte_spinlock_lock(&internals->lock);

@@ -576,7 +576,7 @@ rte_eth_bond_mode_get(uint8_t bonded_port_id)
        if (valid_bonded_port_id(bonded_port_id) != 0)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;

        return internals->mode;
 }
@@ -592,7 +592,7 @@ rte_eth_bond_primary_set(uint8_t bonded_port_id, uint8_t slave_port_id)
        if (valid_slave_port_id(slave_port_id) != 0)
                return -1;

-       internals =  rte_eth_devices[bonded_port_id].data->dev_private;
+       internals =  ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;

        internals->user_defined_primary_port = 1;
        internals->primary_port = slave_port_id;
@@ -610,7 +610,7 @@ rte_eth_bond_primary_get(uint8_t bonded_port_id)
        if (valid_bonded_port_id(bonded_port_id) != 0)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;

        if (internals->slave_count < 1)
                return -1;
@@ -630,7 +630,7 @@ rte_eth_bond_slaves_get(uint8_t bonded_port_id, uint8_t slaves[], uint8_t len)
        if (slaves == NULL)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;

        if (internals->slave_count > len)
                return -1;
@@ -653,7 +653,7 @@ rte_eth_bond_active_slaves_get(uint8_t bonded_port_id, uint8_t slaves[],
        if (slaves == NULL)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;

        if (internals->active_slave_count > len)
                return -1;
@@ -674,7 +674,7 @@ rte_eth_bond_mac_address_set(uint8_t bonded_port_id,
                return -1;

        bonded_eth_dev = &rte_eth_devices[bonded_port_id];
-       internals = bonded_eth_dev->data->dev_private;
+       internals = ETH_DATA(bonded_eth_dev)->dd.dev_private;

        /* Set MAC Address of Bonded Device */
        if (mac_address_set(bonded_eth_dev, mac_addr))
@@ -699,7 +699,7 @@ rte_eth_bond_mac_address_reset(uint8_t bonded_port_id)
                return -1;

        bonded_eth_dev = &rte_eth_devices[bonded_port_id];
-       internals = bonded_eth_dev->data->dev_private;
+       internals = ETH_DATA(bonded_eth_dev)->dd.dev_private;

        internals->user_defined_mac = 0;

@@ -726,7 +726,7 @@ rte_eth_bond_xmit_policy_set(uint8_t bonded_port_id, uint8_t policy)
        if (valid_bonded_port_id(bonded_port_id) != 0)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;

        switch (policy) {
        case BALANCE_XMIT_POLICY_LAYER2:
@@ -756,7 +756,7 @@ rte_eth_bond_xmit_policy_get(uint8_t bonded_port_id)
        if (valid_bonded_port_id(bonded_port_id) != 0)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;

        return internals->balance_xmit_policy;
 }
@@ -769,7 +769,7 @@ rte_eth_bond_link_monitoring_set(uint8_t bonded_port_id, uint32_t internal_ms)
        if (valid_bonded_port_id(bonded_port_id) != 0)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;
        internals->link_status_polling_interval_ms = internal_ms;

        return 0;
@@ -783,7 +783,7 @@ rte_eth_bond_link_monitoring_get(uint8_t bonded_port_id)
        if (valid_bonded_port_id(bonded_port_id) != 0)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;

        return internals->link_status_polling_interval_ms;
 }
@@ -797,7 +797,7 @@ rte_eth_bond_link_down_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
        if (valid_bonded_port_id(bonded_port_id) != 0)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;
        internals->link_down_delay_ms = delay_ms;

        return 0;
@@ -811,7 +811,7 @@ rte_eth_bond_link_down_prop_delay_get(uint8_t bonded_port_id)
        if (valid_bonded_port_id(bonded_port_id) != 0)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;

        return internals->link_down_delay_ms;
 }
@@ -825,7 +825,7 @@ rte_eth_bond_link_up_prop_delay_set(uint8_t bonded_port_id, uint32_t delay_ms)
        if (valid_bonded_port_id(bonded_port_id) != 0)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;
        internals->link_up_delay_ms = delay_ms;

        return 0;
@@ -839,7 +839,7 @@ rte_eth_bond_link_up_prop_delay_get(uint8_t bonded_port_id)
        if (valid_bonded_port_id(bonded_port_id) != 0)
                return -1;

-       internals = rte_eth_devices[bonded_port_id].data->dev_private;
+       internals = ETH_DATA(&rte_eth_devices[bonded_port_id])->dd.dev_private;

        return internals->link_up_delay_ms;
 }
diff --git a/lib/librte_pmd_bond/rte_eth_bond_args.c b/lib/librte_pmd_bond/rte_eth_bond_args.c
index 02ecde6..5fbb709 100644
--- a/lib/librte_pmd_bond/rte_eth_bond_args.c
+++ b/lib/librte_pmd_bond/rte_eth_bond_args.c
@@ -82,7 +82,7 @@ find_port_id_by_dev_name(const char *name)
                if (rte_eth_devices[i].data == NULL)
                        continue;

-               if (strcmp(rte_eth_devices[i].data->name, name) == 0)
+               if (strcmp(_DD(&rte_eth_devices[i], name), name) == 0)
                        return i;
        }
        return -1;
diff --git a/lib/librte_pmd_bond/rte_eth_bond_pmd.c b/lib/librte_pmd_bond/rte_eth_bond_pmd.c
index c937e6b..75ecacf 100644
--- a/lib/librte_pmd_bond/rte_eth_bond_pmd.c
+++ b/lib/librte_pmd_bond/rte_eth_bond_pmd.c
@@ -699,7 +699,7 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                sizeof(internals->tlb_slaves_order[0]) * num_of_slaves);


-       ether_addr_copy(primary_port->data->mac_addrs, &primary_slave_addr);
+       ether_addr_copy(ETH_DATA(primary_port)->mac_addrs, &primary_slave_addr);

        if (nb_pkts > 3) {
                for (i = 0; i < 3; i++)
@@ -1102,11 +1102,11 @@ void
 link_properties_set(struct rte_eth_dev *bonded_eth_dev,
                struct rte_eth_link *slave_dev_link)
 {
-       struct rte_eth_link *bonded_dev_link = &bonded_eth_dev->data->dev_link;
-       struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+       struct rte_eth_link *bonded_dev_link = &ETH_DATA(bonded_eth_dev)->dev_link;
+       struct bond_dev_private *internals = ETH_DATA(bonded_eth_dev)->dd.dev_private;

        if (slave_dev_link->link_status &&
-               bonded_eth_dev->data->dev_started) {
+               ETH_DATA(bonded_eth_dev)->dev_started) {
                bonded_dev_link->link_duplex = slave_dev_link->link_duplex;
                bonded_dev_link->link_speed = slave_dev_link->link_speed;

@@ -1117,10 +1117,10 @@ link_properties_set(struct rte_eth_dev *bonded_eth_dev,
 void
 link_properties_reset(struct rte_eth_dev *bonded_eth_dev)
 {
-       struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+       struct bond_dev_private *internals = ETH_DATA(bonded_eth_dev)->dd.dev_private;

-       memset(&(bonded_eth_dev->data->dev_link), 0,
-                       sizeof(bonded_eth_dev->data->dev_link));
+       memset(&(ETH_DATA(bonded_eth_dev)->dev_link), 0,
+                       sizeof(ETH_DATA(bonded_eth_dev)->dev_link));

        internals->link_props_set = 0;
 }
@@ -1151,7 +1151,7 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
                return -1;
        }

-       mac_addr = eth_dev->data->mac_addrs;
+       mac_addr = ETH_DATA(eth_dev)->mac_addrs;

        ether_addr_copy(mac_addr, dst_mac_addr);
        return 0;
@@ -1172,7 +1172,7 @@ mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
                return -1;
        }

-       mac_addr = eth_dev->data->mac_addrs;
+       mac_addr = ETH_DATA(eth_dev)->mac_addrs;

        /* If new MAC is different to current MAC then update */
        if (memcmp(mac_addr, new_mac_addr, sizeof(*mac_addr)) != 0)
@@ -1184,7 +1184,7 @@ mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
 int
 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
 {
-       struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+       struct bond_dev_private *internals = ETH_DATA(bonded_eth_dev)->dd.dev_private;
        int i;

        /* Update slave devices MAC addresses */
@@ -1197,7 +1197,7 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
        case BONDING_MODE_BROADCAST:
                for (i = 0; i < internals->slave_count; i++) {
                        if (mac_address_set(&rte_eth_devices[internals->slaves[i].port_id],
-                                       bonded_eth_dev->data->mac_addrs)) {
+                                       ETH_DATA(bonded_eth_dev)->mac_addrs)) {
                                RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                internals->slaves[i].port_id);
                                return -1;
@@ -1215,7 +1215,7 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
                        if (internals->slaves[i].port_id ==
                                        internals->current_primary_port) {
                                if (mac_address_set(&rte_eth_devices[internals->primary_port],
-                                               bonded_eth_dev->data->mac_addrs)) {
+                                               ETH_DATA(bonded_eth_dev)->mac_addrs)) {
                                        RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
                                                        internals->current_primary_port);
                                        return -1;
@@ -1240,7 +1240,7 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
 {
        struct bond_dev_private *internals;

-       internals = eth_dev->data->dev_private;
+       internals = ETH_DATA(eth_dev)->dd.dev_private;

        switch (mode) {
        case BONDING_MODE_ROUND_ROBIN:
@@ -1300,60 +1300,60 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
        uint16_t q_id;

        /* Stop slave */
-       rte_eth_dev_stop(slave_eth_dev->data->port_id);
+       rte_eth_dev_stop(ETH_DATA(slave_eth_dev)->dd.port_id);

        /* Enable interrupts on slave device if supported */
        if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
-               slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
+               ETH_DATA(slave_eth_dev)->dev_conf.intr_conf.lsc = 1;

        /* Configure device */
-       errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
-                       bonded_eth_dev->data->nb_rx_queues,
-                       bonded_eth_dev->data->nb_tx_queues,
-                       &(slave_eth_dev->data->dev_conf));
+       errval = rte_eth_dev_configure(ETH_DATA(slave_eth_dev)->dd.port_id,
+                       ETH_DATA(bonded_eth_dev)->dd.nb_rx_queues,
+                       ETH_DATA(bonded_eth_dev)->dd.nb_tx_queues,
+                       &(ETH_DATA(slave_eth_dev)->dev_conf));
        if (errval != 0) {
                RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err 
(%d)",
-                               slave_eth_dev->data->port_id, errval);
+                               ETH_DATA(slave_eth_dev)->dd.port_id, errval);
                return errval;
        }

        /* Setup Rx Queues */
-       for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
-               bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
+       for (q_id = 0; q_id < ETH_DATA(bonded_eth_dev)->dd.nb_rx_queues; q_id++) {
+               bd_rx_q = (struct bond_rx_queue *)ETH_DATA(bonded_eth_dev)->dd.rx_queues[q_id];

-               errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
+               errval = rte_eth_rx_queue_setup(ETH_DATA(slave_eth_dev)->dd.port_id, q_id,
                                bd_rx_q->nb_rx_desc,
-                               rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+                               rte_eth_dev_socket_id(ETH_DATA(slave_eth_dev)->dd.port_id),
                                &(bd_rx_q->rx_conf), bd_rx_q->mb_pool);
                if (errval != 0) {
                        RTE_BOND_LOG(ERR,
                                        "rte_eth_rx_queue_setup: port=%d 
queue_id %d, err (%d)",
-                                       slave_eth_dev->data->port_id, q_id, 
errval);
+                                       ETH_DATA(slave_eth_dev)->dd.port_id, 
q_id, errval);
                        return errval;
                }
        }

        /* Setup Tx Queues */
-       for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
-               bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
+       for (q_id = 0; q_id < ETH_DATA(bonded_eth_dev)->dd.nb_tx_queues; q_id++) {
+               bd_tx_q = (struct bond_tx_queue *)ETH_DATA(bonded_eth_dev)->dd.tx_queues[q_id];

-               errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
+               errval = rte_eth_tx_queue_setup(ETH_DATA(slave_eth_dev)->dd.port_id, q_id,
                                bd_tx_q->nb_tx_desc,
-                               rte_eth_dev_socket_id(slave_eth_dev->data->port_id),
+                               rte_eth_dev_socket_id(ETH_DATA(slave_eth_dev)->dd.port_id),
                                &bd_tx_q->tx_conf);
                if (errval != 0) {
                        RTE_BOND_LOG(ERR,
                                        "rte_eth_tx_queue_setup: port=%d 
queue_id %d, err (%d)",
-                                       slave_eth_dev->data->port_id, q_id, 
errval);
+                                       ETH_DATA(slave_eth_dev)->dd.port_id, 
q_id, errval);
                        return errval;
                }
        }

        /* Start device */
-       errval = rte_eth_dev_start(slave_eth_dev->data->port_id);
+       errval = rte_eth_dev_start(ETH_DATA(slave_eth_dev)->dd.port_id);
        if (errval != 0) {
                RTE_BOND_LOG(ERR, "rte_eth_dev_start: port=%u, err (%d)",
-                               slave_eth_dev->data->port_id, errval);
+                               ETH_DATA(slave_eth_dev)->dd.port_id, errval);
                return -1;
        }

@@ -1368,7 +1368,7 @@ slave_remove(struct bond_dev_private *internals,

        for (i = 0; i < internals->slave_count; i++)
                if (internals->slaves[i].port_id ==
-                               slave_eth_dev->data->port_id)
+                               ETH_DATA(slave_eth_dev)->dd.port_id)
                        break;

        if (i < (internals->slave_count - 1))
@@ -1389,7 +1389,7 @@ slave_add(struct bond_dev_private *internals,
        struct bond_slave_details *slave_details =
                        &internals->slaves[internals->slave_count];

-       slave_details->port_id = slave_eth_dev->data->port_id;
+       slave_details->port_id = _DD(slave_eth_dev, port_id);
        slave_details->last_link_status = 0;

        /* If slave device doesn't support interrupts then we need to enabled
@@ -1408,7 +1408,7 @@ slave_add(struct bond_dev_private *internals,

        slave_details->link_status_wait_to_complete = 0;
        /* clean tlb_last_obytes when adding port for bonding device */
-       memcpy(&(slave_details->persisted_mac_addr), slave_eth_dev->data->mac_addrs,
+       memcpy(&(slave_details->persisted_mac_addr), ETH_DATA(slave_eth_dev)->mac_addrs,
                        sizeof(struct ether_addr));
 }

@@ -1440,14 +1440,14 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
        /* slave eth dev will be started by bonded device */
        if (valid_bonded_ethdev(eth_dev)) {
                RTE_BOND_LOG(ERR, "User tried to explicitly start a slave 
eth_dev (%d)",
-                               eth_dev->data->port_id);
+                               _DD(eth_dev, port_id));
                return -1;
        }

-       eth_dev->data->dev_link.link_status = 0;
-       eth_dev->data->dev_started = 1;
+       ETH_DATA(eth_dev)->dev_link.link_status = 0;
+       ETH_DATA(eth_dev)->dev_started = 1;

-       internals = eth_dev->data->dev_private;
+       internals = _DD_PRIVATE(eth_dev);

        if (internals->slave_count == 0) {
                RTE_BOND_LOG(ERR, "Cannot start port since there are no slave 
devices");
@@ -1466,7 +1466,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)

                if (mac_address_set(eth_dev, new_mac_addr) != 0) {
                        RTE_BOND_LOG(ERR, "bonded port (%d) failed to update 
MAC address",
-                                       eth_dev->data->port_id);
+                                       _DD(eth_dev, port_id));
                        return -1;
                }
        }
@@ -1485,7 +1485,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
                                &(rte_eth_devices[internals->slaves[i].port_id])) != 0) {
                        RTE_BOND_LOG(ERR,
                                        "bonded port (%d) failed to reconfigure 
slave device (%d)",
-                                       eth_dev->data->port_id, 
internals->slaves[i].port_id);
+                                       _DD(eth_dev, port_id), 
internals->slaves[i].port_id);
                        return -1;
                }
        }
@@ -1506,7 +1506,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
 static void
 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 {
-       struct bond_dev_private *internals = eth_dev->data->dev_private;
+       struct bond_dev_private *internals = _DD_PRIVATE(eth_dev);
        uint8_t i;

        if (internals->mode == BONDING_MODE_8023AD) {
@@ -1539,8 +1539,8 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
        internals->active_slave_count = 0;
        internals->link_status_polling_enabled = 0;

-       eth_dev->data->dev_link.link_status = 0;
-       eth_dev->data->dev_started = 0;
+       ETH_DATA(eth_dev)->dev_link.link_status = 0;
+       ETH_DATA(eth_dev)->dev_started = 0;
 }

 static void
@@ -1554,9 +1554,9 @@ static int bond_ethdev_configure(struct rte_eth_dev *dev);
 static void
 bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       struct bond_dev_private *internals = dev->data->dev_private;
+       struct bond_dev_private *internals = _DD_PRIVATE(dev);

-       dev_info->driver_name = driver_name;
+       dev_info->di.driver_name = driver_name;
        dev_info->max_mac_addrs = 1;

        dev_info->max_rx_pktlen = (uint32_t)2048;
@@ -1565,7 +1565,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_tx_queues = (uint16_t)512;

        dev_info->min_rx_bufsize = 0;
-       dev_info->pci_dev = dev->pci_dev;
+       dev_info->di.pci_dev = dev->pci_dev;

        dev_info->rx_offload_capa = internals->rx_offload_capa;
        dev_info->tx_offload_capa = internals->tx_offload_capa;
@@ -1583,14 +1583,14 @@ bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                return -1;

        bd_rx_q->queue_id = rx_queue_id;
-       bd_rx_q->dev_private = dev->data->dev_private;
+       bd_rx_q->dev_private = _DD_PRIVATE(dev);

        bd_rx_q->nb_rx_desc = nb_rx_desc;

        memcpy(&(bd_rx_q->rx_conf), rx_conf, sizeof(struct rte_eth_rxconf));
        bd_rx_q->mb_pool = mb_pool;

-       dev->data->rx_queues[rx_queue_id] = bd_rx_q;
+       _DD(dev, rx_queues[rx_queue_id]) = bd_rx_q;

        return 0;
 }
@@ -1608,12 +1608,12 @@ bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                return -1;

        bd_tx_q->queue_id = tx_queue_id;
-       bd_tx_q->dev_private = dev->data->dev_private;
+       bd_tx_q->dev_private = _DD_PRIVATE(dev);

        bd_tx_q->nb_tx_desc = nb_tx_desc;
        memcpy(&(bd_tx_q->tx_conf), tx_conf, sizeof(bd_tx_q->tx_conf));

-       dev->data->tx_queues[tx_queue_id] = bd_tx_q;
+       _DD(dev, tx_queues[tx_queue_id]) = bd_tx_q;

        return 0;
 }
@@ -1650,9 +1650,9 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
                return;

        bonded_ethdev = (struct rte_eth_dev *)cb_arg;
-       internals = (struct bond_dev_private *)bonded_ethdev->data->dev_private;
+       internals = (struct bond_dev_private *)_DD(bonded_ethdev, dev_private);

-       if (!bonded_ethdev->data->dev_started ||
+       if (!ETH_DATA(bonded_ethdev)->dev_started ||
                !internals->link_status_polling_enabled)
                return;

@@ -1670,19 +1670,19 @@ bond_ethdev_slave_link_status_change_monitor(void *cb_arg)
                        polling_slave_found = 1;

                        /* Update slave link status */
-                       (*slave_ethdev->dev_ops->link_update)(slave_ethdev,
+                       ETH_OPS(slave_ethdev, link_update)(slave_ethdev,
                                        internals->slaves[i].link_status_wait_to_complete);

                        /* if link status has changed since last checked then call lsc
                         * event callback */
-                       if (slave_ethdev->data->dev_link.link_status !=
+                       if (ETH_DATA(slave_ethdev)->dev_link.link_status !=
                                        internals->slaves[i].last_link_status) {
                                internals->slaves[i].last_link_status =
-                                               slave_ethdev->data->dev_link.link_status;
+                                               ETH_DATA(slave_ethdev)->dev_link.link_status;

                                bond_ethdev_lsc_event_callback(internals->slaves[i].port_id,
-                                               RTE_ETH_EVENT_INTR_LSC,
-                                               &bonded_ethdev->data->port_id);
+                                               RTE_DEV_EVENT_INTR_LSC,
+                                               &_DD(bonded_ethdev, port_id));
                        }
                }
                rte_spinlock_unlock(&internals->lock);
@@ -1698,11 +1698,11 @@ static int
 bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
                int wait_to_complete)
 {
-       struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+       struct bond_dev_private *internals = _DD(bonded_eth_dev, dev_private);

-       if (!bonded_eth_dev->data->dev_started ||
+       if (!ETH_DATA(bonded_eth_dev)->dev_started ||
                internals->active_slave_count == 0) {
-               bonded_eth_dev->data->dev_link.link_status = 0;
+               ETH_DATA(bonded_eth_dev)->dev_link.link_status = 0;
                return 0;
        } else {
                struct rte_eth_dev *slave_eth_dev;
@@ -1711,15 +1711,15 @@ bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
                for (i = 0; i < internals->active_slave_count; i++) {
                        slave_eth_dev = &rte_eth_devices[internals->active_slaves[i]];

-                       (*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
+                       ETH_OPS(slave_eth_dev, link_update)(slave_eth_dev,
                                        wait_to_complete);
-                       if (slave_eth_dev->data->dev_link.link_status == 1) {
+                       if (ETH_DATA(slave_eth_dev)->dev_link.link_status == 1) {
                                link_up = 1;
                                break;
                        }
                }

-               bonded_eth_dev->data->dev_link.link_status = link_up;
+               ETH_DATA(bonded_eth_dev)->dev_link.link_status = link_up;
        }

        return 0;
@@ -1728,7 +1728,7 @@ bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
 static void
 bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-       struct bond_dev_private *internals = dev->data->dev_private;
+       struct bond_dev_private *internals = _DD_PRIVATE(dev);
        struct rte_eth_stats slave_stats;

        int i;
@@ -1759,7 +1759,7 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 static void
 bond_ethdev_stats_reset(struct rte_eth_dev *dev)
 {
-       struct bond_dev_private *internals = dev->data->dev_private;
+       struct bond_dev_private *internals = _DD_PRIVATE(dev);
        int i;

        for (i = 0; i < internals->slave_count; i++)
@@ -1769,7 +1769,7 @@ bond_ethdev_stats_reset(struct rte_eth_dev *dev)
 static void
 bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
-       struct bond_dev_private *internals = eth_dev->data->dev_private;
+       struct bond_dev_private *internals = _DD_PRIVATE(eth_dev);
        int i;

        internals->promiscuous_en = 1;
@@ -1797,7 +1797,7 @@ bond_ethdev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 static void
 bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
 {
-       struct bond_dev_private *internals = dev->data->dev_private;
+       struct bond_dev_private *internals = _DD_PRIVATE(dev);
        int i;

        internals->promiscuous_en = 0;
@@ -1829,11 +1829,11 @@ bond_ethdev_delayed_lsc_propagation(void *arg)
                return;

        _rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
-                       RTE_ETH_EVENT_INTR_LSC);
+                       RTE_DEV_EVENT_INTR_LSC);
 }

 void
-bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_dev_event_type type,
                void *param)
 {
        struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
@@ -1844,7 +1844,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
        uint8_t active_pos;
        uint8_t lsc_flag = 0;

-       if (type != RTE_ETH_EVENT_INTR_LSC || param == NULL)
+       if (type != RTE_DEV_EVENT_INTR_LSC || param == NULL)
                return;

        bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
@@ -1853,10 +1853,10 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
        if (valid_bonded_ethdev(bonded_eth_dev))
                return;

-       internals = bonded_eth_dev->data->dev_private;
+       internals = _DD(bonded_eth_dev, dev_private);

        /* If the device isn't started don't handle interrupts */
-       if (!bonded_eth_dev->data->dev_started)
+       if (!ETH_DATA(bonded_eth_dev)->dev_started)
                return;

        /* verify that port_id is a valid slave of bonded port */
@@ -1882,7 +1882,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
                /* if no active slave ports then set this port to be primary port */
                if (internals->active_slave_count < 1) {
                        /* If first active slave, then change link status */
-                       bonded_eth_dev->data->dev_link.link_status = 1;
+                       ETH_DATA(bonded_eth_dev)->dev_link.link_status = 1;
                        internals->current_primary_port = port_id;
                        lsc_flag = 1;

@@ -1890,7 +1890,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,

                        /* Inherit eth dev link properties from first active slave */
                        link_properties_set(bonded_eth_dev,
-                                       &(slave_eth_dev->data->dev_link));
+                                       &(ETH_DATA(slave_eth_dev)->dev_link));
                }

                activate_slave(bonded_eth_dev, port_id);
@@ -1910,7 +1910,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
                 * link properties */
                if (internals->active_slave_count < 1) {
                        lsc_flag = 1;
-                       bonded_eth_dev->data->dev_link.link_status = 0;
+                       ETH_DATA(bonded_eth_dev)->dev_link.link_status = 0;

                        link_properties_reset(bonded_eth_dev);
                }
@@ -1933,14 +1933,14 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
                        rte_eal_alarm_cancel(bond_ethdev_delayed_lsc_propagation,
                                        bonded_eth_dev);

-               if (bonded_eth_dev->data->dev_link.link_status) {
+               if (ETH_DATA(bonded_eth_dev)->dev_link.link_status) {
                        if (internals->link_up_delay_ms > 0)
                                rte_eal_alarm_set(internals->link_up_delay_ms * 1000,
                                                bond_ethdev_delayed_lsc_propagation,
                                                (void *)bonded_eth_dev);
                        else
                                _rte_eth_dev_callback_process(bonded_eth_dev,
-                                               RTE_ETH_EVENT_INTR_LSC);
+                                               RTE_DEV_EVENT_INTR_LSC);

                } else {
                        if (internals->link_down_delay_ms > 0)
@@ -1949,7 +1949,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
                                                (void *)bonded_eth_dev);
                        else
                                _rte_eth_dev_callback_process(bonded_eth_dev,
-                                               RTE_ETH_EVENT_INTR_LSC);
+                                               RTE_DEV_EVENT_INTR_LSC);
                }
        }
 }
@@ -2025,7 +2025,7 @@ bond_init(const char *name, const char *params)
                                "socket %u.\n", name, bonding_mode, socket_id);
                goto parse_error;
        }
-       internals = rte_eth_devices[port_id].data->dev_private;
+       internals = _DD(&rte_eth_devices[port_id], dev_private);
        internals->kvlist = kvlist;

        RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
@@ -2043,8 +2043,8 @@ parse_error:
 static int
 bond_ethdev_configure(struct rte_eth_dev *dev)
 {
-       char *name = dev->data->name;
-       struct bond_dev_private *internals = dev->data->dev_private;
+       char *name = _DD(dev, name);
+       struct bond_dev_private *internals = _DD_PRIVATE(dev);
        struct rte_kvargs *kvlist = internals->kvlist;
        int arg_count;
        uint8_t port_id = dev - rte_eth_devices;
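
For reviewers: the accessor macros used throughout these hunks are defined in
the ethdev header changes earlier in this series and are not shown here. From
their usage they are assumed to expand roughly as follows (a readability
sketch reconstructed from this patch, not copied from the headers):

/* Assumed expansions, inferred from usage in this patch: */
#define ETH_DATA(dev)      ((dev)->data)              /* ethdev-specific data     */
#define _DD(dev, field)    (ETH_DATA(dev)->dd.field)  /* embedded common dev data */
#define _DD_PRIVATE(dev)   _DD(dev, dev_private)      /* driver private area      */

Under that reading, dev->data->dev_private becomes _DD_PRIVATE(dev), and
fields moved into the common 'dd' struct (port_id, nb_rx_queues, rx_queues[],
min_rx_buf_size, ...) are reached as ETH_DATA(dev)->dd.<field> or
_DD(dev, <field>).
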
diff --git a/lib/librte_pmd_bond/rte_eth_bond_private.h b/lib/librte_pmd_bond/rte_eth_bond_private.h
index 45e5c65..f285518 100644
--- a/lib/librte_pmd_bond/rte_eth_bond_private.h
+++ b/lib/librte_pmd_bond/rte_eth_bond_private.h
@@ -244,7 +244,7 @@ bond_ethdev_primary_set(struct bond_dev_private *internals,
                uint8_t slave_port_id);

 void
-bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
+bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_dev_event_type type,
                void *param);

 int
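
The prototype change above follows the series-wide rename of the event types
from rte_eth_event_type to rte_dev_event_type. A minimal sketch of an LSC
callback written against the new names, assuming the existing registration
API simply adopts the renamed enum (illustrative only, not part of the patch):

#include <stdio.h>
#include <rte_ethdev.h>

static void
lsc_cb(uint8_t port_id, enum rte_dev_event_type type, void *param)
{
        (void)param;
        /* same LSC semantics as before; only the event-type names changed */
        if (type == RTE_DEV_EVENT_INTR_LSC)
                printf("port %u: link status changed\n", (unsigned)port_id);
}

/* registration is assumed unchanged apart from the enum:
 *      rte_eth_dev_callback_register(port_id, RTE_DEV_EVENT_INTR_LSC,
 *                                    lsc_cb, NULL);
 */
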
diff --git a/lib/librte_pmd_e1000/em_ethdev.c b/lib/librte_pmd_e1000/em_ethdev.c
index 76f45c9..afa81b8 100644
--- a/lib/librte_pmd_e1000/em_ethdev.c
+++ b/lib/librte_pmd_e1000/em_ethdev.c
@@ -180,7 +180,7 @@ rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
 {
        struct rte_eth_link *dst = link;
-       struct rte_eth_link *src = &(dev->data->dev_link);
+       struct rte_eth_link *src = &(ETH_DATA(dev)->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
@@ -205,7 +205,7 @@ static inline int
 rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
 {
-       struct rte_eth_link *dst = &(dev->data->dev_link);
+       struct rte_eth_link *dst = &(ETH_DATA(dev)->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
@@ -216,26 +216,26 @@ rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
 }

 static int
-eth_em_dev_init(struct rte_eth_dev *eth_dev)
+eth_em_dev_init(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev;
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vfta * shadow_vfta =
-               E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);

-       pci_dev = eth_dev->pci_dev;
-       eth_dev->dev_ops = &eth_em_ops;
-       eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
-       eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
+       pci_dev = dev->pci_dev;
+       dev->dev_ops = &eth_em_ops;
+       dev->rx_pkt_burst = (dev_rx_burst_t)&eth_em_recv_pkts;
+       dev->tx_pkt_burst = (dev_tx_burst_t)&eth_em_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-               if (eth_dev->data->scattered_rx)
-                       eth_dev->rx_pkt_burst =
-                               (eth_rx_burst_t)&eth_em_recv_scattered_pkts;
+               if (ETH_DATA(dev)->scattered_rx)
+                       dev->rx_pkt_burst =
+                               (dev_rx_burst_t)&eth_em_recv_scattered_pkts;
                return 0;
        }

@@ -248,15 +248,15 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
                        em_hw_init(hw) != 0) {
                PMD_INIT_LOG(ERR, "port_id %d vendorID=0x%x deviceID=0x%x: "
                        "failed to init HW",
-                       eth_dev->data->port_id, pci_dev->id.vendor_id,
+                       ETH_DATA(dev)->dd.port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
                return -(ENODEV);
        }

        /* Allocate memory for storing MAC addresses */
-       eth_dev->data->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
+       ETH_DATA(dev)->mac_addrs = rte_zmalloc("e1000", ETHER_ADDR_LEN *
                        hw->mac.rar_entry_count, 0);
-       if (eth_dev->data->mac_addrs == NULL) {
+       if (ETH_DATA(dev)->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
                        "store MAC addresses",
                        ETHER_ADDR_LEN * hw->mac.rar_entry_count);
@@ -265,17 +265,17 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.addr,
-               eth_dev->data->mac_addrs);
+               ETH_DATA(dev)->mac_addrs);

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));

        PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
-                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    ETH_DATA(dev)->dd.port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(&(pci_dev->intr_handle),
-               eth_em_interrupt_handler, (void *)eth_dev);
+               eth_em_interrupt_handler, (void *)dev);

        return (0);
 }
@@ -286,7 +286,7 @@ static struct eth_driver rte_em_pmd = {
                .id_table = pci_id_em_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
-       .eth_dev_init = eth_em_dev_init,
+       .dev_init = eth_em_dev_init,
        .dev_private_size = sizeof(struct e1000_adapter),
 };

@@ -386,7 +386,7 @@ static int
 eth_em_configure(struct rte_eth_dev *dev)
 {
        struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();
        intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
@@ -447,7 +447,7 @@ static int
 eth_em_start(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret, mask;

        PMD_INIT_FUNC_TRACE();
@@ -503,47 +503,47 @@ eth_em_start(struct rte_eth_dev *dev)
        E1000_WRITE_REG(hw, E1000_ITR, UINT16_MAX);

        /* Setup link speed and duplex */
-       switch (dev->data->dev_conf.link_speed) {
+       switch (ETH_DATA(dev)->dev_conf.link_speed) {
        case ETH_LINK_SPEED_AUTONEG:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+               if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
-               else if (dev->data->dev_conf.link_duplex ==
+               else if (ETH_DATA(dev)->dev_conf.link_duplex ==
                                        ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
-               else if (dev->data->dev_conf.link_duplex ==
+               else if (ETH_DATA(dev)->dev_conf.link_duplex ==
                                        ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_10:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+               if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
-               else if (dev->data->dev_conf.link_duplex ==
+               else if (ETH_DATA(dev)->dev_conf.link_duplex ==
                                        ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
-               else if (dev->data->dev_conf.link_duplex ==
+               else if (ETH_DATA(dev)->dev_conf.link_duplex ==
                                        ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_100:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+               if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
-               else if (dev->data->dev_conf.link_duplex ==
+               else if (ETH_DATA(dev)->dev_conf.link_duplex ==
                                        ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
-               else if (dev->data->dev_conf.link_duplex ==
+               else if (ETH_DATA(dev)->dev_conf.link_duplex ==
                                        ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_1000:
-               if ((dev->data->dev_conf.link_duplex ==
+               if ((ETH_DATA(dev)->dev_conf.link_duplex ==
                                ETH_LINK_AUTONEG_DUPLEX) ||
-                       (dev->data->dev_conf.link_duplex ==
+                       (ETH_DATA(dev)->dev_conf.link_duplex ==
                                        ETH_LINK_FULL_DUPLEX))
                        hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
                else
@@ -556,7 +556,7 @@ eth_em_start(struct rte_eth_dev *dev)
        e1000_setup_link(hw);

        /* check if lsc interrupt feature is enabled */
-       if (dev->data->dev_conf.intr_conf.lsc != 0) {
+       if (ETH_DATA(dev)->dev_conf.intr_conf.lsc != 0) {
                ret = eth_em_interrupt_setup(dev);
                if (ret) {
                        PMD_INIT_LOG(ERR, "Unable to setup interrupts");
@@ -571,8 +571,8 @@ eth_em_start(struct rte_eth_dev *dev)

 error_invalid_config:
        PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
-                    dev->data->dev_conf.link_speed,
-                    dev->data->dev_conf.link_duplex, dev->data->port_id);
+                    ETH_DATA(dev)->dev_conf.link_speed,
+                    ETH_DATA(dev)->dev_conf.link_duplex, ETH_DATA(dev)->dd.port_id);
        em_dev_clear_queues(dev);
        return (-EINVAL);
 }
@@ -587,7 +587,7 @@ static void
 eth_em_stop(struct rte_eth_dev *dev)
 {
        struct rte_eth_link link;
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        em_intr_disable(hw);
        e1000_reset_hw(hw);
@@ -607,7 +607,7 @@ eth_em_stop(struct rte_eth_dev *dev)
 static void
 eth_em_close(struct rte_eth_dev *dev)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        eth_em_stop(dev);
        e1000_phy_hw_reset(hw);
@@ -696,9 +696,9 @@ em_hardware_init(struct e1000_hw *hw)
 static void
 eth_em_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_hw_stats *stats =
-                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+                       E1000_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);
        int pause_frames;

        if(hw->phy.media_type == e1000_media_type_copper ||
@@ -823,7 +823,7 @@ static void
 eth_em_stats_reset(struct rte_eth_dev *dev)
 {
        struct e1000_hw_stats *hw_stats =
-                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+                       E1000_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);

        /* HW registers are cleared on read */
        eth_em_stats_get(dev, NULL);
@@ -858,7 +858,7 @@ em_get_max_pktlen(const struct e1000_hw *hw)
 static void
 eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
        dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
@@ -890,7 +890,7 @@ static int
 eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct rte_eth_link link, old;
        int link_check, count;

@@ -1042,7 +1042,7 @@ static void
 eth_em_promiscuous_enable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
@@ -1054,12 +1054,12 @@ static void
 eth_em_promiscuous_disable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);
-       if (dev->data->all_multicast == 1)
+       if (ETH_DATA(dev)->all_multicast == 1)
                rctl |= E1000_RCTL_MPE;
        else
                rctl &= (~E1000_RCTL_MPE);
@@ -1070,7 +1070,7 @@ static void
 eth_em_allmulticast_enable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
@@ -1082,10 +1082,10 @@ static void
 eth_em_allmulticast_disable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t rctl;

-       if (dev->data->promiscuous == 1)
+       if (ETH_DATA(dev)->promiscuous == 1)
                return; /* must remain in all_multicast mode */
        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl &= (~E1000_RCTL_MPE);
@@ -1096,9 +1096,9 @@ static int
 eth_em_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vfta * shadow_vfta =
-               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;
@@ -1123,7 +1123,7 @@ static void
 em_vlan_hw_filter_disable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;

        /* Filter Table Disable */
@@ -1137,9 +1137,9 @@ static void
 em_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vfta * shadow_vfta =
-               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;
        int i;

@@ -1158,7 +1158,7 @@ static void
 em_vlan_hw_strip_disable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;

        /* VLAN Mode Disable */
@@ -1172,7 +1172,7 @@ static void
 em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;

        /* VLAN Mode Enable */
@@ -1185,14 +1185,14 @@ static void
 eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        if(mask & ETH_VLAN_STRIP_MASK){
-               if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip)
                        em_vlan_hw_strip_enable(dev);
                else
                        em_vlan_hw_strip_disable(dev);
        }

        if(mask & ETH_VLAN_FILTER_MASK){
-               if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_filter)
                        em_vlan_hw_filter_enable(dev);
                else
                        em_vlan_hw_filter_disable(dev);
@@ -1219,7 +1219,7 @@ static int
 eth_em_interrupt_setup(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
        rte_intr_enable(&(dev->pci_dev->intr_handle));
@@ -1242,9 +1242,9 @@ eth_em_interrupt_get_status(struct rte_eth_dev *dev)
 {
        uint32_t icr;
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);

        /* read-on-clear nic registers here */
        icr = E1000_READ_REG(hw, E1000_ICR);
@@ -1269,9 +1269,9 @@ static int
 eth_em_interrupt_action(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);
        uint32_t tctl, rctl;
        struct rte_eth_link link;
        int ret;
@@ -1294,11 +1294,11 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
        rte_em_dev_atomic_read_link_status(dev, &link);
        if (link.link_status) {
                PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
-                            dev->data->port_id, (unsigned)link.link_speed,
+                            ETH_DATA(dev)->dd.port_id, (unsigned)link.link_speed,
                             link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                             "full-duplex" : "half-duplex");
        } else {
-               PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
+               PMD_INIT_LOG(INFO, " Port %d: Link Down", ETH_DATA(dev)->dd.port_id);
        }
        PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
                     dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
@@ -1340,7 +1340,7 @@ eth_em_interrupt_handler(__rte_unused struct rte_intr_handle *handle,

        eth_em_interrupt_get_status(dev);
        eth_em_interrupt_action(dev);
-       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+       _rte_eth_dev_callback_process(dev, RTE_DEV_EVENT_INTR_LSC);
 }

 static int
@@ -1348,7 +1348,7 @@ eth_em_led_on(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
 }

@@ -1357,7 +1357,7 @@ eth_em_led_off(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
 }

@@ -1369,7 +1369,7 @@ eth_em_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        int tx_pause;
        int rx_pause;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        fc_conf->pause_time = hw->fc.pause_time;
        fc_conf->high_water = hw->fc.high_water;
        fc_conf->low_water = hw->fc.low_water;
@@ -1418,7 +1418,7 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        uint32_t max_high_water;
        uint32_t rctl;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        if (fc_conf->autoneg != hw->mac.autoneg)
                return -ENOTSUP;
        rx_buf_size = em_get_rx_buffer_size(hw);
@@ -1467,7 +1467,7 @@ static void
 eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                uint32_t index, __rte_unused uint32_t pool)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        e1000_rar_set(hw, mac_addr->addr_bytes, index);
 }
@@ -1476,7 +1476,7 @@ static void
 eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 {
        uint8_t addr[ETHER_ADDR_LEN];
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        memset(addr, 0, sizeof(addr));

@@ -1500,25 +1500,25 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)

        /* refuse mtu that requires the support of scattered packets when this
         * feature has not been enabled before. */
-       if (!dev->data->scattered_rx &&
-           frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+       if (!ETH_DATA(dev)->scattered_rx &&
+           frame_size > ETH_DATA(dev)->dd.min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
                return -EINVAL;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        rctl = E1000_READ_REG(hw, E1000_RCTL);

        /* switch to jumbo mode if needed */
        if (frame_size > ETHER_MAX_LEN) {
-               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame = 1;
                rctl |= E1000_RCTL_LPE;
        } else {
-               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame = 0;
                rctl &= ~E1000_RCTL_LPE;
        }
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);

        /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+       ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        return 0;
 }

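Note the burst-function casts in the hunks above: eth_rx_burst_t and
eth_tx_burst_t become dev_rx_burst_t and dev_tx_burst_t. The renamed typedefs
are assumed to carry the old prototypes over unchanged (sketch only):

typedef uint16_t (*dev_rx_burst_t)(void *rxq, struct rte_mbuf **rx_pkts,
                                   uint16_t nb_pkts);
typedef uint16_t (*dev_tx_burst_t)(void *txq, struct rte_mbuf **tx_pkts,
                                   uint16_t nb_pkts);
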
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 8e20920..3b373a6 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -740,7 +740,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                   "queue_id=%u",
                                   (unsigned) rxq->port_id,
                                   (unsigned) rxq->queue_id);
-                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       ETH_DATA(&rte_eth_devices[rxq->port_id])->dd.rx_mbuf_alloc_failed++;
                        break;
                }

@@ -919,7 +919,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                                   "queue_id=%u", (unsigned) rxq->port_id,
                                   (unsigned) rxq->queue_id);
-                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       ETH_DATA(&rte_eth_devices[rxq->port_id])->dd.rx_mbuf_alloc_failed++;
                        break;
                }

@@ -1111,7 +1111,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
        char z_name[RTE_MEMZONE_NAMESIZE];

        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-               dev->driver->pci_drv.name, ring_name, dev->data->port_id,
+               dev->driver->pci_drv.name, ring_name, ETH_DATA(dev)->dd.port_id,
                queue_id);

        if ((mz = rte_memzone_lookup(z_name)) != 0)
@@ -1204,7 +1204,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
        uint32_t tsize;
        uint16_t tx_rs_thresh, tx_free_thresh;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Validate number of transmit descriptors.
@@ -1232,7 +1232,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
                             "number of TX descriptors minus 3. "
                             "(tx_free_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
-                            (int)dev->data->port_id, (int)queue_idx);
+                            (int)ETH_DATA(dev)->dd.port_id, (int)queue_idx);
                return -(EINVAL);
        }
        if (tx_rs_thresh > tx_free_thresh) {
@@ -1241,7 +1241,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
                             "tx_rs_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
                             (unsigned int)tx_rs_thresh,
-                            (int)dev->data->port_id,
+                            (int)ETH_DATA(dev)->dd.port_id,
                             (int)queue_idx);
                return -(EINVAL);
        }
@@ -1256,14 +1256,14 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
                PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
                             "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
                             "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
-                            (int)dev->data->port_id, (int)queue_idx);
+                            (int)ETH_DATA(dev)->dd.port_id, (int)queue_idx);
                return -(EINVAL);
        }

        /* Free memory prior to re-allocation if needed... */
-       if (dev->data->tx_queues[queue_idx] != NULL) {
-               em_tx_queue_release(dev->data->tx_queues[queue_idx]);
-               dev->data->tx_queues[queue_idx] = NULL;
+       if (ETH_DATA(dev)->dd.tx_queues[queue_idx] != NULL) {
+               em_tx_queue_release(ETH_DATA(dev)->dd.tx_queues[queue_idx]);
+               ETH_DATA(dev)->dd.tx_queues[queue_idx] = NULL;
        }

        /*
@@ -1296,7 +1296,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
        txq->hthresh = tx_conf->tx_thresh.hthresh;
        txq->wthresh = tx_conf->tx_thresh.wthresh;
        txq->queue_id = queue_idx;
-       txq->port_id = dev->data->port_id;
+       txq->port_id = ETH_DATA(dev)->dd.port_id;

        txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
 #ifndef RTE_LIBRTE_XEN_DOM0
@@ -1311,7 +1311,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,

        em_reset_tx_queue(txq);

-       dev->data->tx_queues[queue_idx] = txq;
+       ETH_DATA(dev)->dd.tx_queues[queue_idx] = txq;
        return (0);
 }

@@ -1369,7 +1369,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
        struct e1000_hw     *hw;
        uint32_t rsize;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Validate number of receive descriptors.
@@ -1392,9 +1392,9 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
        }

        /* Free memory prior to re-allocation if needed. */
-       if (dev->data->rx_queues[queue_idx] != NULL) {
-               em_rx_queue_release(dev->data->rx_queues[queue_idx]);
-               dev->data->rx_queues[queue_idx] = NULL;
+       if (ETH_DATA(dev)->dd.rx_queues[queue_idx] != NULL) {
+               em_rx_queue_release(ETH_DATA(dev)->dd.rx_queues[queue_idx]);
+               ETH_DATA(dev)->dd.rx_queues[queue_idx] = NULL;
        }

        /* Allocate RX ring for max possible number of hardware descriptors. */
@@ -1423,8 +1423,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->wthresh = rx_conf->rx_thresh.wthresh;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->queue_id = queue_idx;
-       rxq->port_id = dev->data->port_id;
-       rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
+       rxq->port_id = ETH_DATA(dev)->dd.port_id;
+       rxq->crc_len = (uint8_t) ((ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc) ?
                                0 : ETHER_CRC_LEN);

        rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
@@ -1439,7 +1439,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);

-       dev->data->rx_queues[queue_idx] = rxq;
+       ETH_DATA(dev)->dd.rx_queues[queue_idx] = rxq;
        em_reset_rx_queue(rxq);

        return (0);
@@ -1453,12 +1453,12 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct em_rx_queue *rxq;
        uint32_t desc = 0;

-       if (rx_queue_id >= dev->data->nb_rx_queues) {
+       if (rx_queue_id >= ETH_DATA(dev)->dd.nb_rx_queues) {
                PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id);
                return 0;
        }

-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);

        while ((desc < rxq->nb_rx_desc) &&
@@ -1497,16 +1497,16 @@ em_dev_clear_queues(struct rte_eth_dev *dev)
        struct em_tx_queue *txq;
        struct em_rx_queue *rxq;

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
+               txq = ETH_DATA(dev)->dd.tx_queues[i];
                if (txq != NULL) {
                        em_tx_queue_release_mbufs(txq);
                        em_reset_tx_queue(txq);
                }
        }

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];
                if (rxq != NULL) {
                        em_rx_queue_release_mbufs(rxq);
                        em_reset_rx_queue(rxq);
@@ -1634,7 +1634,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
        uint16_t i;
        int ret;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Make sure receives are disabled while setting
@@ -1663,15 +1663,15 @@ eth_em_rx_init(struct rte_eth_dev *dev)
        if (hw->mac.type == e1000_82573)
                E1000_WRITE_REG(hw, E1000_RDTR, 0x20);

-       dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts;
+       dev->rx_pkt_burst = (dev_rx_burst_t)eth_em_recv_pkts;

        /* Determine RX bufsize. */
        rctl_bsize = EM_MAX_BUF_SIZE;
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
                struct rte_pktmbuf_pool_private *mbp_priv;
                uint32_t buf_size;

-               rxq = dev->data->rx_queues[i];
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];
                mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
                buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
                rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
@@ -1680,11 +1680,11 @@ eth_em_rx_init(struct rte_eth_dev *dev)
        rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize);

        /* Configure and enable each RX queue. */
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
                uint64_t bus_addr;
                uint32_t rxdctl;

-               rxq = dev->data->rx_queues[i];
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];

                /* Allocate buffers for descriptor rings and setup queue */
                ret = em_alloc_rx_queue_mbufs(rxq);
@@ -1696,7 +1696,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
                 *  call to configure
                 */
                rxq->crc_len =
-                       (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
+                       (uint8_t)(ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc ?
                                                        0 : ETHER_CRC_LEN);

                bus_addr = rxq->rx_ring_phys_addr;
@@ -1727,21 +1727,21 @@ eth_em_rx_init(struct rte_eth_dev *dev)
                 * to avoid splitting packets that don't fit into
                 * one buffer.
                 */
-               if (dev->data->dev_conf.rxmode.jumbo_frame ||
+               if (ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame ||
                                rctl_bsize < ETHER_MAX_LEN) {
-                       if (!dev->data->scattered_rx)
+                       if (!ETH_DATA(dev)->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->rx_pkt_burst =
-                               (eth_rx_burst_t)eth_em_recv_scattered_pkts;
-                       dev->data->scattered_rx = 1;
+                               (dev_rx_burst_t)eth_em_recv_scattered_pkts;
+                       ETH_DATA(dev)->scattered_rx = 1;
                }
        }

-       if (dev->data->dev_conf.rxmode.enable_scatter) {
-               if (!dev->data->scattered_rx)
+       if (ETH_DATA(dev)->dev_conf.rxmode.enable_scatter) {
+               if (!ETH_DATA(dev)->scattered_rx)
                        PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
-               dev->data->scattered_rx = 1;
+               ETH_DATA(dev)->scattered_rx = 1;
        }

        /*
@@ -1750,7 +1750,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
         */
        rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);

-       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_ip_checksum)
                rxcsum |= E1000_RXCSUM_IPOFL;
        else
                rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1762,21 +1762,21 @@ eth_em_rx_init(struct rte_eth_dev *dev)
        if ((hw->mac.type == e1000_ich9lan ||
                        hw->mac.type == e1000_pch2lan ||
                        hw->mac.type == e1000_ich10lan) &&
-                       dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+                       ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame == 1) {
                u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
                E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
                E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
        }

        if (hw->mac.type == e1000_pch2lan) {
-               if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+               if (ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame == 1)
                        e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
                else
                        e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
        }

        /* Setup the Receive Control Register. */
-       if (dev->data->dev_conf.rxmode.hw_strip_crc)
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc)
                rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
        else
                rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
@@ -1796,7 +1796,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
        /*
         * Configure support of jumbo frames, if any.
         */
-       if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+       if (ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame == 1)
                rctl |= E1000_RCTL_LPE;
        else
                rctl &= ~E1000_RCTL_LPE;
@@ -1821,13 +1821,13 @@ eth_em_tx_init(struct rte_eth_dev *dev)
        uint32_t txdctl;
        uint16_t i;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /* Setup the Base and Length of the Tx Descriptor Rings. */
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
                uint64_t bus_addr;

-               txq = dev->data->tx_queues[i];
+               txq = ETH_DATA(dev)->dd.tx_queues[i];
                bus_addr = txq->tx_ring_phys_addr;
                E1000_WRITE_REG(hw, E1000_TDLEN(i),
                                txq->nb_tx_desc *
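
For context on the ring_dma_zone_reserve() hunk above: the memzone name is
built from the driver name, the ring name, and the common port id, so moving
port_id into 'dd' must not change the generated names. With illustrative
values (the driver name and ids below are examples, not taken from the patch):

char z_name[RTE_MEMZONE_NAMESIZE];

snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
         "rte_em_pmd",          /* dev->driver->pci_drv.name (example) */
         "tx_ring",             /* ring_name argument (example)        */
         0,                     /* ETH_DATA(dev)->dd.port_id           */
         1);                    /* queue_id                            */
/* -> "rte_em_pmd_tx_ring_0_1" (example only) */
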
diff --git a/lib/librte_pmd_e1000/igb_ethdev.c b/lib/librte_pmd_e1000/igb_ethdev.c
index b3892a5..937ebaf 100644
--- a/lib/librte_pmd_e1000/igb_ethdev.c
+++ b/lib/librte_pmd_e1000/igb_ethdev.c
@@ -275,7 +275,7 @@ static struct eth_dev_ops eth_igb_ops = {
  * dev_ops for virtual function, bare necessities for basic vf
  * operation have been implemented
  */
-static struct eth_dev_ops igbvf_eth_dev_ops = {
+static struct eth_dev_ops igbvf_dev_ops = {
        .dev_configure        = igbvf_dev_configure,
        .dev_start            = igbvf_dev_start,
        .dev_stop             = igbvf_dev_stop,
@@ -308,7 +308,7 @@ rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
 {
        struct rte_eth_link *dst = link;
-       struct rte_eth_link *src = &(dev->data->dev_link);
+       struct rte_eth_link *src = &(ETH_DATA(dev)->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
@@ -333,7 +333,7 @@ static inline int
 rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
 {
-       struct rte_eth_link *dst = &(dev->data->dev_link);
+       struct rte_eth_link *dst = &(ETH_DATA(dev)->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
@@ -347,9 +347,9 @@ static inline void
 igb_intr_enable(struct rte_eth_dev *dev)
 {
        struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        E1000_WRITE_REG(hw, E1000_IMS, intr->mask);
        E1000_WRITE_FLUSH(hw);
@@ -383,7 +383,7 @@ static void
 igb_identify_hardware(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        hw->vendor_id = dev->pci_dev->id.vendor_id;
        hw->device_id = dev->pci_dev->id.device_id;
@@ -452,35 +452,35 @@ igb_reset_swfw_lock(struct e1000_hw *hw)
 }

 static int
-eth_igb_dev_init(struct rte_eth_dev *eth_dev)
+eth_igb_dev_init(struct rte_eth_dev *dev)
 {
        int error = 0;
        struct rte_pci_device *pci_dev;
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vfta * shadow_vfta =
-                       E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+                       E1000_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        uint32_t ctrl_ext;

-       pci_dev = eth_dev->pci_dev;
-       eth_dev->dev_ops = &eth_igb_ops;
-       eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
-       eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+       pci_dev = dev->pci_dev;
+       dev->dev_ops = &eth_igb_ops;
+       dev->rx_pkt_burst = &eth_igb_recv_pkts;
+       dev->tx_pkt_burst = &eth_igb_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-               if (eth_dev->data->scattered_rx)
-                       eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+               if (ETH_DATA(dev)->scattered_rx)
+                       dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
                return 0;
        }

        hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;

-       igb_identify_hardware(eth_dev);
+       igb_identify_hardware(dev);
        if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
                error = -EIO;
                goto err_late;
@@ -539,9 +539,9 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
        }

        /* Allocate memory for storing MAC addresses */
-       eth_dev->data->mac_addrs = rte_zmalloc("e1000",
+       ETH_DATA(dev)->mac_addrs = rte_zmalloc("e1000",
                ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
-       if (eth_dev->data->mac_addrs == NULL) {
+       if (ETH_DATA(dev)->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
                                                "store MAC addresses",
                                ETHER_ADDR_LEN * hw->mac.rar_entry_count);
@@ -550,7 +550,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
        }

        /* Copy the permanent MAC address */
-       ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
+       ether_addr_copy((struct ether_addr *)hw->mac.addr, &ETH_DATA(dev)->mac_addrs[0]);

        /* initialize the vfta */
        memset(shadow_vfta, 0, sizeof(*shadow_vfta));
@@ -558,8 +558,8 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
        /* Now initialize the hardware */
        if (igb_hardware_init(hw) != 0) {
                PMD_INIT_LOG(ERR, "Hardware initialization failed");
-               rte_free(eth_dev->data->mac_addrs);
-               eth_dev->data->mac_addrs = NULL;
+               rte_free(ETH_DATA(dev)->mac_addrs);
+               ETH_DATA(dev)->mac_addrs = NULL;
                error = -ENODEV;
                goto err_late;
        }
@@ -572,7 +572,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
        }

        /* initialize PF if max_vfs not zero */
-       igb_pf_host_init(eth_dev);
+       igb_pf_host_init(dev);

        ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
        /* Set PF Reset Done bit so PF/VF Mail Ops can work */
@@ -581,17 +581,17 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
        E1000_WRITE_FLUSH(hw);

        PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x",
-                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    ETH_DATA(dev)->dd.port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        rte_intr_callback_register(&(pci_dev->intr_handle),
-               eth_igb_interrupt_handler, (void *)eth_dev);
+               eth_igb_interrupt_handler, (void *)dev);

        /* enable uio intr after callback register */
        rte_intr_enable(&(pci_dev->intr_handle));

        /* enable support intr */
-       igb_intr_enable(eth_dev);
+       igb_intr_enable(dev);

        TAILQ_INIT(&filter_info->flex_list);
        filter_info->flex_mask = 0;
@@ -612,29 +612,29 @@ err_late:
  * Virtual Function device init
  */
 static int
-eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
+eth_igbvf_dev_init(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev;
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int diag;

        PMD_INIT_FUNC_TRACE();

-       eth_dev->dev_ops = &igbvf_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
-       eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+       dev->dev_ops = &igbvf_dev_ops;
+       dev->rx_pkt_burst = &eth_igb_recv_pkts;
+       dev->tx_pkt_burst = &eth_igb_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-               if (eth_dev->data->scattered_rx)
-                       eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+               if (ETH_DATA(dev)->scattered_rx)
+                       dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
                return 0;
        }

-       pci_dev = eth_dev->pci_dev;
+       pci_dev = dev->pci_dev;

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
@@ -657,9 +657,9 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
        diag = hw->mac.ops.reset_hw(hw);

        /* Allocate memory for storing MAC addresses */
-       eth_dev->data->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
+       ETH_DATA(dev)->mac_addrs = rte_zmalloc("igbvf", ETHER_ADDR_LEN *
                hw->mac.rar_entry_count, 0);
-       if (eth_dev->data->mac_addrs == NULL) {
+       if (ETH_DATA(dev)->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC "
                        "addresses",
@@ -669,11 +669,11 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
-                       &eth_dev->data->mac_addrs[0]);
+                       &ETH_DATA(dev)->mac_addrs[0]);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x "
                     "mac.type=%s",
-                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    ETH_DATA(dev)->dd.port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id, "igb_mac_82576_vf");

        return 0;
@@ -685,7 +685,7 @@ static struct eth_driver rte_igb_pmd = {
                .id_table = pci_id_igb_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
-       .eth_dev_init = eth_igb_dev_init,
+       .dev_init = eth_igb_dev_init,
        .dev_private_size = sizeof(struct e1000_adapter),
 };

@@ -698,7 +698,7 @@ static struct eth_driver rte_igbvf_pmd = {
                .id_table = pci_id_igbvf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
-       .eth_dev_init = eth_igbvf_dev_init,
+       .dev_init = eth_igbvf_dev_init,
        .dev_private_size = sizeof(struct e1000_adapter),
 };

@@ -713,7 +713,7 @@ static void
 igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        /* RCTL: enable VLAN filter since VMDq always use VLAN filter */
        uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl |= E1000_RCTL_VFE;
@@ -738,7 +738,7 @@ static int
 eth_igb_configure(struct rte_eth_dev *dev)
 {
        struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();
        intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
@@ -751,7 +751,7 @@ static int
 eth_igb_start(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret, i, mask;
        uint32_t ctrl_ext;

@@ -814,7 +814,7 @@ eth_igb_start(struct rte_eth_dev *dev)
                        ETH_VLAN_EXTEND_MASK;
        eth_igb_vlan_offload_set(dev, mask);

-       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+       if (ETH_DATA(dev)->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable VLAN filter since VMDq always use VLAN filter */
                igb_vmdq_vlan_hw_filter_enable(dev);
        }
@@ -851,40 +851,40 @@ eth_igb_start(struct rte_eth_dev *dev)
        }

        /* Setup link speed and duplex */
-       switch (dev->data->dev_conf.link_speed) {
+       switch (ETH_DATA(dev)->dev_conf.link_speed) {
        case ETH_LINK_SPEED_AUTONEG:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+               if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+               else if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+               else if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_10:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+               if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+               else if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+               else if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_100:
-               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+               if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
                        hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+               else if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
-               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+               else if (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
                        hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
                else
                        goto error_invalid_config;
                break;
        case ETH_LINK_SPEED_1000:
-               if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
-                               (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
+               if ((ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
+                               (ETH_DATA(dev)->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
                        hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
                else
                        goto error_invalid_config;
@@ -896,7 +896,7 @@ eth_igb_start(struct rte_eth_dev *dev)
        e1000_setup_link(hw);

        /* check if lsc interrupt feature is enabled */
-       if (dev->data->dev_conf.intr_conf.lsc != 0)
+       if (ETH_DATA(dev)->dev_conf.intr_conf.lsc != 0)
                ret = eth_igb_lsc_interrupt_setup(dev);

        /* resume enabled intr since hw reset */
@@ -908,8 +908,8 @@ eth_igb_start(struct rte_eth_dev *dev)

 error_invalid_config:
        PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
-                    dev->data->dev_conf.link_speed,
-                    dev->data->dev_conf.link_duplex, dev->data->port_id);
+                    ETH_DATA(dev)->dev_conf.link_speed,
+                    ETH_DATA(dev)->dev_conf.link_duplex, ETH_DATA(dev)->dd.port_id);
        igb_dev_clear_queues(dev);
        return (-EINVAL);
 }
@@ -923,9 +923,9 @@ error_invalid_config:
 static void
 eth_igb_stop(struct rte_eth_dev *dev)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct rte_eth_link link;
        struct e1000_flex_filter *p_flex;
        struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
@@ -985,7 +985,7 @@ eth_igb_stop(struct rte_eth_dev *dev)
 static void
 eth_igb_close(struct rte_eth_dev *dev)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct rte_eth_link link;

        eth_igb_stop(dev);
@@ -1088,9 +1088,9 @@ igb_hardware_init(struct e1000_hw *hw)
 static void
 eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_hw_stats *stats =
-                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+                       E1000_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);
        int pause_frames;

        if(hw->phy.media_type == e1000_media_type_copper ||
@@ -1226,7 +1226,7 @@ static void
 eth_igb_stats_reset(struct rte_eth_dev *dev)
 {
        struct e1000_hw_stats *hw_stats =
-                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+                       E1000_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);

        /* HW registers are cleared on read */
        eth_igb_stats_get(dev, NULL);
@@ -1238,9 +1238,9 @@ eth_igb_stats_reset(struct rte_eth_dev *dev)
 static void
 eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
-                         E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+                         E1000_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);

        /* Good Rx packets, include VF loopback */
        UPDATE_VF_STAT(E1000_VFGPRC,
@@ -1297,7 +1297,7 @@ static void
 eth_igbvf_stats_reset(struct rte_eth_dev *dev)
 {
        struct e1000_vf_stats *hw_stats = (struct e1000_vf_stats*)
-                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+                       E1000_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);

        /* Sync HW register to the last stats */
        eth_igbvf_stats_get(dev, NULL);
@@ -1311,7 +1311,7 @@ eth_igbvf_stats_reset(struct rte_eth_dev *dev)
 static void
 eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
        dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
@@ -1403,7 +1403,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 static void
 eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
        dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
@@ -1456,7 +1456,7 @@ static int
 eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct rte_eth_link link, old;
        int link_check, count;

@@ -1593,7 +1593,7 @@ static void
 eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
@@ -1605,12 +1605,12 @@ static void
 eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl &= (~E1000_RCTL_UPE);
-       if (dev->data->all_multicast == 1)
+       if (ETH_DATA(dev)->all_multicast == 1)
                rctl |= E1000_RCTL_MPE;
        else
                rctl &= (~E1000_RCTL_MPE);
@@ -1621,7 +1621,7 @@ static void
 eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t rctl;

        rctl = E1000_READ_REG(hw, E1000_RCTL);
@@ -1633,10 +1633,10 @@ static void
 eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t rctl;

-       if (dev->data->promiscuous == 1)
+       if (ETH_DATA(dev)->promiscuous == 1)
                return; /* must remain in all_multicast mode */
        rctl = E1000_READ_REG(hw, E1000_RCTL);
        rctl &= (~E1000_RCTL_MPE);
@@ -1647,9 +1647,9 @@ static int
 eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vfta * shadow_vfta =
-               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;
@@ -1674,7 +1674,7 @@ static void
 eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg = ETHER_TYPE_VLAN ;

        reg |= (tpid << 16);
@@ -1685,7 +1685,7 @@ static void
 igb_vlan_hw_filter_disable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;

        /* Filter Table Disable */
@@ -1699,9 +1699,9 @@ static void
 igb_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vfta * shadow_vfta =
-               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;
        int i;

@@ -1720,7 +1720,7 @@ static void
 igb_vlan_hw_strip_disable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;

        /* VLAN Mode Disable */
@@ -1733,7 +1733,7 @@ static void
 igb_vlan_hw_strip_enable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;

        /* VLAN Mode Enable */
@@ -1746,7 +1746,7 @@ static void
 igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;

        /* CTRL_EXT: Extended VLAN */
@@ -1755,9 +1755,9 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

        /* Update maximum packet length */
-       if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+       if (ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame == 1)
                E1000_WRITE_REG(hw, E1000_RLPML,
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len +
+                       ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len +
                                                VLAN_TAG_SIZE);
 }

@@ -1765,7 +1765,7 @@ static void
 igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;

        /* CTRL_EXT: Extended VLAN */
@@ -1774,9 +1774,9 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

        /* Update maximum packet length */
-       if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+       if (ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame == 1)
                E1000_WRITE_REG(hw, E1000_RLPML,
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len +
+                       ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len +
                                                2 * VLAN_TAG_SIZE);
 }

@@ -1784,21 +1784,21 @@ static void
 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        if(mask & ETH_VLAN_STRIP_MASK){
-               if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip)
                        igb_vlan_hw_strip_enable(dev);
                else
                        igb_vlan_hw_strip_disable(dev);
        }

        if(mask & ETH_VLAN_FILTER_MASK){
-               if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_filter)
                        igb_vlan_hw_filter_enable(dev);
                else
                        igb_vlan_hw_filter_disable(dev);
        }

        if(mask & ETH_VLAN_EXTEND_MASK){
-               if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_extend)
                        igb_vlan_hw_extend_enable(dev);
                else
                        igb_vlan_hw_extend_disable(dev);
@@ -1820,7 +1820,7 @@ static int
 eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
 {
        struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);

        intr->mask |= E1000_ICR_LSC;

@@ -1843,9 +1843,9 @@ eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
 {
        uint32_t icr;
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);

        igb_intr_disable(hw);

@@ -1877,9 +1877,9 @@ static int
 eth_igb_interrupt_action(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);
        uint32_t tctl, rctl;
        struct rte_eth_link link;
        int ret;
@@ -1908,13 +1908,13 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
                if (link.link_status) {
                        PMD_INIT_LOG(INFO,
                                     " Port %d: Link Up - speed %u Mbps - %s",
-                                    dev->data->port_id,
+                                    ETH_DATA(dev)->dd.port_id,
                                     (unsigned)link.link_speed,
                                     link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                     "full-duplex" : "half-duplex");
                } else {
                        PMD_INIT_LOG(INFO, " Port %d: Link Down",
-                                    dev->data->port_id);
+                                    ETH_DATA(dev)->dd.port_id);
                }
                PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
                             dev->pci_dev->addr.domain,
@@ -1935,7 +1935,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
                E1000_WRITE_REG(hw, E1000_TCTL, tctl);
                E1000_WRITE_REG(hw, E1000_RCTL, rctl);
                E1000_WRITE_FLUSH(hw);
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+               _rte_eth_dev_callback_process(dev, RTE_DEV_EVENT_INTR_LSC);
        }

        return 0;
@@ -1967,7 +1967,7 @@ eth_igb_led_on(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
 }

@@ -1976,7 +1976,7 @@ eth_igb_led_off(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
 }

@@ -1988,7 +1988,7 @@ eth_igb_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        int tx_pause;
        int rx_pause;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        fc_conf->pause_time = hw->fc.pause_time;
        fc_conf->high_water = hw->fc.high_water;
        fc_conf->low_water = hw->fc.low_water;
@@ -2037,7 +2037,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        uint32_t max_high_water;
        uint32_t rctl;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        if (fc_conf->autoneg != hw->mac.autoneg)
                return -ENOTSUP;
        rx_buf_size = igb_get_rx_buffer_size(hw);
@@ -2087,7 +2087,7 @@ static void
 eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                uint32_t index, __rte_unused uint32_t pool)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t rah;

        e1000_rar_set(hw, mac_addr->addr_bytes, index);
@@ -2100,7 +2100,7 @@ static void
 eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 {
        uint8_t addr[ETHER_ADDR_LEN];
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        memset(addr, 0, sizeof(addr));

@@ -2127,7 +2127,7 @@ igbvf_stop_adapter(struct rte_eth_dev *dev)
        u32 reg_val;
        u16 i;
        struct rte_eth_dev_info dev_info;
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        memset(&dev_info, 0, sizeof(dev_info));
        eth_igbvf_infos_get(dev, &dev_info);
@@ -2193,10 +2193,10 @@ out:
 static int
 igbvf_dev_configure(struct rte_eth_dev *dev)
 {
-       struct rte_eth_conf* conf = &dev->data->dev_conf;
+       struct rte_eth_conf* conf = &ETH_DATA(dev)->dev_conf;

        PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
-                    dev->data->port_id);
+                    ETH_DATA(dev)->dd.port_id);

        /*
         * VF has no ability to enable/disable HW CRC
@@ -2221,7 +2221,7 @@ static int
 igbvf_dev_start(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret;

        PMD_INIT_FUNC_TRACE();
@@ -2263,7 +2263,7 @@ igbvf_dev_stop(struct rte_eth_dev *dev)
 static void
 igbvf_dev_close(struct rte_eth_dev *dev)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

@@ -2290,9 +2290,9 @@ static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on)
 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vfta * shadow_vfta =
-               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        int i = 0, j = 0, vfta = 0, mask = 1;

        for (i = 0; i < IGB_VFTA_SIZE; i++){
@@ -2314,9 +2314,9 @@ static int
 igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vfta * shadow_vfta =
-               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        uint32_t vid_idx = 0;
        uint32_t vid_bit = 0;
        int ret = 0;
@@ -2349,7 +2349,7 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
        uint8_t i, j, mask;
        uint32_t reta, r;
        uint16_t idx, shift;
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        if (reta_size != ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
@@ -2390,7 +2390,7 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
        uint8_t i, j, mask;
        uint32_t reta;
        uint16_t idx, shift;
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        if (reta_size != ETH_RSS_RETA_SIZE_128) {
                PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
@@ -2429,7 +2429,7 @@ eth_igb_syn_filter_set(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter,
                        bool add)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t synqf, rfctl;

        if (filter->queue >= IGB_MAX_RX_QUEUE_NUM)
@@ -2466,7 +2466,7 @@ static int
 eth_igb_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t synqf, rfctl;

        synqf = E1000_READ_REG(hw, E1000_SYNQF(0));
@@ -2486,7 +2486,7 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);
@@ -2606,9 +2606,9 @@ static int
 igb_add_2tuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct e1000_2tuple_filter *filter;
        uint32_t ttqf = E1000_TTQF_DISABLE_MASK;
        uint32_t imir, imir_ext = E1000_IMIREXT_SIZE_BP;
@@ -2704,9 +2704,9 @@ static int
 igb_remove_2tuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct e1000_2tuple_filter_info filter_2tuple;
        struct e1000_2tuple_filter *filter;
        int ret;
@@ -2754,9 +2754,9 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
                        struct rte_eth_flex_filter *filter,
                        bool add)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct e1000_flex_filter *flex_filter, *it;
        uint32_t wufc, queueing, mask;
        uint32_t reg_off;
@@ -2860,9 +2860,9 @@ static int
 eth_igb_get_flex_filter(struct rte_eth_dev *dev,
                        struct rte_eth_flex_filter *filter)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct e1000_flex_filter flex_filter, *it;
        uint32_t wufc, queueing, wufc_en = 0;

@@ -2907,7 +2907,7 @@ eth_igb_flex_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct rte_eth_flex_filter *filter;
        int ret = 0;

@@ -3068,9 +3068,9 @@ static int
 igb_add_5tuple_filter_82576(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct e1000_5tuple_filter *filter;
        uint32_t ftqf = E1000_FTQF_VF_BP | E1000_FTQF_MASK;
        uint32_t spqf, imir, imir_ext = E1000_IMIREXT_SIZE_BP;
@@ -3179,9 +3179,9 @@ static int
 igb_remove_5tuple_filter_82576(struct rte_eth_dev *dev,
                                struct rte_eth_ntuple_filter *ntuple_filter)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct e1000_5tuple_filter_info filter_5tuple;
        struct e1000_5tuple_filter *filter;
        int ret;
@@ -3222,7 +3222,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        uint32_t frame_size = mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN +
                                     VLAN_TAG_SIZE);

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

 #ifdef RTE_LIBRTE_82571_SUPPORT
        /* XXX: not bigger than max_rx_pktlen */
@@ -3238,27 +3238,27 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)

        /* refuse mtu that requires the support of scattered packets when this
         * feature has not been enabled before. */
-       if (!dev->data->scattered_rx &&
-           frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+       if (!ETH_DATA(dev)->scattered_rx &&
+           frame_size > ETH_DATA(dev)->dd.min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
                return -EINVAL;

        rctl = E1000_READ_REG(hw, E1000_RCTL);

        /* switch to jumbo mode if needed */
        if (frame_size > ETHER_MAX_LEN) {
-               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame = 1;
                rctl |= E1000_RCTL_LPE;
        } else {
-               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame = 0;
                rctl &= ~E1000_RCTL_LPE;
        }
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);

        /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+       ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        E1000_WRITE_REG(hw, E1000_RLPML,
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len);
+                       ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len);

        return 0;
 }
@@ -3280,7 +3280,7 @@ igb_add_del_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter,
                        bool add)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret;

        switch (ntuple_filter->flags) {
@@ -3327,9 +3327,9 @@ static int
 igb_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct e1000_5tuple_filter_info filter_5tuple;
        struct e1000_2tuple_filter_info filter_2tuple;
        struct e1000_5tuple_filter *p_5tuple_filter;
@@ -3395,7 +3395,7 @@ igb_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);
@@ -3479,9 +3479,9 @@ igb_add_del_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter,
                        bool add)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        uint32_t etqf = 0;
        int ret;

@@ -3539,9 +3539,9 @@ static int
 igb_get_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_filter_info *filter_info =
-               E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        uint32_t etqf;
        int ret;

@@ -3575,7 +3575,7 @@ igb_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);
diff --git a/lib/librte_pmd_e1000/igb_pf.c b/lib/librte_pmd_e1000/igb_pf.c
index bc3816a..70e629e 100644
--- a/lib/librte_pmd_e1000/igb_pf.c
+++ b/lib/librte_pmd_e1000/igb_pf.c
@@ -55,9 +55,9 @@
 #include "e1000_ethdev.h"

 static inline uint16_t
-dev_num_vf(struct rte_eth_dev *eth_dev)
+dev_num_vf(struct rte_eth_dev *dev)
 {
-       return eth_dev->pci_dev->max_vfs;
+       return dev->pci_dev->max_vfs;
 }

 static inline
@@ -65,7 +65,7 @@ int igb_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
 {
        unsigned char vf_mac_addr[ETHER_ADDR_LEN];
        struct e1000_vf_info *vfinfo =
-               *E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+               *E1000_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private);
        uint16_t vfn;

        for (vfn = 0; vfn < vf_num; vfn++) {
@@ -82,24 +82,24 @@ static inline int
 igb_mb_intr_setup(struct rte_eth_dev *dev)
 {
        struct e1000_interrupt *intr =
-               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);

        intr->mask |= E1000_ICR_VMMB;

        return 0;
 }

-void igb_pf_host_init(struct rte_eth_dev *eth_dev)
+void igb_pf_host_init(struct rte_eth_dev *dev)
 {
        struct e1000_vf_info **vfinfo =
-               E1000_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private);
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint16_t vf_num;
        uint8_t nb_queue;

-       RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
-       if (0 == (vf_num = dev_num_vf(eth_dev)))
+       ETH_SRIOV(dev).active = 0;
+       if (0 == (vf_num = dev_num_vf(dev)))
                return;

        if (hw->mac.type == e1000_i350)
@@ -114,37 +114,37 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
        if (*vfinfo == NULL)
                rte_panic("Cannot allocate memory for private VF data\n");

-       RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_8_POOLS;
-       RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
-       RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
-       RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);
+       ETH_SRIOV(dev).active = ETH_8_POOLS;
+       ETH_SRIOV(dev).nb_q_per_pool = nb_queue;
+       ETH_SRIOV(dev).def_vmdq_idx = vf_num;
+       ETH_SRIOV(dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);

-       igb_vf_perm_addr_gen(eth_dev, vf_num);
+       igb_vf_perm_addr_gen(dev, vf_num);

        /* set mb interrupt mask */
-       igb_mb_intr_setup(eth_dev);
+       igb_mb_intr_setup(dev);

        return;
 }

 #define E1000_RAH_POOLSEL_SHIFT    (18)
-int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
+int igb_pf_host_configure(struct rte_eth_dev *dev)
 {
        uint32_t vtctl;
        uint16_t vf_num;
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t vlanctrl;
        int i;
        uint32_t rah;

-       if (0 == (vf_num = dev_num_vf(eth_dev)))
+       if (0 == (vf_num = dev_num_vf(dev)))
                return -1;

        /* enable VMDq and set the default pool for PF */
        vtctl = E1000_READ_REG(hw, E1000_VT_CTL);
        vtctl &= ~E1000_VT_CTL_DEFAULT_POOL_MASK;
-       vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
+       vtctl |= ETH_SRIOV(dev).def_vmdq_idx
                << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
        vtctl |= E1000_VT_CTL_VM_REPL_EN;
        E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
@@ -171,7 +171,7 @@ int igb_pf_host_configure(struct rte_eth_dev *eth_dev)

        /* set VMDq map to default PF pool */
        rah = E1000_READ_REG(hw, E1000_RAH(0));
-       rah |= (0x1 << (RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx +
+       rah |= (0x1 << (ETH_SRIOV(dev).def_vmdq_idx +
                        E1000_RAH_POOLSEL_SHIFT));
        E1000_WRITE_REG(hw, E1000_RAH(0), rah);

@@ -196,9 +196,7 @@ int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
 static void
 set_rx_mode(struct rte_eth_dev *dev)
 {
-       struct rte_eth_dev_data *dev_data =
-               (struct rte_eth_dev_data*)dev->data->dev_private;
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t fctrl, vmolr = E1000_VMOLR_BAM | E1000_VMOLR_AUPE;
        uint16_t vfn = dev_num_vf(dev);

@@ -212,11 +210,11 @@ set_rx_mode(struct rte_eth_dev *dev)
        /* clear the bits we are changing the status of */
        fctrl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);

-       if (dev_data->promiscuous) {
+       if (ETH_DATA(dev)->promiscuous) {
                fctrl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
                vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
        } else {
-               if (dev_data->all_multicast) {
+               if (ETH_DATA(dev)->all_multicast) {
                        fctrl |= E1000_RCTL_MPE;
                        vmolr |= E1000_VMOLR_MPME;
                } else {
@@ -239,9 +237,9 @@ static inline void
 igb_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vf_info *vfinfo =
-               *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+               *(E1000_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));
        uint32_t vmolr = E1000_READ_REG(hw, E1000_VMOLR(vf));

        vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE |
@@ -260,7 +258,7 @@ igb_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
 static inline void
 igb_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;

        /* enable transmit and receive for vf */
@@ -278,9 +276,9 @@ igb_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
 static int
 igb_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vf_info *vfinfo =
-               *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+               *(E1000_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));
        unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
        int rar_entry = hw->mac.rar_entry_count - (vf + 1);
        uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
@@ -304,9 +302,9 @@ igb_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
 static int
 igb_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vf_info *vfinfo =
-               *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+               *(E1000_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));
        int rar_entry = hw->mac.rar_entry_count - (vf + 1);
        uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

@@ -328,9 +326,9 @@ igb_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t
        int entries = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >>
                E1000_VT_MSGINFO_SHIFT;
        uint16_t *hash_list = (uint16_t *)&msgbuf[1];
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vf_info *vfinfo =
-               *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+               *(E1000_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));

        /* only so many hash values supported */
        entries = RTE_MIN(entries, E1000_MAX_VF_MC_ENTRIES);
@@ -365,9 +363,9 @@ static int
 igb_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
        int add, vid;
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct e1000_vf_info *vfinfo =
-               *(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+               *(E1000_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));
        uint32_t vid_idx, vid_bit, vfta;

        add = (msgbuf[0] & E1000_VT_MSGINFO_MASK)
@@ -400,7 +398,7 @@ igb_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
        uint16_t mbx_size = E1000_VFMAILBOX_SIZE;
        uint32_t msgbuf[E1000_VFMAILBOX_SIZE];
        int32_t retval;
-       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        retval = e1000_read_mbx(hw, msgbuf, mbx_size, vf);
        if (retval) {
@@ -456,28 +454,28 @@ igb_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 {
        uint32_t msg = E1000_VT_MSGTYPE_NACK;
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        e1000_write_mbx(hw, &msg, 1, vf);
 }

-void igb_pf_mbx_process(struct rte_eth_dev *eth_dev)
+void igb_pf_mbx_process(struct rte_eth_dev *dev)
 {
        uint16_t vf;
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
+       for (vf = 0; vf < dev_num_vf(dev); vf++) {
                /* check & process vf function level reset */
                if (!e1000_check_for_rst(hw, vf))
-                       igb_vf_reset_event(eth_dev, vf);
+                       igb_vf_reset_event(dev, vf);

                /* check & process vf mailbox messages */
                if (!e1000_check_for_msg(hw, vf))
-                       igb_rcv_msg_from_vf(eth_dev, vf);
+                       igb_rcv_msg_from_vf(dev, vf);

                /* check & process acks from vf */
                if (!e1000_check_for_ack(hw, vf))
-                       igb_rcv_ack_from_vf(eth_dev, vf);
+                       igb_rcv_ack_from_vf(dev, vf);
        }
 }
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 946b39d..f99658e 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -728,7 +728,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                                   "queue_id=%u", (unsigned) rxq->port_id,
                                   (unsigned) rxq->queue_id);
-                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       ETH_DATA(&rte_eth_devices[rxq->port_id])->dd.rx_mbuf_alloc_failed++;
                        break;
                }

@@ -909,7 +909,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                                   "queue_id=%u", (unsigned) rxq->port_id,
                                   (unsigned) rxq->queue_id);
-                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       ETH_DATA(&rte_eth_devices[rxq->port_id])->dd.rx_mbuf_alloc_failed++;
                        break;
                }

@@ -1106,7 +1106,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,

        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                        dev->driver->pci_drv.name, ring_name,
-                               dev->data->port_id, queue_id);
+                               ETH_DATA(dev)->dd.port_id, queue_id);
        mz = rte_memzone_lookup(z_name);
        if (mz)
                return mz;
@@ -1170,7 +1170,7 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
        uint16_t i, prev;
        struct e1000_hw *hw;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        /* Zero out HW ring memory */
        for (i = 0; i < txq->nb_tx_desc; i++) {
                txq->tx_ring[i] = zeroed_desc;
@@ -1208,7 +1208,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
        struct e1000_hw     *hw;
        uint32_t size;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Validate number of transmit descriptors.
@@ -1236,9 +1236,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
                             "or 16.");

        /* Free memory prior to re-allocation if needed */
-       if (dev->data->tx_queues[queue_idx] != NULL) {
-               igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
-               dev->data->tx_queues[queue_idx] = NULL;
+       if (ETH_DATA(dev)->dd.tx_queues[queue_idx] != NULL) {
+               igb_tx_queue_release(ETH_DATA(dev)->dd.tx_queues[queue_idx]);
+               ETH_DATA(dev)->dd.tx_queues[queue_idx] = NULL;
        }

        /* First allocate the tx queue data structure */
@@ -1267,9 +1267,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
        if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
                txq->wthresh = 1;
        txq->queue_id = queue_idx;
-       txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
-               queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
-       txq->port_id = dev->data->port_id;
+       txq->reg_idx = (uint16_t)((ETH_SRIOV(dev).active == 0) ?
+               queue_idx : ETH_SRIOV(dev).def_pool_q_idx + queue_idx);
+       txq->port_id = ETH_DATA(dev)->dd.port_id;

        txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
 #ifndef RTE_LIBRTE_XEN_DOM0
@@ -1291,7 +1291,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,

        igb_reset_tx_queue(txq, dev);
        dev->tx_pkt_burst = eth_igb_xmit_pkts;
-       dev->data->tx_queues[queue_idx] = txq;
+       ETH_DATA(dev)->dd.tx_queues[queue_idx] = txq;

        return (0);
 }
@@ -1357,7 +1357,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
        struct e1000_hw     *hw;
        unsigned int size;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Validate number of receive descriptors.
@@ -1370,9 +1370,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
        }

        /* Free memory prior to re-allocation if needed */
-       if (dev->data->rx_queues[queue_idx] != NULL) {
-               igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
-               dev->data->rx_queues[queue_idx] = NULL;
+       if (ETH_DATA(dev)->dd.rx_queues[queue_idx] != NULL) {
+               igb_rx_queue_release(ETH_DATA(dev)->dd.rx_queues[queue_idx]);
+               ETH_DATA(dev)->dd.rx_queues[queue_idx] = NULL;
        }

        /* First allocate the RX queue data structure. */
@@ -1390,10 +1390,10 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->drop_en = rx_conf->rx_drop_en;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->queue_id = queue_idx;
-       rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
-               queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
-       rxq->port_id = dev->data->port_id;
-       rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
+       rxq->reg_idx = (uint16_t)((ETH_SRIOV(dev).active == 0) ?
+               queue_idx : ETH_SRIOV(dev).def_pool_q_idx + queue_idx);
+       rxq->port_id = ETH_DATA(dev)->dd.port_id;
+       rxq->crc_len = (uint8_t) ((ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc) ? 0 :
                                  ETHER_CRC_LEN);

        /*
@@ -1427,7 +1427,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);

-       dev->data->rx_queues[queue_idx] = rxq;
+       ETH_DATA(dev)->dd.rx_queues[queue_idx] = rxq;
        igb_reset_rx_queue(rxq);

        return 0;
@@ -1441,12 +1441,12 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct igb_rx_queue *rxq;
        uint32_t desc = 0;

-       if (rx_queue_id >= dev->data->nb_rx_queues) {
+       if (rx_queue_id >= ETH_DATA(dev)->dd.nb_rx_queues) {
                PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
                return 0;
        }

-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);

        while ((desc < rxq->nb_rx_desc) &&
@@ -1485,16 +1485,16 @@ igb_dev_clear_queues(struct rte_eth_dev *dev)
        struct igb_tx_queue *txq;
        struct igb_rx_queue *rxq;

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
+               txq = ETH_DATA(dev)->dd.tx_queues[i];
                if (txq != NULL) {
                        igb_tx_queue_release_mbufs(txq);
                        igb_reset_tx_queue(txq, dev);
                }
        }

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];
                if (rxq != NULL) {
                        igb_rx_queue_release_mbufs(rxq);
                        igb_reset_rx_queue(rxq);
@@ -1538,7 +1538,7 @@ igb_rss_disable(struct rte_eth_dev *dev)
        struct e1000_hw *hw;
        uint32_t mrqc;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        mrqc = E1000_READ_REG(hw, E1000_MRQC);
        mrqc &= ~E1000_MRQC_ENABLE_MASK;
        E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
@@ -1597,7 +1597,7 @@ eth_igb_rss_hash_update(struct rte_eth_dev *dev,
        uint32_t mrqc;
        uint64_t rss_hf;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Before changing anything, first check that the update RSS operation
@@ -1629,7 +1629,7 @@ int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
        uint64_t rss_hf;
        uint16_t i;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        hash_key = rss_conf->rss_key;
        if (hash_key != NULL) {
                /* Return RSS hash key */
@@ -1679,7 +1679,7 @@ igb_rss_configure(struct rte_eth_dev *dev)
        uint32_t shift;
        uint16_t i;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /* Fill in redirection table. */
        shift = (hw->mac.type == e1000_82575) ? 6 : 0;
@@ -1690,8 +1690,8 @@ igb_rss_configure(struct rte_eth_dev *dev)
                } reta;
                uint8_t q_idx;

-               q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
-                                  i % dev->data->nb_rx_queues : 0);
+               q_idx = (uint8_t) ((ETH_DATA(dev)->dd.nb_rx_queues > 1) ?
+                                  i % ETH_DATA(dev)->dd.nb_rx_queues : 0);
                reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
                if ((i & 3) == 3)
                        E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
@@ -1701,7 +1701,7 @@ igb_rss_configure(struct rte_eth_dev *dev)
         * Configure the RSS key and the RSS protocols used to compute
         * the RSS hash of input packets.
         */
-       rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+       rss_conf = ETH_DATA(dev)->dev_conf.rx_adv_conf.rss_conf;
        if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
                igb_rss_disable(dev);
                return;
@@ -1718,7 +1718,7 @@ igb_rss_configure(struct rte_eth_dev *dev)
 static int
 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
 {
-       const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        switch (hw->mac.type) {
        case e1000_82576:
@@ -1756,8 +1756,8 @@ igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)

        PMD_INIT_FUNC_TRACE();

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       cfg = &ETH_DATA(dev)->dev_conf.rx_adv_conf.vmdq_rx_conf;

        /* Check if mac type can support VMDq, return value of 0 means NOT support */
        if (igb_is_vmdq_supported(dev) == 0)
@@ -1883,10 +1883,10 @@ static int
 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 {
        struct e1000_hw *hw =
-               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t mrqc;

-       if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
+       if (ETH_SRIOV(dev).active == ETH_8_POOLS) {
                /*
                 * SRIOV active scheme
                 * FIXME if support RSS together with VMDq & SRIOV
@@ -1895,11 +1895,11 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
                /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
                mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
                E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
-       } else if(RTE_ETH_DEV_SRIOV(dev).active == 0) {
+       } else if(ETH_SRIOV(dev).active == 0) {
                /*
                 * SRIOV inactive scheme
                 */
-               switch (dev->data->dev_conf.rxmode.mq_mode) {
+               switch (ETH_DATA(dev)->dev_conf.rxmode.mq_mode) {
                        case ETH_MQ_RX_RSS:
                                igb_rss_configure(dev);
                                break;
@@ -1932,7 +1932,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
        uint16_t i;
        int ret;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        srrctl = 0;

        /*
@@ -1945,7 +1945,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
        /*
         * Configure support of jumbo frames, if any.
         */
-       if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+       if (ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame == 1) {
                rctl |= E1000_RCTL_LPE;

                /*
@@ -1953,7 +1953,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                 * together with enabling/disabling dual VLAN.
                 */
                E1000_WRITE_REG(hw, E1000_RLPML,
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len +
+                       ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len +
                                                VLAN_TAG_SIZE);
        } else
                rctl &= ~E1000_RCTL_LPE;
@@ -1961,11 +1961,11 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
        /* Configure and enable each RX queue. */
        rctl_bsize = 0;
        dev->rx_pkt_burst = eth_igb_recv_pkts;
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
                uint64_t bus_addr;
                uint32_t rxdctl;

-               rxq = dev->data->rx_queues[i];
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];

                /* Allocate buffers for descriptor rings and set up queue */
                ret = igb_alloc_rx_queue_mbufs(rxq);
@@ -1977,7 +1977,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                 *  call to configure
                 */
                rxq->crc_len =
-                       (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
+                       (uint8_t)(ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc ?
                                                        0 : ETHER_CRC_LEN);

                bus_addr = rxq->rx_ring_phys_addr;
@@ -2011,13 +2011,13 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                                               E1000_SRRCTL_BSIZEPKT_SHIFT);

                        /* It adds dual VLAN length for supporting dual VLAN */
-                       if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+                       if ((ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len +
                                                2 * VLAN_TAG_SIZE) > buf_size){
-                               if (!dev->data->scattered_rx)
+                               if (!ETH_DATA(dev)->scattered_rx)
                                        PMD_INIT_LOG(DEBUG,
                                                     "forcing scatter mode");
                                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
-                               dev->data->scattered_rx = 1;
+                               ETH_DATA(dev)->scattered_rx = 1;
                        }
                } else {
                        /*
@@ -2025,10 +2025,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                         */
                        if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                                rctl_bsize = buf_size;
-                       if (!dev->data->scattered_rx)
+                       if (!ETH_DATA(dev)->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
-                       dev->data->scattered_rx = 1;
+                       ETH_DATA(dev)->scattered_rx = 1;
                }

                /* Set if packets are dropped when no descriptors available */
@@ -2047,11 +2047,11 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
        }

-       if (dev->data->dev_conf.rxmode.enable_scatter) {
-               if (!dev->data->scattered_rx)
+       if (ETH_DATA(dev)->dev_conf.rxmode.enable_scatter) {
+               if (!ETH_DATA(dev)->scattered_rx)
                        PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
-               dev->data->scattered_rx = 1;
+               ETH_DATA(dev)->scattered_rx = 1;
        }

        /*
@@ -2091,14 +2091,14 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
        rxcsum |= E1000_RXCSUM_PCSD;

        /* Enable both L3/L4 rx checksum offload */
-       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_ip_checksum)
                rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
        else
                rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
        E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);

        /* Setup the Receive Control Register. */
-       if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc) {
                rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

                /* set STRCRC bit in all queues */
@@ -2106,8 +2106,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                    hw->mac.type == e1000_i210 ||
                    hw->mac.type == e1000_i211 ||
                    hw->mac.type == e1000_i354) {
-                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                               rxq = dev->data->rx_queues[i];
+                       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+                               rxq = ETH_DATA(dev)->dd.rx_queues[i];
                                uint32_t dvmolr = E1000_READ_REG(hw,
                                        E1000_DVMOLR(rxq->reg_idx));
                                dvmolr |= E1000_DVMOLR_STRCRC;
@@ -2122,8 +2122,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                    hw->mac.type == e1000_i210 ||
                    hw->mac.type == e1000_i211 ||
                    hw->mac.type == e1000_i354) {
-                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-                               rxq = dev->data->rx_queues[i];
+                       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+                               rxq = ETH_DATA(dev)->dd.rx_queues[i];
                                uint32_t dvmolr = E1000_READ_REG(hw,
                                        E1000_DVMOLR(rxq->reg_idx));
                                dvmolr &= ~E1000_DVMOLR_STRCRC;
@@ -2138,7 +2138,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

        /* Make sure VLAN Filters are off. */
-       if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
+       if (ETH_DATA(dev)->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
                rctl &= ~E1000_RCTL_VFE;
        /* Don't store bad packets. */
        rctl &= ~E1000_RCTL_SBP;
@@ -2150,8 +2150,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
         * Setup the HW Rx Head and Tail Descriptor Pointers.
         * This needs to be done after enable.
         */
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];
                E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
                E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
        }
@@ -2173,12 +2173,12 @@ eth_igb_tx_init(struct rte_eth_dev *dev)
        uint32_t txdctl;
        uint16_t i;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /* Setup the Base and Length of the Tx Descriptor Rings. */
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
                uint64_t bus_addr;
-               txq = dev->data->tx_queues[i];
+               txq = ETH_DATA(dev)->dd.tx_queues[i];
                bus_addr = txq->tx_ring_phys_addr;

                E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
@@ -2230,21 +2230,21 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
        uint16_t i;
        int ret;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /* setup MTU */
        e1000_rlpml_set_vf(hw,
-               (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
+               (uint16_t)(ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len +
                VLAN_TAG_SIZE));

        /* Configure and enable each RX queue. */
        rctl_bsize = 0;
        dev->rx_pkt_burst = eth_igb_recv_pkts;
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
                uint64_t bus_addr;
                uint32_t rxdctl;

-               rxq = dev->data->rx_queues[i];
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];

                /* Allocate buffers for descriptor rings and set up queue */
                ret = igb_alloc_rx_queue_mbufs(rxq);
@@ -2282,13 +2282,13 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                                               E1000_SRRCTL_BSIZEPKT_SHIFT);

                        /* It adds dual VLAN length for supporting dual VLAN */
-                       if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+                       if ((ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len +
                                                2 * VLAN_TAG_SIZE) > buf_size){
-                               if (!dev->data->scattered_rx)
+                               if (!ETH_DATA(dev)->scattered_rx)
                                        PMD_INIT_LOG(DEBUG,
                                                     "forcing scatter mode");
                                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
-                               dev->data->scattered_rx = 1;
+                               ETH_DATA(dev)->scattered_rx = 1;
                        }
                } else {
                        /*
@@ -2296,10 +2296,10 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                         */
                        if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                                rctl_bsize = buf_size;
-                       if (!dev->data->scattered_rx)
+                       if (!ETH_DATA(dev)->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
-                       dev->data->scattered_rx = 1;
+                       ETH_DATA(dev)->scattered_rx = 1;
                }

                /* Set if packets are dropped when no descriptors available */
@@ -2328,19 +2328,19 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
        }

-       if (dev->data->dev_conf.rxmode.enable_scatter) {
-               if (!dev->data->scattered_rx)
+       if (ETH_DATA(dev)->dev_conf.rxmode.enable_scatter) {
+               if (!ETH_DATA(dev)->scattered_rx)
                        PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
-               dev->data->scattered_rx = 1;
+               ETH_DATA(dev)->scattered_rx = 1;
        }

        /*
         * Setup the HW Rx Head and Tail Descriptor Pointers.
         * This needs to be done after enable.
         */
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];
                E1000_WRITE_REG(hw, E1000_RDH(i), 0);
                E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
        }
@@ -2361,13 +2361,13 @@ eth_igbvf_tx_init(struct rte_eth_dev *dev)
        uint32_t txdctl;
        uint16_t i;

-       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = E1000_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /* Setup the Base and Length of the Tx Descriptor Rings. */
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
                uint64_t bus_addr;

-               txq = dev->data->tx_queues[i];
+               txq = ETH_DATA(dev)->dd.tx_queues[i];
                bus_addr = txq->tx_ring_phys_addr;
                E1000_WRITE_REG(hw, E1000_TDLEN(i),
                                txq->nb_tx_desc *
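
Note: every hunk in this patch is the same mechanical substitution. As a rough
sketch only -- inferred from the usages converted below, not from the
authoritative ethdev header changes earlier in this series -- the layout being
assumed is:

	/* Hypothetical sketch for review orientation; field placement is
	 * inferred from the accesses this patch converts. */
	struct rte_dev_data {			/* generic, class-agnostic part */
		void	**rx_queues;
		void	**tx_queues;
		uint16_t  nb_rx_queues;
		uint16_t  nb_tx_queues;
		void	 *dev_private;		/* PMD private data */
		uint8_t	  port_id;
		uint64_t  rx_mbuf_alloc_failed;
	};

	struct rte_eth_dev_data {
		struct rte_dev_data dd;		/* embedded generic data */
		struct rte_eth_conf dev_conf;	/* ethdev-specific data */
		struct rte_eth_link dev_link;
		struct ether_addr  *mac_addrs;
		uint8_t scattered_rx;
		uint8_t promiscuous;
		/* ... */
	};

	#define ETH_DATA(dev)	((struct rte_eth_dev_data *)((dev)->data))

So dev->data->X becomes ETH_DATA(dev)->X for ethdev-only fields, and
ETH_DATA(dev)->dd.X for fields moved into the generic struct.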
diff --git a/lib/librte_pmd_enic/enic.h b/lib/librte_pmd_enic/enic.h
index a50bff1..594592d 100644
--- a/lib/librte_pmd_enic/enic.h
+++ b/lib/librte_pmd_enic/enic.h
@@ -149,9 +149,9 @@ static inline unsigned int enic_msix_err_intr(__rte_unused struct enic *enic)
        return 0;
 }

-static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
+static inline struct enic *pmd_priv(struct rte_eth_dev *dev)
 {
-       return (struct enic *)eth_dev->data->dev_private;
+       return (struct enic *)(ETH_DATA(dev)->dd.dev_private);
 }

 extern int enic_fdir_add_fltr(struct enic *enic,
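
Note: drivers that wrap private-data access in a helper, as enic does with
pmd_priv() above, need only that one-line change; the call sites below are
just the eth_dev -> dev parameter rename. A minimal sketch of the equivalence
(local names for illustration only):

	struct enic *enic = pmd_priv(dev);	/* unchanged at call sites */
	/* open-coded form after this patch: */
	struct enic *enic2 = (struct enic *)ETH_DATA(dev)->dd.dev_private;
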
diff --git a/lib/librte_pmd_enic/enic_ethdev.c b/lib/librte_pmd_enic/enic_ethdev.c
index 4950ede..dc770fe 100644
--- a/lib/librte_pmd_enic/enic_ethdev.c
+++ b/lib/librte_pmd_enic/enic_ethdev.c
@@ -65,30 +65,30 @@ RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)
 {.vendor_id = 0, /* Sentinal */},
 };

-static int enicpmd_fdir_remove_perfect_filter(struct rte_eth_dev *eth_dev,
+static int enicpmd_fdir_remove_perfect_filter(struct rte_eth_dev *dev,
                struct rte_fdir_filter *fdir_filter,
                __rte_unused uint16_t soft_id)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        return enic_fdir_del_fltr(enic, fdir_filter);
 }

-static int enicpmd_fdir_add_perfect_filter(struct rte_eth_dev *eth_dev,
+static int enicpmd_fdir_add_perfect_filter(struct rte_eth_dev *dev,
        struct rte_fdir_filter *fdir_filter, __rte_unused uint16_t soft_id,
        uint8_t queue, uint8_t drop)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        return enic_fdir_add_fltr(enic, fdir_filter, (uint16_t)queue, drop);
 }

-static void enicpmd_fdir_info_get(struct rte_eth_dev *eth_dev,
+static void enicpmd_fdir_info_get(struct rte_eth_dev *dev,
        struct rte_eth_fdir *fdir)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        *fdir = enic->fdir.stats;
@@ -130,17 +130,17 @@ static int enicpmd_dev_setup_intr(struct enic *enic)
        return ret;
 }

-static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
        __rte_unused const struct rte_eth_txconf *tx_conf)
 {
        int ret;
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
-       eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
+       ETH_DATA(dev)->dd.tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];

        ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
        if (ret) {
@@ -151,10 +151,10 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        return enicpmd_dev_setup_intr(enic);
 }

-static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
+static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *dev,
        uint16_t queue_idx)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();

@@ -163,11 +163,11 @@ static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
        return 0;
 }

-static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
+static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *dev,
        uint16_t queue_idx)
 {
        int ret;
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();

@@ -178,10 +178,10 @@ static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
        return ret;
 }

-static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
+static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *dev,
        uint16_t queue_idx)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();

@@ -190,11 +190,11 @@ static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
        return 0;
 }

-static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
+static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *dev,
        uint16_t queue_idx)
 {
        int ret;
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();

@@ -211,7 +211,7 @@ static void enicpmd_dev_rx_queue_release(void *rxq)
        enic_free_rq(rxq);
 }

-static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
@@ -219,10 +219,10 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        struct rte_mempool *mp)
 {
        int ret;
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
-       eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];
+       ETH_DATA(dev)->dd.rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];

        ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
        if (ret) {
@@ -233,10 +233,10 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        return enicpmd_dev_setup_intr(enic);
 }

-static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
+static int enicpmd_vlan_filter_set(struct rte_eth_dev *dev,
        uint16_t vlan_id, int on)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        if (on)
@@ -246,14 +246,14 @@ static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
        return 0;
 }

-static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+static void enicpmd_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();

        if (mask & ETH_VLAN_STRIP_MASK) {
-               if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip)
                        enic->ig_vlan_strip_en = 1;
                else
                        enic->ig_vlan_strip_en = 0;
@@ -272,10 +272,10 @@ static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
        }
 }

-static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
+static int enicpmd_dev_configure(struct rte_eth_dev *dev)
 {
        int ret;
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        ret = enic_set_vnic_res(enic);
@@ -284,23 +284,23 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
                return ret;
        }

-       if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
-               eth_dev->data->dev_conf.rxmode.header_split) {
+       if (ETH_DATA(dev)->dev_conf.rxmode.split_hdr_size &&
+               ETH_DATA(dev)->dev_conf.rxmode.header_split) {
                /* Enable header-data-split */
                enic_set_hdr_split_size(enic,
-                       eth_dev->data->dev_conf.rxmode.split_hdr_size);
+                       ETH_DATA(dev)->dev_conf.rxmode.split_hdr_size);
        }

-       enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
+       enic->hw_ip_checksum = ETH_DATA(dev)->dev_conf.rxmode.hw_ip_checksum;
        return 0;
 }

 /* Start the device.
  * It returns 0 on success.
  */
-static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
+static int enicpmd_dev_start(struct rte_eth_dev *dev)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        return enic_enable(enic);
@@ -309,34 +309,34 @@ static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
 /*
  * Stop device: disable rx and tx functions to allow for reconfiguring.
  */
-static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
+static void enicpmd_dev_stop(struct rte_eth_dev *dev)
 {
        struct rte_eth_link link;
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        enic_disable(enic);
        memset(&link, 0, sizeof(link));
-       rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
-               *(uint64_t *)&eth_dev->data->dev_link,
+       rte_atomic64_cmpset((uint64_t *)&ETH_DATA(dev)->dev_link,
+               *(uint64_t *)&ETH_DATA(dev)->dev_link,
                *(uint64_t *)&link);
 }

 /*
  * Stop device.
  */
-static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
+static void enicpmd_dev_close(struct rte_eth_dev *dev)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        enic_remove(enic);
 }

-static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
+static int enicpmd_dev_link_update(struct rte_eth_dev *dev,
        __rte_unused int wait_to_complete)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);
        int ret;
        int link_status = 0;

@@ -344,33 +344,33 @@ static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
        link_status = enic_get_link_status(enic);
        ret = (link_status == enic->link_status);
        enic->link_status = link_status;
-       eth_dev->data->dev_link.link_status = link_status;
-       eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
+       ETH_DATA(dev)->dev_link.link_status = link_status;
+       ETH_DATA(dev)->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       ETH_DATA(dev)->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
        return ret;
 }

-static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
+static void enicpmd_dev_stats_get(struct rte_eth_dev *dev,
        struct rte_eth_stats *stats)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        enic_dev_stats_get(enic, stats);
 }

-static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
+static void enicpmd_dev_stats_reset(struct rte_eth_dev *dev)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        enic_dev_stats_clear(enic);
 }

-static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
+static void enicpmd_dev_info_get(struct rte_eth_dev *dev,
        struct rte_eth_dev_info *device_info)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        device_info->max_rx_queues = enic->rq_count;
@@ -390,55 +390,55 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
                DEV_TX_OFFLOAD_TCP_CKSUM;
 }

-static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        enic->promisc = 1;
        enic_add_packet_filter(enic);
 }

-static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        enic->promisc = 0;
        enic_add_packet_filter(enic);
 }

-static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        enic->allmulti = 1;
        enic_add_packet_filter(enic);
 }

-static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        enic->allmulti = 0;
        enic_add_packet_filter(enic);
 }

-static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
+static void enicpmd_add_mac_addr(struct rte_eth_dev *dev,
        struct ether_addr *mac_addr,
        __rte_unused uint32_t index, __rte_unused uint32_t pool)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        enic_set_mac_address(enic, mac_addr->addr_bytes);
 }

-static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused uint32_t index)
+static void enicpmd_remove_mac_addr(struct rte_eth_dev *dev, __rte_unused uint32_t index)
 {
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();
        enic_del_mac_address(enic);
@@ -555,20 +555,20 @@ struct enic *enicpmd_list_head = NULL;
 /* Initialize the driver
  * It returns 0 on success.
  */
-static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
+static int eth_enicpmd_dev_init(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pdev;
        struct rte_pci_addr *addr;
-       struct enic *enic = pmd_priv(eth_dev);
+       struct enic *enic = pmd_priv(dev);

        ENICPMD_FUNC_TRACE();

-       enic->rte_dev = eth_dev;
-       eth_dev->dev_ops = &enicpmd_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
-       eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
+       enic->rte_dev = dev;
+       dev->dev_ops = &enicpmd_eth_dev_ops;
+       dev->rx_pkt_burst = &enicpmd_recv_pkts;
+       dev->tx_pkt_burst = &enicpmd_xmit_pkts;

-       pdev = eth_dev->pci_dev;
+       pdev = dev->pci_dev;
        enic->pdev = pdev;
        addr = &pdev->addr;

@@ -584,7 +584,7 @@ static struct eth_driver rte_enic_pmd = {
                .id_table = pci_id_enic_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
-       .eth_dev_init = eth_enicpmd_dev_init,
+       .dev_init = eth_enicpmd_dev_init,
        .dev_private_size = sizeof(struct enic),
 };
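
Note: the struct eth_driver registration changes the same way in every PMD in
this patch: the init hook loses its ethdev-specific name. A sketch with a
hypothetical driver (the xyz names are invented for illustration):

	static struct eth_driver rte_xyz_pmd = {
		.pci_drv = {
			.id_table  = pci_id_xyz_map,
			.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		},
		.dev_init	  = eth_xyz_dev_init,	/* was .eth_dev_init */
		.dev_private_size = sizeof(struct xyz_adapter),
	};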

diff --git a/lib/librte_pmd_enic/enic_main.c b/lib/librte_pmd_enic/enic_main.c
index 0892b3e..551517c 100644
--- a/lib/librte_pmd_enic/enic_main.c
+++ b/lib/librte_pmd_enic/enic_main.c
@@ -574,10 +574,10 @@ enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
 int enic_enable(struct enic *enic)
 {
        unsigned int index;
-       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       struct rte_eth_dev *dev = enic->rte_dev;

-       eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
-       eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       ETH_DATA(dev)->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
+       ETH_DATA(dev)->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
        vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

        if (enic_clsf_init(enic))
@@ -981,25 +981,25 @@ int enic_get_link_status(struct enic *enic)

 static void enic_dev_deinit(struct enic *enic)
 {
-       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       struct rte_eth_dev *dev = enic->rte_dev;

-       if (eth_dev->data->mac_addrs)
-               rte_free(eth_dev->data->mac_addrs);
+       if (ETH_DATA(dev)->mac_addrs)
+               rte_free(ETH_DATA(dev)->mac_addrs);
 }


 int enic_set_vnic_res(struct enic *enic)
 {
-       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       struct rte_eth_dev *dev = enic->rte_dev;

-       if ((enic->rq_count < eth_dev->data->nb_rx_queues) ||
-               (enic->wq_count < eth_dev->data->nb_tx_queues)) {
+       if ((enic->rq_count < ETH_DATA(dev)->dd.nb_rx_queues) ||
+               (enic->wq_count < ETH_DATA(dev)->dd.nb_tx_queues)) {
                dev_err(dev, "Not enough resources configured, aborting\n");
                return -1;
        }

-       enic->rq_count = eth_dev->data->nb_rx_queues;
-       enic->wq_count = eth_dev->data->nb_tx_queues;
+       enic->rq_count = ETH_DATA(dev)->dd.nb_rx_queues;
+       enic->wq_count = ETH_DATA(dev)->dd.nb_tx_queues;
        if (enic->cq_count < (enic->rq_count + enic->wq_count)) {
                dev_err(dev, "Not enough resources configured, aborting\n");
                return -1;
@@ -1012,7 +1012,7 @@ int enic_set_vnic_res(struct enic *enic)
 static int enic_dev_init(struct enic *enic)
 {
        int err;
-       struct rte_eth_dev *eth_dev = enic->rte_dev;
+       struct rte_eth_dev *dev = enic->rte_dev;

        vnic_dev_intr_coal_timer_info_default(enic->vdev);

@@ -1024,13 +1024,13 @@ static int enic_dev_init(struct enic *enic)
                return err;
        }

-       eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
-       if (!eth_dev->data->mac_addrs) {
+       ETH_DATA(dev)->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
+       if (!ETH_DATA(dev)->mac_addrs) {
                dev_err(enic, "mac addr storage alloc failed, aborting.\n");
                return -1;
        }
        ether_addr_copy((struct ether_addr *) enic->mac_addr,
-               &eth_dev->data->mac_addrs[0]);
+               &ETH_DATA(dev)->mac_addrs[0]);


        /* Get available resource counts
diff --git a/lib/librte_pmd_fm10k/fm10k_ethdev.c b/lib/librte_pmd_fm10k/fm10k_ethdev.c
index 0c7a80c..e600ce8 100644
--- a/lib/librte_pmd_fm10k/fm10k_ethdev.c
+++ b/lib/librte_pmd_fm10k/fm10k_ethdev.c
@@ -272,7 +272,7 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 {
        PMD_INIT_FUNC_TRACE();

-       if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc == 0)
                PMD_INIT_LOG(WARNING, "fm10k always strip CRC");

        return 0;
@@ -281,8 +281,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
 static void
 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       struct rte_eth_conf *dev_conf = &ETH_DATA(dev)->dev_conf;
        uint32_t mrqc, *key, i, reta, j;
        uint64_t hf;

@@ -295,7 +295,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
                0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
        };

-       if (dev->data->nb_rx_queues == 1 ||
+       if (ETH_DATA(dev)->dd.nb_rx_queues == 1 ||
            dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
            dev_conf->rx_adv_conf.rss_conf.rss_hf == 0)
                return;
@@ -317,7 +317,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
         */
        reta = 0;
        for (i = 0, j = 0; i < FM10K_RETA_SIZE; i++, j++) {
-               if (j == dev->data->nb_rx_queues)
+               if (j == ETH_DATA(dev)->dd.nb_rx_queues)
                        j = 0;
                reta = (reta << CHAR_BIT) | j;
                if ((i & 3) == 3)
@@ -353,7 +353,7 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
 static int
 fm10k_dev_tx_init(struct rte_eth_dev *dev)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int i, ret;
        struct fm10k_tx_queue *txq;
        uint64_t base_addr;
@@ -365,8 +365,8 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev)
                                3 << FM10K_TXINT_TIMER_SHIFT);

        /* Setup TX queue */
-       for (i = 0; i < dev->data->nb_tx_queues; ++i) {
-               txq = dev->data->tx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; ++i) {
+               txq = ETH_DATA(dev)->dd.tx_queues[i];
                base_addr = txq->hw_ring_phys_addr;
                size = txq->nb_desc * sizeof(struct fm10k_tx_desc);

@@ -390,7 +390,7 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev)
 static int
 fm10k_dev_rx_init(struct rte_eth_dev *dev)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int i, ret;
        struct fm10k_rx_queue *rxq;
        uint64_t base_addr;
@@ -405,8 +405,8 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
                                3 << FM10K_RXINT_TIMER_SHIFT);

        /* Setup RX queues */
-       for (i = 0; i < dev->data->nb_rx_queues; ++i) {
-               rxq = dev->data->rx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; ++i) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];
                base_addr = rxq->hw_ring_phys_addr;
                size = rxq->nb_desc * sizeof(union fm10k_rx_desc);

@@ -432,9 +432,9 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
                                buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT);

                /* It adds dual VLAN length for supporting dual VLAN */
-               if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+               if ((ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size){
-                       dev->data->scattered_rx = 1;
+                       ETH_DATA(dev)->scattered_rx = 1;
                        dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
                }

@@ -446,9 +446,9 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
                FM10K_WRITE_FLUSH(hw);
        }

-       if (dev->data->dev_conf.rxmode.enable_scatter) {
+       if (ETH_DATA(dev)->dev_conf.rxmode.enable_scatter) {
                dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
-               dev->data->scattered_rx = 1;
+               ETH_DATA(dev)->scattered_rx = 1;
        }

        /* Configure RSS if applicable */
@@ -459,15 +459,15 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
 static int
 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int err = -1;
        uint32_t reg;
        struct fm10k_rx_queue *rxq;

        PMD_INIT_FUNC_TRACE();

-       if (rx_queue_id < dev->data->nb_rx_queues) {
-               rxq = dev->data->rx_queues[rx_queue_id];
+       if (rx_queue_id < ETH_DATA(dev)->dd.nb_rx_queues) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];
                err = rx_queue_reset(rxq);
                if (err == -ENOMEM) {
                        PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
@@ -510,16 +510,16 @@ fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 static int
 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

-       if (rx_queue_id < dev->data->nb_rx_queues) {
+       if (rx_queue_id < ETH_DATA(dev)->dd.nb_rx_queues) {
                /* Disable RX queue */
                rx_queue_disable(hw, rx_queue_id);

                /* Free mbuf and clean HW ring */
-               rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+               rx_queue_clean(ETH_DATA(dev)->dd.rx_queues[rx_queue_id]);
        }

        return 0;
@@ -528,7 +528,7 @@ fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 static int
 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        /** @todo - this should be defined in the shared code */
 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY      0x00010000
        uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
@@ -536,8 +536,8 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)

        PMD_INIT_FUNC_TRACE();

-       if (tx_queue_id < dev->data->nb_tx_queues) {
-               tx_queue_reset(dev->data->tx_queues[tx_queue_id]);
+       if (tx_queue_id < ETH_DATA(dev)->dd.nb_tx_queues) {
+               tx_queue_reset(ETH_DATA(dev)->dd.tx_queues[tx_queue_id]);

                /* reset head and tail pointers */
                FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
@@ -556,13 +556,13 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 static int
 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

-       if (tx_queue_id < dev->data->nb_tx_queues) {
+       if (tx_queue_id < ETH_DATA(dev)->dd.nb_tx_queues) {
                tx_queue_disable(hw, tx_queue_id);
-               tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+               tx_queue_clean(ETH_DATA(dev)->dd.tx_queues[tx_queue_id]);
        }

        return 0;
@@ -576,7 +576,7 @@ fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 static int
 fm10k_dev_start(struct rte_eth_dev *dev)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int i, diag;

        PMD_INIT_FUNC_TRACE();
@@ -618,7 +618,7 @@ fm10k_dev_start(struct rte_eth_dev *dev)

                /* Configure RSS bits used in RETA table */
                FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0),
-                               fls(dev->data->nb_rx_queues - 1) <<
+                               fls(ETH_DATA(dev)->dd.nb_rx_queues - 1) <<
                                FM10K_DGLORTDEC_RSSLENGTH_SHIFT);

                /* Invalidate all other GLORT entries */
@@ -627,9 +627,9 @@ fm10k_dev_start(struct rte_eth_dev *dev)
                                        FM10K_DGLORTMAP_NONE);
        }

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
                struct fm10k_rx_queue *rxq;
-               rxq = dev->data->rx_queues[i];
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];

                if (rxq->rx_deferred_start)
                        continue;
@@ -637,22 +637,22 @@ fm10k_dev_start(struct rte_eth_dev *dev)
                if (diag != 0) {
                        int j;
                        for (j = 0; j < i; ++j)
-                               rx_queue_clean(dev->data->rx_queues[j]);
+                               rx_queue_clean(ETH_DATA(dev)->dd.rx_queues[j]);
                        return diag;
                }
        }

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
                struct fm10k_tx_queue *txq;
-               txq = dev->data->tx_queues[i];
+               txq = ETH_DATA(dev)->dd.tx_queues[i];

                if (txq->tx_deferred_start)
                        continue;
                diag = fm10k_dev_tx_queue_start(dev, i);
                if (diag != 0) {
                        int j;
-                       for (j = 0; j < dev->data->nb_rx_queues; ++j)
-                               rx_queue_clean(dev->data->rx_queues[j]);
+                       for (j = 0; j < ETH_DATA(dev)->dd.nb_rx_queues; ++j)
+                               rx_queue_clean(ETH_DATA(dev)->dd.rx_queues[j]);
                        return diag;
                }
        }
@@ -667,17 +667,17 @@ fm10k_dev_stop(struct rte_eth_dev *dev)

        PMD_INIT_FUNC_TRACE();

-       for (i = 0; i < dev->data->nb_tx_queues; i++)
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++)
                fm10k_dev_tx_queue_stop(dev, i);

-       for (i = 0; i < dev->data->nb_rx_queues; i++)
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++)
                fm10k_dev_rx_queue_stop(dev, i);
 }

 static void
 fm10k_dev_close(struct rte_eth_dev *dev)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

@@ -696,9 +696,9 @@ fm10k_link_update(struct rte_eth_dev *dev,
        /* The host-interface link is always up.  The speed is ~50Gbps per Gen3
         * x8 PCIe interface. For now, we leave the speed undefined since there
         * is no 50Gbps Ethernet. */
-       dev->data->dev_link.link_speed  = 0;
-       dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       dev->data->dev_link.link_status = 1;
+       ETH_DATA(dev)->dev_link.link_speed  = 0;
+       ETH_DATA(dev)->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+       ETH_DATA(dev)->dev_link.link_status = 1;

        return 0;
 }
@@ -708,9 +708,9 @@ fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        uint64_t ipackets, opackets, ibytes, obytes;
        struct fm10k_hw *hw =
-               FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct fm10k_hw_stats *hw_stats =
-               FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+               FM10K_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);
        int i;

        PMD_INIT_FUNC_TRACE();
@@ -738,9 +738,9 @@ fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 static void
 fm10k_stats_reset(struct rte_eth_dev *dev)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct fm10k_hw_stats *hw_stats =
-               FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+               FM10K_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

@@ -752,7 +752,7 @@ static void
 fm10k_dev_infos_get(struct rte_eth_dev *dev,
        struct rte_eth_dev_info *dev_info)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

@@ -798,7 +798,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
 static int
 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

@@ -929,7 +929,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
        uint16_t nb_desc, unsigned int socket_id,
        const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct fm10k_rx_queue *q;
        const struct rte_memzone *mz;

@@ -958,9 +958,9 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
         * queue cannot be reused in case we need to allocate memory on
         * different socket than was previously used.
         */
-       if (dev->data->rx_queues[queue_id] != NULL) {
-               rx_queue_free(dev->data->rx_queues[queue_id]);
-               dev->data->rx_queues[queue_id] = NULL;
+       if (ETH_DATA(dev)->dd.rx_queues[queue_id] != NULL) {
+               rx_queue_free(ETH_DATA(dev)->dd.rx_queues[queue_id]);
+               ETH_DATA(dev)->dd.rx_queues[queue_id] = NULL;
        }

        /* allocate memory for the queue structure */
@@ -974,7 +974,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
        /* setup queue */
        q->mp = mp;
        q->nb_desc = nb_desc;
-       q->port_id = dev->data->port_id;
+       q->port_id = ETH_DATA(dev)->dd.port_id;
        q->queue_id = queue_id;
        q->tail_ptr = (volatile uint32_t *)
                &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
@@ -997,7 +997,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
         * resizing in later calls to the queue setup function.
         */
        mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
-                               dev->data->port_id, queue_id, socket_id,
+                               ETH_DATA(dev)->dd.port_id, queue_id, socket_id,
                                FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
@@ -1008,7 +1008,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
        q->hw_ring = mz->addr;
        q->hw_ring_phys_addr = mz->phys_addr;

-       dev->data->rx_queues[queue_id] = q;
+       ETH_DATA(dev)->dd.rx_queues[queue_id] = q;
        return 0;
 }

@@ -1082,7 +1082,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
        uint16_t nb_desc, unsigned int socket_id,
        const struct rte_eth_txconf *conf)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct fm10k_tx_queue *q;
        const struct rte_memzone *mz;

@@ -1105,9 +1105,9 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
         * queue cannot be reused in case we need to allocate memory on
         * different socket than was previously used.
         */
-       if (dev->data->tx_queues[queue_id] != NULL) {
-               tx_queue_free(dev->data->tx_queues[queue_id]);
-               dev->data->tx_queues[queue_id] = NULL;
+       if (ETH_DATA(dev)->dd.tx_queues[queue_id] != NULL) {
+               tx_queue_free(ETH_DATA(dev)->dd.tx_queues[queue_id]);
+               ETH_DATA(dev)->dd.tx_queues[queue_id] = NULL;
        }

        /* allocate memory for the queue structure */
@@ -1120,7 +1120,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,

        /* setup queue */
        q->nb_desc = nb_desc;
-       q->port_id = dev->data->port_id;
+       q->port_id = ETH_DATA(dev)->dd.port_id;
        q->queue_id = queue_id;
        q->tail_ptr = (volatile uint32_t *)
                &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
@@ -1143,7 +1143,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
         * resizing in later calls to the queue setup function.
         */
        mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
-                               dev->data->port_id, queue_id, socket_id,
+                               ETH_DATA(dev)->dd.port_id, queue_id, socket_id,
                                FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
@@ -1169,7 +1169,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
                return (-ENOMEM);
        }

-       dev->data->tx_queues[queue_id] = q;
+       ETH_DATA(dev)->dd.tx_queues[queue_id] = q;
        return 0;
 }

@@ -1186,7 +1186,7 @@ fm10k_reta_update(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint16_t i, j, idx, shift;
        uint8_t mask;
        uint32_t reta;
@@ -1235,7 +1235,7 @@ fm10k_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint16_t i, j, idx, shift;
        uint8_t mask;
        uint32_t reta;
@@ -1276,7 +1276,7 @@ static int
 fm10k_rss_hash_update(struct rte_eth_dev *dev,
        struct rte_eth_rss_conf *rss_conf)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint32_t mrqc;
        uint64_t hf = rss_conf->rss_hf;
@@ -1319,7 +1319,7 @@ static int
 fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
        struct rte_eth_rss_conf *rss_conf)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t *key = (uint32_t *)rss_conf->rss_key;
        uint32_t mrqc;
        uint64_t hf;
@@ -1355,7 +1355,7 @@ fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
 static void
 fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

        /* Bind all local non-queue interrupt to vector 0 */
@@ -1387,7 +1387,7 @@ fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
 static void
 fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

        /* Bind all local non-queue interrupt to vector 0 */
@@ -1518,7 +1518,7 @@ fm10k_dev_interrupt_handler_pf(
                        void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t cause, status;

        if (hw->mac.type != fm10k_mac_pf)
@@ -1585,7 +1585,7 @@ fm10k_dev_interrupt_handler_vf(
                        void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        if (hw->mac.type != fm10k_mac_vf)
                return;
@@ -1678,7 +1678,7 @@ static struct eth_dev_ops fm10k_eth_dev_ops = {
 static int
 eth_fm10k_dev_init(struct rte_eth_dev *dev)
 {
-       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int diag;

        PMD_INIT_FUNC_TRACE();
@@ -1687,7 +1687,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
        dev->rx_pkt_burst = &fm10k_recv_pkts;
        dev->tx_pkt_burst = &fm10k_xmit_pkts;

-       if (dev->data->scattered_rx)
+       if (ETH_DATA(dev)->scattered_rx)
                dev->rx_pkt_burst = &fm10k_recv_scattered_pkts;

        /* only initialize in the primary process */
@@ -1709,7 +1709,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
        }

        /* Store fm10k_adapter pointer */
-       hw->back = dev->data->dev_private;
+       hw->back = ETH_DATA(dev)->dd.dev_private;

        /* Initialize the shared code */
        diag = fm10k_init_shared_code(hw);
@@ -1739,8 +1739,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
        }

        /* Initialize MAC address(es) */
-       dev->data->mac_addrs = rte_zmalloc("fm10k", ETHER_ADDR_LEN, 0);
-       if (dev->data->mac_addrs == NULL) {
+       ETH_DATA(dev)->mac_addrs = rte_zmalloc("fm10k", ETHER_ADDR_LEN, 0);
+       if (ETH_DATA(dev)->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
                return -ENOMEM;
        }
@@ -1762,7 +1762,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
        }

        ether_addr_copy((const struct ether_addr *)hw->mac.addr,
-                       &dev->data->mac_addrs[0]);
+                       &ETH_DATA(dev)->mac_addrs[0]);

        /* Reset the hw statistics */
        fm10k_stats_reset(dev);
@@ -1848,7 +1848,7 @@ static struct eth_driver rte_pmd_fm10k = {
                .id_table = pci_id_fm10k_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
-       .eth_dev_init = eth_fm10k_dev_init,
+       .dev_init = eth_fm10k_dev_init,
        .dev_private_size = sizeof(struct fm10k_adapter),
 };

diff --git a/lib/librte_pmd_fm10k/fm10k_rxtx.c b/lib/librte_pmd_fm10k/fm10k_rxtx.c
index 83bddfc..54ac471 100644
--- a/lib/librte_pmd_fm10k/fm10k_rxtx.c
+++ b/lib/librte_pmd_fm10k/fm10k_rxtx.c
@@ -168,7 +168,7 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         */
                        q->next_dd = (q->next_dd + q->nb_desc - count) %
                                                                q->nb_desc;
-                       rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
+                       ETH_DATA(&rte_eth_devices[port])->dd.rx_mbuf_alloc_failed++;
                        return 0;
                }

@@ -310,7 +310,7 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                         */
                        q->next_dd = (q->next_dd + q->nb_desc - count) %
                                                                q->nb_desc;
-                       rte_eth_devices[port].data->rx_mbuf_alloc_failed++;
+                       ETH_DATA(&rte_eth_devices[port])->dd.rx_mbuf_alloc_failed++;
                        return 0;
                }
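
Note: in the RX burst paths above the port is only an index, so the macro is
applied to the address of the global device entry; the allocation-failure
counter now lives in the generic dd part. A minimal sketch of the converted
pattern (edev is a local name for illustration only):

	struct rte_eth_dev *edev = &rte_eth_devices[port];
	ETH_DATA(edev)->dd.rx_mbuf_alloc_failed++;	/* ETH_DATA() takes a dev pointer */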

diff --git a/lib/librte_pmd_i40e/i40e_ethdev.c b/lib/librte_pmd_i40e/i40e_ethdev.c
index 6b8f96e..4d6ad84 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev.c
@@ -106,7 +106,7 @@
        (1UL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
        (1UL << RTE_ETH_FLOW_L2_PAYLOAD))

-static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_i40e_dev_init(struct rte_eth_dev *dev);
 static int i40e_dev_configure(struct rte_eth_dev *dev);
 static int i40e_dev_start(struct rte_eth_dev *dev);
 static void i40e_dev_stop(struct rte_eth_dev *dev);
@@ -270,7 +270,7 @@ static struct eth_driver rte_i40e_pmd = {
                .id_table = pci_id_i40e_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
-       .eth_dev_init = eth_i40e_dev_init,
+       .dev_init = eth_i40e_dev_init,
        .dev_private_size = sizeof(struct i40e_adapter),
 };

@@ -287,7 +287,7 @@ rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                     struct rte_eth_link *link)
 {
        struct rte_eth_link *dst = link;
-       struct rte_eth_link *src = &(dev->data->dev_link);
+       struct rte_eth_link *src = &(ETH_DATA(dev)->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
@@ -300,7 +300,7 @@ static inline int
 rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                      struct rte_eth_link *link)
 {
-       struct rte_eth_link *dst = &(dev->data->dev_link);
+       struct rte_eth_link *dst = &(ETH_DATA(dev)->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
@@ -365,8 +365,8 @@ static int
 eth_i40e_dev_init(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev;
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *vsi;
        int ret;
        uint32_t len;
@@ -382,12 +382,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-               if (dev->data->scattered_rx)
+               if (ETH_DATA(dev)->scattered_rx)
                        dev->rx_pkt_burst = i40e_recv_scattered_pkts;
                return 0;
        }
        pci_dev = dev->pci_dev;
-       pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(ETH_DATA(dev)->dd.dev_private);
        pf->adapter->eth_dev = dev;
        pf->dev_data = dev->data;

@@ -540,14 +540,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
                len = ETHER_ADDR_LEN * vsi->max_macaddrs;

        /* Should be after VSI initialized */
-       dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
-       if (!dev->data->mac_addrs) {
+       ETH_DATA(dev)->mac_addrs = rte_zmalloc("i40e", len, 0);
+       if (!ETH_DATA(dev)->mac_addrs) {
                PMD_INIT_LOG(ERR, "Failed to allocated memory "
                                        "for storing mac address");
                goto err_mac_alloc;
        }
        ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
-                                       &dev->data->mac_addrs[0]);
+                                       &ETH_DATA(dev)->mac_addrs[0]);

        /* initialize pf host driver to setup SRIOV resource if applicable */
        i40e_pf_host_init(dev);
@@ -586,11 +586,11 @@ err_get_capabilities:
 static int
 i40e_dev_configure(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       enum rte_eth_rx_mq_mode mq_mode = dev->data->dev_conf.rxmode.mq_mode;
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       enum rte_eth_rx_mq_mode mq_mode = ETH_DATA(dev)->dev_conf.rxmode.mq_mode;
        int ret;

-       if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
+       if (ETH_DATA(dev)->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
                ret = i40e_fdir_setup(pf);
                if (ret != I40E_SUCCESS) {
                        PMD_DRV_LOG(ERR, "Failed to setup flow director.");
@@ -833,8 +833,8 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 {
        uint8_t speed;
        uint8_t abilities = 0;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct rte_eth_conf *conf = &dev->data->dev_conf;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       struct rte_eth_conf *conf = &ETH_DATA(dev)->dev_conf;

        speed = i40e_parse_link_speed(conf->link_speed);
        abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
@@ -849,16 +849,16 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
 static int
 i40e_dev_start(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        int ret, i;

-       if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
-               (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
+       if ((ETH_DATA(dev)->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
+               (ETH_DATA(dev)->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
                PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
-                            dev->data->dev_conf.link_duplex,
-                            dev->data->port_id);
+                            ETH_DATA(dev)->dev_conf.link_duplex,
+                            ETH_DATA(dev)->dd.port_id);
                return -EINVAL;
        }

@@ -923,7 +923,7 @@ err_up:
 static void
 i40e_dev_stop(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *main_vsi = pf->main_vsi;
        int i;

@@ -954,8 +954,8 @@ i40e_dev_stop(struct rte_eth_dev *dev)
 static void
 i40e_dev_close(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;

        PMD_INIT_FUNC_TRACE();
@@ -990,8 +990,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
 static void
 i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int status;

@@ -1010,8 +1010,8 @@ i40e_dev_promiscuous_enable(struct rte_eth_dev *dev)
 static void
 i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int status;

@@ -1029,8 +1029,8 @@ i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
 static void
 i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int ret;

@@ -1042,12 +1042,12 @@ i40e_dev_allmulticast_enable(struct rte_eth_dev *dev)
 static void
 i40e_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        int ret;

-       if (dev->data->promiscuous == 1)
+       if (ETH_DATA(dev)->promiscuous == 1)
                return; /* must remain in all_multicast mode */

        ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
@@ -1074,7 +1074,7 @@ i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev)
 {
        uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
        uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        return i40e_phy_conf_link(hw, abilities, speed);
 }
@@ -1085,7 +1085,7 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
 {
 #define CHECK_INTERVAL 100  /* 100ms */
 #define MAX_REPEAT_TIME 10  /* 1s (10 * 100ms) in total */
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct i40e_link_status link_status;
        struct rte_eth_link link, old;
        int status;
@@ -1218,8 +1218,8 @@ static void
 i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        uint32_t i;
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
        struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */

@@ -1497,7 +1497,7 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 static void
 i40e_dev_stats_reset(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);

        /* It results in reloading the start point of each counter */
        pf->offset_loaded = false;
@@ -1517,7 +1517,7 @@ i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
 static void
 i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;

        dev_info->max_rx_queues = vsi->nb_qps;
@@ -1578,7 +1578,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 static int
 i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        PMD_INIT_FUNC_TRACE();

@@ -1598,19 +1598,19 @@ i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
 static void
 i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;

        if (mask & ETH_VLAN_STRIP_MASK) {
                /* Enable or disable VLAN stripping */
-               if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip)
                        i40e_vsi_config_vlan_stripping(vsi, TRUE);
                else
                        i40e_vsi_config_vlan_stripping(vsi, FALSE);
        }

        if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_extend)
                        i40e_vsi_config_double_vlan(vsi, TRUE);
                else
                        i40e_vsi_config_double_vlan(vsi, FALSE);
@@ -1628,7 +1628,7 @@ i40e_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
 static int
 i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
        struct rte_eth_dev_data *data = I40E_VSI_TO_DEV_DATA(vsi);
        struct i40e_vsi_vlan_pvid_info info;
@@ -1650,7 +1650,7 @@ i40e_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
 static int
 i40e_dev_led_on(struct rte_eth_dev *dev)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t mode = i40e_led_get(hw);

        if (mode == 0)
@@ -1662,7 +1662,7 @@ i40e_dev_led_on(struct rte_eth_dev *dev)
 static int
 i40e_dev_led_off(struct rte_eth_dev *dev)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t mode = i40e_led_get(hw);

        if (mode != 0)
@@ -1696,7 +1696,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
                 __rte_unused uint32_t index,
                 uint32_t pool)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_mac_filter_info mac_filter;
        struct i40e_vsi *vsi;
        int ret;
@@ -1734,9 +1734,9 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
 static void
 i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_vsi *vsi;
-       struct rte_eth_dev_data *data = dev->data;
+       struct rte_eth_dev_data *data = ETH_DATA(dev);
        struct ether_addr *macaddr;
        int ret;
        uint32_t i;
@@ -1744,7 +1744,7 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)

        macaddr = &(data->mac_addrs[index]);

-       pool_sel = dev->data->mac_pool_sel[index];
+       pool_sel = ETH_DATA(dev)->mac_pool_sel[index];

        for (i = 0; i < sizeof(pool_sel) * CHAR_BIT; i++) {
                if (pool_sel & (1ULL << i)) {
@@ -1851,7 +1851,7 @@ static int
 i40e_mac_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
                void *arg)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct rte_eth_mac_filter *filter;
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        int ret = I40E_NOT_SUPPORTED;
@@ -1888,8 +1888,8 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
                         struct rte_eth_rss_reta_entry64 *reta_conf,
                         uint16_t reta_size)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t lut, l;
        uint16_t i, j, lut_size = pf->hash_lut_size;
        uint16_t idx, shift;
@@ -1932,8 +1932,8 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
                        struct rte_eth_rss_reta_entry64 *reta_conf,
                        uint16_t reta_size)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t lut;
        uint16_t i, j, lut_size = pf->hash_lut_size;
        uint16_t idx, shift;
@@ -2127,7 +2127,7 @@ i40e_get_cap(struct i40e_hw *hw)
 static int
 i40e_pf_parameter_init(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t sum_queues = 0, sum_vsis, left_queues;

@@ -3238,7 +3238,7 @@ i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on)
 static int
 i40e_dev_init_vlan(struct rte_eth_dev *dev)
 {
-       struct rte_eth_dev_data *data = dev->data;
+       struct rte_eth_dev_data *data = ETH_DATA(dev);
        int ret;

        /* Apply vlan offload setting */
@@ -3454,8 +3454,8 @@ i40e_dev_switch_tx_queues(struct i40e_pf *pf, bool on)
        uint16_t i;
        int ret;

-       for (i = 0; i < dev_data->nb_tx_queues; i++) {
-               txq = dev_data->tx_queues[i];
+       for (i = 0; i < dev_data->dd.nb_tx_queues; i++) {
+               txq = dev_data->dd.tx_queues[i];
                /* Don't operate the queue if not configured or
                 * if starting only per queue */
                if (!txq || !txq->q_set || (on && txq->tx_deferred_start))
@@ -3532,8 +3532,8 @@ i40e_dev_switch_rx_queues(struct i40e_pf *pf, bool on)
        uint16_t i;
        int ret;

-       for (i = 0; i < dev_data->nb_rx_queues; i++) {
-               rxq = dev_data->rx_queues[i];
+       for (i = 0; i < dev_data->dd.nb_rx_queues; i++) {
+               rxq = dev_data->dd.rx_queues[i];
                /* Don't operate the queue if not configured or
                 * if starting only per queue */
                if (!rxq || !rxq->q_set || (on && rxq->rx_deferred_start))
@@ -3585,8 +3585,8 @@ i40e_dev_tx_init(struct i40e_pf *pf)
        uint32_t ret = I40E_SUCCESS;
        struct i40e_tx_queue *txq;

-       for (i = 0; i < data->nb_tx_queues; i++) {
-               txq = data->tx_queues[i];
+       for (i = 0; i < data->dd.nb_tx_queues; i++) {
+               txq = data->dd.tx_queues[i];
                if (!txq || !txq->q_set)
                        continue;
                ret = i40e_tx_queue_init(txq);
@@ -3607,8 +3607,8 @@ i40e_dev_rx_init(struct i40e_pf *pf)
        struct i40e_rx_queue *rxq;

        i40e_pf_config_mq_rx(pf);
-       for (i = 0; i < data->nb_rx_queues; i++) {
-               rxq = data->rx_queues[i];
+       for (i = 0; i < data->dd.nb_rx_queues; i++) {
+               rxq = data->dd.rx_queues[i];
                if (!rxq || !rxq->q_set)
                        continue;

@@ -3645,8 +3645,8 @@ i40e_dev_rxtx_init(struct i40e_pf *pf)
 static int
 i40e_vmdq_setup(struct rte_eth_dev *dev)
 {
-       struct rte_eth_conf *conf = &dev->data->dev_conf;
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct rte_eth_conf *conf = &ETH_DATA(dev)->dev_conf;
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        int i, err, conf_vsis, j, loop;
        struct i40e_vsi *vsi;
        struct i40e_vmdq_info *vmdq_info;
@@ -3821,8 +3821,8 @@ i40e_pf_config_irq0(struct i40e_hw *hw)
 static void
 i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        int i;
        uint16_t abs_vf_id;
        uint32_t index, offset, val;
@@ -3860,7 +3860,7 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
 static void
 i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct i40e_arq_event_info info;
        uint16_t pending, opcode;
        int ret;
@@ -3912,7 +3912,7 @@ static void
 i40e_dev_interrupt_delayed_handler(void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t icr0;

        /* read interrupt causes again */
@@ -3947,7 +3947,7 @@ i40e_dev_interrupt_delayed_handler(void *param)

        /* handle the link up interrupt in an alarm callback */
        i40e_dev_link_update(dev, 0);
-       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+       _rte_eth_dev_callback_process(dev, RTE_DEV_EVENT_INTR_LSC);

        i40e_pf_enable_irq0(hw);
        rte_intr_enable(&(dev->pci_dev->intr_handle));
@@ -3970,7 +3970,7 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
                           void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t icr0;

        /* Disable interrupt */
@@ -4031,7 +4031,7 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
                        return;
                else
                        _rte_eth_dev_callback_process(dev,
-                               RTE_ETH_EVENT_INTR_LSC);
+                               RTE_DEV_EVENT_INTR_LSC);
        }

 done:
@@ -4711,7 +4711,7 @@ static int
 i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
                         struct rte_eth_rss_conf *rss_conf)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
        uint64_t hena;

@@ -4733,7 +4733,7 @@ static int
 i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                           struct rte_eth_rss_conf *rss_conf)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
        uint64_t hena;
        uint16_t i;
@@ -4952,7 +4952,7 @@ i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev,
                        struct rte_eth_udp_tunnel *udp_tunnel)
 {
        int ret = 0;
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);

        if (udp_tunnel == NULL)
                return -EINVAL;
@@ -4983,7 +4983,7 @@ i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev,
                        struct rte_eth_udp_tunnel *udp_tunnel)
 {
        int ret = 0;
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);

        if (udp_tunnel == NULL)
                return -EINVAL;
@@ -5016,7 +5016,7 @@ i40e_pf_calc_configured_queues_num(struct i40e_pf *pf)

        num = 0;
        for (i = 0; i < pf->lan_nb_qps; i++) {
-               rxq = data->rx_queues[i];
+               rxq = data->dd.rx_queues[i];
                if (rxq && rxq->q_set)
                        num++;
                else
@@ -5043,7 +5043,7 @@ i40e_pf_config_rss(struct i40e_pf *pf)
                num = i40e_pf_calc_configured_queues_num(pf);
                num = i40e_align_floor(num);
        } else
-               num = i40e_align_floor(pf->dev_data->nb_rx_queues);
+               num = i40e_align_floor(pf->dev_data->dd.nb_rx_queues);

        PMD_INIT_LOG(INFO, "Max of contiguous %u PF queues are configured",
                        num);
@@ -5092,7 +5092,7 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf,
                return -EINVAL;
        }

-       if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
+       if (filter->queue_id >= pf->dev_data->dd.nb_rx_queues) {
                PMD_DRV_LOG(ERR, "Invalid queue ID");
                return -EINVAL;
        }
@@ -5122,7 +5122,7 @@ i40e_tunnel_filter_handle(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
                        void *arg)
 {
        struct rte_eth_tunnel_filter_conf *filter;
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        int ret = I40E_SUCCESS;

        filter = (struct rte_eth_tunnel_filter_conf *)(arg);
@@ -5400,7 +5400,7 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
                      enum rte_filter_op filter_op,
                      void *arg)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret = 0;

        switch (filter_op) {
@@ -5438,7 +5438,7 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
        uint16_t flags = 0;
        int ret;

-       if (filter->queue >= pf->dev_data->nb_rx_queues) {
+       if (filter->queue >= pf->dev_data->dd.nb_rx_queues) {
                PMD_DRV_LOG(ERR, "Invalid queue ID");
                return -EINVAL;
        }
@@ -5483,7 +5483,7 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        int ret = 0;

        if (filter_op == RTE_ETH_FILTER_NOP)
diff --git a/lib/librte_pmd_i40e/i40e_ethdev_vf.c b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
index c985e4a..e8ffcbc 100644
--- a/lib/librte_pmd_i40e/i40e_ethdev_vf.c
+++ b/lib/librte_pmd_i40e/i40e_ethdev_vf.c
@@ -264,8 +264,8 @@ i40evf_parse_pfmsg(struct i40e_vf *vf,
 static enum i40evf_aq_result
 i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_arq_event_info event;
        int ret;
        enum i40evf_aq_result result = I40EVF_MSG_NON;
@@ -344,8 +344,8 @@ _atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
 static int
 i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        int err = -1;
        struct i40evf_arq_msg_info info;

@@ -386,7 +386,7 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
        struct i40e_virtchnl_version_info version, *pver;
        int err;
        struct vf_cmd_info args;
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);

        version.major = I40E_VIRTCHNL_VERSION_MAJOR;
        version.minor = I40E_VIRTCHNL_VERSION_MINOR;
@@ -425,8 +425,8 @@ i40evf_check_api_version(struct rte_eth_dev *dev)
 static int
 i40evf_get_vf_resource(struct rte_eth_dev *dev)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        int err;
        struct vf_cmd_info args;
        uint32_t len;
@@ -459,7 +459,7 @@ i40evf_config_promisc(struct rte_eth_dev *dev,
                      bool enable_unicast,
                      bool enable_multicast)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        int err;
        struct vf_cmd_info args;
        struct i40e_virtchnl_promisc_info promisc;
@@ -492,7 +492,7 @@ static int
 i40evf_config_vlan_offload(struct rte_eth_dev *dev,
                                bool enable_vlan_strip)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        int err;
        struct vf_cmd_info args;
        struct i40e_virtchnl_vlan_offload_info offload;
@@ -517,7 +517,7 @@ static int
 i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
                                struct i40e_vsi_vlan_pvid_info *info)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        int err;
        struct vf_cmd_info args;
        struct i40e_virtchnl_pvid_info tpid_info;
@@ -585,11 +585,11 @@ i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
 static int
 i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_rx_queue **rxq =
-               (struct i40e_rx_queue **)dev->data->rx_queues;
+               (struct i40e_rx_queue **)ETH_DATA(dev)->dd.rx_queues;
        struct i40e_tx_queue **txq =
-               (struct i40e_tx_queue **)dev->data->tx_queues;
+               (struct i40e_tx_queue **)ETH_DATA(dev)->dd.tx_queues;
        struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
        struct i40e_virtchnl_queue_pair_info *vc_qpi;
        struct vf_cmd_info args;
@@ -606,9 +606,9 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)

        for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
                i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
-                       vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+                       vc_vqci->vsi_id, i, ETH_DATA(dev)->dd.nb_tx_queues, txq[i]);
                i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
-                       vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
+                       vc_vqci->vsi_id, i, ETH_DATA(dev)->dd.nb_rx_queues,
                                        vf->max_pkt_len, rxq[i]);
        }
        memset(&args, 0, sizeof(args));
@@ -629,11 +629,11 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
 static int
 i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_rx_queue **rxq =
-               (struct i40e_rx_queue **)dev->data->rx_queues;
+               (struct i40e_rx_queue **)ETH_DATA(dev)->dd.rx_queues;
        struct i40e_tx_queue **txq =
-               (struct i40e_tx_queue **)dev->data->tx_queues;
+               (struct i40e_tx_queue **)ETH_DATA(dev)->dd.tx_queues;
        struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
        struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
        struct vf_cmd_info args;
@@ -650,18 +650,18 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
        vc_qpei = vc_vqcei->qpair;
        for (i = 0; i < nb_qp; i++, vc_qpei++) {
                i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
-                       vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
+                       vc_vqcei->vsi_id, i, ETH_DATA(dev)->dd.nb_tx_queues, txq[i]);
                i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
-                       vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
+                       vc_vqcei->vsi_id, i, ETH_DATA(dev)->dd.nb_rx_queues,
                                        vf->max_pkt_len, rxq[i]);
-               if (i < dev->data->nb_rx_queues)
+               if (i < ETH_DATA(dev)->dd.nb_rx_queues)
                        /*
                         * It adds extra info for configuring VSI queues, which
                         * is needed to enable the configurable crc stripping
                         * in VF.
                         */
                        vc_qpei->rxq_ext.crcstrip =
-                               dev->data->dev_conf.rxmode.hw_strip_crc;
+                               ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc;
        }
        memset(&args, 0, sizeof(args));
        args.ops =
@@ -681,7 +681,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
 static int
 i40evf_configure_queues(struct rte_eth_dev *dev)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);

        if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
                /* To support DPDK PF host */
@@ -694,7 +694,7 @@ i40evf_configure_queues(struct rte_eth_dev *dev)
 static int
 i40evf_config_irq_map(struct rte_eth_dev *dev)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        struct vf_cmd_info args;
        uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
                sizeof(struct i40e_virtchnl_vector_map)];
@@ -710,7 +710,7 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
        /* Don't map any tx queue */
        map_info->vecmap[0].txq_map = 0;
        map_info->vecmap[0].rxq_map = 0;
-       for (i = 0; i < dev->data->nb_rx_queues; i++)
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++)
                map_info->vecmap[0].rxq_map |= 1 << i;

        args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
@@ -729,7 +729,7 @@ static int
 i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
                                bool on)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_virtchnl_queue_select queue_select;
        int err;
        struct vf_cmd_info args;
@@ -765,8 +765,8 @@ i40evf_start_queues(struct rte_eth_dev *dev)
        struct i40e_rx_queue *rxq;
        struct i40e_tx_queue *txq;

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev_data->rx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+               rxq = dev_data->dd.rx_queues[i];
                if (rxq->rx_deferred_start)
                        continue;
                if (i40evf_dev_rx_queue_start(dev, i) != 0) {
@@ -775,8 +775,8 @@ i40evf_start_queues(struct rte_eth_dev *dev)
                }
        }

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev_data->tx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
+               txq = dev_data->dd.tx_queues[i];
                if (txq->tx_deferred_start)
                        continue;
                if (i40evf_dev_tx_queue_start(dev, i) != 0) {
@@ -794,7 +794,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
        int i;

        /* Stop TX queues first */
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
                if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
                        return -1;
@@ -802,7 +802,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
        }

        /* Then stop RX queues */
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
                if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
                        PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
                        return -1;
@@ -816,7 +816,7 @@ static int
 i40evf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 {
        struct i40e_virtchnl_ether_addr_list *list;
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
                        sizeof(struct i40e_virtchnl_ether_addr)];
        int err;
@@ -853,7 +853,7 @@ static int
 i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 {
        struct i40e_virtchnl_ether_addr_list *list;
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
                        sizeof(struct i40e_virtchnl_ether_addr)];
        int err;
@@ -889,7 +889,7 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
 static int
 i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_virtchnl_queue_select q_stats;
        struct i40e_eth_stats *pstats;
        int err;
@@ -924,7 +924,7 @@ i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 static int
 i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_virtchnl_vlan_filter_list *vlan_list;
        uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
                                                        sizeof(uint16_t)];
@@ -951,7 +951,7 @@ i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 static int
 i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_virtchnl_vlan_filter_list *vlan_list;
        uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
                                                        sizeof(uint16_t)];
@@ -1009,7 +1009,7 @@ static inline int
 i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                    struct rte_eth_link *link)
 {
-       struct rte_eth_link *dst = &(dev->data->dev_link);
+       struct rte_eth_link *dst = &(ETH_DATA(dev)->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
@@ -1061,10 +1061,10 @@ static int
 i40evf_init_vf(struct rte_eth_dev *dev)
 {
        int i, err, bufsz;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);

-       vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+       vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(ETH_DATA(dev)->dd.dev_private);
        vf->dev_data = dev->data;
        err = i40evf_set_mac_type(hw);
        if (err) {
@@ -1148,51 +1148,51 @@ err:
 }

 static int
-i40evf_dev_init(struct rte_eth_dev *eth_dev)
+i40evf_dev_init(struct rte_eth_dev *dev)
 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
-                       eth_dev->data->dev_private);
+                       ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

        /* assign ops func pointer */
-       eth_dev->dev_ops = &i40evf_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &i40e_recv_pkts;
-       eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
+       dev->dev_ops = &i40evf_eth_dev_ops;
+       dev->rx_pkt_burst = &i40e_recv_pkts;
+       dev->tx_pkt_burst = &i40e_xmit_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-               if (eth_dev->data->scattered_rx)
-                       eth_dev->rx_pkt_burst = i40e_recv_scattered_pkts;
+               if (ETH_DATA(dev)->scattered_rx)
+                       dev->rx_pkt_burst = i40e_recv_scattered_pkts;
                return 0;
        }

-       hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
-       hw->device_id = eth_dev->pci_dev->id.device_id;
-       hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
-       hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
-       hw->bus.device = eth_dev->pci_dev->addr.devid;
-       hw->bus.func = eth_dev->pci_dev->addr.function;
-       hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+       hw->vendor_id = dev->pci_dev->id.vendor_id;
+       hw->device_id = dev->pci_dev->id.device_id;
+       hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+       hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
+       hw->bus.device = dev->pci_dev->addr.devid;
+       hw->bus.func = dev->pci_dev->addr.function;
+       hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;

-       if(i40evf_init_vf(eth_dev) != 0) {
+       if(i40evf_init_vf(dev) != 0) {
                PMD_INIT_LOG(ERR, "Init vf failed");
                return -1;
        }

        /* copy mac addr */
-       eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
+       ETH_DATA(dev)->mac_addrs = rte_zmalloc("i40evf_mac",
                                        ETHER_ADDR_LEN, 0);
-       if (eth_dev->data->mac_addrs == NULL) {
+       if (ETH_DATA(dev)->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
                                "store MAC addresses", ETHER_ADDR_LEN);
                return -ENOMEM;
        }
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
-               (struct ether_addr *)eth_dev->data->mac_addrs);
+               (struct ether_addr *)ETH_DATA(dev)->mac_addrs);

        return 0;
 }
@@ -1206,7 +1206,7 @@ static struct eth_driver rte_i40evf_pmd = {
                .id_table = pci_id_i40evf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
-       .eth_dev_init = i40evf_dev_init,
+       .dev_init = i40evf_dev_init,
        .dev_private_size = sizeof(struct i40e_vf),
 };

@@ -1258,8 +1258,8 @@ static void
 i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        bool enable_vlan_strip = 0;
-       struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct rte_eth_conf *dev_conf = &ETH_DATA(dev)->dev_conf;
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);

        /* Linux pf host doesn't support vlan offload yet */
        if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
@@ -1279,9 +1279,9 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 static int
 i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
 {
-       struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+       struct rte_eth_conf *dev_conf = &ETH_DATA(dev)->dev_conf;
        struct i40e_vsi_vlan_pvid_info info;
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);

        memset(&info, 0, sizeof(info));
        info.on = on;
@@ -1307,12 +1307,12 @@ i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct i40e_rx_queue *rxq;
        int err = 0;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

-       if (rx_queue_id < dev->data->nb_rx_queues) {
-               rxq = dev->data->rx_queues[rx_queue_id];
+       if (rx_queue_id < ETH_DATA(dev)->dd.nb_rx_queues) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];

                err = i40e_alloc_rx_queue_mbufs(rxq);
                if (err) {
@@ -1343,8 +1343,8 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct i40e_rx_queue *rxq;
        int err;

-       if (rx_queue_id < dev->data->nb_rx_queues) {
-               rxq = dev->data->rx_queues[rx_queue_id];
+       if (rx_queue_id < ETH_DATA(dev)->dd.nb_rx_queues) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];

                err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);

@@ -1368,7 +1368,7 @@ i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)

        PMD_INIT_FUNC_TRACE();

-       if (tx_queue_id < dev->data->nb_tx_queues) {
+       if (tx_queue_id < ETH_DATA(dev)->dd.nb_tx_queues) {

                /* Ready to switch the queue on */
                err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
@@ -1387,8 +1387,8 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        struct i40e_tx_queue *txq;
        int err;

-       if (tx_queue_id < dev->data->nb_tx_queues) {
-               txq = dev->data->tx_queues[tx_queue_id];
+       if (tx_queue_id < ETH_DATA(dev)->dd.nb_tx_queues) {
+               txq = ETH_DATA(dev)->dd.tx_queues[tx_queue_id];

                err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);

@@ -1421,14 +1421,14 @@ i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 static int
 i40evf_rx_init(struct rte_eth_dev *dev)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        uint16_t i;
        struct i40e_rx_queue **rxq =
-               (struct i40e_rx_queue **)dev->data->rx_queues;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               (struct i40e_rx_queue **)ETH_DATA(dev)->dd.rx_queues;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        i40evf_config_rss(vf);
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
                rxq[i]->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(i);
                I40E_PCI_REG_WRITE(rxq[i]->qrx_tail, rxq[i]->nb_rx_desc - 1);
        }
@@ -1444,10 +1444,10 @@ i40evf_tx_init(struct rte_eth_dev *dev)
 {
        uint16_t i;
        struct i40e_tx_queue **txq =
-               (struct i40e_tx_queue **)dev->data->tx_queues;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               (struct i40e_tx_queue **)ETH_DATA(dev)->dd.tx_queues;
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       for (i = 0; i < dev->data->nb_tx_queues; i++)
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++)
                txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
 }

@@ -1469,14 +1469,14 @@ i40evf_disable_queues_intr(struct i40e_hw *hw)
 static int
 i40evf_dev_start(struct rte_eth_dev *dev)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ether_addr mac_addr;

        PMD_INIT_FUNC_TRACE();

-       vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
-       if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+       vf->max_pkt_len = ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len;
+       if (ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame == 1) {
                if (vf->max_pkt_len <= ETHER_MAX_LEN ||
                        vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -1498,8 +1498,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
                }
        }

-       vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
-                                       dev->data->nb_tx_queues);
+       vf->num_queue_pairs = RTE_MAX(ETH_DATA(dev)->dd.nb_rx_queues,
+                                       ETH_DATA(dev)->dd.nb_tx_queues);

        if (i40evf_rx_init(dev) != 0){
                PMD_DRV_LOG(ERR, "failed to do RX init");
@@ -1542,7 +1542,7 @@ err_queue:
 static void
 i40evf_dev_stop(struct rte_eth_dev *dev)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

@@ -1555,7 +1555,7 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
                       __rte_unused int wait_to_complete)
 {
        struct rte_eth_link new_link;
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        /*
         * DPDK pf host provides an interface to acquire link status
         * while Linux driver does not
@@ -1576,7 +1576,7 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
 static void
 i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        int ret;

        /* If enabled, just return */
@@ -1591,7 +1591,7 @@ i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
 static void
 i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        int ret;

        /* If disabled, just return */
@@ -1606,7 +1606,7 @@ i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
 static void
 i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        int ret;

        /* If enabled, just return */
@@ -1621,7 +1621,7 @@ i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
 static void
 i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
        int ret;

        /* If enabled, just return */
@@ -1636,7 +1636,7 @@ i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
 static void
 i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+       struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);

        memset(dev_info, 0, sizeof(*dev_info));
        dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
@@ -1679,7 +1679,7 @@ i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 static void
 i40evf_dev_close(struct rte_eth_dev *dev)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        i40evf_dev_stop(dev);
        i40evf_reset_vf(hw);
@@ -1691,7 +1691,7 @@ i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
                           struct rte_eth_rss_reta_entry64 *reta_conf,
                           uint16_t reta_size)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t lut, l;
        uint16_t i, j;
        uint16_t idx, shift;
@@ -1734,7 +1734,7 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
                          struct rte_eth_rss_reta_entry64 *reta_conf,
                          uint16_t reta_size)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t lut;
        uint16_t i, j;
        uint16_t idx, shift;
@@ -1854,7 +1854,7 @@ static int
 i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
                           struct rte_eth_rss_conf *rss_conf)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
        uint64_t hena;

@@ -1877,7 +1877,7 @@ static int
 i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                             struct rte_eth_rss_conf *rss_conf)
 {
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t *hash_key = (uint32_t *)(rss_conf->rss_key);
        uint64_t hena;
        uint16_t i;
diff --git a/lib/librte_pmd_i40e/i40e_fdir.c b/lib/librte_pmd_i40e/i40e_fdir.c
index 7b68c78..0a25294 100644
--- a/lib/librte_pmd_i40e/i40e_fdir.c
+++ b/lib/librte_pmd_i40e/i40e_fdir.c
@@ -194,7 +194,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
        int err = I40E_SUCCESS;
        char z_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz = NULL;
-       struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+       struct rte_eth_dev *dev = pf->adapter->eth_dev;

        if ((pf->flags & I40E_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
@@ -262,9 +262,9 @@ i40e_fdir_setup(struct i40e_pf *pf)

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "%s_%s_%d",
-                       eth_dev->driver->pci_drv.name,
+                       dev->driver->pci_drv.name,
                        I40E_FDIR_MZ_NAME,
-                       eth_dev->data->port_id);
+                       ETH_DATA(dev)->dd.port_id);
        mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
@@ -637,8 +637,8 @@ i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
 int
 i40e_fdir_configure(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct rte_eth_fdir_flex_conf *conf;
        enum i40e_filter_pctype pctype;
        uint32_t val;
@@ -665,7 +665,7 @@ i40e_fdir_configure(struct rte_eth_dev *dev)

        i40e_init_flx_pld(pf); /* set flex config to default value */

-       conf = &dev->data->dev_conf.fdir_conf.flex_conf;
+       conf = &ETH_DATA(dev)->dev_conf.fdir_conf.flex_conf;
        ret = i40e_check_fdir_flex_conf(conf);
        if (ret < 0) {
                PMD_DRV_LOG(ERR, " invalid configuration arguments.");
@@ -971,12 +971,12 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
                            const struct rte_eth_fdir_filter *filter,
                            bool add)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        enum i40e_filter_pctype pctype;
        int ret = 0;

-       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+       if (ETH_DATA(dev)->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
                PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
                        " check the mode in fdir_conf.");
                return -ENOTSUP;
@@ -986,7 +986,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
                PMD_DRV_LOG(ERR, "invalid flow_type input.");
                return -EINVAL;
        }
-       if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+       if (filter->action.rx_queue >= pf->dev_data->dd.nb_rx_queues) {
                PMD_DRV_LOG(ERR, "Invalid queue ID");
                return -EINVAL;
        }
@@ -1137,7 +1137,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
 static int
 i40e_fdir_flush(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t reg;
        uint16_t guarant_cnt, best_cnt;
@@ -1254,12 +1254,12 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
 static void
 i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint16_t num_flex_set = 0;
        uint16_t num_flex_mask = 0;

-       if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+       if (ETH_DATA(dev)->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
                fdir->mode = RTE_FDIR_MODE_PERFECT;
        else
                fdir->mode = RTE_FDIR_MODE_NONE;
@@ -1296,7 +1296,7 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
 static void
 i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        uint32_t fdstat;

@@ -1320,7 +1320,7 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
                       enum rte_filter_op filter_op,
                       void *arg)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        int ret = 0;

        if ((pf->flags & I40E_FLAG_FDIR) == 0)
diff --git a/lib/librte_pmd_i40e/i40e_pf.c b/lib/librte_pmd_i40e/i40e_pf.c
index cbb2dcc..c67f75c 100644
--- a/lib/librte_pmd_i40e/i40e_pf.c
+++ b/lib/librte_pmd_i40e/i40e_pf.c
@@ -847,7 +847,7 @@ i40e_pf_host_process_cmd_get_link_status(struct i40e_pf_vf *vf)
        /* Update link status first to acquire latest link change */
        i40e_dev_link_update(dev, 1);
        i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_LINK_STAT,
-               I40E_SUCCESS, (uint8_t *)&dev->data->dev_link,
+               I40E_SUCCESS, (uint8_t *)&ETH_DATA(dev)->dev_link,
                                sizeof(struct rte_eth_link));
 }

@@ -908,8 +908,8 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
                           uint8_t *msg,
                           uint16_t msglen)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct i40e_pf_vf *vf;
        /* AdminQ will pass absolute VF id, transfer to internal vf id */
        uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
@@ -1013,7 +1013,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
 int
 i40e_pf_host_init(struct rte_eth_dev *dev)
 {
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_hw *hw = I40E_PF_TO_HW(pf);
        int ret, i;
        uint32_t val;
diff --git a/lib/librte_pmd_i40e/i40e_rxtx.c b/lib/librte_pmd_i40e/i40e_rxtx.c
index 9c7be6f..1c7fea8 100644
--- a/lib/librte_pmd_i40e/i40e_rxtx.c
+++ b/lib/librte_pmd_i40e/i40e_rxtx.c
@@ -1656,12 +1656,12 @@ i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct i40e_rx_queue *rxq;
        int err = -1;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

-       if (rx_queue_id < dev->data->nb_rx_queues) {
-               rxq = dev->data->rx_queues[rx_queue_id];
+       if (rx_queue_id < ETH_DATA(dev)->dd.nb_rx_queues) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];

                err = i40e_alloc_rx_queue_mbufs(rxq);
                if (err) {
@@ -1693,10 +1693,10 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct i40e_rx_queue *rxq;
        int err;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       if (rx_queue_id < dev->data->nb_rx_queues) {
-               rxq = dev->data->rx_queues[rx_queue_id];
+       if (rx_queue_id < ETH_DATA(dev)->dd.nb_rx_queues) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];

                /*
                * rx_queue_id is queue id application refers to, while
@@ -1721,12 +1721,12 @@ i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        int err = -1;
        struct i40e_tx_queue *txq;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

-       if (tx_queue_id < dev->data->nb_tx_queues) {
-               txq = dev->data->tx_queues[tx_queue_id];
+       if (tx_queue_id < ETH_DATA(dev)->dd.nb_tx_queues) {
+               txq = ETH_DATA(dev)->dd.tx_queues[tx_queue_id];

                /*
                * tx_queue_id is queue id aplication refers to, while
@@ -1746,10 +1746,10 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
        struct i40e_tx_queue *txq;
        int err;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       if (tx_queue_id < dev->data->nb_tx_queues) {
-               txq = dev->data->tx_queues[tx_queue_id];
+       if (tx_queue_id < ETH_DATA(dev)->dd.nb_tx_queues) {
+               txq = ETH_DATA(dev)->dd.tx_queues[tx_queue_id];

                /*
                * tx_queue_id is queue id aplication refers to, while
@@ -1779,8 +1779,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        struct rte_mempool *mp)
 {
        struct i40e_vsi *vsi;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_rx_queue *rxq;
        const struct rte_memzone *rz;
        uint32_t ring_size;
@@ -1789,7 +1789,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,

        if (hw->mac.type == I40E_MAC_VF) {
                struct i40e_vf *vf =
-                       I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+                       I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
                vsi = &vf->vsi;
        } else
                vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
@@ -1808,9 +1808,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        }

        /* Free memory if needed */
-       if (dev->data->rx_queues[queue_idx]) {
-               i40e_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
-               dev->data->rx_queues[queue_idx] = NULL;
+       if (ETH_DATA(dev)->dd.rx_queues[queue_idx]) {
+               i40e_dev_rx_queue_release(ETH_DATA(dev)->dd.rx_queues[queue_idx]);
+               ETH_DATA(dev)->dd.rx_queues[queue_idx] = NULL;
        }

        /* Allocate the rx queue data structure */
@@ -1833,8 +1833,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                rxq->reg_idx = vsi->base_queue +
                        i40e_get_queue_offset_by_qindex(pf, queue_idx);

-       rxq->port_id = dev->data->port_id;
-       rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
+       rxq->port_id = ETH_DATA(dev)->dd.port_id;
+       rxq->crc_len = (uint8_t) ((ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc) ?
                                                        0 : ETHER_CRC_LEN);
        rxq->drop_en = rx_conf->rx_drop_en;
        rxq->vsi = vsi;
@@ -1885,11 +1885,11 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,

        i40e_reset_rx_queue(rxq);
        rxq->q_set = TRUE;
-       dev->data->rx_queues[queue_idx] = rxq;
+       ETH_DATA(dev)->dd.rx_queues[queue_idx] = rxq;

        use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);

-       if (!use_def_burst_func && !dev->data->scattered_rx) {
+       if (!use_def_burst_func && !ETH_DATA(dev)->scattered_rx) {
 #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
                             "satisfied. Rx Burst Bulk Alloc function will be "
@@ -1931,12 +1931,12 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct i40e_rx_queue *rxq;
        uint16_t desc = 0;

-       if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
+       if (unlikely(rx_queue_id >= ETH_DATA(dev)->dd.nb_rx_queues)) {
                PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
                return 0;
        }

-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);
        while ((desc < rxq->nb_rx_desc) &&
                ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
@@ -1991,8 +1991,8 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        const struct rte_eth_txconf *tx_conf)
 {
        struct i40e_vsi *vsi;
-       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+       struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(ETH_DATA(dev)->dd.dev_private);
        struct i40e_tx_queue *txq;
        const struct rte_memzone *tz;
        uint32_t ring_size;
@@ -2000,7 +2000,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,

        if (hw->mac.type == I40E_MAC_VF) {
                struct i40e_vf *vf =
-                       I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+                       I40EVF_DEV_PRIVATE_TO_VF(ETH_DATA(dev)->dd.dev_private);
                vsi = &vf->vsi;
        } else
                vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
@@ -2048,7 +2048,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                             "number of TX descriptors minus 2. "
                             "(tx_rs_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_rs_thresh,
-                            (int)dev->data->port_id,
+                            (int)ETH_DATA(dev)->dd.port_id,
                             (int)queue_idx);
                return I40E_ERR_PARAM;
        }
@@ -2058,7 +2058,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                             "number of TX descriptors minus 3. "
                             "(tx_free_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
-                            (int)dev->data->port_id,
+                            (int)ETH_DATA(dev)->dd.port_id,
                             (int)queue_idx);
                return I40E_ERR_PARAM;
        }
@@ -2068,7 +2068,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                             " tx_rs_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
                             (unsigned int)tx_rs_thresh,
-                            (int)dev->data->port_id,
+                            (int)ETH_DATA(dev)->dd.port_id,
                             (int)queue_idx);
                return I40E_ERR_PARAM;
        }
@@ -2077,7 +2077,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                             "number of TX descriptors. (tx_rs_thresh=%u"
                             " port=%d queue=%d)",
                             (unsigned int)tx_rs_thresh,
-                            (int)dev->data->port_id,
+                            (int)ETH_DATA(dev)->dd.port_id,
                             (int)queue_idx);
                return I40E_ERR_PARAM;
        }
@@ -2086,15 +2086,15 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                             "tx_rs_thresh is greater than 1. "
                             "(tx_rs_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_rs_thresh,
-                            (int)dev->data->port_id,
+                            (int)ETH_DATA(dev)->dd.port_id,
                             (int)queue_idx);
                return I40E_ERR_PARAM;
        }

        /* Free memory if needed. */
-       if (dev->data->tx_queues[queue_idx]) {
-               i40e_dev_tx_queue_release(dev->data->tx_queues[queue_idx]);
-               dev->data->tx_queues[queue_idx] = NULL;
+       if (ETH_DATA(dev)->dd.tx_queues[queue_idx]) {
+               i40e_dev_tx_queue_release(ETH_DATA(dev)->dd.tx_queues[queue_idx]);
+               ETH_DATA(dev)->dd.tx_queues[queue_idx] = NULL;
        }

        /* Allocate the TX queue data structure. */
@@ -2135,7 +2135,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
                txq->reg_idx = vsi->base_queue +
                        i40e_get_queue_offset_by_qindex(pf, queue_idx);

-       txq->port_id = dev->data->port_id;
+       txq->port_id = ETH_DATA(dev)->dd.port_id;
        txq->txq_flags = tx_conf->txq_flags;
        txq->vsi = vsi;
        txq->tx_deferred_start = tx_conf->tx_deferred_start;
@@ -2161,7 +2161,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,

        i40e_reset_tx_queue(txq);
        txq->q_set = TRUE;
-       dev->data->tx_queues[queue_idx] = txq;
+       ETH_DATA(dev)->dd.tx_queues[queue_idx] = txq;

        /* Use a simple TX queue without offloads or multi segs if possible */
        if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS) &&
@@ -2203,7 +2203,7 @@ i40e_ring_dma_zone_reserve(struct rte_eth_dev *dev,

        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                        dev->driver->pci_drv.name, ring_name,
-                               dev->data->port_id, queue_id);
+                               ETH_DATA(dev)->dd.port_id, queue_id);
        mz = rte_memzone_lookup(z_name);
        if (mz)
                return mz;
@@ -2576,14 +2576,14 @@ i40e_dev_clear_queues(struct rte_eth_dev *dev)

        PMD_INIT_FUNC_TRACE();

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
-               i40e_reset_tx_queue(dev->data->tx_queues[i]);
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
+               i40e_tx_queue_release_mbufs(ETH_DATA(dev)->dd.tx_queues[i]);
+               i40e_reset_tx_queue(ETH_DATA(dev)->dd.tx_queues[i]);
        }

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
-               i40e_reset_rx_queue(dev->data->rx_queues[i]);
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+               i40e_rx_queue_release_mbufs(ETH_DATA(dev)->dd.rx_queues[i]);
+               i40e_reset_rx_queue(ETH_DATA(dev)->dd.rx_queues[i]);
        }
 }

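The same mechanical rewrite continues in the ixgbe hunks below. As a
hypothetical before/after (not code from the tree, assuming the layout
sketched above), the clear-queues pattern converts like this:

    static void
    example_clear_rx(struct rte_eth_dev *dev)
    {
            uint16_t i;

            /* was: for (i = 0; i < dev->data->nb_rx_queues; i++) */
            for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++)
                    /* was: i40e_reset_rx_queue(dev->data->rx_queues[i]); */
                    i40e_reset_rx_queue(ETH_DATA(dev)->dd.rx_queues[i]);

            /* ethdev-only state keeps the single hop: */
            ETH_DATA(dev)->dev_link.link_status = 0;  /* was: dev->data->dev_link */
    }
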
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
index 5caee22..1479b44 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
@@ -116,7 +116,7 @@

 #define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))

-static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_init(struct rte_eth_dev *dev);
 static int  ixgbe_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbe_dev_start(struct rte_eth_dev *dev);
 static void ixgbe_dev_stop(struct rte_eth_dev *dev);
@@ -132,7 +132,7 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
 static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
 static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
-static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                             uint16_t queue_id,
                                             uint8_t stat_idx,
                                             uint8_t is_rx);
@@ -182,7 +182,7 @@ static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
 static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);

 /* For Virtual Function support */
-static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbevf_dev_init(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_configure(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
@@ -426,7 +426,7 @@ rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
 {
        struct rte_eth_link *dst = link;
-       struct rte_eth_link *src = &(dev->data->dev_link);
+       struct rte_eth_link *src = &(ETH_DATA(dev)->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                        *(uint64_t *)src) == 0)
@@ -451,7 +451,7 @@ static inline int
 rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
 {
-       struct rte_eth_link *dst = &(dev->data->dev_link);
+       struct rte_eth_link *dst = &(ETH_DATA(dev)->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
@@ -501,9 +501,9 @@ static inline void
 ixgbe_enable_intr(struct rte_eth_dev *dev)
 {
        struct ixgbe_interrupt *intr =
-               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        IXGBE_WRITE_REG(hw, IXGBE_EIMS, intr->mask);
        IXGBE_WRITE_FLUSH(hw);
@@ -546,7 +546,7 @@ ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)


 static int
-ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
+ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
                                  uint16_t queue_id,
                                  uint8_t stat_idx,
                                  uint8_t is_rx)
@@ -555,9 +555,9 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
 #define NB_QMAP_FIELDS_PER_QSM_REG 4
 #define QMAP_FIELD_RESERVED_BITS_MASK 0x0f

-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_stat_mapping_registers *stat_mappings =
-               IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(ETH_DATA(dev)->dd.dev_private);
        uint32_t qsmr_mask = 0;
        uint32_t clearing_mask = QMAP_FIELD_RESERVED_BITS_MASK;
        uint32_t q_map;
@@ -570,7 +570,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                return -ENOSYS;

        PMD_INIT_LOG(INFO, "Setting port %d, %s queue_id %d to stat index %d",
-                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+                    (int)(ETH_DATA(dev)->dd.port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);

        n = (uint8_t)(queue_id / NB_QMAP_FIELDS_PER_QSM_REG);
@@ -596,7 +596,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
                stat_mappings->rqsmr[n] |= qsmr_mask;

        PMD_INIT_LOG(INFO, "Set port %d, %s queue_id %d to stat index %d",
-                    (int)(eth_dev->data->port_id), is_rx ? "RX" : "TX",
+                    (int)(ETH_DATA(dev)->dd.port_id), is_rx ? "RX" : "TX",
                     queue_id, stat_idx);
        PMD_INIT_LOG(INFO, "%s[%d] = 0x%08x", is_rx ? "RQSMR" : "TQSM", n,
                     is_rx ? stat_mappings->rqsmr[n] : stat_mappings->tqsm[n]);
@@ -619,8 +619,8 @@ static void
 ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
 {
        struct ixgbe_stat_mapping_registers *stat_mappings =
-               IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(ETH_DATA(dev)->dd.dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int i;

        /* write whatever was in stat mapping table to the NIC */
@@ -715,28 +715,28 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
  * It returns 0 on success.
  */
 static int
-eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
+eth_ixgbe_dev_init(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vfta * shadow_vfta =
-               IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hwstrip *hwstrip =
-               IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_dcb_config *dcb_config =
-               IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_DCB_CFG(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_filter_info *filter_info =
-               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        uint32_t ctrl_ext;
        uint16_t csum;
        int diag, i;

        PMD_INIT_FUNC_TRACE();

-       eth_dev->dev_ops = &ixgbe_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
-       eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+       dev->dev_ops = &ixgbe_eth_dev_ops;
+       dev->rx_pkt_burst = &ixgbe_recv_pkts;
+       dev->tx_pkt_burst = &ixgbe_xmit_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
@@ -747,20 +747,20 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                struct ixgbe_tx_queue *txq;
                /* TX queue function in primary, set by last queue initialized
                 * Tx queue may not initialized by primary process */
-               if (eth_dev->data->tx_queues) {
-                       txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
-                       ixgbe_set_tx_function(eth_dev, txq);
+               if (ETH_DATA(dev)->dd.tx_queues) {
+                       txq = ETH_DATA(dev)->dd.tx_queues[ETH_DATA(dev)->dd.nb_tx_queues-1];
+                       ixgbe_set_tx_function(dev, txq);
                } else {
                        /* Use default TX function if we get here */
                        PMD_INIT_LOG(INFO, "No TX queues configured yet. "
                                           "Using default TX function.");
                }

-               ixgbe_set_rx_function(eth_dev);
+               ixgbe_set_rx_function(dev);

                return 0;
        }
-       pci_dev = eth_dev->pci_dev;
+       pci_dev = dev->pci_dev;

        /* Vendor and Device ID need to be set before init of shared code */
        hw->device_id = pci_dev->id.device_id;
@@ -848,9 +848,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        ixgbe_reset_qstat_mappings(hw);

        /* Allocate memory for storing MAC addresses */
-       eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+       ETH_DATA(dev)->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
                        hw->mac.num_rar_entries, 0);
-       if (eth_dev->data->mac_addrs == NULL) {
+       if (ETH_DATA(dev)->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %u bytes needed to store "
                        "MAC addresses",
@@ -859,12 +859,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
-                       &eth_dev->data->mac_addrs[0]);
+                       &ETH_DATA(dev)->mac_addrs[0]);

        /* Allocate memory for storing hash filter MAC addresses */
-       eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+       ETH_DATA(dev)->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
                        IXGBE_VMDQ_NUM_UC_MAC, 0);
-       if (eth_dev->data->hash_mac_addrs == NULL) {
+       if (ETH_DATA(dev)->hash_mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC 
addresses",
                        ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
@@ -878,7 +878,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
        memset(hwstrip, 0, sizeof(*hwstrip));

        /* initialize PF if max_vfs not zero */
-       ixgbe_pf_host_init(eth_dev);
+       ixgbe_pf_host_init(dev);

        ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
        /* let hardware know driver is loaded */
@@ -897,17 +897,17 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
                             (int) hw->mac.type, (int) hw->phy.type);

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
-                       eth_dev->data->port_id, pci_dev->id.vendor_id,
+                       ETH_DATA(dev)->dd.port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);

        rte_intr_callback_register(&(pci_dev->intr_handle),
-               ixgbe_dev_interrupt_handler, (void *)eth_dev);
+               ixgbe_dev_interrupt_handler, (void *)dev);

        /* enable uio intr after callback register */
        rte_intr_enable(&(pci_dev->intr_handle));

        /* enable support intr */
-       ixgbe_enable_intr(eth_dev);
+       ixgbe_enable_intr(dev);

        /* initialize 5tuple filter list */
        TAILQ_INIT(&filter_info->fivetuple_list);
@@ -963,35 +963,35 @@ generate_random_mac_addr(struct ether_addr *mac_addr)
  * Virtual Function device init
  */
 static int
-eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
+eth_ixgbevf_dev_init(struct rte_eth_dev *dev)
 {
        int diag;
        uint32_t tc, tcs;
        struct rte_pci_device *pci_dev;
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vfta * shadow_vfta =
-               IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hwstrip *hwstrip =
-               IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(ETH_DATA(dev)->dd.dev_private);
        struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;

        PMD_INIT_FUNC_TRACE();

-       eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
-       eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+       dev->dev_ops = &ixgbevf_eth_dev_ops;
+       dev->rx_pkt_burst = &ixgbe_recv_pkts;
+       dev->tx_pkt_burst = &ixgbe_xmit_pkts;

        /* for secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY){
-               if (eth_dev->data->scattered_rx)
-                       eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+               if (ETH_DATA(dev)->scattered_rx)
+                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
                return 0;
        }

-       pci_dev = eth_dev->pci_dev;
+       pci_dev = dev->pci_dev;

        hw->device_id = pci_dev->id.device_id;
        hw->vendor_id = pci_dev->id.vendor_id;
@@ -1036,9 +1036,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        ixgbevf_get_queues(hw, &tcs, &tc);

        /* Allocate memory for storing MAC addresses */
-       eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
+       ETH_DATA(dev)->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
                        hw->mac.num_rar_entries, 0);
-       if (eth_dev->data->mac_addrs == NULL) {
+       if (ETH_DATA(dev)->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %u bytes needed to store "
                        "MAC addresses",
@@ -1051,8 +1051,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
                generate_random_mac_addr(perm_addr);
                diag = ixgbe_set_rar_vf(hw, 1, perm_addr->addr_bytes, 0, 1);
                if (diag) {
-                       rte_free(eth_dev->data->mac_addrs);
-                       eth_dev->data->mac_addrs = NULL;
+                       rte_free(ETH_DATA(dev)->mac_addrs);
+                       ETH_DATA(dev)->mac_addrs = NULL;
                        return diag;
                }
                PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
@@ -1067,7 +1067,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        }

        /* Copy the permanent MAC address */
-       ether_addr_copy(perm_addr, &eth_dev->data->mac_addrs[0]);
+       ether_addr_copy(perm_addr, &ETH_DATA(dev)->mac_addrs[0]);

        /* reset the hardware with the new settings */
        diag = hw->mac.ops.start_hw(hw);
@@ -1081,7 +1081,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
        }

        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
-                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    ETH_DATA(dev)->dd.port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id, "ixgbe_mac_82599_vf");

        return 0;
@@ -1093,7 +1093,7 @@ static struct eth_driver rte_ixgbe_pmd = {
                .id_table = pci_id_ixgbe_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
        },
-       .eth_dev_init = eth_ixgbe_dev_init,
+       .dev_init = eth_ixgbe_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
 };

@@ -1106,7 +1106,7 @@ static struct eth_driver rte_ixgbevf_pmd = {
                .id_table = pci_id_ixgbevf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
-       .eth_dev_init = eth_ixgbevf_dev_init,
+       .dev_init = eth_ixgbevf_dev_init,
        .dev_private_size = sizeof(struct ixgbe_adapter),
 };

@@ -1142,9 +1142,9 @@ static int
 ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vfta * shadow_vfta =
-               IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        uint32_t vfta;
        uint32_t vid_idx;
        uint32_t vid_bit;
@@ -1177,7 +1177,7 @@ static void
 ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /* Only the high 16-bits is valid */
        IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
@@ -1187,7 +1187,7 @@ void
 ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t vlnctrl;

        PMD_INIT_FUNC_TRACE();
@@ -1203,9 +1203,9 @@ void
 ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vfta * shadow_vfta =
-               IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        uint32_t vlnctrl;
        uint16_t i;

@@ -1227,7 +1227,7 @@ static void
 ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 {
        struct ixgbe_hwstrip *hwstrip =
-               IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(ETH_DATA(dev)->dd.dev_private);

        if(queue >= IXGBE_MAX_RX_QUEUE_NUM)
                return;
@@ -1242,7 +1242,7 @@ static void
 ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();
@@ -1266,7 +1266,7 @@ static void
 ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();
@@ -1290,7 +1290,7 @@ void
 ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t ctrl;
        uint16_t i;

@@ -1303,7 +1303,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
        }
        else {
                /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
                        ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                        ctrl &= ~IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
@@ -1318,7 +1318,7 @@ void
 ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t ctrl;
        uint16_t i;

@@ -1331,7 +1331,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
        }
        else {
                /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
-               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
                        ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
                        ctrl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
@@ -1346,7 +1346,7 @@ static void
 ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();
@@ -1367,7 +1367,7 @@ static void
 ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();
@@ -1392,21 +1392,21 @@ static void
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        if(mask & ETH_VLAN_STRIP_MASK){
-               if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip)
                        ixgbe_vlan_hw_strip_enable_all(dev);
                else
                        ixgbe_vlan_hw_strip_disable_all(dev);
        }

        if(mask & ETH_VLAN_FILTER_MASK){
-               if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_filter)
                        ixgbe_vlan_hw_filter_enable(dev);
                else
                        ixgbe_vlan_hw_filter_disable(dev);
        }

        if(mask & ETH_VLAN_EXTEND_MASK){
-               if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_extend)
                        ixgbe_vlan_hw_extend_enable(dev);
                else
                        ixgbe_vlan_hw_extend_disable(dev);
@@ -1417,7 +1417,7 @@ static void
 ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
        uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
@@ -1428,9 +1428,9 @@ static int
 ixgbe_dev_configure(struct rte_eth_dev *dev)
 {
        struct ixgbe_interrupt *intr =
-               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

@@ -1455,9 +1455,9 @@ static int
 ixgbe_dev_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vf_info *vfinfo =
-               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private);
        int err, link_up = 0, negotiate = 0;
        uint32_t speed = 0;
        int mask = 0;
@@ -1467,11 +1467,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();

        /* IXGBE devices don't support half duplex */
-       if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
-                       (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
+       if ((ETH_DATA(dev)->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
+                       (ETH_DATA(dev)->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
                PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu",
-                            dev->data->dev_conf.link_duplex,
-                            dev->data->port_id);
+                            ETH_DATA(dev)->dev_conf.link_duplex,
+                            ETH_DATA(dev)->dd.port_id);
                return -EINVAL;
        }

@@ -1508,7 +1508,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)

        /* Skip link setup if loopback mode is enabled for 82599. */
        if (hw->mac.type == ixgbe_mac_82599EB &&
-                       dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+                       ETH_DATA(dev)->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
                goto skip_link_setup;

        if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
@@ -1523,13 +1523,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
        err = ixgbe_check_link(hw, &speed, &link_up, 0);
        if (err)
                goto error;
-       dev->data->dev_link.link_status = link_up;
+       ETH_DATA(dev)->dev_link.link_status = link_up;

        err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
        if (err)
                goto error;

-       switch(dev->data->dev_conf.link_speed) {
+       switch(ETH_DATA(dev)->dev_conf.link_speed) {
        case ETH_LINK_SPEED_AUTONEG:
                speed = (hw->mac.type != ixgbe_mac_82598EB) ?
                                IXGBE_LINK_SPEED_82599_AUTONEG :
@@ -1550,8 +1550,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                break;
        default:
                PMD_INIT_LOG(ERR, "Invalid link_speed (%hu) for port %hhu",
-                            dev->data->dev_conf.link_speed,
-                            dev->data->port_id);
+                            ETH_DATA(dev)->dev_conf.link_speed,
+                            ETH_DATA(dev)->dd.port_id);
                goto error;
        }

@@ -1562,7 +1562,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 skip_link_setup:

        /* check if lsc interrupt is enabled */
-       if (dev->data->dev_conf.intr_conf.lsc != 0)
+       if (ETH_DATA(dev)->dev_conf.intr_conf.lsc != 0)
                ixgbe_dev_lsc_interrupt_setup(dev);

        /* resume enabled intr since hw reset */
@@ -1572,7 +1572,7 @@ skip_link_setup:
                ETH_VLAN_EXTEND_MASK;
        ixgbe_vlan_offload_set(dev, mask);

-       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+       if (ETH_DATA(dev)->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
                /* Enable vlan filtering for VMDq */
                ixgbe_vmdq_vlan_hw_filter_enable(dev);
        }
@@ -1580,7 +1580,7 @@ skip_link_setup:
        /* Configure DCB hw */
        ixgbe_configure_dcb(dev);

-       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+       if (ETH_DATA(dev)->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
                err = ixgbe_fdir_configure(dev);
                if (err)
                        goto error;
@@ -1614,11 +1614,11 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 {
        struct rte_eth_link link;
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vf_info *vfinfo =
-               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_filter_info *filter_info =
-               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
        int vf;

@@ -1644,7 +1644,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
        ixgbe_dev_clear_queues(dev);

        /* Clear stored conf */
-       dev->data->scattered_rx = 0;
+       ETH_DATA(dev)->scattered_rx = 0;

        /* Clear recorded link status */
        memset(&link, 0, sizeof(link));
@@ -1670,7 +1670,7 @@ static int
 ixgbe_dev_set_link_up(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        if (hw->mac.type == ixgbe_mac_82599EB) {
 #ifdef RTE_NIC_BYPASS
                if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
@@ -1697,7 +1697,7 @@ static int
 ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        if (hw->mac.type == ixgbe_mac_82599EB) {
 #ifdef RTE_NIC_BYPASS
                if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
@@ -1724,7 +1724,7 @@ static void
 ixgbe_dev_close(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

@@ -1746,9 +1746,9 @@ static void
 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        struct ixgbe_hw *hw =
-                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hw_stats *hw_stats =
-                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);
        uint32_t bprc, lxon, lxoff, total;
        uint64_t total_missed_rx, total_qbrc, total_qprc;
        unsigned i;
@@ -1927,7 +1927,7 @@ static void
 ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw_stats *stats =
-                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);

        /* HW registers are cleared on read */
        ixgbe_dev_stats_get(dev, NULL);
@@ -1939,9 +1939,9 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
 static void
 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
-                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+                         IXGBE_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);

        /* Good Rx packet, include VF loopback */
        UPDATE_VF_STAT(IXGBE_VFGPRC,
@@ -1977,7 +1977,7 @@ static void
 ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
 {
        struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
-                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_STATS(ETH_DATA(dev)->dd.dev_private);

        /* Sync HW register to the last stats */
        ixgbevf_dev_stats_get(dev, NULL);
@@ -1994,7 +1994,7 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
 static void
 ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
@@ -2050,7 +2050,7 @@ static void
 ixgbevf_dev_info_get(struct rte_eth_dev *dev,
                     struct rte_eth_dev_info *dev_info)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
@@ -2100,7 +2100,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
 static int
 ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct rte_eth_link link, old;
        ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
        int link_up;
@@ -2113,7 +2113,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
        rte_ixgbe_dev_atomic_read_link_status(dev, &old);

        /* check if it needs to wait to complete, if lsc interrupt is enabled */
-       if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
+       if (wait_to_complete == 0 || ETH_DATA(dev)->dev_conf.intr_conf.lsc != 0)
                diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
        else
                diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
@@ -2171,7 +2171,7 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 static void
 ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t fctrl;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -2182,12 +2182,12 @@ ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
 static void
 ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t fctrl;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        fctrl &= (~IXGBE_FCTRL_UPE);
-       if (dev->data->all_multicast == 1)
+       if (ETH_DATA(dev)->all_multicast == 1)
                fctrl |= IXGBE_FCTRL_MPE;
        else
                fctrl &= (~IXGBE_FCTRL_MPE);
@@ -2197,7 +2197,7 @@ ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
 static void
 ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t fctrl;

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -2208,10 +2208,10 @@ ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
 static void
 ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t fctrl;

-       if (dev->data->promiscuous == 1)
+       if (ETH_DATA(dev)->promiscuous == 1)
                return; /* must remain in all_multicast mode */

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -2234,7 +2234,7 @@ static int
 ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev)
 {
        struct ixgbe_interrupt *intr =
-               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);

        ixgbe_dev_link_status_print(dev);
        intr->mask |= IXGBE_EICR_LSC;
@@ -2256,9 +2256,9 @@ static int
 ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
 {
        uint32_t eicr;
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_interrupt *intr =
-               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);

        /* clear all cause mask */
        ixgbe_disable_intr(hw);
@@ -2298,13 +2298,13 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
        rte_ixgbe_dev_atomic_read_link_status(dev, &link);
        if (link.link_status) {
                PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
-                                       (int)(dev->data->port_id),
+                                       (int)(ETH_DATA(dev)->dd.port_id),
                                        (unsigned)link.link_speed,
                        link.link_duplex == ETH_LINK_FULL_DUPLEX ?
                                        "full-duplex" : "half-duplex");
        } else {
                PMD_INIT_LOG(INFO, " Port %d: Link Down",
-                               (int)(dev->data->port_id));
+                               (int)(ETH_DATA(dev)->dd.port_id));
        }
        PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
                                dev->pci_dev->addr.domain,
@@ -2327,7 +2327,7 @@ static int
 ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
 {
        struct ixgbe_interrupt *intr =
-               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);
        int64_t timeout;
        struct rte_eth_link link;
        int intr_enable_delay = false;
@@ -2393,9 +2393,9 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
 {
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct ixgbe_interrupt *intr =
-               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t eicr;

        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
@@ -2406,7 +2406,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
                ixgbe_dev_link_update(dev, 0);
                intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
                ixgbe_dev_link_status_print(dev);
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+               _rte_eth_dev_callback_process(dev, RTE_DEV_EVENT_INTR_LSC);
        }

        PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
@@ -2440,7 +2440,7 @@ ixgbe_dev_led_on(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw;

-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
 }

@@ -2449,7 +2449,7 @@ ixgbe_dev_led_off(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw;

-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
 }

@@ -2462,7 +2462,7 @@ ixgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        int rx_pause;
        int tx_pause;

-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        fc_conf->pause_time = hw->fc.pause_time;
        fc_conf->high_water = hw->fc.high_water[0];
@@ -2519,7 +2519,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)

        PMD_INIT_FUNC_TRACE();

-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        if (fc_conf->autoneg != !hw->fc.disable_fc_autoneg)
                return -ENOTSUP;
        rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
@@ -2711,7 +2711,7 @@ out:
 static int
 ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int32_t ret_val = IXGBE_NOT_IMPLEMENTED;

        if(hw->mac.type != ixgbe_mac_82598EB) {
@@ -2729,9 +2729,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p
        uint8_t tc_num;
        uint8_t  map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
        struct ixgbe_hw *hw =
-                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+                IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_dcb_config *dcb_config =
-                IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+                IXGBE_DEV_PRIVATE_TO_DCB_CFG(ETH_DATA(dev)->dd.dev_private);

        enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
                ixgbe_fc_none,
@@ -2782,7 +2782,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
        uint8_t i, j, mask;
        uint32_t reta, r;
        uint16_t idx, shift;
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();
        if (reta_size != ETH_RSS_RETA_SIZE_128) {
@@ -2825,7 +2825,7 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
        uint8_t i, j, mask;
        uint32_t reta;
        uint16_t idx, shift;
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();
        if (reta_size != ETH_RSS_RETA_SIZE_128) {
@@ -2859,7 +2859,7 @@ static void
 ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t enable_addr = 1;

        ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
@@ -2868,7 +2868,7 @@ ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 static void
 ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        ixgbe_clear_rar(hw, index);
 }
@@ -2890,30 +2890,30 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)

        /* refuse mtu that requires the support of scattered packets when this
         * feature has not been enabled before. */
-       if (!dev->data->scattered_rx &&
+       if (!ETH_DATA(dev)->scattered_rx &&
            (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
-            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+            ETH_DATA(dev)->dd.min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
                return -EINVAL;

-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);

        /* switch to jumbo mode if needed */
        if (frame_size > ETHER_MAX_LEN) {
-               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame = 1;
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
        } else {
-               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame = 0;
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
        }
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

        /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+       ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
        maxfrs &= 0x0000FFFF;
-       maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+       maxfrs |= (ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len << 16);
        IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);

        return 0;
@@ -2936,10 +2936,10 @@ ixgbevf_intr_disable(struct ixgbe_hw *hw)
 static int
 ixgbevf_dev_configure(struct rte_eth_dev *dev)
 {
-       struct rte_eth_conf* conf = &dev->data->dev_conf;
+       struct rte_eth_conf* conf = &ETH_DATA(dev)->dev_conf;

        PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
-                    dev->data->port_id);
+                    ETH_DATA(dev)->dd.port_id);

        /*
         * VF has no ability to enable/disable HW CRC
@@ -2964,7 +2964,7 @@ static int
 ixgbevf_dev_start(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int err, mask = 0;

        PMD_INIT_FUNC_TRACE();
@@ -3001,7 +3001,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 static void
 ixgbevf_dev_stop(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

@@ -3015,7 +3015,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
        ixgbevf_set_vfta_all(dev,0);

        /* Clear stored conf */
-       dev->data->scattered_rx = 0;
+       ETH_DATA(dev)->scattered_rx = 0;

        ixgbe_dev_clear_queues(dev);
 }
@@ -3023,7 +3023,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 static void
 ixgbevf_dev_close(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

@@ -3037,9 +3037,9 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)

 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vfta * shadow_vfta =
-               IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        int i = 0, j = 0, vfta = 0, mask = 1;

        for (i = 0; i < IXGBE_VFTA_SIZE; i++){
@@ -3060,9 +3060,9 @@ static int
 ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vfta * shadow_vfta =
-               IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_VFTA(ETH_DATA(dev)->dd.dev_private);
        uint32_t vid_idx = 0;
        uint32_t vid_bit = 0;
        int ret = 0;
@@ -3091,7 +3091,7 @@ static void
 ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t ctrl;

        PMD_INIT_FUNC_TRACE();
@@ -3113,13 +3113,13 @@ static void
 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint16_t i;
        int on = 0;

        /* VF function only support hw strip feature, others are not support */
        if(mask & ETH_VLAN_STRIP_MASK){
-               on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
+               on = !!(ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip);

                for(i=0; i < hw->mac.max_rx_queues; i++)
                        ixgbevf_vlan_strip_queue_set(dev,i,on);
@@ -3186,9 +3186,9 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
        const uint32_t bit1 = 0x1;

        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_uta_info *uta_info =
-               IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_UTA(ETH_DATA(dev)->dd.dev_private);

        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
@@ -3229,9 +3229,9 @@ ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
 {
        int i;
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_uta_info *uta_info =
-               IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_UTA(ETH_DATA(dev)->dd.dev_private);

        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
@@ -3278,7 +3278,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
        int val = 0;

        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));

        if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -3309,7 +3309,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
        const uint8_t bit1 = 0x1;

        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
@@ -3336,7 +3336,7 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
        const uint8_t bit1 = 0x1;

        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
@@ -3362,7 +3362,7 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
        int ret = 0;
        uint16_t pool_idx;
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
@@ -3397,9 +3397,9 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
        const uint8_t mirror_rule_mask= 0x0F;

        struct ixgbe_mirror_info *mr_info =
-                       (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+                       (IXGBE_DEV_PRIVATE_TO_PFDATA(ETH_DATA(dev)->dd.dev_private));
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
@@ -3508,9 +3508,9 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
        const uint8_t rule_mr_offset = 4;

        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_mirror_info *mr_info =
-               (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+               (IXGBE_DEV_PRIVATE_TO_PFDATA(ETH_DATA(dev)->dd.dev_private));

        if (ixgbe_vmdq_mode_check(hw) < 0)
                return (-ENOTSUP);
@@ -3535,10 +3535,10 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
        uint16_t queue_idx, uint16_t tx_rate)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t rf_dec, rf_int;
        uint32_t bcnrc_val;
-       uint16_t link_speed = dev->data->dev_link.link_speed;
+       uint16_t link_speed = ETH_DATA(dev)->dev_link.link_speed;

        if (queue_idx >= hw->mac.max_tx_queues)
                return -EINVAL;
@@ -3562,8 +3562,8 @@ static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
         * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
         * set as 0x4.
         */
-       if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
-               (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+       if ((ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame == 1) &&
+               (ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len >=
                                IXGBE_MAX_JUMBO_FRAME_SIZE))
                IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
                        IXGBE_MMW_SIZE_JUMBO_FRAME);
@@ -3582,12 +3582,12 @@ static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
 static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
        uint16_t tx_rate, uint64_t q_msk)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vf_info *vfinfo =
-               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-       uint8_t  nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));
+       uint8_t  nb_q_per_pool = ETH_SRIOV(dev).nb_q_per_pool;
        uint32_t queue_stride =
-               IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+               IXGBE_MAX_RX_QUEUE_NUM / ETH_SRIOV(dev).active;
        uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
        uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
        uint16_t total_rate = 0;
@@ -3615,7 +3615,7 @@ static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
                }
        }

-       if (total_rate > dev->data->dev_link.link_speed) {
+       if (total_rate > ETH_DATA(dev)->dev_link.link_speed) {
                /*
                 * Reset stored TX rate of the VF if it causes exceed
                 * link speed.
@@ -3639,7 +3639,7 @@ ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                     __attribute__((unused)) uint32_t index,
                     __attribute__((unused)) uint32_t pool)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int diag;

        /*
@@ -3658,7 +3658,7 @@ ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 static void
 ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
        struct ether_addr *mac_addr;
        uint32_t i;
@@ -3676,7 +3676,7 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
         * Add again all MAC addresses, with the exception of the deleted one
         * and of the permanent MAC address.
         */
-       for (i = 0, mac_addr = dev->data->mac_addrs;
+       for (i = 0, mac_addr = ETH_DATA(dev)->mac_addrs;
             i < hw->mac.num_rar_entries; i++, mac_addr++) {
                /* Skip the deleted MAC address */
                if (i == index)
@@ -3714,7 +3714,7 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter,
                        bool add)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t synqf;

        if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
@@ -3746,7 +3746,7 @@ static int
 ixgbe_syn_filter_get(struct rte_eth_dev *dev,
                        struct rte_eth_syn_filter *filter)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);

        if (synqf & IXGBE_SYN_FILTER_ENABLE) {
@@ -3762,7 +3762,7 @@ ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op,
                        void *arg)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);
@@ -3831,9 +3831,9 @@ static int
 ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_filter_info *filter_info =
-               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        int i, idx, shift;
        uint32_t ftqf, sdpqf;
        uint32_t l34timir = 0;
@@ -3905,9 +3905,9 @@ static void
 ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
                        struct ixgbe_5tuple_filter *filter)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_filter_info *filter_info =
-               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        uint16_t index = filter->index;

        filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
@@ -3928,16 +3928,16 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        struct ixgbe_hw *hw;
        uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
                return -EINVAL;

        /* refuse mtu that requires the support of scattered packets when this
         * feature has not been enabled before. */
-       if (!dev->data->scattered_rx &&
+       if (!ETH_DATA(dev)->scattered_rx &&
            (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
-            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+            ETH_DATA(dev)->dd.min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
                return -EINVAL;

        /*
@@ -3952,7 +3952,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        ixgbevf_rlpml_set_vf(hw, max_frame);

        /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+       ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len = max_frame;
        return 0;
 }

@@ -4074,7 +4074,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
                        bool add)
 {
        struct ixgbe_filter_info *filter_info =
-               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_5tuple_filter_info filter_5tuple;
        struct ixgbe_5tuple_filter *filter;
        int ret;
@@ -4136,7 +4136,7 @@ ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ntuple_filter *ntuple_filter)
 {
        struct ixgbe_filter_info *filter_info =
-               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_5tuple_filter_info filter_5tuple;
        struct ixgbe_5tuple_filter *filter;
        int ret;
@@ -4176,7 +4176,7 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
@@ -4259,9 +4259,9 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter,
                        bool add)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_filter_info *filter_info =
-               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        uint32_t etqf = 0;
        uint32_t etqs = 0;
        int ret;
@@ -4326,9 +4326,9 @@ static int
 ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
                        struct rte_eth_ethertype_filter *filter)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_filter_info *filter_info =
-               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_FILTER_INFO(ETH_DATA(dev)->dd.dev_private);
        uint32_t etqf, etqs;
        int ret;

@@ -4362,7 +4362,7 @@ ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
                                enum rte_filter_op filter_op,
                                void *arg)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);
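
Note before the next file: the accessor macros themselves are not visible in
these hunks, but every call site follows the same substitution. A minimal
sketch of what they presumably expand to, inferred only from the usage above;
the names and field placement here are assumptions, and the real definitions
live in the ethdev header changes earlier in this series:

	/* Sketch only: reconstructed from call sites, not from the headers. */
	#define ETH_DATA(dev)	((dev)->data)		/* struct rte_eth_dev_data * */
	#define ETH_SRIOV(dev)	(ETH_DATA(dev)->sriov)	/* was RTE_ETH_DEV_SRIOV() */

	/* Generic fields now sit behind an embedded 'dd' member, e.g.: */
	static inline struct ixgbe_hw *
	sketch_hw(struct rte_eth_dev *dev)	/* hypothetical helper */
	{
		return IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
	}
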
diff --git a/lib/librte_pmd_ixgbe/ixgbe_fdir.c b/lib/librte_pmd_ixgbe/ixgbe_fdir.c
index afc53cb..1a4b2a3 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_fdir.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_fdir.c
@@ -288,9 +288,9 @@ static int
 fdir_set_input_mask_82599(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_masks *input_mask)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hw_fdir_info *info =
-                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(ETH_DATA(dev)->dd.dev_private);
        /*
         * mask VM pool and DIPv6 since there are currently not supported
         * mask FLEX byte, it will be set in flex_conf
@@ -346,7 +346,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
        info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
        info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;

-       if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
+       if (ETH_DATA(dev)->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
                /*
                 * IPv6 mask is only meaningful in signature mode
                 * Store source and destination IPv6 masks (bit reversed)
@@ -371,9 +371,9 @@ static int
 ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
                const struct rte_eth_fdir_flex_conf *conf)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hw_fdir_info *info =
-                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(ETH_DATA(dev)->dd.dev_private);
        const struct rte_eth_flex_payload_cfg *flex_cfg;
        const struct rte_eth_fdir_flex_mask *flex_mask;
        uint32_t fdirctrl, fdirm;
@@ -434,7 +434,7 @@ ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
 int
 ixgbe_fdir_configure(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int err;
        uint32_t fdirctrl, pbsize;
        int i;
@@ -447,7 +447,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
                hw->mac.type != ixgbe_mac_X550EM_x)
                return -ENOSYS;

-       err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
+       err = configure_fdir_flags(&ETH_DATA(dev)->dev_conf.fdir_conf, &fdirctrl);
        if (err)
                return err;

@@ -469,13 +469,13 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
        for (i = 1; i < 8; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);

-       err = fdir_set_input_mask_82599(dev, &dev->data->dev_conf.fdir_conf.mask);
+       err = fdir_set_input_mask_82599(dev, &ETH_DATA(dev)->dev_conf.fdir_conf.mask);
        if (err < 0) {
                PMD_INIT_LOG(ERR, " Error on setting FD mask");
                return err;
        }
        err = ixgbe_set_fdir_flex_conf(dev,
-               &dev->data->dev_conf.fdir_conf.flex_conf);
+               &ETH_DATA(dev)->dev_conf.fdir_conf.flex_conf);
        if (err < 0) {
                PMD_INIT_LOG(ERR, " Error on setting FD flexible arguments.");
                return err;
@@ -892,7 +892,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
                              bool del,
                              bool update)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t fdircmd_flags;
        uint32_t fdirhash;
        union ixgbe_atr_input input;
@@ -900,10 +900,10 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
        bool is_perfect = FALSE;
        int err;

-       if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
+       if (ETH_DATA(dev)->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
                return -ENOTSUP;

-       if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+       if (ETH_DATA(dev)->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
                is_perfect = TRUE;

        memset(&input, 0, sizeof(input));
@@ -919,12 +919,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
                        return -ENOTSUP;
                }
                fdirhash = atr_compute_perfect_hash_82599(&input,
-                               dev->data->dev_conf.fdir_conf.pballoc);
+                               ETH_DATA(dev)->dev_conf.fdir_conf.pballoc);
                fdirhash |= fdir_filter->soft_id <<
                                IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
        } else
                fdirhash = atr_compute_sig_hash_82599(&input,
-                               dev->data->dev_conf.fdir_conf.pballoc);
+                               ETH_DATA(dev)->dev_conf.fdir_conf.pballoc);

        if (del) {
                err = fdir_erase_filter_82599(hw, fdirhash);
@@ -938,7 +938,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
        fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
        if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
                if (is_perfect) {
-                       queue = dev->data->dev_conf.fdir_conf.drop_queue;
+                       queue = ETH_DATA(dev)->dev_conf.fdir_conf.drop_queue;
                        fdircmd_flags |= IXGBE_FDIRCMD_DROP;
                } else {
                        PMD_DRV_LOG(ERR, "Drop option is not supported in"
@@ -968,9 +968,9 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
 static int
 ixgbe_fdir_flush(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hw_fdir_info *info =
-                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(ETH_DATA(dev)->dd.dev_private);
        int ret;

        ret = ixgbe_reinit_fdir_tables_82599(hw);
@@ -991,9 +991,9 @@ ixgbe_fdir_flush(struct rte_eth_dev *dev)
 static void
 ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hw_fdir_info *info =
-                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(ETH_DATA(dev)->dd.dev_private);
        uint32_t fdirctrl, max_num;
        uint8_t offset;

@@ -1001,7 +1001,7 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
        offset = ((fdirctrl & IXGBE_FDIRCTRL_FLEX_MASK) >>
                        IXGBE_FDIRCTRL_FLEX_SHIFT) * sizeof(uint16_t);

-       fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
+       fdir_info->mode = ETH_DATA(dev)->dev_conf.fdir_conf.mode;
        max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
                        (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
        if (fdir_info->mode == RTE_FDIR_MODE_PERFECT)
@@ -1038,9 +1038,9 @@ ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info
 static void
 ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_stats)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hw_fdir_info *info =
-                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg, max_num;

        /* Get the information from registers */
@@ -1081,9 +1081,9 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st
        reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
        max_num = (1 << (FDIRENTRIES_NUM_SHIFT +
                        (reg & FDIRCTRL_PBALLOC_MASK)));
-       if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+       if (ETH_DATA(dev)->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
                        fdir_stats->guarant_cnt = max_num - fdir_stats->free;
-       else if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE)
+       else if (ETH_DATA(dev)->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE)
                fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free;

 }
@@ -1098,7 +1098,7 @@ int
 ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
                        enum rte_filter_op filter_op, void *arg)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        int ret = 0;

        if (hw->mac.type != ixgbe_mac_82599EB &&
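
A quick map, before the next file, of which fields this series reaches through
the generic 'dd' member and which stay directly on the ethernet data. It is
inferred purely from the accesses in the hunks, so treat the layout as an
illustrative sketch rather than the authoritative definition:

	/* Hypothetical layout, reconstructed from call sites only: */
	struct dd_sketch {
		void		*dev_private;	/* ETH_DATA(dev)->dd.dev_private */
		uint8_t		port_id;	/* ETH_DATA(dev)->dd.port_id */
		uint16_t	mtu;		/* ETH_DATA(dev)->dd.mtu */
		uint32_t	min_rx_buf_size;
		uint64_t	rx_mbuf_alloc_failed;
		void		**rx_queues, **tx_queues;
		uint16_t	nb_rx_queues, nb_tx_queues;
	};
	/* Still addressed without '.dd': dev_conf, dev_link, mac_addrs,
	 * scattered_rx, promiscuous, all_multicast. */
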
diff --git a/lib/librte_pmd_ixgbe/ixgbe_pf.c b/lib/librte_pmd_ixgbe/ixgbe_pf.c
index dbda9b5..1fd5d2f 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_pf.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_pf.c
@@ -57,9 +57,9 @@
 #define IXGBE_VF_GET_QUEUE_MSG_SIZE 5

 static inline uint16_t
-dev_num_vf(struct rte_eth_dev *eth_dev)
+dev_num_vf(struct rte_eth_dev *dev)
 {
-       return eth_dev->pci_dev->max_vfs;
+       return dev->pci_dev->max_vfs;
 }

 static inline
@@ -67,7 +67,7 @@ int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
 {
        unsigned char vf_mac_addr[ETHER_ADDR_LEN];
        struct ixgbe_vf_info *vfinfo =
-               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private);
        uint16_t vfn;

        for (vfn = 0; vfn < vf_num; vfn++) {
@@ -84,30 +84,30 @@ static inline int
 ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
 {
        struct ixgbe_interrupt *intr =
-               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_INTR(ETH_DATA(dev)->dd.dev_private);

        intr->mask |= IXGBE_EICR_MAILBOX;

        return 0;
 }

-void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
+void ixgbe_pf_host_init(struct rte_eth_dev *dev)
 {
        struct ixgbe_vf_info **vfinfo =
-               IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_mirror_info *mirror_info =
-        IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
+        IXGBE_DEV_PRIVATE_TO_PFDATA(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_uta_info *uta_info =
-        IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
+        IXGBE_DEV_PRIVATE_TO_UTA(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint16_t vf_num;
        uint8_t nb_queue;

        PMD_INIT_FUNC_TRACE();

-       RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
-       if (0 == (vf_num = dev_num_vf(eth_dev)))
+       ETH_SRIOV(dev).active = 0;
+       if (0 == (vf_num = dev_num_vf(dev)))
                return;

        *vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
@@ -120,50 +120,50 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)

        if (vf_num >= ETH_32_POOLS) {
                nb_queue = 2;
-               RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
+               ETH_SRIOV(dev).active = ETH_64_POOLS;
        } else if (vf_num >= ETH_16_POOLS) {
                nb_queue = 4;
-               RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
+               ETH_SRIOV(dev).active = ETH_32_POOLS;
        } else {
                nb_queue = 8;
-               RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
+               ETH_SRIOV(dev).active = ETH_16_POOLS;
        }

-       RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
-       RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
-       RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * 
nb_queue);
+       ETH_SRIOV(dev).nb_q_per_pool = nb_queue;
+       ETH_SRIOV(dev).def_vmdq_idx = vf_num;
+       ETH_SRIOV(dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);

-       ixgbe_vf_perm_addr_gen(eth_dev, vf_num);
+       ixgbe_vf_perm_addr_gen(dev, vf_num);

        /* init_mailbox_params */
        hw->mbx.ops.init_params(hw);

        /* set mb interrupt mask */
-       ixgbe_mb_intr_setup(eth_dev);
+       ixgbe_mb_intr_setup(dev);

        return;
 }

-int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
+int ixgbe_pf_host_configure(struct rte_eth_dev *dev)
 {
        uint32_t vtctl, fcrth;
        uint32_t vfre_slot, vfre_offset;
        uint16_t vf_num;
        const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
        const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t gpie, gcr_ext;
        uint32_t vlanctrl;
        int i;

-       if (0 == (vf_num = dev_num_vf(eth_dev)))
+       if (0 == (vf_num = dev_num_vf(dev)))
                return -1;

        /* enable VMDq and set the default pool for PF */
        vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
        vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
-       vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
+       vtctl |= ETH_SRIOV(dev).def_vmdq_idx
                << IXGBE_VT_CTL_POOL_SHIFT;
        vtctl |= IXGBE_VT_CTL_REPLEN;
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
@@ -188,7 +188,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
        IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);

        /* set VMDq map to default PF pool */
-       hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);
+       hw->mac.ops.set_vmdq(hw, 0, ETH_SRIOV(dev).def_vmdq_idx);

        /*
         * SW msut set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
@@ -200,7 +200,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
        gpie &= ~IXGBE_GPIE_VTMODE_MASK;
        gpie |= IXGBE_GPIE_MSIX_MODE;

-       switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
+       switch (ETH_SRIOV(dev).active) {
        case ETH_64_POOLS:
                gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
                gpie |= IXGBE_GPIE_VTMODE_64;
@@ -246,9 +246,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 static void
 set_rx_mode(struct rte_eth_dev *dev)
 {
-       struct rte_eth_dev_data *dev_data =
-               (struct rte_eth_dev_data*)dev->data->dev_private;
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
        uint16_t vfn = dev_num_vf(dev);

@@ -262,11 +260,11 @@ set_rx_mode(struct rte_eth_dev *dev)
        /* clear the bits we are changing the status of */
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

-       if (dev_data->promiscuous) {
+       if (ETH_DATA(dev)->promiscuous) {
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
        } else {
-               if (dev_data->all_multicast) {
+               if (ETH_DATA(dev)->all_multicast) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        vmolr |= IXGBE_VMOLR_MPE;
                } else {
@@ -283,7 +281,7 @@ set_rx_mode(struct rte_eth_dev *dev)

        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

-       if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip)
                ixgbe_vlan_hw_strip_enable_all(dev);
        else
                ixgbe_vlan_hw_strip_disable_all(dev);
@@ -293,9 +291,9 @@ static inline void
 ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vf_info *vfinfo =
-               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
        uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

@@ -317,7 +315,7 @@ ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
 static inline void
 ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t reg;
        uint32_t reg_offset, vf_shift;
        const uint8_t VFRE_SHIFT = 5;  /* VFRE 32 bits per slot */
@@ -346,9 +344,9 @@ ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
 static int
 ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vf_info *vfinfo =
-               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));
        unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
        uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
@@ -373,9 +371,9 @@ ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
 static int
 ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vf_info *vfinfo =
-               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
        uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

@@ -389,9 +387,9 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 static int
 ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vf_info *vfinfo =
-               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));
        int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
                IXGBE_VT_MSGINFO_SHIFT;
        uint16_t *hash_list = (uint16_t *)&msgbuf[1];
@@ -428,9 +426,9 @@ static int
 ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
        int add, vid;
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vf_info *vfinfo =
-               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+               *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private));

        add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
                >> IXGBE_VT_MSGINFO_SHIFT;
@@ -446,7 +444,7 @@ ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 static int
 ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t new_mtu = msgbuf[1];
        uint32_t max_frs;
        int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
@@ -475,7 +473,7 @@ ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
        uint32_t api_version = msgbuf[1];
        struct ixgbe_vf_info *vfinfo =
-               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private);

        switch (api_version) {
        case ixgbe_mbox_api_10:
@@ -496,8 +494,8 @@ static int
 ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
        struct ixgbe_vf_info *vfinfo =
-               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
-       uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private);
+       uint32_t default_q = vf * ETH_SRIOV(dev).nb_q_per_pool;

        /* Verify if the PF supports the mbox APIs version or not */
        switch (vfinfo[vf].api_version) {
@@ -509,8 +507,8 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
        }

        /* Notify VF of Rx and Tx queue number */
-       msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
-       msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+       msgbuf[IXGBE_VF_RX_QUEUES] = ETH_SRIOV(dev).nb_q_per_pool;
+       msgbuf[IXGBE_VF_TX_QUEUES] = ETH_SRIOV(dev).nb_q_per_pool;

        /* Notify VF of default queue */
        msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;
@@ -530,9 +528,9 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
        uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
        uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
        int32_t retval;
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vf_info *vfinfo =
-               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private);

        retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
        if (retval) {
@@ -599,31 +597,31 @@ ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 {
        uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        struct ixgbe_vf_info *vfinfo =
-               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+               *IXGBE_DEV_PRIVATE_TO_P_VFDATA(ETH_DATA(dev)->dd.dev_private);

        if (!vfinfo[vf].clear_to_send)
                ixgbe_write_mbx(hw, &msg, 1, vf);
 }

-void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
+void ixgbe_pf_mbx_process(struct rte_eth_dev *dev)
 {
        uint16_t vf;
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
+       for (vf = 0; vf < dev_num_vf(dev); vf++) {
                /* check & process vf function level reset */
                if (!ixgbe_check_for_rst(hw, vf))
-                       ixgbe_vf_reset_event(eth_dev, vf);
+                       ixgbe_vf_reset_event(dev, vf);

                /* check & process vf mailbox messages */
                if (!ixgbe_check_for_msg(hw, vf))
-                       ixgbe_rcv_msg_from_vf(eth_dev, vf);
+                       ixgbe_rcv_msg_from_vf(dev, vf);

                /* check & process acks from vf */
                if (!ixgbe_check_for_ack(hw, vf))
-                       ixgbe_rcv_ack_from_vf(eth_dev, vf);
+                       ixgbe_rcv_ack_from_vf(dev, vf);
        }
 }
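
One hunk in this file is more than a mechanical rename: set_rx_mode() used to
cast dev->data->dev_private, which points at the ixgbe adapter's private area,
to struct rte_eth_dev_data * and then read promiscuous/all_multicast through
that pointer, i.e. it interpreted driver-private memory as ethdev data. A
minimal before/after sketch, assuming ETH_DATA() resolves to the real
rte_eth_dev_data as it does everywhere else in this patch:

	/* Before (buggy): aliased the adapter struct as ethdev data.
	 *	struct rte_eth_dev_data *dd = (struct rte_eth_dev_data *)
	 *			dev->data->dev_private;
	 *	if (dd->promiscuous)	... read unrelated memory ...
	 */

	/* After: the flag comes from the ethdev data itself. */
	if (ETH_DATA(dev)->promiscuous)
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
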
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 9da2c7e..0cf20ca 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -1117,7 +1117,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                   "queue_id=%u", (unsigned) rxq->port_id,
                                   (unsigned) rxq->queue_id);

-                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+                       ETH_DATA(&rte_eth_devices[rxq->port_id])->dd.rx_mbuf_alloc_failed +=
                                rxq->rx_free_thresh;

                        /*
@@ -1262,7 +1262,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                                   "queue_id=%u", (unsigned) rxq->port_id,
                                   (unsigned) rxq->queue_id);
-                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       ETH_DATA(&rte_eth_devices[rxq->port_id])->dd.rx_mbuf_alloc_failed++;
                        break;
                }

@@ -1450,7 +1450,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                                   "queue_id=%u", (unsigned) rxq->port_id,
                                   (unsigned) rxq->queue_id);
-                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       ETH_DATA(&rte_eth_devices[rxq->port_id])->dd.rx_mbuf_alloc_failed++;
                        break;
                }

@@ -1670,7 +1670,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,

        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                        dev->driver->pci_drv.name, ring_name,
-                       dev->data->port_id, queue_id);
+                       ETH_DATA(dev)->dd.port_id, queue_id);

        mz = rte_memzone_lookup(z_name);
        if (mz)
@@ -1818,7 +1818,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        uint16_t tx_rs_thresh, tx_free_thresh;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Validate number of transmit descriptors.
@@ -1859,7 +1859,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the number "
                             "of TX descriptors minus 2. (tx_rs_thresh=%u "
                             "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
-                            (int)dev->data->port_id, (int)queue_idx);
+                            (int)ETH_DATA(dev)->dd.port_id, (int)queue_idx);
                return -(EINVAL);
        }
        if (tx_free_thresh >= (nb_desc - 3)) {
@@ -1868,7 +1868,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                             "TX descriptors minus 3. (tx_free_thresh=%u "
                             "port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
-                            (int)dev->data->port_id, (int)queue_idx);
+                            (int)ETH_DATA(dev)->dd.port_id, (int)queue_idx);
                return -(EINVAL);
        }
        if (tx_rs_thresh > tx_free_thresh) {
@@ -1877,7 +1877,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                             "tx_rs_thresh=%u port=%d queue=%d)",
                             (unsigned int)tx_free_thresh,
                             (unsigned int)tx_rs_thresh,
-                            (int)dev->data->port_id,
+                            (int)ETH_DATA(dev)->dd.port_id,
                             (int)queue_idx);
                return -(EINVAL);
        }
@@ -1885,7 +1885,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                PMD_INIT_LOG(ERR, "tx_rs_thresh must be a divisor of the "
                             "number of TX descriptors. (tx_rs_thresh=%u "
                             "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
-                            (int)dev->data->port_id, (int)queue_idx);
+                            (int)ETH_DATA(dev)->dd.port_id, (int)queue_idx);
                return -(EINVAL);
        }

@@ -1899,14 +1899,14 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
                PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
                             "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
                             "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
-                            (int)dev->data->port_id, (int)queue_idx);
+                            (int)ETH_DATA(dev)->dd.port_id, (int)queue_idx);
                return -(EINVAL);
        }

        /* Free memory prior to re-allocation if needed... */
-       if (dev->data->tx_queues[queue_idx] != NULL) {
-               ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
-               dev->data->tx_queues[queue_idx] = NULL;
+       if (ETH_DATA(dev)->dd.tx_queues[queue_idx] != NULL) {
+               ixgbe_tx_queue_release(ETH_DATA(dev)->dd.tx_queues[queue_idx]);
+               ETH_DATA(dev)->dd.tx_queues[queue_idx] = NULL;
        }

        /* First allocate the tx queue data structure */
@@ -1935,9 +1935,9 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
        txq->hthresh = tx_conf->tx_thresh.hthresh;
        txq->wthresh = tx_conf->tx_thresh.wthresh;
        txq->queue_id = queue_idx;
-       txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
-               queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
-       txq->port_id = dev->data->port_id;
+       txq->reg_idx = (uint16_t)((ETH_SRIOV(dev).active == 0) ?
+               queue_idx : ETH_SRIOV(dev).def_pool_q_idx + queue_idx);
+       txq->port_id = ETH_DATA(dev)->dd.port_id;
        txq->txq_flags = tx_conf->txq_flags;
        txq->ops = &def_txq_ops;
        txq->tx_deferred_start = tx_conf->tx_deferred_start;
@@ -1975,7 +1975,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,

        txq->ops->reset(txq);

-       dev->data->tx_queues[queue_idx] = txq;
+       ETH_DATA(dev)->dd.tx_queues[queue_idx] = txq;


        return (0);
@@ -2150,7 +2150,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        uint16_t len;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Validate number of receive descriptors.
@@ -2164,9 +2164,9 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        }

        /* Free memory prior to re-allocation if needed... */
-       if (dev->data->rx_queues[queue_idx] != NULL) {
-               ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
-               dev->data->rx_queues[queue_idx] = NULL;
+       if (ETH_DATA(dev)->dd.rx_queues[queue_idx] != NULL) {
+               ixgbe_rx_queue_release(ETH_DATA(dev)->dd.rx_queues[queue_idx]);
+               ETH_DATA(dev)->dd.rx_queues[queue_idx] = NULL;
        }

        /* First allocate the rx queue data structure */
@@ -2178,10 +2178,10 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->queue_id = queue_idx;
-       rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
-               queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
-       rxq->port_id = dev->data->port_id;
-       rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
+       rxq->reg_idx = (uint16_t)((ETH_SRIOV(dev).active == 0) ?
+               queue_idx : ETH_SRIOV(dev).def_pool_q_idx + queue_idx);
+       rxq->port_id = ETH_DATA(dev)->dd.port_id;
+       rxq->crc_len = (uint8_t) ((ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc) ?
                                                        0 : ETHER_CRC_LEN);
        rxq->drop_en = rx_conf->rx_drop_en;
        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2269,7 +2269,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        } else
                ixgbe_rxq_vec_setup(rxq);

-       dev->data->rx_queues[queue_idx] = rxq;
+       ETH_DATA(dev)->dd.rx_queues[queue_idx] = rxq;

        ixgbe_reset_rx_queue(hw, rxq);

@@ -2284,12 +2284,12 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct ixgbe_rx_queue *rxq;
        uint32_t desc = 0;

-       if (rx_queue_id >= dev->data->nb_rx_queues) {
+       if (rx_queue_id >= ETH_DATA(dev)->dd.nb_rx_queues) {
                PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
                return 0;
        }

-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);

        while ((desc < rxq->nb_rx_desc) &&
@@ -2325,20 +2325,20 @@ void
 ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
        unsigned i;
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               struct ixgbe_tx_queue *txq = dev->data->tx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
+               struct ixgbe_tx_queue *txq = ETH_DATA(dev)->dd.tx_queues[i];
                if (txq != NULL) {
                        txq->ops->release_mbufs(txq);
                        txq->ops->reset(txq);
                }
        }

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+               struct ixgbe_rx_queue *rxq = ETH_DATA(dev)->dd.rx_queues[i];
                if (rxq != NULL) {
                        ixgbe_rx_queue_release_mbufs(rxq);
                        ixgbe_reset_rx_queue(hw, rxq);
@@ -2388,7 +2388,7 @@ ixgbe_rss_disable(struct rte_eth_dev *dev)
        struct ixgbe_hw *hw;
        uint32_t mrqc;

-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
        mrqc &= ~IXGBE_MRQC_RSSEN;
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
@@ -2447,7 +2447,7 @@ ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
        uint32_t mrqc;
        uint64_t rss_hf;

-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Excerpt from section 7.1.2.8 Receive-Side Scaling (RSS):
@@ -2483,7 +2483,7 @@ ixgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
        uint64_t rss_hf;
        uint16_t i;

-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        hash_key = rss_conf->rss_key;
        if (hash_key != NULL) {
                /* Return RSS hash key */
@@ -2535,7 +2535,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
        uint16_t j;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Fill in redirection table
@@ -2544,7 +2544,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
         */
        reta = 0;
        for (i = 0, j = 0; i < 128; i++, j++) {
-               if (j == dev->data->nb_rx_queues)
+               if (j == ETH_DATA(dev)->dd.nb_rx_queues)
                        j = 0;
                reta = (reta << 8) | j;
                if ((i & 3) == 3)
@@ -2556,7 +2556,7 @@ ixgbe_rss_configure(struct rte_eth_dev *dev)
         * Configure the RSS key and the RSS protocols used to compute
         * the RSS hash of input packets.
         */
-       rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
+       rss_conf = ETH_DATA(dev)->dev_conf.rx_adv_conf.rss_conf;
        if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
                ixgbe_rss_disable(dev);
                return;
@@ -2581,8 +2581,8 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
        int i;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       cfg = &ETH_DATA(dev)->dev_conf.rx_adv_conf.vmdq_dcb_conf;
        num_pools = cfg->nb_queue_pools;
        /* Check we have a valid number of pools */
        if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
@@ -2734,9 +2734,9 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
                        struct ixgbe_dcb_config *dcb_config)
 {
        struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
-                       &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+                       &ETH_DATA(dev)->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
        struct ixgbe_hw *hw =
-                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        PMD_INIT_FUNC_TRACE();
        if (hw->mac.type != ixgbe_mac_82598EB)
@@ -2754,7 +2754,7 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
                         struct ixgbe_dcb_config *dcb_config)
 {
        struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
-                       &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+                       &ETH_DATA(dev)->dev_conf.rx_adv_conf.vmdq_dcb_conf;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t i,j;

@@ -2781,7 +2781,7 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
                         struct ixgbe_dcb_config *dcb_config)
 {
        struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
-                       &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
+                       &ETH_DATA(dev)->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t i,j;

@@ -2810,7 +2810,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev,
                struct ixgbe_dcb_config *dcb_config)
 {
        struct rte_eth_dcb_rx_conf *rx_conf =
-                       &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+                       &ETH_DATA(dev)->dev_conf.rx_adv_conf.dcb_rx_conf;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t i,j;

@@ -2831,7 +2831,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
                struct ixgbe_dcb_config *dcb_config)
 {
        struct rte_eth_dcb_tx_conf *tx_conf =
-                       &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
+                       &ETH_DATA(dev)->dev_conf.tx_adv_conf.dcb_tx_conf;
        struct ixgbe_dcb_tc_config *tc;
        uint8_t i,j;

@@ -2979,11 +2979,11 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
        uint16_t max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
        uint8_t map[IXGBE_DCB_MAX_TRAFFIC_CLASS] = {0};
        struct ixgbe_dcb_tc_config *tc;
-       uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+       uint32_t max_frame = ETH_DATA(dev)->dd.mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
        struct ixgbe_hw *hw =
-                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+                       IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       switch(dev->data->dev_conf.rxmode.mq_mode){
+       switch(ETH_DATA(dev)->dev_conf.rxmode.mq_mode){
        case ETH_MQ_RX_VMDQ_DCB:
                dcb_config->vt_mode = true;
                if (hw->mac.type != ixgbe_mac_82598EB) {
@@ -3009,7 +3009,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
                PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
                break;
        }
-       switch (dev->data->dev_conf.txmode.mq_mode) {
+       switch (ETH_DATA(dev)->dev_conf.txmode.mq_mode) {
        case ETH_MQ_TX_VMDQ_DCB:
                dcb_config->vt_mode = true;
                config_dcb_tx = DCB_TX_CONFIG;
@@ -3118,7 +3118,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
        ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);

        /* Check if the PFC is supported */
-       if(dev->data->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
+       if(ETH_DATA(dev)->dev_conf.dcb_capability_en & ETH_DCB_PFC_SUPPORT) {
                pbsize = (uint16_t) (NIC_RX_BUFFER_SIZE / nb_tcs);
                for (i = 0; i < nb_tcs; i++) {
                        /*
@@ -3147,8 +3147,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
 void ixgbe_configure_dcb(struct rte_eth_dev *dev)
 {
        struct ixgbe_dcb_config *dcb_cfg =
-                       IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
-       struct rte_eth_conf *dev_conf = &(dev->data->dev_conf);
+                       IXGBE_DEV_PRIVATE_TO_DCB_CFG(ETH_DATA(dev)->dd.dev_private);
+       struct rte_eth_conf *dev_conf = &(ETH_DATA(dev)->dev_conf);

        PMD_INIT_FUNC_TRACE();

@@ -3157,7 +3157,7 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
            (dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB))
                return;

-       if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+       if (ETH_DATA(dev)->dd.nb_rx_queues != ETH_DCB_NUM_QUEUES)
                return;

        /** Configure DCB hardware **/
@@ -3180,8 +3180,8 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
        int i;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
+       cfg = &ETH_DATA(dev)->dev_conf.rx_adv_conf.vmdq_rx_conf;
        num_pools = cfg->nb_queue_pools;

        ixgbe_rss_disable(dev);
@@ -3335,12 +3335,12 @@ ixgbe_config_vf_rss(struct rte_eth_dev *dev)

        ixgbe_rss_configure(dev);

-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /* MRQC: enable VF RSS */
        mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
        mrqc &= ~IXGBE_MRQC_MRQE_MASK;
-       switch (RTE_ETH_DEV_SRIOV(dev).active) {
+       switch (ETH_SRIOV(dev).active) {
        case ETH_64_POOLS:
                mrqc |= IXGBE_MRQC_VMDQRSS64EN;
                break;
@@ -3363,9 +3363,9 @@ static int
 ixgbe_config_vf_default(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       switch (RTE_ETH_DEV_SRIOV(dev).active) {
+       switch (ETH_SRIOV(dev).active) {
        case ETH_64_POOLS:
                IXGBE_WRITE_REG(hw, IXGBE_MRQC,
                        IXGBE_MRQC_VMDQEN);
@@ -3392,17 +3392,17 @@ static int
 ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        if (hw->mac.type == ixgbe_mac_82598EB)
                return 0;

-       if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+       if (ETH_SRIOV(dev).active == 0) {
                /*
                 * SRIOV inactive scheme
                 * any DCB/RSS w/o VMDq multi-queue setting
                 */
-               switch (dev->data->dev_conf.rxmode.mq_mode) {
+               switch (ETH_DATA(dev)->dev_conf.rxmode.mq_mode) {
                        case ETH_MQ_RX_RSS:
                                ixgbe_rss_configure(dev);
                                break;
@@ -3424,7 +3424,7 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
                 * SRIOV active scheme
                 * Support RSS together with VMDq & SRIOV
                 */
-               switch (dev->data->dev_conf.rxmode.mq_mode) {
+               switch (ETH_DATA(dev)->dev_conf.rxmode.mq_mode) {
                case ETH_MQ_RX_RSS:
                case ETH_MQ_RX_VMDQ_RSS:
                        ixgbe_config_vf_rss(dev);
@@ -3449,7 +3449,7 @@ static int
 ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
 {
        struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+               IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);
        uint32_t mtqc;
        uint32_t rttdcs;

@@ -3461,19 +3461,19 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
        rttdcs |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

-       if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+       if (ETH_SRIOV(dev).active == 0) {
                /*
                 * SRIOV inactive scheme
                 * any DCB w/o VMDq multi-queue setting
                 */
-               if (dev->data->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
+       if (ETH_DATA(dev)->dev_conf.txmode.mq_mode == ETH_MQ_TX_VMDQ_ONLY)
                        ixgbe_vmdq_tx_hw_configure(hw);
                else {
                        mtqc = IXGBE_MTQC_64Q_1PB;
                        IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
                }
        } else {
-               switch (RTE_ETH_DEV_SRIOV(dev).active) {
+               switch (ETH_SRIOV(dev).active) {

                /*
                 * SRIOV active scheme
@@ -3505,7 +3505,7 @@ ixgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)

 void ixgbe_set_rx_function(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * In order to allow Vector Rx there are a few configuration
@@ -3516,12 +3516,12 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
                PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet Vector Rx "
                                    "preconditions or RTE_IXGBE_INC_VECTOR is "
                                    "not enabled",
-                            dev->data->port_id);
+                            ETH_DATA(dev)->dd.port_id);

                hw->rx_vec_allowed = false;
        }

-       if (dev->data->scattered_rx) {
+       if (ETH_DATA(dev)->scattered_rx) {
                /*
                 * Set the non-LRO scattered callback: there are Vector and
                 * single allocation versions.
@@ -3529,14 +3529,14 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
                if (hw->rx_vec_allowed) {
                        PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
                                            "callback (port=%d).",
-                                    dev->data->port_id);
+                                    ETH_DATA(dev)->dd.port_id);

                        dev->rx_pkt_burst = ixgbe_recv_scattered_pkts_vec;
                } else {
                        PMD_INIT_LOG(DEBUG, "Using Regular (non-vector) "
                                            "Scattered Rx callback "
                                            "(port=%d).",
-                                    dev->data->port_id);
+                                    ETH_DATA(dev)->dd.port_id);

                        dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
                }
@@ -3557,7 +3557,7 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
                PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
                                    "satisfied. Rx Burst Bulk Alloc function "
                                    "will be used on port=%d.",
-                            dev->data->port_id);
+                            ETH_DATA(dev)->dd.port_id);

                dev->rx_pkt_burst = ixgbe_recv_pkts_bulk_alloc;
        } else {
@@ -3565,7 +3565,7 @@ void ixgbe_set_rx_function(struct rte_eth_dev *dev)
                                    "satisfied, or Scattered Rx is requested, "
                                    "or RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC "
                                    "is not enabled (port=%d).",
-                            dev->data->port_id);
+                            ETH_DATA(dev)->dd.port_id);

                dev->rx_pkt_burst = ixgbe_recv_pkts;
        }
@@ -3592,7 +3592,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        uint16_t i;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /*
         * Make sure receives are disabled while setting
@@ -3612,7 +3612,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
         * Configure CRC stripping, if any.
         */
        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-       if (dev->data->dev_conf.rxmode.hw_strip_crc)
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc)
                hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
        else
                hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
@@ -3620,11 +3620,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        /*
         * Configure jumbo frame support, if any.
         */
-       if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+       if (ETH_DATA(dev)->dev_conf.rxmode.jumbo_frame == 1) {
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
                maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
                maxfrs &= 0x0000FFFF;
-               maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+               maxfrs |= (ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len << 16);
                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
        } else
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
@@ -3633,7 +3633,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
         * If loopback mode is configured for 82599, set LPBK bit.
         */
        if (hw->mac.type == ixgbe_mac_82599EB &&
-                       dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+                       ETH_DATA(dev)->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
                hlreg0 |= IXGBE_HLREG0_LPBK;
        else
                hlreg0 &= ~IXGBE_HLREG0_LPBK;
@@ -3641,15 +3641,15 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

        /* Setup RX queues */
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];

                /*
                 * Reset crc_len in case it was changed after queue setup by a
                 * call to configure.
                 */
                rxq->crc_len = (uint8_t)
-                               ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
+                               ((ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc) ? 0 :
                                ETHER_CRC_LEN);

                /* Setup the Base and Length of the Rx Descriptor Rings */
@@ -3668,7 +3668,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                /*
                 * Configure Header Split
                 */
-               if (dev->data->dev_conf.rxmode.header_split) {
+               if (ETH_DATA(dev)->dev_conf.rxmode.header_split) {
                        if (hw->mac.type == ixgbe_mac_82599EB) {
                                /* Must setup the PSRTYPE register */
                                uint32_t psrtype;
@@ -3678,7 +3678,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                                        IXGBE_PSRTYPE_IPV6HDR;
                                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
                        }
-                       srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
+                       srrctl = ((ETH_DATA(dev)->dev_conf.rxmode.split_hdr_size <<
                                IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                                IXGBE_SRRCTL_BSIZEHDR_MASK);
                        srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
@@ -3707,13 +3707,13 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                                       IXGBE_SRRCTL_BSIZEPKT_SHIFT);

                /* It adds dual VLAN length for supporting dual VLAN */
-               if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+               if (ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len +
                                            2 * IXGBE_VLAN_TAG_SIZE > buf_size)
-                       dev->data->scattered_rx = 1;
+                       ETH_DATA(dev)->scattered_rx = 1;
        }

-       if (dev->data->dev_conf.rxmode.enable_scatter)
-               dev->data->scattered_rx = 1;
+       if (ETH_DATA(dev)->dev_conf.rxmode.enable_scatter)
+               ETH_DATA(dev)->scattered_rx = 1;

        ixgbe_set_rx_function(dev);

@@ -3729,7 +3729,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
         */
        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
        rxcsum |= IXGBE_RXCSUM_PCSD;
-       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_ip_checksum)
                rxcsum |= IXGBE_RXCSUM_IPPCSE;
        else
                rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -3739,7 +3739,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        if (hw->mac.type == ixgbe_mac_82599EB ||
            hw->mac.type == ixgbe_mac_X540) {
                rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-               if (dev->data->dev_conf.rxmode.hw_strip_crc)
+               if (ETH_DATA(dev)->dev_conf.rxmode.hw_strip_crc)
                        rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
                else
                        rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
@@ -3764,7 +3764,7 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
        uint16_t i;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /* Enable TX CRC (checksum offload requirement) and hw padding
         * (TSO requirement) */
@@ -3773,8 +3773,8 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

        /* Setup the Base and Length of the Tx Descriptor Rings */
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
+               txq = ETH_DATA(dev)->dd.tx_queues[i];

                bus_addr = txq->tx_ring_phys_addr;
                IXGBE_WRITE_REG(hw, IXGBE_TDBAL(txq->reg_idx),
@@ -3862,10 +3862,10 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
        int ret = 0;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
+               txq = ETH_DATA(dev)->dd.tx_queues[i];
                /* Setup Transmit Threshold Registers */
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
                txdctl |= txq->pthresh & 0x7F;
@@ -3880,8 +3880,8 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
        }

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
+               txq = ETH_DATA(dev)->dd.tx_queues[i];
                if (!txq->tx_deferred_start) {
                        ret = ixgbe_dev_tx_queue_start(dev, i);
                        if (ret < 0)
@@ -3889,8 +3889,8 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
                }
        }

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];
                if (!rxq->rx_deferred_start) {
                        ret = ixgbe_dev_rx_queue_start(dev, i);
                        if (ret < 0)
@@ -3907,7 +3907,7 @@ ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)

        /* If loopback mode is enabled for 82599, set up the link accordingly */
        if (hw->mac.type == ixgbe_mac_82599EB &&
-                       dev->data->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
+                       ETH_DATA(dev)->dev_conf.lpbk_mode == IXGBE_LPBK_82599_TX_RX)
                ixgbe_setup_loopback_link_82599(hw);

        return 0;
@@ -3925,10 +3925,10 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        int poll_ms;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       if (rx_queue_id < dev->data->nb_rx_queues) {
-               rxq = dev->data->rx_queues[rx_queue_id];
+       if (rx_queue_id < ETH_DATA(dev)->dd.nb_rx_queues) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];

                /* Allocate buffers for descriptor rings */
                if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
@@ -3970,10 +3970,10 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        int poll_ms;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       if (rx_queue_id < dev->data->nb_rx_queues) {
-               rxq = dev->data->rx_queues[rx_queue_id];
+       if (rx_queue_id < ETH_DATA(dev)->dd.nb_rx_queues) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[rx_queue_id];

                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
                rxdctl &= ~IXGBE_RXDCTL_ENABLE;
@@ -4012,10 +4012,10 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        int poll_ms;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       if (tx_queue_id < dev->data->nb_tx_queues) {
-               txq = dev->data->tx_queues[tx_queue_id];
+       if (tx_queue_id < ETH_DATA(dev)->dd.nb_tx_queues) {
+               txq = ETH_DATA(dev)->dd.tx_queues[tx_queue_id];
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
                txdctl |= IXGBE_TXDCTL_ENABLE;
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
@@ -4054,10 +4054,10 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        int poll_ms;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       if (tx_queue_id < dev->data->nb_tx_queues) {
-               txq = dev->data->tx_queues[tx_queue_id];
+       if (tx_queue_id < ETH_DATA(dev)->dd.nb_tx_queues) {
+               txq = ETH_DATA(dev)->dd.tx_queues[tx_queue_id];

                /* Wait until TX queue is empty */
                if (hw->mac.type == ixgbe_mac_82599EB) {
@@ -4117,15 +4117,15 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
        int ret;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
+       if (rte_is_power_of_2(ETH_DATA(dev)->dd.nb_rx_queues) == 0) {
                PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
                        "it should be power of 2");
                return -1;
        }

-       if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
+       if (ETH_DATA(dev)->dd.nb_rx_queues > hw->mac.max_rx_queues) {
                PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
                        "it should be equal to or less than %d",
                        hw->mac.max_rx_queues);
@@ -4147,12 +4147,12 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
         * VF packets received can work in all cases.
         */
        ixgbevf_rlpml_set_vf(hw,
-               (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+               (uint16_t)ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len);

        /* Setup RX queues */
        dev->rx_pkt_burst = ixgbe_recv_pkts;
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               rxq = dev->data->rx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];

                /* Allocate buffers for descriptor rings */
                ret = ixgbe_alloc_rx_queue_mbufs(rxq);
@@ -4177,8 +4177,8 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
                /*
                 * Configure Header Split
                 */
-               if (dev->data->dev_conf.rxmode.header_split) {
-                       srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
+               if (ETH_DATA(dev)->dev_conf.rxmode.header_split) {
+                       srrctl = ((ETH_DATA(dev)->dev_conf.rxmode.split_hdr_size <<
                                IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                                IXGBE_SRRCTL_BSIZEHDR_MASK);
                        srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
@@ -4210,13 +4210,13 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
                buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
                                       IXGBE_SRRCTL_BSIZEPKT_SHIFT);

-               if (dev->data->dev_conf.rxmode.enable_scatter ||
+               if (ETH_DATA(dev)->dev_conf.rxmode.enable_scatter ||
                    /* It adds dual VLAN length for supporting dual VLAN */
-                   (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+                   (ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len +
                                2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
-                       if (!dev->data->scattered_rx)
+                       if (!ETH_DATA(dev)->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
-                       dev->data->scattered_rx = 1;
+                       ETH_DATA(dev)->scattered_rx = 1;
 #ifdef RTE_IXGBE_INC_VECTOR
                        if (rte_is_power_of_2(rxq->nb_rx_desc))
                                dev->rx_pkt_burst =
@@ -4228,7 +4228,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
        }

 #ifdef RTE_HEADER_SPLIT_ENABLE
-       if (dev->data->dev_conf.rxmode.header_split)
+       if (ETH_DATA(dev)->dev_conf.rxmode.header_split)
                /* Must setup the PSRTYPE register */
                psrtype = IXGBE_PSRTYPE_TCPHDR |
                        IXGBE_PSRTYPE_UDPHDR   |
@@ -4237,7 +4237,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 #endif

        /* Set RQPL for VF RSS according to max Rx queue */
-       psrtype |= (dev->data->nb_rx_queues >> 1) <<
+       psrtype |= (ETH_DATA(dev)->dd.nb_rx_queues >> 1) <<
                IXGBE_PSRTYPE_RQPL_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);

@@ -4257,11 +4257,11 @@ ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
        uint16_t i;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

        /* Setup the Base and Length of the Tx Descriptor Rings */
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
+               txq = ETH_DATA(dev)->dd.tx_queues[i];
                bus_addr = txq->tx_ring_phys_addr;
                IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
                                (uint32_t)(bus_addr & 0x00000000ffffffffULL));
@@ -4300,10 +4300,10 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
        int poll_ms;

        PMD_INIT_FUNC_TRACE();
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(ETH_DATA(dev)->dd.dev_private);

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txq = dev->data->tx_queues[i];
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {
+               txq = ETH_DATA(dev)->dd.tx_queues[i];
                /* Setup Transmit Threshold Registers */
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                txdctl |= txq->pthresh & 0x7F;
@@ -4312,7 +4312,7 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
        }

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_tx_queues; i++) {

                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
                txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -4327,9 +4327,9 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
                if (!poll_ms)
                        PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
        }
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+       for (i = 0; i < ETH_DATA(dev)->dd.nb_rx_queues; i++) {

-               rxq = dev->data->rx_queues[i];
+               rxq = ETH_DATA(dev)->dd.rx_queues[i];

                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
                rxdctl |= IXGBE_RXDCTL_ENABLE;
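
Note for reviewers: the accessor macros themselves are introduced outside
these hunks. Inferred purely from the call sites above, they are assumed
to expand roughly like the following sketch (the real definitions would
live in rte_ethdev.h and may differ, in particular where 'sriov' ends up):

        /* Sketch only -- inferred from usage, not the actual definitions.
         * Fields shared with other device classes are assumed to sit in
         * an embedded 'dd' member of struct rte_eth_dev_data. */
        #define ETH_DATA(dev)    ((dev)->data)
        #define _DD(dev, field)  (ETH_DATA(dev)->dd.field)
        #define _DD_PRIVATE(dev) _DD(dev, dev_private)
        #define _DD_PORT(dev)    _DD(dev, port_id)
        #define ETH_SRIOV(dev)   (ETH_DATA(dev)->sriov)

Funneling every access through ETH_DATA()/_DD() is what lets the generic
'dd' block be shared with non-ethernet device classes later without
touching the drivers again.
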
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
index 7ac6b61..536fb7a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx_vec.c
@@ -71,7 +71,7 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
                                                dma_addr0);
                        }
                }
-               rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+               ETH_DATA(&rte_eth_devices[rxq->port_id])->dd.rx_mbuf_alloc_failed +=
                        RTE_IXGBE_RXQ_REARM_THRESH;
                return;
        }
@@ -763,8 +763,8 @@ int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
 int ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
-       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-       struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+       struct rte_eth_rxmode *rxmode = &ETH_DATA(dev)->dev_conf.rxmode;
+       struct rte_fdir_conf *fconf = &ETH_DATA(dev)->dev_conf.fdir_conf;

 #ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
        /* without rx ol_flags, no VP flag report */
diff --git a/lib/librte_pmd_mlx4/mlx4.c b/lib/librte_pmd_mlx4/mlx4.c
index fa749f4..7a3ccae 100644
--- a/lib/librte_pmd_mlx4/mlx4.c
+++ b/lib/librte_pmd_mlx4/mlx4.c
@@ -4578,7 +4578,7 @@ mlx4_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)

                        snprintf(name, sizeof(name), "%s port %u",
                                 ibv_get_device_name(ibv_dev), port);
-                       eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
+                       eth_dev = rte_eth_dev_allocate(name, RTE_DEV_PCI);
                }
                if (eth_dev == NULL) {
                        ERROR("can not allocate rte ethdev");
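
The device-type constants above follow the same de-ethernet-ification:
RTE_ETH_DEV_PCI/RTE_ETH_DEV_VIRTUAL become RTE_DEV_PCI/RTE_DEV_VIRTUAL,
presumably via a rename along these lines (sketch; the exact enum is not
part of this hunk):

        enum rte_dev_type {
                RTE_DEV_PCI,            /* was RTE_ETH_DEV_PCI */
                RTE_DEV_VIRTUAL,        /* was RTE_ETH_DEV_VIRTUAL */
        };

The interrupt event constant is renamed the same way further down
(RTE_ETH_EVENT_INTR_LSC -> RTE_DEV_EVENT_INTR_LSC in virtio).
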
diff --git a/lib/librte_pmd_null/rte_eth_null.c b/lib/librte_pmd_null/rte_eth_null.c
index 0e18502..1f2bb68 100644
--- a/lib/librte_pmd_null/rte_eth_null.c
+++ b/lib/librte_pmd_null/rte_eth_null.c
@@ -186,7 +186,7 @@ eth_dev_start(struct rte_eth_dev *dev)
        if (dev == NULL)
                return -EINVAL;

-       dev->data->dev_link.link_status = 1;
+       ETH_DATA(dev)->dev_link.link_status = 1;
        return 0;
 }

@@ -196,7 +196,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
        if (dev == NULL)
                return;

-       dev->data->dev_link.link_status = 0;
+       ETH_DATA(dev)->dev_link.link_status = 0;
 }

 static int
@@ -216,11 +216,11 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        if (rx_queue_id != 0)
                return -ENODEV;

-       internals = dev->data->dev_private;
+       internals = _DD_PRIVATE(dev);
        packet_size = internals->packet_size;

        internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
-       dev->data->rx_queues[rx_queue_id] =
+       _DD(dev, rx_queues[rx_queue_id]) =
                &internals->rx_null_queues[rx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, internals->numa_node);
@@ -249,10 +249,10 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
        if (tx_queue_id != 0)
                return -ENODEV;

-       internals = dev->data->dev_private;
+       internals = _DD_PRIVATE(dev);
        packet_size = internals->packet_size;

-       dev->data->tx_queues[tx_queue_id] =
+       _DD(dev, tx_queues[tx_queue_id]) =
                &internals->tx_null_queues[tx_queue_id];
        dummy_packet = rte_zmalloc_socket(NULL,
                        packet_size, 0, internals->numa_node);
@@ -275,14 +275,14 @@ eth_dev_info(struct rte_eth_dev *dev,
        if ((dev == NULL) || (dev_info == NULL))
                return;

-       internals = dev->data->dev_private;
-       dev_info->driver_name = drivername;
+       internals = _DD_PRIVATE(dev);
+       dev_info->di.driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
        dev_info->min_rx_bufsize = 0;
-       dev_info->pci_dev = NULL;
+       dev_info->di.pci_dev = NULL;
 }

 static void
@@ -295,7 +295,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
        if ((dev == NULL) || (igb_stats == NULL))
                return;

-       internal = dev->data->dev_private;
+       internal = _DD_PRIVATE(dev);
        memset(igb_stats, 0, sizeof(*igb_stats));
        num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
                                        internal->nb_rx_queues);
@@ -330,7 +330,7 @@ eth_stats_reset(struct rte_eth_dev *dev)
        if (dev == NULL)
                return;

-       internal = dev->data->dev_private;
+       internal = _DD_PRIVATE(dev);
        for (i = 0; i < internal->nb_rx_queues; i++)
                internal->rx_null_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < internal->nb_tx_queues; i++) {
@@ -412,7 +412,7 @@ eth_dev_null_create(const char *name,
                goto error;

        /* reserve an ethdev entry */
-       eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+       eth_dev = rte_eth_dev_allocate(name, RTE_DEV_VIRTUAL);
        if (eth_dev == NULL)
                goto error;

@@ -433,13 +433,13 @@ eth_dev_null_create(const char *name,

        pci_dev->numa_node = numa_node;

-       data->dev_private = internals;
-       data->port_id = eth_dev->data->port_id;
-       data->nb_rx_queues = (uint16_t)nb_rx_queues;
-       data->nb_tx_queues = (uint16_t)nb_tx_queues;
+       data->dd.dev_private = internals;
+       data->dd.port_id = _DD(eth_dev, port_id);
+       data->dd.nb_rx_queues = (uint16_t)nb_rx_queues;
+       data->dd.nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;
-       strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));
+       strncpy(data->dd.name, _DD(eth_dev, name), strlen(_DD(eth_dev, name)));

        eth_dev->data = data;
        eth_dev->dev_ops = &ops;
@@ -569,7 +569,7 @@ rte_pmd_null_devuninit(const char *name)
        if (eth_dev == NULL)
                return -1;

-       rte_free(eth_dev->data->dev_private);
+       rte_free(_DD_PRIVATE(eth_dev));
        rte_free(eth_dev->data);
        rte_free(eth_dev->pci_dev);

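
Each virtual PMD now seeds its rte_eth_dev_data the same way: generic
fields go through the embedded 'dd' block, ethernet-only fields stay at
the top level. Condensed into a hypothetical helper (illustration only,
assuming the field layout this patch introduces):

        static void
        vdev_seed_data(struct rte_eth_dev *eth_dev,
                       struct rte_eth_dev_data *data, void *internals,
                       uint16_t nb_rxq, uint16_t nb_txq)
        {
                data->dd.dev_private  = internals;              /* driver state */
                data->dd.port_id      = _DD(eth_dev, port_id);  /* keep port */
                data->dd.nb_rx_queues = nb_rxq;
                data->dd.nb_tx_queues = nb_txq;
                /* dev_link and mac_addrs are ethernet-specific and are
                 * still set directly on 'data' by each driver. */
                eth_dev->data = data;
        }
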
diff --git a/lib/librte_pmd_pcap/rte_eth_pcap.c b/lib/librte_pmd_pcap/rte_eth_pcap.c
index 204ae68..e295510 100644
--- a/lib/librte_pmd_pcap/rte_eth_pcap.c
+++ b/lib/librte_pmd_pcap/rte_eth_pcap.c
@@ -716,7 +716,7 @@ rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
                goto error;

        /* reserve an ethdev entry */
-       *eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+       *eth_dev = rte_eth_dev_allocate(name, RTE_DEV_VIRTUAL);
        if (*eth_dev == NULL)
                goto error;

diff --git a/lib/librte_pmd_ring/rte_eth_ring.c b/lib/librte_pmd_ring/rte_eth_ring.c
index 1e66d4e..18930e9 100644
--- a/lib/librte_pmd_ring/rte_eth_ring.c
+++ b/lib/librte_pmd_ring/rte_eth_ring.c
@@ -113,27 +113,27 @@ eth_dev_configure(struct rte_eth_dev *dev __rte_unused) { return 0; }
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-       dev->data->dev_link.link_status = 1;
+       ETH_DATA(dev)->dev_link.link_status = 1;
        return 0;
 }

 static void
 eth_dev_stop(struct rte_eth_dev *dev)
 {
-       dev->data->dev_link.link_status = 0;
+       ETH_DATA(dev)->dev_link.link_status = 0;
 }

 static int
 eth_dev_set_link_down(struct rte_eth_dev *dev)
 {
-       dev->data->dev_link.link_status = 0;
+       ETH_DATA(dev)->dev_link.link_status = 0;
        return 0;
 }

 static int
 eth_dev_set_link_up(struct rte_eth_dev *dev)
 {
-       dev->data->dev_link.link_status = 1;
+       ETH_DATA(dev)->dev_link.link_status = 1;
        return 0;
 }

@@ -144,8 +144,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,uint16_t rx_queue_id,
                                    const struct rte_eth_rxconf *rx_conf __rte_unused,
                                    struct rte_mempool *mb_pool __rte_unused)
 {
-       struct pmd_internals *internals = dev->data->dev_private;
-       dev->data->rx_queues[rx_queue_id] = &internals->rx_ring_queues[rx_queue_id];
+       struct pmd_internals *internals = _DD_PRIVATE(dev);
+       _DD(dev, rx_queues[rx_queue_id]) = &internals->rx_ring_queues[rx_queue_id];
        return 0;
 }

@@ -155,8 +155,8 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                                    unsigned int socket_id __rte_unused,
                                    const struct rte_eth_txconf *tx_conf __rte_unused)
 {
-       struct pmd_internals *internals = dev->data->dev_private;
-       dev->data->tx_queues[tx_queue_id] = &internals->tx_ring_queues[tx_queue_id];
+       struct pmd_internals *internals = _DD_PRIVATE(dev);
+       _DD(dev, tx_queues[tx_queue_id]) = &internals->tx_ring_queues[tx_queue_id];
        return 0;
 }

@@ -165,14 +165,14 @@ static void
 eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
 {
-       struct pmd_internals *internals = dev->data->dev_private;
-       dev_info->driver_name = drivername;
+       struct pmd_internals *internals = _DD_PRIVATE(dev);
+       dev_info->di.driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
        dev_info->min_rx_bufsize = 0;
-       dev_info->pci_dev = NULL;
+       dev_info->di.pci_dev = NULL;
 }

 static void
@@ -180,7 +180,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
 {
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
-       const struct pmd_internals *internal = dev->data->dev_private;
+       const struct pmd_internals *internal = _DD_PRIVATE(dev);

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_rx_queues; i++) {
@@ -205,7 +205,7 @@ static void
 eth_stats_reset(struct rte_eth_dev *dev)
 {
        unsigned i;
-       struct pmd_internals *internal = dev->data->dev_private;
+       struct pmd_internals *internal = _DD_PRIVATE(dev);
        for (i = 0; i < internal->nb_rx_queues; i++)
                internal->rx_ring_queues[i].rx_pkts.cnt = 0;
        for (i = 0; i < internal->nb_tx_queues; i++) {
@@ -297,7 +297,7 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
                goto error;

        /* reserve an ethdev entry */
-       eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+       eth_dev = rte_eth_dev_allocate(name, RTE_DEV_VIRTUAL);
        if (eth_dev == NULL)
                goto error;

@@ -329,10 +329,10 @@ rte_eth_from_rings(const char *name, struct rte_ring *const rx_queues[],
        pci_dev->numa_node = numa_node;
        pci_dev->driver = &eth_drv->pci_drv;

-       data->dev_private = internals;
-       data->port_id = eth_dev->data->port_id;
-       data->nb_rx_queues = (uint16_t)nb_rx_queues;
-       data->nb_tx_queues = (uint16_t)nb_tx_queues;
+       data->dd.dev_private = internals;
+       data->dd.port_id = _DD(eth_dev, port_id);
+       data->dd.nb_rx_queues = (uint16_t)nb_rx_queues;
+       data->dd.nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &internals->address;

diff --git a/lib/librte_pmd_virtio/virtio_ethdev.c b/lib/librte_pmd_virtio/virtio_ethdev.c
index 7b83d9b..d6b3826 100644
--- a/lib/librte_pmd_virtio/virtio_ethdev.c
+++ b/lib/librte_pmd_virtio/virtio_ethdev.c
@@ -216,7 +216,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
 static int
 virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;
@@ -250,7 +250,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
        const struct rte_memzone *mz;
        uint16_t vq_size;
        int size;
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        struct virtqueue  *vq = NULL;

        /* Write the virtqueue index to the Queue Select Field */
@@ -279,17 +279,17 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,

        if (queue_type == VTNET_RQ) {
                snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d",
-                       dev->data->port_id, queue_idx);
+                       _DD_PORT(dev), queue_idx);
                vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
                        vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
        } else if (queue_type == VTNET_TQ) {
                snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
-                       dev->data->port_id, queue_idx);
+                       _DD_PORT(dev), queue_idx);
                vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
                        vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE);
        } else if (queue_type == VTNET_CQ) {
                snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
-                       dev->data->port_id);
+                       _DD_PORT(dev));
                vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
                        vq_size * sizeof(struct vq_desc_extra),
                        RTE_CACHE_LINE_SIZE);
@@ -300,7 +300,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
        }

        vq->hw = hw;
-       vq->port_id = dev->data->port_id;
+       vq->port_id = _DD_PORT(dev);
        vq->queue_id = queue_idx;
        vq->vq_queue_index = vtpci_queue_idx;
        vq->vq_nentries = vq_size;
@@ -345,7 +345,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
                 * For each xmit packet, allocate a virtio_net_hdr
                 */
                snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone",
-                       dev->data->port_id, queue_idx);
+                       _DD_PORT(dev), queue_idx);
                vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
                        vq_size * hw->vtnet_hdr_size,
                        socket_id, 0, RTE_CACHE_LINE_SIZE);
@@ -360,7 +360,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
        } else if (queue_type == VTNET_CQ) {
                /* Allocate a page for control vq command, data and status */
                snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone",
-                       dev->data->port_id);
+                       _DD_PORT(dev));
                vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
                        PAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE);
                if (vq->virtio_net_hdr_mz == NULL) {
@@ -389,7 +389,7 @@ virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
        struct virtqueue *vq;
        uint16_t nb_desc = 0;
        int ret;
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);

        PMD_INIT_FUNC_TRACE();
        ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
@@ -407,7 +407,7 @@ virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
 static void
 virtio_dev_close(struct rte_eth_dev *dev)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        struct rte_pci_device *pci_dev = dev->pci_dev;

        PMD_INIT_LOG(DEBUG, "virtio_dev_close");
@@ -423,7 +423,7 @@ virtio_dev_close(struct rte_eth_dev *dev)
 static void
 virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;
@@ -442,7 +442,7 @@ virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
 static void
 virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;
@@ -461,7 +461,7 @@ virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
 static void
 virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;
@@ -480,7 +480,7 @@ virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
 static void
 virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        struct virtio_pmd_ctrl ctrl;
        int dlen[1];
        int ret;
@@ -532,7 +532,7 @@ virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
 {
        struct rte_eth_link *dst = link;
-       struct rte_eth_link *src = &(dev->data->dev_link);
+       struct rte_eth_link *src = &(ETH_DATA(dev)->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                        *(uint64_t *)src) == 0)
@@ -557,7 +557,7 @@ static inline int
 virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                struct rte_eth_link *link)
 {
-       struct rte_eth_link *dst = &(dev->data->dev_link);
+       struct rte_eth_link *dst = &(ETH_DATA(dev)->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
@@ -572,8 +572,8 @@ virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        unsigned i;

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               const struct virtqueue *txvq = dev->data->tx_queues[i];
+       for (i = 0; i < _DD(dev, nb_tx_queues); i++) {
+               const struct virtqueue *txvq = _DD(dev, tx_queues[i]);
                if (txvq == NULL)
                        continue;

@@ -587,8 +587,8 @@ virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                }
        }

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               const struct virtqueue *rxvq = dev->data->rx_queues[i];
+       for (i = 0; i < _DD(dev, nb_rx_queues); i++) {
+               const struct virtqueue *rxvq = _DD(dev, rx_queues[i]);
                if (rxvq == NULL)
                        continue;

@@ -602,7 +602,7 @@ virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
                }
        }

-       stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+       stats->rx_nombuf = _DD(dev, rx_mbuf_alloc_failed);
 }

 static void
@@ -610,8 +610,8 @@ virtio_dev_stats_reset(struct rte_eth_dev *dev)
 {
        unsigned int i;

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               struct virtqueue *txvq = dev->data->tx_queues[i];
+       for (i = 0; i < _DD(dev, nb_tx_queues); i++) {
+               struct virtqueue *txvq = _DD(dev, tx_queues[i]);
                if (txvq == NULL)
                        continue;

@@ -620,8 +620,8 @@ virtio_dev_stats_reset(struct rte_eth_dev *dev)
                txvq->errors = 0;
        }

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               struct virtqueue *rxvq = dev->data->rx_queues[i];
+       for (i = 0; i < _DD(dev, nb_rx_queues); i++) {
+               struct virtqueue *rxvq = _DD(dev, rx_queues[i]);
                if (rxvq == NULL)
                        continue;

@@ -630,7 +630,7 @@ virtio_dev_stats_reset(struct rte_eth_dev *dev)
                rxvq->errors = 0;
        }

-       dev->data->rx_mbuf_alloc_failed = 0;
+       _DD(dev, rx_mbuf_alloc_failed) = 0;
 }

 static void
@@ -682,8 +682,8 @@ static void
 virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                    uint32_t index, uint32_t vmdq __rte_unused)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
-       const struct ether_addr *addrs = dev->data->mac_addrs;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
+       const struct ether_addr *addrs = ETH_DATA(dev)->mac_addrs;
        unsigned int i;
        struct virtio_net_ctrl_mac *uc, *mc;

@@ -712,8 +712,8 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 static void
 virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
-       struct ether_addr *addrs = dev->data->mac_addrs;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
+       struct ether_addr *addrs = ETH_DATA(dev)->mac_addrs;
        struct virtio_net_ctrl_mac *uc, *mc;
        unsigned int i;

@@ -743,7 +743,7 @@ virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 static void
 virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);

        memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);

@@ -764,7 +764,7 @@ virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 static int
 virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        struct virtio_pmd_ctrl ctrl;
        int len;

@@ -1079,7 +1079,7 @@ virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
                         void *param)
 {
        struct rte_eth_dev *dev = param;
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        uint8_t isr;

        /* Read interrupt status which clears interrupt */
@@ -1092,7 +1092,7 @@ virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
        if (isr & VIRTIO_PCI_ISR_CONFIG) {
                if (virtio_dev_link_update(dev, 0) == 0)
                        _rte_eth_dev_callback_process(dev,
-                                                     RTE_ETH_EVENT_INTR_LSC);
+                                                     RTE_DEV_EVENT_INTR_LSC);
        }

 }
@@ -1100,7 +1100,7 @@ virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
 static void
 rx_func_get(struct rte_eth_dev *eth_dev)
 {
-       struct virtio_hw *hw = eth_dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(eth_dev);
        if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
                eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
        else
@@ -1114,7 +1114,7 @@ rx_func_get(struct rte_eth_dev *eth_dev)
 static int
 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
-       struct virtio_hw *hw = eth_dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(eth_dev);
        struct virtio_net_config *config;
        struct virtio_net_config local_config;
        uint32_t offset_conf = sizeof(config->mac);
@@ -1131,8 +1131,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
        }

        /* Allocate memory for storing MAC addresses */
-       eth_dev->data->mac_addrs = rte_zmalloc("virtio", ETHER_ADDR_LEN, 0);
-       if (eth_dev->data->mac_addrs == NULL) {
+       ETH_DATA(eth_dev)->mac_addrs = rte_zmalloc("virtio", ETHER_ADDR_LEN, 0);
+       if (ETH_DATA(eth_dev)->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                        "Failed to allocate %d bytes needed to store MAC addresses",
                        ETHER_ADDR_LEN);
@@ -1167,7 +1167,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
        /* Copy the permanent MAC address to: virtio_hw */
        virtio_get_hwaddr(hw);
        ether_addr_copy((struct ether_addr *) hw->mac_addr,
-                       &eth_dev->data->mac_addrs[0]);
+                       &ETH_DATA(eth_dev)->mac_addrs[0]);
        PMD_INIT_LOG(DEBUG,
                     "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
                     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
@@ -1218,8 +1218,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
                hw->max_tx_queues = 1;
        }

-       eth_dev->data->nb_rx_queues = hw->max_rx_queues;
-       eth_dev->data->nb_tx_queues = hw->max_tx_queues;
+       _DD(eth_dev, nb_rx_queues) = hw->max_rx_queues;
+       _DD(eth_dev, nb_tx_queues) = hw->max_tx_queues;

        PMD_INIT_LOG(DEBUG, "hw->max_rx_queues=%d   hw->max_tx_queues=%d",
                        hw->max_rx_queues, hw->max_tx_queues);
@@ -1242,7 +1242,7 @@ static struct eth_driver rte_virtio_pmd = {
                .name = "rte_virtio_pmd",
                .id_table = pci_id_virtio_map,
        },
-       .eth_dev_init = eth_virtio_dev_init,
+       .dev_init = eth_virtio_dev_init,
        .dev_private_size = sizeof(struct virtio_hw),
 };

@@ -1285,8 +1285,8 @@ virtio_dev_tx_queue_release(__rte_unused void *txq)
 static int
 virtio_dev_configure(struct rte_eth_dev *dev)
 {
-       const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-       struct virtio_hw *hw = dev->data->dev_private;
+       const struct rte_eth_rxmode *rxmode = &ETH_DATA(dev)->dev_conf.rxmode;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        struct rte_pci_device *pci_dev = dev->pci_dev;

        PMD_INIT_LOG(DEBUG, "configure");
@@ -1319,11 +1319,11 @@ static int
 virtio_dev_start(struct rte_eth_dev *dev)
 {
        uint16_t nb_queues, i;
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        struct rte_pci_device *pci_dev = dev->pci_dev;

        /* check if lsc interrupt feature is enabled */
-       if ((dev->data->dev_conf.intr_conf.lsc) &&
+       if ((ETH_DATA(dev)->dev_conf.intr_conf.lsc) &&
                (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
                if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
                        PMD_DRV_LOG(ERR, "link status not supported by host");
@@ -1353,7 +1353,7 @@ virtio_dev_start(struct rte_eth_dev *dev)
         *Otherwise the tap backend might already stop its queue due to fullness.
         *vhost backend will have no chance to be waked up
         */
-       nb_queues = dev->data->nb_rx_queues;
+       nb_queues = _DD(dev, nb_rx_queues);
        if (nb_queues > 1) {
                if (virtio_set_multiple_queues(dev, nb_queues) != 0)
                        return -EINVAL;
@@ -1362,15 +1362,15 @@ virtio_dev_start(struct rte_eth_dev *dev)
        PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);

        for (i = 0; i < nb_queues; i++)
-               virtqueue_notify(dev->data->rx_queues[i]);
+               virtqueue_notify(_DD(dev, rx_queues[i]));

        PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

-       for (i = 0; i < dev->data->nb_rx_queues; i++)
-               VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
+       for (i = 0; i < _DD(dev, nb_rx_queues); i++)
+               VIRTQUEUE_DUMP((struct virtqueue *)_DD(dev, rx_queues[i]));

-       for (i = 0; i < dev->data->nb_tx_queues; i++)
-               VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
+       for (i = 0; i < _DD(dev, nb_tx_queues); i++)
+               VIRTQUEUE_DUMP((struct virtqueue *)_DD(dev, tx_queues[i]));

        return 0;
 }
@@ -1380,13 +1380,13 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
        struct rte_mbuf *buf;
        int i, mbuf_num = 0;

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+       for (i = 0; i < _DD(dev, nb_rx_queues); i++) {
                PMD_INIT_LOG(DEBUG,
                             "Before freeing rxq[%d] used and unused buf", i);
                VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);

                while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
-                                       dev->data->rx_queues[i])) != NULL) {
+                                       _DD(dev, rx_queues[i]))) != NULL) {
                        rte_pktmbuf_free(buf);
                        mbuf_num++;
                }
@@ -1397,7 +1397,7 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
                VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
        }

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+       for (i = 0; i < _DD(dev, nb_tx_queues); i++) {
                PMD_INIT_LOG(DEBUG,
                             "Before freeing txq[%d] used and unused bufs",
                             i);
@@ -1405,7 +1405,7 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)

                mbuf_num = 0;
                while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
-                                       dev->data->tx_queues[i])) != NULL) {
+                                       _DD(dev, tx_queues[i]))) != NULL) {
                        rte_pktmbuf_free(buf);

                        mbuf_num++;
@@ -1428,7 +1428,7 @@ virtio_dev_stop(struct rte_eth_dev *dev)

        PMD_INIT_LOG(DEBUG, "stop");

-       if (dev->data->dev_conf.intr_conf.lsc)
+       if (ETH_DATA(dev)->dev_conf.intr_conf.lsc)
                rte_intr_disable(&dev->pci_dev->intr_handle);

        memset(&link, 0, sizeof(link));
@@ -1440,7 +1440,7 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 {
        struct rte_eth_link link, old;
        uint16_t status;
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);
        memset(&link, 0, sizeof(link));
        virtio_dev_atomic_read_link_status(dev, &link);
        old = link;
@@ -1472,9 +1472,9 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 static void
 virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);

-       dev_info->driver_name = dev->driver->pci_drv.name;
+       dev_info->di.driver_name = dev->driver->pci_drv.name;
        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
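
A side note on the link-status helpers converted above: they depend on
struct rte_eth_link fitting in 64 bits, so a single rte_atomic64_cmpset()
publishes the whole struct without torn reads. The core of the trick,
condensed (sketch mirroring the helpers above):

        static inline int
        link_publish(struct rte_eth_link *dst, struct rte_eth_link *src)
        {
                /* Compare dst against its current value and swap in src
                 * whole; fails only if dst changed underneath us. */
                if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                        return -1;
                return 0;
        }
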
diff --git a/lib/librte_pmd_virtio/virtio_rxtx.c b/lib/librte_pmd_virtio/virtio_rxtx.c
index 3ff275c..d0227a5 100644
--- a/lib/librte_pmd_virtio/virtio_rxtx.c
+++ b/lib/librte_pmd_virtio/virtio_rxtx.c
@@ -339,7 +339,7 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type)
 void
 virtio_dev_cq_start(struct rte_eth_dev *dev)
 {
-       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtio_hw *hw = _DD_PRIVATE(dev);

        if (hw->cvq) {
                virtio_dev_vring_start(hw->cvq, VTNET_CQ);
@@ -362,14 +362,14 @@ virtio_dev_rxtx_start(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();

        /* Start rx vring. */
-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ);
+       for (i = 0; i < _DD(dev, nb_rx_queues); i++) {
+               virtio_dev_vring_start(_DD(dev, rx_queues[i]), VTNET_RQ);
                VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
        }

        /* Start tx vring. */
-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ);
+       for (i = 0; i < _DD(dev, nb_tx_queues); i++) {
+               virtio_dev_vring_start(_DD(dev, tx_queues[i]), VTNET_TQ);
                VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
        }
 }
@@ -397,7 +397,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
        /* Create mempool for rx mbuf allocation */
        vq->mpool = mp;

-       dev->data->rx_queues[queue_idx] = vq;
+       _DD(dev, rx_queues[queue_idx]) = vq;
        return 0;
 }

@@ -445,13 +445,13 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        "number of TX entries minus 3 (%u)."
                        " (tx_free_thresh=%u port=%u queue=%u)\n",
                        vq->vq_nentries - 3,
-                       tx_free_thresh, dev->data->port_id, queue_idx);
+                       tx_free_thresh, _DD_PORT(dev), queue_idx);
                return -EINVAL;
        }

        vq->vq_free_thresh = tx_free_thresh;

-       dev->data->tx_queues[queue_idx] = vq;
+       _DD(dev, tx_queues[queue_idx]) = vq;
        return 0;
 }

@@ -543,7 +543,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if (unlikely(new_mbuf == NULL)) {
                        struct rte_eth_dev *dev
                                = &rte_eth_devices[rxvq->port_id];
-                       dev->data->rx_mbuf_alloc_failed++;
+                       _DD(dev, rx_mbuf_alloc_failed)++;
                        break;
                }
                error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
@@ -706,7 +706,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
                if (unlikely(new_mbuf == NULL)) {
                        struct rte_eth_dev *dev
                                = &rte_eth_devices[rxvq->port_id];
-                       dev->data->rx_mbuf_alloc_failed++;
+                       _DD(dev, rx_mbuf_alloc_failed)++;
                        break;
                }
                error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf);
diff --git a/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c b/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c
index 458dce5..64a7a2c 100644
--- a/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_ethdev.c
@@ -125,7 +125,7 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
        const struct rte_memzone *mz;

        snprintf(z_name, sizeof(z_name), "%s_%d_%s",
-                                       dev->driver->pci_drv.name, dev->data->port_id, post_string);
+                                       dev->driver->pci_drv.name, _DD_PORT(dev), post_string);

        mz = rte_memzone_lookup(z_name);
        if (mz)
@@ -151,7 +151,7 @@ static inline int
 rte_vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                struct rte_eth_link *link)
 {
-       struct rte_eth_link *dst = &(dev->data->dev_link);
+       struct rte_eth_link *dst = &(ETH_DATA(dev)->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
@@ -183,7 +183,7 @@ static int
 eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
 {
        struct rte_pci_device *pci_dev;
-       struct vmxnet3_hw *hw = eth_dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(eth_dev);
        uint32_t mac_hi, mac_lo, ver;

        PMD_INIT_FUNC_TRACE();
@@ -194,7 +194,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
        pci_dev = eth_dev->pci_dev;

        /*
-        * for secondary processes, we don't initialise any further as primary
+        * for secondary processes, we don't initialize any further as primary
         * has already done this work.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -238,9 +238,9 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
        memcpy(hw->perm_addr+4, &mac_hi, 2);

        /* Allocate memory for storing MAC addresses */
-       eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
+       ETH_DATA(eth_dev)->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
                                               VMXNET3_MAX_MAC_ADDRS, 0);
-       if (eth_dev->data->mac_addrs == NULL) {
+       if (ETH_DATA(eth_dev)->mac_addrs == NULL) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate %d bytes needed to store MAC 
addresses",
                             ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
@@ -248,7 +248,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
        }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->perm_addr,
-                       &eth_dev->data->mac_addrs[0]);
+                       &ETH_DATA(eth_dev)->mac_addrs[0]);

        PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
                     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
@@ -266,7 +266,7 @@ static struct eth_driver rte_vmxnet3_pmd = {
                .id_table = pci_id_vmxnet3_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        },
-       .eth_dev_init = eth_vmxnet3_dev_init,
+       .dev_init = eth_vmxnet3_dev_init,
        .dev_private_size = sizeof(struct vmxnet3_hw),
 };

@@ -288,23 +288,23 @@ static int
 vmxnet3_dev_configure(struct rte_eth_dev *dev)
 {
        const struct rte_memzone *mz;
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);
        size_t size;

        PMD_INIT_FUNC_TRACE();

-       if (dev->data->nb_rx_queues > UINT8_MAX ||
-           dev->data->nb_tx_queues > UINT8_MAX)
+       if (_DD(dev, nb_rx_queues) > UINT8_MAX ||
+           _DD(dev, nb_tx_queues) > UINT8_MAX)
                return -EINVAL;

-       size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
-               dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
+       size = _DD(dev, nb_rx_queues) * sizeof(struct Vmxnet3_TxQueueDesc) +
+               _DD(dev, nb_tx_queues) * sizeof(struct Vmxnet3_RxQueueDesc);

        if (size > UINT16_MAX)
                return -EINVAL;

-       hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
-       hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
+       hw->num_rx_queues = (uint8_t)_DD(dev, nb_rx_queues);
+       hw->num_tx_queues = (uint8_t)_DD(dev, nb_tx_queues);

        /*
         * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
@@ -340,7 +340,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
        hw->queueDescPA = mz->phys_addr;
        hw->queue_desc_len = (uint16_t)size;

-       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+       if (ETH_DATA(dev)->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {

                /* Allocate memory structure for UPT1_RSSConf and configure */
                mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
@@ -362,8 +362,8 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 static int
 vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 {
-       struct rte_eth_conf port_conf = dev->data->dev_conf;
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct rte_eth_conf port_conf = ETH_DATA(dev)->dev_conf;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);
        Vmxnet3_DriverShared *shared = hw->shared;
        Vmxnet3_DSDevRead *devRead = &shared->devRead;
        uint32_t *mac_ptr;
@@ -398,7 +398,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)

        for (i = 0; i < hw->num_tx_queues; i++) {
                Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
-               vmxnet3_tx_queue_t *txq  = dev->data->tx_queues[i];
+               vmxnet3_tx_queue_t *txq  = _DD(dev, tx_queues[i]);

                tqd->ctrl.txNumDeferred  = 0;
                tqd->ctrl.txThreshold    = 1;
@@ -417,7 +417,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)

        for (i = 0; i < hw->num_rx_queues; i++) {
                Vmxnet3_RxQueueDesc *rqd  = &hw->rqd_start[i];
-               vmxnet3_rx_queue_t *rxq   = dev->data->rx_queues[i];
+               vmxnet3_rx_queue_t *rxq   = _DD(dev, rx_queues[i]);

                rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
                rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
@@ -436,10 +436,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
        devRead->rxFilterConf.rxMode = 0;

        /* Setting up feature flags */
-       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_ip_checksum)
                devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;

-       if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_strip)
                devRead->misc.uptFeatures |= VMXNET3_F_RXVLAN;

        if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
@@ -453,7 +453,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
                devRead->rssConfDesc.confPA  = hw->rss_confPA;
        }

-       if (dev->data->dev_conf.rxmode.hw_vlan_filter) {
+       if (ETH_DATA(dev)->dev_conf.rxmode.hw_vlan_filter) {
                ret = vmxnet3_vlan_configure(dev);
                if (ret != VMXNET3_SUCCESS)
                        return ret;
@@ -484,7 +484,7 @@ static int
 vmxnet3_dev_start(struct rte_eth_dev *dev)
 {
        int status, ret;
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);

        PMD_INIT_FUNC_TRACE();

@@ -541,7 +541,7 @@ static void
 vmxnet3_dev_stop(struct rte_eth_dev *dev)
 {
        struct rte_eth_link link;
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);

        PMD_INIT_FUNC_TRACE();

@@ -576,7 +576,7 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
 static void
 vmxnet3_dev_close(struct rte_eth_dev *dev)
 {
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);

        PMD_INIT_FUNC_TRACE();

@@ -588,7 +588,7 @@ static void
 vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
        unsigned int i;
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);

        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

@@ -649,7 +649,7 @@ vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev, struct rte_
 static int
 vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
 {
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);
        struct rte_eth_link link;
        uint32_t ret;

@@ -692,7 +692,7 @@ vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) {
 static void
 vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 {
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);

        vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
 }
@@ -701,7 +701,7 @@ vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
 static void
 vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 {
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);

        vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
 }
@@ -710,7 +710,7 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
 static void
 vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
 {
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);

        vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
 }
@@ -719,7 +719,7 @@ vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
 static void
 vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
 {
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);

        vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
 }
diff --git a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
index a530c80..3b8a7fc 100644
--- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
@@ -276,8 +276,8 @@ vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)

        PMD_INIT_FUNC_TRACE();

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+       for (i = 0; i < _DD(dev, nb_tx_queues); i++) {
+               struct vmxnet3_tx_queue *txq = _DD(dev, tx_queues[i]);

                if (txq != NULL) {
                        txq->stopped = TRUE;
@@ -285,8 +285,8 @@ vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
                }
        }

-       for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
+       for (i = 0; i < _DD(dev, nb_rx_queues); i++) {
+               struct vmxnet3_rx_queue *rxq = _DD(dev, rx_queues[i]);

                if (rxq != NULL) {
                        rxq->stopped = TRUE;
@@ -704,7 +704,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,

        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                        dev->driver->pci_drv.name, ring_name,
-                       dev->data->port_id, queue_id);
+                       _DD_PORT(dev), queue_id);

        mz = rte_memzone_lookup(z_name);
        if (mz)
@@ -721,7 +721,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           unsigned int socket_id,
                           __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
 {
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);
        const struct rte_memzone *mz;
        struct vmxnet3_tx_queue *txq;
        struct vmxnet3_cmd_ring *ring;
@@ -750,7 +750,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
        }

        txq->queue_id = queue_idx;
-       txq->port_id = dev->data->port_id;
+       txq->port_id = _DD_PORT(dev);
        txq->shared = &hw->tqd_start[queue_idx];
        txq->hw = hw;
        txq->qid = queue_idx;
@@ -816,7 +816,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
        }

        /* Update the data portion with txq */
-       dev->data->tx_queues[queue_idx] = txq;
+       _DD(dev, tx_queues[queue_idx]) = txq;

        return 0;
 }
@@ -831,7 +831,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 {
        const struct rte_memzone *mz;
        struct vmxnet3_rx_queue *rxq;
-       struct vmxnet3_hw     *hw = dev->data->dev_private;
+       struct vmxnet3_hw     *hw = _DD_PRIVATE(dev);
        struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
        struct vmxnet3_comp_ring *comp_ring;
        int size;
@@ -847,7 +847,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
        buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
                               RTE_PKTMBUF_HEADROOM);

-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
+       if (ETH_DATA(dev)->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
                PMD_INIT_LOG(ERR, "buf_size = %u, max_pkt_len = %u, "
                             "VMXNET3 don't support scatter packets yet",
                             buf_size, dev->data->dev_conf.rxmode.max_rx_pkt_len);
@@ -862,7 +862,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,

        rxq->mp = mp;
        rxq->queue_id = queue_idx;
-       rxq->port_id = dev->data->port_id;
+       rxq->port_id = _DD_PORT(dev);
        rxq->shared = &hw->rqd_start[queue_idx];
        rxq->hw = hw;
        rxq->qid1 = queue_idx;
@@ -936,7 +936,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
        }

        /* Update the data portion with rxq */
-       dev->data->rx_queues[queue_idx] = rxq;
+       _DD(dev, rx_queues[queue_idx]) = rxq;

        return 0;
 }
@@ -948,7 +948,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 int
 vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
 {
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);

        int i, ret;
        uint8_t j;
@@ -956,7 +956,7 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < hw->num_rx_queues; i++) {
-               vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+               vmxnet3_rx_queue_t *rxq = _DD(dev, rx_queues[i]);

                for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
                        /* Passing 0 as alloc_num will allocate full ring */
@@ -974,8 +974,8 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
                rxq->stopped = FALSE;
        }

-       for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+       for (i = 0; i < _DD(dev, nb_tx_queues); i++) {
+               struct vmxnet3_tx_queue *txq = _DD(dev, tx_queues[i]);

                txq->stopped = FALSE;
        }
@@ -997,7 +997,7 @@ static uint8_t rss_intel_key[40] = {
 int
 vmxnet3_rss_configure(struct rte_eth_dev *dev)
 {
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);
        struct VMXNET3_RSSConf *dev_rss_conf;
        struct rte_eth_rss_conf *port_rss_conf;
        uint64_t rss_hf;
@@ -1006,7 +1006,7 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();

        dev_rss_conf = hw->rss_conf;
-       port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+       port_rss_conf = &ETH_DATA(dev)->dev_conf.rx_adv_conf.rss_conf;

        /* loading hashFunc */
        dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
@@ -1025,7 +1025,7 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)

        /* loading indTable */
        for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
-               if (j == dev->data->nb_rx_queues)
+               if (j == _DD(dev, nb_rx_queues))
                        j = 0;
                dev_rss_conf->indTable[i] = j;
        }
@@ -1052,7 +1052,7 @@ int
 vmxnet3_vlan_configure(struct rte_eth_dev *dev)
 {
        uint8_t i;
-       struct vmxnet3_hw *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = _DD_PRIVATE(dev);
        uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

        PMD_INIT_FUNC_TRACE();
diff --git a/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c b/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c
index bc403d6..a63e041 100644
--- a/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c
+++ b/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c
@@ -648,7 +648,7 @@ eth_dev_xenvirt_create(const char *name, const char *params,
                goto err;

        /* reserve an ethdev entry */
-       eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+       eth_dev = rte_eth_dev_allocate(name, RTE_DEV_VIRTUAL);
        if (eth_dev == NULL)
                goto err;

-- 
2.3.0

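P.S. (editor's note for readers following the conversion): the ETH_DATA(),
_DD(), _DD_PRIVATE() and _DD_PORT() accessors used throughout this patch are
defined elsewhere in the series, not in this mail. The sketch below is a
hypothetical reconstruction, consistent with every use site above, shown only
to make the mechanical conversions readable; check the series' rte_ethdev.h
change for the real definitions.

/* Hypothetical sketch -- not part of this patch. Assumed expansions of
 * the accessor macros, inferred from their uses above.
 */
#define ETH_DATA(dev)     ((dev)->data)            /* struct rte_eth_dev_data * */
#define _DD(dev, field)   (ETH_DATA(dev)->field)   /* device-data field access  */
#define _DD_PRIVATE(dev)  _DD(dev, dev_private)    /* driver-private context    */
#define _DD_PORT(dev)     _DD(dev, port_id)        /* port identifier           */

/* Example conversion, as applied in the hunks above:
 *   before: dev->data->tx_queues[i]
 *   after:  _DD(dev, tx_queues[i])
 */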