The rte_pktmbuf structure was initially included in the rte_mbuf
structure. This was needed when there were 2 types of mbuf (ctrl and
packet). As the control mbuf has been removed, we can merge the
rte_pktmbuf into the rte_mbuf structure.

Advantages of doing this:
  - the access to mbuf fields is easier (ex: m->data instead of m->pkt.data)
  - it makes the structure more consistent: for instance, there was no reason
    to have the ol_flags field in rte_mbuf
  - it will allow a deeper reorganization of the rte_mbuf structure in the
    next commits, allowing us to gain several bytes in it

Signed-off-by: Olivier Matz <olivier.matz at 6wind.com>
---
 app/test-pmd/cmdline.c                             |   1 -
 app/test-pmd/csumonly.c                            |   6 +-
 app/test-pmd/ieee1588fwd.c                         |   6 +-
 app/test-pmd/macfwd-retry.c                        |   2 +-
 app/test-pmd/macfwd.c                              |   8 +-
 app/test-pmd/rxonly.c                              |  12 +-
 app/test-pmd/testpmd.c                             |   8 +-
 app/test-pmd/testpmd.h                             |   2 +-
 app/test-pmd/txonly.c                              |  42 +++----
 app/test/commands.c                                |   1 -
 app/test/test_mbuf.c                               |  12 +-
 app/test/test_sched.c                              |   4 +-
 examples/dpdk_qat/crypto.c                         |  22 ++--
 examples/dpdk_qat/main.c                           |   2 +-
 examples/exception_path/main.c                     |  10 +-
 examples/ip_reassembly/ipv4_rsmbl.h                |  20 +--
 examples/ip_reassembly/main.c                      |   6 +-
 examples/ipv4_frag/main.c                          |   4 +-
 examples/ipv4_frag/rte_ipv4_frag.h                 |  42 +++----
 examples/ipv4_multicast/main.c                     |  14 +--
 examples/l3fwd-power/main.c                        |   2 +-
 examples/l3fwd-vf/main.c                           |   2 +-
 examples/l3fwd/main.c                              |  10 +-
 examples/load_balancer/runtime.c                   |   2 +-
 .../client_server_mp/mp_client/client.c            |   2 +-
 examples/quota_watermark/qw/main.c                 |   4 +-
 examples/vhost/main.c                              |  22 ++--
 examples/vhost_xen/main.c                          |  22 ++--
 lib/librte_mbuf/rte_mbuf.c                         |  26 ++--
 lib/librte_mbuf/rte_mbuf.h                         | 140 ++++++++++-----------
 lib/librte_pmd_e1000/em_rxtx.c                     |  64 +++++-----
 lib/librte_pmd_e1000/igb_rxtx.c                    |  68 +++++-----
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c                  | 100 +++++++--------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.h                  |   2 +-
 lib/librte_pmd_pcap/rte_eth_pcap.c                 |  14 +--
 lib/librte_pmd_virtio/virtio_rxtx.c                |  16 +--
 lib/librte_pmd_virtio/virtqueue.h                  |   6 +-
 lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c              |  26 ++--
 lib/librte_pmd_xenvirt/rte_eth_xenvirt.c           |  12 +-
 lib/librte_pmd_xenvirt/virtqueue.h                 |   4 +-
 lib/librte_sched/rte_sched.c                       |  14 +--
 lib/librte_sched/rte_sched.h                       |  10 +-
 42 files changed, 394 insertions(+), 398 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index e3d1849..c507c46 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -5009,7 +5009,6 @@ dump_struct_sizes(void)
 {
 #define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t));
        DUMP_SIZE(struct rte_mbuf);
-       DUMP_SIZE(struct rte_pktmbuf);
        DUMP_SIZE(struct rte_mempool);
        DUMP_SIZE(struct rte_ring);
 #undef DUMP_SIZE
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 3568ba0..ee82eb6 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -263,7 +263,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                pkt_ol_flags = mb->ol_flags;
                ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));

-               eth_hdr = (struct ether_hdr *) mb->pkt.data;
+               eth_hdr = (struct ether_hdr *) mb->data;
                eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
                if (eth_type == ETHER_TYPE_VLAN) {
                        /* Only allow single VLAN label here */
@@ -430,8 +430,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                }

                /* Combine the packet header write. VLAN is not consider here */
-               mb->pkt.vlan_macip.f.l2_len = l2_len;
-               mb->pkt.vlan_macip.f.l3_len = l3_len;
+               mb->vlan_macip.f.l2_len = l2_len;
+               mb->vlan_macip.f.l3_len = l3_len;
                mb->ol_flags = ol_flags;
        }
        nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
diff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c
index 44f0a89..4f18183 100644
--- a/app/test-pmd/ieee1588fwd.c
+++ b/app/test-pmd/ieee1588fwd.c
@@ -546,7 +546,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
         * Check that the received packet is a PTP packet that was detected
         * by the hardware.
         */
-       eth_hdr = (struct ether_hdr *)mb->pkt.data;
+       eth_hdr = (struct ether_hdr *)mb->data;
        eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
        if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
                if (eth_type == ETHER_TYPE_1588) {
@@ -557,7 +557,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
                        printf("Port %u Received non PTP packet type=0x%4x "
                               "len=%u\n",
                               (unsigned) fs->rx_port, eth_type,
-                              (unsigned) mb->pkt.pkt_len);
+                              (unsigned) mb->pkt_len);
                }
                rte_pktmbuf_free(mb);
                return;
@@ -574,7 +574,7 @@ ieee1588_packet_fwd(struct fwd_stream *fs)
         * Check that the received PTP packet is a PTP V2 packet of type
         * PTP_SYNC_MESSAGE.
         */
-       ptp_hdr = (struct ptpv2_msg *) ((char *) mb->pkt.data +
+       ptp_hdr = (struct ptpv2_msg *) ((char *) mb->data +
                                        sizeof(struct ether_hdr));
        if (ptp_hdr->version != 0x02) {
                printf("Port %u Received PTP V2 Ethernet frame with wrong PTP"
diff --git a/app/test-pmd/macfwd-retry.c b/app/test-pmd/macfwd-retry.c
index 98fc037..687ff8d 100644
--- a/app/test-pmd/macfwd-retry.c
+++ b/app/test-pmd/macfwd-retry.c
@@ -119,7 +119,7 @@ pkt_burst_mac_retry_forward(struct fwd_stream *fs)
        fs->rx_packets += nb_rx;
        for (i = 0; i < nb_rx; i++) {
                mb = pkts_burst[i];
-               eth_hdr = (struct ether_hdr *) mb->pkt.data;
+               eth_hdr = (struct ether_hdr *) mb->data;
                ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
                                &eth_hdr->d_addr);
                ether_addr_copy(&ports[fs->tx_port].eth_addr,
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 3099792..8d7612c 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -110,15 +110,15 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
        txp = &ports[fs->tx_port];
        for (i = 0; i < nb_rx; i++) {
                mb = pkts_burst[i];
-               eth_hdr = (struct ether_hdr *) mb->pkt.data;
+               eth_hdr = (struct ether_hdr *) mb->data;
                ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
                                &eth_hdr->d_addr);
                ether_addr_copy(&ports[fs->tx_port].eth_addr,
                                &eth_hdr->s_addr);
                mb->ol_flags = txp->tx_ol_flags;
-               mb->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
-               mb->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
-               mb->pkt.vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+               mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+               mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+               mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
        }
        nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
        fs->tx_packets += nb_tx;
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index 30f8195..b77c8ce 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -149,24 +149,24 @@ pkt_burst_receive(struct fwd_stream *fs)
                        rte_pktmbuf_free(mb);
                        continue;
                }
-               eth_hdr = (struct ether_hdr *) mb->pkt.data;
+               eth_hdr = (struct ether_hdr *) mb->data;
                eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
                ol_flags = mb->ol_flags;
                print_ether_addr("  src=", &eth_hdr->s_addr);
                print_ether_addr(" - dst=", &eth_hdr->d_addr);
                printf(" - type=0x%04x - length=%u - nb_segs=%d",
-                      eth_type, (unsigned) mb->pkt.pkt_len,
-                      (int)mb->pkt.nb_segs);
+                      eth_type, (unsigned) mb->pkt_len,
+                      (int)mb->nb_segs);
                if (ol_flags & PKT_RX_RSS_HASH) {
-                       printf(" - RSS hash=0x%x", (unsigned) mb->pkt.hash.rss);
+                       printf(" - RSS hash=0x%x", (unsigned) mb->hash.rss);
                        printf(" - RSS queue=0x%x",(unsigned) fs->rx_queue);
                }
                else if (ol_flags & PKT_RX_FDIR)
                        printf(" - FDIR hash=0x%x - FDIR id=0x%x ",
-                              mb->pkt.hash.fdir.hash, mb->pkt.hash.fdir.id);
+                              mb->hash.fdir.hash, mb->hash.fdir.id);
                if (ol_flags & PKT_RX_VLAN_PKT)
                        printf(" - VLAN tci=0x%x",
-                               mb->pkt.vlan_macip.f.vlan_tci);
+                               mb->vlan_macip.f.vlan_tci);
                printf("\n");
                if (ol_flags != 0) {
                        int rxf;
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 76b3823..1964020 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -395,10 +395,10 @@ testpmd_mbuf_ctor(struct rte_mempool *mp,
                        mb_ctor_arg->seg_buf_offset);
        mb->buf_len      = mb_ctor_arg->seg_buf_size;
        mb->ol_flags     = 0;
-       mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
-       mb->pkt.nb_segs  = 1;
-       mb->pkt.vlan_macip.data = 0;
-       mb->pkt.hash.rss = 0;
+       mb->data         = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+       mb->nb_segs      = 1;
+       mb->vlan_macip.data = 0;
+       mb->hash.rss     = 0;
 }

 static void
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 5b4ee6f..bb10d3b 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -60,7 +60,7 @@ int main(int argc, char **argv);
  * The maximum number of segments per packet is used when creating
  * scattered transmit packets composed of a list of mbufs.
  */
-#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
+#define RTE_MAX_SEGS_PER_PKT 255 /**< nb_segs is a 8-bit unsigned char. */

 #define MAX_PKT_BURST 512
 #define DEF_PKT_BURST 16
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 1f066d0..3baa0c8 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -106,18 +106,18 @@ copy_buf_to_pkt_segs(void* buf, unsigned len, struct 
rte_mbuf *pkt,
        unsigned copy_len;

        seg = pkt;
-       while (offset >= seg->pkt.data_len) {
-               offset -= seg->pkt.data_len;
-               seg = seg->pkt.next;
+       while (offset >= seg->data_len) {
+               offset -= seg->data_len;
+               seg = seg->next;
        }
-       copy_len = seg->pkt.data_len - offset;
-       seg_buf = ((char *) seg->pkt.data + offset);
+       copy_len = seg->data_len - offset;
+       seg_buf = ((char *) seg->data + offset);
        while (len > copy_len) {
                rte_memcpy(seg_buf, buf, (size_t) copy_len);
                len -= copy_len;
                buf = ((char*) buf + copy_len);
-               seg = seg->pkt.next;
-               seg_buf = seg->pkt.data;
+               seg = seg->next;
+               seg_buf = seg->data;
        }
        rte_memcpy(seg_buf, buf, (size_t) len);
 }
@@ -125,8 +125,8 @@ copy_buf_to_pkt_segs(void* buf, unsigned len, struct 
rte_mbuf *pkt,
 static inline void
 copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
 {
-       if (offset + len <= pkt->pkt.data_len) {
-               rte_memcpy(((char *) pkt->pkt.data + offset), buf, (size_t) 
len);
+       if (offset + len <= pkt->data_len) {
+               rte_memcpy(((char *) pkt->data + offset), buf, (size_t) len);
                return;
        }
        copy_buf_to_pkt_segs(buf, len, pkt, offset);
@@ -225,19 +225,19 @@ pkt_burst_transmit(struct fwd_stream *fs)
                                return;
                        break;
                }
-               pkt->pkt.data_len = tx_pkt_seg_lengths[0];
+               pkt->data_len = tx_pkt_seg_lengths[0];
                pkt_seg = pkt;
                for (i = 1; i < tx_pkt_nb_segs; i++) {
-                       pkt_seg->pkt.next = tx_mbuf_alloc(mbp);
-                       if (pkt_seg->pkt.next == NULL) {
-                               pkt->pkt.nb_segs = i;
+                       pkt_seg->next = tx_mbuf_alloc(mbp);
+                       if (pkt_seg->next == NULL) {
+                               pkt->nb_segs = i;
                                rte_pktmbuf_free(pkt);
                                goto nomore_mbuf;
                        }
-                       pkt_seg = pkt_seg->pkt.next;
-                       pkt_seg->pkt.data_len = tx_pkt_seg_lengths[i];
+                       pkt_seg = pkt_seg->next;
+                       pkt_seg->data_len = tx_pkt_seg_lengths[i];
                }
-               pkt_seg->pkt.next = NULL; /* Last segment of packet. */
+               pkt_seg->next = NULL; /* Last segment of packet. */

                /*
                 * Initialize Ethernet header.
@@ -260,12 +260,12 @@ pkt_burst_transmit(struct fwd_stream *fs)
                 * Complete first mbuf of packet and append it to the
                 * burst of packets to be transmitted.
                 */
-               pkt->pkt.nb_segs = tx_pkt_nb_segs;
-               pkt->pkt.pkt_len = tx_pkt_length;
+               pkt->nb_segs = tx_pkt_nb_segs;
+               pkt->pkt_len = tx_pkt_length;
                pkt->ol_flags = ol_flags;
-               pkt->pkt.vlan_macip.f.vlan_tci  = vlan_tci;
-               pkt->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
-               pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+               pkt->vlan_macip.f.vlan_tci  = vlan_tci;
+               pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+               pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
                pkts_burst[nb_pkt] = pkt;
        }
        nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
diff --git a/app/test/commands.c b/app/test/commands.c
index c69544b..ef66fdd 100644
--- a/app/test/commands.c
+++ b/app/test/commands.c
@@ -261,7 +261,6 @@ dump_struct_sizes(void)
 {
 #define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t));
        DUMP_SIZE(struct rte_mbuf);
-       DUMP_SIZE(struct rte_pktmbuf);
        DUMP_SIZE(struct rte_mempool);
        DUMP_SIZE(struct rte_ring);
 #undef DUMP_SIZE
diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
index 07b5551..320d76f 100644
--- a/app/test/test_mbuf.c
+++ b/app/test/test_mbuf.c
@@ -344,8 +344,8 @@ testclone_testupdate_testdetach(void)
                GOTO_FAIL("cannot clone data\n");
        rte_pktmbuf_free(clone);

-       mc->pkt.next = rte_pktmbuf_alloc(pktmbuf_pool);
-       if(mc->pkt.next == NULL)
+       mc->next = rte_pktmbuf_alloc(pktmbuf_pool);
+       if(mc->next == NULL)
                GOTO_FAIL("Next Pkt Null\n");

        clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
@@ -432,7 +432,7 @@ test_pktmbuf_pool_ptr(void)
                        printf("rte_pktmbuf_alloc() failed (%u)\n", i);
                        ret = -1;
                }
-               m[i]->pkt.data = RTE_PTR_ADD(m[i]->pkt.data, 64);
+               m[i]->data = RTE_PTR_ADD(m[i]->data, 64);
        }

        /* free them */
@@ -451,8 +451,8 @@ test_pktmbuf_pool_ptr(void)
                        printf("rte_pktmbuf_alloc() failed (%u)\n", i);
                        ret = -1;
                }
-               if (m[i]->pkt.data != RTE_PTR_ADD(m[i]->buf_addr, 
RTE_PKTMBUF_HEADROOM)) {
-                       printf ("pkt.data pointer not set properly\n");
+               if (m[i]->data != RTE_PTR_ADD(m[i]->buf_addr, 
RTE_PKTMBUF_HEADROOM)) {
+                       printf ("data pointer not set properly\n");
                        ret = -1;
                }
        }
@@ -493,7 +493,7 @@ test_pktmbuf_free_segment(void)
                        mb = m[i];
                        while(mb != NULL) {
                                mt = mb;
-                               mb = mb->pkt.next;
+                               mb = mb->next;
                                rte_pktmbuf_free_seg(mt);
                        }
                }
diff --git a/app/test/test_sched.c b/app/test/test_sched.c
index 0de5b1c..f729ade 100644
--- a/app/test/test_sched.c
+++ b/app/test/test_sched.c
@@ -148,8 +148,8 @@ prepare_pkt(struct rte_mbuf *mbuf)
        rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, 
e_RTE_METER_YELLOW);

        /* 64 byte packet */
-       mbuf->pkt.pkt_len  = 60;
-       mbuf->pkt.data_len = 60;
+       mbuf->pkt_len  = 60;
+       mbuf->data_len = 60;
 }


diff --git a/examples/dpdk_qat/crypto.c b/examples/dpdk_qat/crypto.c
index 7606d3d..e519e25 100644
--- a/examples/dpdk_qat/crypto.c
+++ b/examples/dpdk_qat/crypto.c
@@ -183,7 +183,7 @@ struct glob_keys g_crypto_hash_keys = {
  *
  */
 #define PACKET_DATA_START_PHYS(p) \
-               ((p)->buf_physaddr + ((char *)p->pkt.data - (char 
*)p->buf_addr))
+               ((p)->buf_physaddr + ((char *)p->data - (char *)p->buf_addr))

 /*
  * A fixed offset to where the crypto is to be performed, which is the first
@@ -773,7 +773,7 @@ enum crypto_result
 crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 {
        CpaCySymDpOpData *opData =
-                       (CpaCySymDpOpData *) ((char *) (rte_buff->pkt.data)
+                       (CpaCySymDpOpData *) ((char *) (rte_buff->data)
                                        + CRYPTO_OFFSET_TO_OPDATA);
        uint32_t lcore_id;

@@ -785,7 +785,7 @@ crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg 
c, enum hash_alg h)
        bzero(opData, sizeof(CpaCySymDpOpData));

        opData->srcBuffer = opData->dstBuffer = 
PACKET_DATA_START_PHYS(rte_buff);
-       opData->srcBufferLen = opData->dstBufferLen = rte_buff->pkt.data_len;
+       opData->srcBufferLen = opData->dstBufferLen = rte_buff->data_len;
        opData->sessionCtx = qaCoreConf[lcore_id].encryptSessionHandleTbl[c][h];
        opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
                        + CRYPTO_OFFSET_TO_OPDATA;
@@ -805,7 +805,7 @@ crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg 
c, enum hash_alg h)
                        opData->ivLenInBytes = IV_LENGTH_8_BYTES;

                opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
-               opData->messageLenToCipherInBytes = rte_buff->pkt.data_len
+               opData->messageLenToCipherInBytes = rte_buff->data_len
                                - CRYPTO_START_OFFSET;
                /*
                 * Work around for padding, message length has to be a multiple 
of
@@ -818,7 +818,7 @@ crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg 
c, enum hash_alg h)
        if (NO_HASH != h) {

                opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
-               opData->messageLenToHashInBytes = rte_buff->pkt.data_len
+               opData->messageLenToHashInBytes = rte_buff->data_len
                                - HASH_START_OFFSET;
                /*
                 * Work around for padding, message length has to be a multiple 
of block
@@ -831,7 +831,7 @@ crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg 
c, enum hash_alg h)
                 * Assumption: Ok ignore the passed digest pointer and place 
HMAC at end
                 * of packet.
                 */
-               opData->digestResult = rte_buff->buf_physaddr + 
rte_buff->pkt.data_len;
+               opData->digestResult = rte_buff->buf_physaddr + 
rte_buff->data_len;
        }

        if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
@@ -848,7 +848,7 @@ enum crypto_result
 crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
 {

-       CpaCySymDpOpData *opData = (void*) (((char *) rte_buff->pkt.data)
+       CpaCySymDpOpData *opData = (void*) (((char *) rte_buff->data)
                        + CRYPTO_OFFSET_TO_OPDATA);
        uint32_t lcore_id;

@@ -860,7 +860,7 @@ crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg 
c, enum hash_alg h)
        bzero(opData, sizeof(CpaCySymDpOpData));

        opData->dstBuffer = opData->srcBuffer = 
PACKET_DATA_START_PHYS(rte_buff);
-       opData->dstBufferLen = opData->srcBufferLen = rte_buff->pkt.data_len;
+       opData->dstBufferLen = opData->srcBufferLen = rte_buff->data_len;
        opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
                        + CRYPTO_OFFSET_TO_OPDATA;
        opData->sessionCtx = qaCoreConf[lcore_id].decryptSessionHandleTbl[c][h];
@@ -880,7 +880,7 @@ crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg 
c, enum hash_alg h)
                        opData->ivLenInBytes = IV_LENGTH_8_BYTES;

                opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
-               opData->messageLenToCipherInBytes = rte_buff->pkt.data_len
+               opData->messageLenToCipherInBytes = rte_buff->data_len
                                - CRYPTO_START_OFFSET;

                /*
@@ -892,7 +892,7 @@ crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg 
c, enum hash_alg h)
        }
        if (NO_HASH != h) {
                opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
-               opData->messageLenToHashInBytes = rte_buff->pkt.data_len
+               opData->messageLenToHashInBytes = rte_buff->data_len
                                - HASH_START_OFFSET;
                /*
                 * Work around for padding, message length has to be a multiple 
of block
@@ -900,7 +900,7 @@ crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg 
c, enum hash_alg h)
                 */
                opData->messageLenToHashInBytes -= 
opData->messageLenToHashInBytes
                                % HASH_BLOCK_DEFAULT_SIZE;
-               opData->digestResult = rte_buff->buf_physaddr + 
rte_buff->pkt.data_len;
+               opData->digestResult = rte_buff->buf_physaddr + 
rte_buff->data_len;
        }

        if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
diff --git a/examples/dpdk_qat/main.c b/examples/dpdk_qat/main.c
index cdf6832..1401d6b 100644
--- a/examples/dpdk_qat/main.c
+++ b/examples/dpdk_qat/main.c
@@ -384,7 +384,7 @@ main_loop(__attribute__((unused)) void *dummy)
                        }
                }

-               port = dst_ports[pkt->pkt.in_port];
+               port = dst_ports[pkt->in_port];

                /* Transmit the packet */
                nic_tx_send_packet(pkt, (uint8_t)port);
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index 0bc149d..d9a85b5 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -302,16 +302,16 @@ main_loop(__attribute__((unused)) void *arg)
                        if (m == NULL)
                                continue;

-                       ret = read(tap_fd, m->pkt.data, MAX_PACKET_SZ);
+                       ret = read(tap_fd, m->data, MAX_PACKET_SZ);
                        lcore_stats[lcore_id].rx++;
                        if (unlikely(ret < 0)) {
                                FATAL_ERROR("Reading from %s interface failed",
                                            tap_name);
                        }
-                       m->pkt.nb_segs = 1;
-                       m->pkt.next = NULL;
-                       m->pkt.pkt_len = (uint16_t)ret;
-                       m->pkt.data_len = (uint16_t)ret;
+                       m->nb_segs = 1;
+                       m->next = NULL;
+                       m->pkt_len = (uint16_t)ret;
+                       m->data_len = (uint16_t)ret;
                        ret = rte_eth_tx_burst(port_ids[lcore_id], 0, &m, 1);
                        if (unlikely(ret < 1)) {
                                rte_pktmbuf_free(m);
diff --git a/examples/ip_reassembly/ipv4_rsmbl.h 
b/examples/ip_reassembly/ipv4_rsmbl.h
index 58ec1ee..9b647fb 100644
--- a/examples/ip_reassembly/ipv4_rsmbl.h
+++ b/examples/ip_reassembly/ipv4_rsmbl.h
@@ -168,20 +168,20 @@ ipv4_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
        struct rte_mbuf *ms;

        /* adjust start of the last fragment data. */
-       rte_pktmbuf_adj(mp, (uint16_t)(mp->pkt.vlan_macip.f.l2_len +
-               mp->pkt.vlan_macip.f.l3_len));
+       rte_pktmbuf_adj(mp, (uint16_t)(mp->vlan_macip.f.l2_len +
+               mp->vlan_macip.f.l3_len));

        /* chain two fragments. */
        ms = rte_pktmbuf_lastseg(mn);
-       ms->pkt.next = mp;
+       ms->next = mp;

        /* accumulate number of segments and total length. */
-       mn->pkt.nb_segs = (uint8_t)(mn->pkt.nb_segs + mp->pkt.nb_segs);
-       mn->pkt.pkt_len += mp->pkt.pkt_len;
+       mn->nb_segs = (uint8_t)(mn->nb_segs + mp->nb_segs);
+       mn->pkt_len += mp->pkt_len;

        /* reset pkt_len and nb_segs for chained fragment. */
-       mp->pkt.pkt_len = mp->pkt.data_len;
-       mp->pkt.nb_segs = 1;
+       mp->pkt_len = mp->data_len;
+       mp->nb_segs = 1;
 }

 /*
@@ -233,10 +233,10 @@ ipv4_frag_reassemble(const struct ipv4_frag_pkt *fp)

        /* update ipv4 header for the reassmebled packet */
        ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
-               m->pkt.vlan_macip.f.l2_len);
+               m->vlan_macip.f.l2_len);

        ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
-               m->pkt.vlan_macip.f.l3_len));
+               m->vlan_macip.f.l3_len));
        ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
                rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
        ip_hdr->hdr_checksum = 0;
@@ -377,7 +377,7 @@ ipv4_frag_mbuf(struct ipv4_frag_tbl *tbl, struct 
ipv4_frag_death_row *dr,

        ip_ofs *= IPV4_HDR_OFFSET_UNITS;
        ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
-               mb->pkt.vlan_macip.f.l3_len);
+               mb->vlan_macip.f.l3_len);

        IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
                "mbuf: %p, tms: %" PRIu64
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 4880a5f..5c5626a 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -655,7 +655,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, 
uint32_t queue,

 #ifdef DO_RFC_1812_CHECKS
                /* Check to make sure the packet is valid (RFC1812) */
-               if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+               if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
                        rte_pktmbuf_free(m);
                        return;
                }
@@ -680,8 +680,8 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, 
uint32_t queue,
                        dr = &qconf->death_row;

                        /* prepare mbuf: setup l2_len/l3_len. */
-                       m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
-                       m->pkt.vlan_macip.f.l3_len = sizeof(*ipv4_hdr);
+                       m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
+                       m->vlan_macip.f.l3_len = sizeof(*ipv4_hdr);

                        /* process this fragment. */
                        if ((mo = ipv4_frag_mbuf(tbl, dr, m, tms, ipv4_hdr,
diff --git a/examples/ipv4_frag/main.c b/examples/ipv4_frag/main.c
index 93664c8..b950b87 100644
--- a/examples/ipv4_frag/main.c
+++ b/examples/ipv4_frag/main.c
@@ -257,7 +257,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t port_in)
        len = qconf->tx_mbufs[port_out].len;

        /* if we don't need to do any fragmentation */
-       if (likely (IPV4_MTU_DEFAULT  >= m->pkt.pkt_len)) {
+       if (likely (IPV4_MTU_DEFAULT  >= m->pkt_len)) {
                qconf->tx_mbufs[port_out].m_table[len] = m;
                len2 = 1;
        } else {
@@ -283,7 +283,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t port_in)
                        rte_panic("No headroom in mbuf.\n");
                }

-               m->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+               m->vlan_macip.f.l2_len = sizeof(struct ether_hdr);

                ether_addr_copy(&remote_eth_addr, &eth_hdr->d_addr);
                ether_addr_copy(&ports_eth_addr[port_out], &eth_hdr->s_addr);
diff --git a/examples/ipv4_frag/rte_ipv4_frag.h 
b/examples/ipv4_frag/rte_ipv4_frag.h
index 84fa9c9..6234224 100644
--- a/examples/ipv4_frag/rte_ipv4_frag.h
+++ b/examples/ipv4_frag/rte_ipv4_frag.h
@@ -145,9 +145,9 @@ static inline int32_t rte_ipv4_fragmentation(struct 
rte_mbuf *pkt_in,

        /* Fragment size should be a multiply of 8. */
        RTE_IPV4_FRAG_ASSERT(IPV4_MAX_FRAGS_PER_PACKET * frag_size >=
-           (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr)));
+           (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr)));

-       in_hdr = (struct ipv4_hdr*) pkt_in->pkt.data;
+       in_hdr = (struct ipv4_hdr*) pkt_in->data;
        flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);

        /* If Don't Fragment flag is set */
@@ -156,7 +156,7 @@ static inline int32_t rte_ipv4_fragmentation(struct 
rte_mbuf *pkt_in,

        /* Check that pkts_out is big enough to hold all fragments */
        if (unlikely (frag_size * nb_pkts_out <
-           (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr))))
+           (uint16_t)(pkt_in->pkt_len - sizeof (struct ipv4_hdr))))
                return (-EINVAL);

        in_seg = pkt_in;
@@ -178,8 +178,8 @@ static inline int32_t rte_ipv4_fragmentation(struct 
rte_mbuf *pkt_in,
                }

                /* Reserve space for the IP header that will be built later */
-               out_pkt->pkt.data_len = sizeof(struct ipv4_hdr);
-               out_pkt->pkt.pkt_len = sizeof(struct ipv4_hdr);
+               out_pkt->data_len = sizeof(struct ipv4_hdr);
+               out_pkt->pkt_len = sizeof(struct ipv4_hdr);

                out_seg_prev = out_pkt;
                more_out_segs = 1;
@@ -194,30 +194,30 @@ static inline int32_t rte_ipv4_fragmentation(struct 
rte_mbuf *pkt_in,
                                __free_fragments(pkts_out, out_pkt_pos);
                                return (-ENOMEM);
                        }
-                       out_seg_prev->pkt.next = out_seg;
+                       out_seg_prev->next = out_seg;
                        out_seg_prev = out_seg;

                        /* Prepare indirect buffer */
                        rte_pktmbuf_attach(out_seg, in_seg);
-                       len = mtu_size - out_pkt->pkt.pkt_len;
-                       if (len > (in_seg->pkt.data_len - in_seg_data_pos)) {
-                               len = in_seg->pkt.data_len - in_seg_data_pos;
+                       len = mtu_size - out_pkt->pkt_len;
+                       if (len > (in_seg->data_len - in_seg_data_pos)) {
+                               len = in_seg->data_len - in_seg_data_pos;
                        }
-                       out_seg->pkt.data = (char*) in_seg->pkt.data + 
(uint16_t)in_seg_data_pos;
-                       out_seg->pkt.data_len = (uint16_t)len;
-                       out_pkt->pkt.pkt_len = (uint16_t)(len +
-                           out_pkt->pkt.pkt_len);
-                       out_pkt->pkt.nb_segs += 1;
+                       out_seg->data = (char*) in_seg->data + 
(uint16_t)in_seg_data_pos;
+                       out_seg->data_len = (uint16_t)len;
+                       out_pkt->pkt_len = (uint16_t)(len +
+                           out_pkt->pkt_len);
+                       out_pkt->nb_segs += 1;
                        in_seg_data_pos += len;

                        /* Current output packet (i.e. fragment) done ? */
-                       if (unlikely(out_pkt->pkt.pkt_len >= mtu_size)) {
+                       if (unlikely(out_pkt->pkt_len >= mtu_size)) {
                                more_out_segs = 0;
                        }

                        /* Current input segment done ? */
-                       if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
-                               in_seg = in_seg->pkt.next;
+                       if (unlikely(in_seg_data_pos == in_seg->data_len)) {
+                               in_seg = in_seg->next;
                                in_seg_data_pos = 0;

                                if (unlikely(in_seg == NULL)) {
@@ -228,17 +228,17 @@ static inline int32_t rte_ipv4_fragmentation(struct 
rte_mbuf *pkt_in,

                /* Build the IP header */

-               out_hdr = (struct ipv4_hdr*) out_pkt->pkt.data;
+               out_hdr = (struct ipv4_hdr*) out_pkt->data;

                __fill_ipv4hdr_frag(out_hdr, in_hdr,
-                   (uint16_t)out_pkt->pkt.pkt_len,
+                   (uint16_t)out_pkt->pkt_len,
                    flag_offset, fragment_offset, more_in_segs);

                fragment_offset = (uint16_t)(fragment_offset +
-                   out_pkt->pkt.pkt_len - sizeof(struct ipv4_hdr));
+                   out_pkt->pkt_len - sizeof(struct ipv4_hdr));

                out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
-               out_pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+               out_pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);

                /* Write the fragment to the output list */
                pkts_out[out_pkt_pos] = out_pkt;
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 3967d7a..9c57ce0 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -329,17 +329,17 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
        }

        /* prepend new header */
-       hdr->pkt.next = pkt;
+       hdr->next = pkt;


        /* update header's fields */
-       hdr->pkt.pkt_len = (uint16_t)(hdr->pkt.data_len + pkt->pkt.pkt_len);
-       hdr->pkt.nb_segs = (uint8_t)(pkt->pkt.nb_segs + 1);
+       hdr->pkt_len = (uint16_t)(hdr->data_len + pkt->pkt_len);
+       hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);

        /* copy metadata from source packet*/
-       hdr->pkt.in_port = pkt->pkt.in_port;
-       hdr->pkt.vlan_macip = pkt->pkt.vlan_macip;
-       hdr->pkt.hash = pkt->pkt.hash;
+       hdr->in_port = pkt->in_port;
+       hdr->vlan_macip = pkt->vlan_macip;
+       hdr->hash = pkt->hash;

        hdr->ol_flags = pkt->ol_flags;

@@ -412,7 +412,7 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf 
*qconf)

        /* Should we use rte_pktmbuf_clone() or not. */
        use_clone = (port_num <= MCAST_CLONE_PORTS &&
-           m->pkt.nb_segs <= MCAST_CLONE_SEGS);
+           m->nb_segs <= MCAST_CLONE_SEGS);

        /* Mark all packet's segments as referenced port_num times */
        if (use_clone == 0)
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index 219f802..a991809 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -687,7 +687,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,

 #ifdef DO_RFC_1812_CHECKS
                /* Check to make sure the packet is valid (RFC1812) */
-               if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+               if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
                        rte_pktmbuf_free(m);
                        return;
                }
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index fb811fa..7420d89 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -489,7 +489,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, 
lookup_struct_t * l3fwd

 #ifdef DO_RFC_1812_CHECKS
        /* Check to make sure the packet is valid (RFC1812) */
-       if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+       if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
                rte_pktmbuf_free(m);
                return;
        }
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 1ba4ca2..3eff4ea 100755
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -705,19 +705,19 @@ simple_ipv4_fwd_4pkts(struct rte_mbuf* m[4], uint8_t 
portid, struct lcore_conf *
 #ifdef DO_RFC_1812_CHECKS
        /* Check to make sure the packet is valid (RFC1812) */
        uint8_t valid_mask = MASK_ALL_PKTS;
-       if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt.pkt_len) < 0) {
+       if (is_valid_ipv4_pkt(ipv4_hdr[0], m[0]->pkt_len) < 0) {
                rte_pktmbuf_free(m[0]);
                valid_mask &= EXECLUDE_1ST_PKT;
        }
-       if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt.pkt_len) < 0) {
+       if (is_valid_ipv4_pkt(ipv4_hdr[1], m[1]->pkt_len) < 0) {
                rte_pktmbuf_free(m[1]);
                valid_mask &= EXECLUDE_2ND_PKT;
        }
-       if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt.pkt_len) < 0) {
+       if (is_valid_ipv4_pkt(ipv4_hdr[2], m[2]->pkt_len) < 0) {
                rte_pktmbuf_free(m[2]);
                valid_mask &= EXECLUDE_3RD_PKT;
        }
-       if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt.pkt_len) < 0) {
+       if (is_valid_ipv4_pkt(ipv4_hdr[3], m[3]->pkt_len) < 0) {
                rte_pktmbuf_free(m[3]);
                valid_mask &= EXECLUDE_4TH_PKT;
        }
@@ -905,7 +905,7 @@ l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, 
struct lcore_conf *qcon

 #ifdef DO_RFC_1812_CHECKS
                /* Check to make sure the packet is valid (RFC1812) */
-               if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+               if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt_len) < 0) {
                        rte_pktmbuf_free(m);
                        return;
                }
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index e85abdb..bfa7c58 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -540,7 +540,7 @@ app_lcore_worker(
                        ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);

                        if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, 
&port) != 0)) {
-                               port = pkt->pkt.in_port;
+                               port = pkt->in_port;
                        }

                        pos = lp->mbuf_out[port].n_mbufs;
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c 
b/examples/multi_process/client_server_mp/mp_client/client.c
index 7543db4..187c80f 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -211,7 +211,7 @@ enqueue_packet(struct rte_mbuf *buf, uint8_t port)
 static void
 handle_packet(struct rte_mbuf *buf)
 {
-       const uint8_t in_port = buf->pkt.in_port;
+       const uint8_t in_port = buf->in_port;
        const uint8_t out_port = output_ports[in_port];

        enqueue_packet(buf, out_port);
diff --git a/examples/quota_watermark/qw/main.c 
b/examples/quota_watermark/qw/main.c
index 21e0fc7..421b43d 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -104,8 +104,8 @@ static void send_pause_frame(uint8_t port_id, uint16_t 
duration)
     pause_frame->opcode = rte_cpu_to_be_16(0x0001);
     pause_frame->param  = rte_cpu_to_be_16(duration);

-    mbuf->pkt.pkt_len  = 60;
-    mbuf->pkt.data_len = 60;
+    mbuf->pkt_len  = 60;
+    mbuf->data_len = 60;

     rte_eth_tx_burst(port_id, 0, &mbuf, 1);
 }
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 816a71a..26cfc8e 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -815,7 +815,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf 
**pkts, uint32_t count)
                vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;

                /* Copy mbuf data to buffer */
-               rte_memcpy((void *)(uintptr_t)buff_addr, (const 
void*)buff->pkt.data, rte_pktmbuf_data_len(buff));
+               rte_memcpy((void *)(uintptr_t)buff_addr, (const 
void*)buff->data, rte_pktmbuf_data_len(buff));

                res_cur_idx++;
                packet_success++;
@@ -877,7 +877,7 @@ link_vmdq(struct virtio_net *dev, struct rte_mbuf *m)
        int i, ret;

        /* Learn MAC address of guest device from packet */
-       pkt_hdr = (struct ether_hdr *)m->pkt.data;
+       pkt_hdr = (struct ether_hdr *)m->data;

        dev_ll = ll_root_used;

@@ -965,7 +965,7 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
        struct ether_hdr *pkt_hdr;
        uint64_t ret = 0;

-       pkt_hdr = (struct ether_hdr *)m->pkt.data;
+       pkt_hdr = (struct ether_hdr *)m->data;

        /*get the used devices list*/
        dev_ll = ll_root_used;
@@ -1038,22 +1038,22 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf 
*m, struct rte_mempool *
                return;
        }

-       mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN;
-       mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+       mbuf->data_len = m->data_len + VLAN_HLEN;
+       mbuf->pkt_len = mbuf->data_len;

        /* Copy ethernet header to mbuf. */
-       rte_memcpy((void*)mbuf->pkt.data, (const void*)m->pkt.data, ETH_HLEN);
+       rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN);


        /* Setup vlan header. Bytes need to be re-ordered for network with 
htons()*/
-       vlan_hdr = (struct vlan_ethhdr *) mbuf->pkt.data;
+       vlan_hdr = (struct vlan_ethhdr *) mbuf->data;
        vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
        vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
        vlan_hdr->h_vlan_TCI = htons(vlan_tag);

        /* Copy the remaining packet contents to the mbuf. */
-       rte_memcpy((void*) ((uint8_t*)mbuf->pkt.data + VLAN_ETH_HLEN),
-               (const void*) ((uint8_t*)m->pkt.data + ETH_HLEN), 
(m->pkt.data_len - ETH_HLEN));
+       rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN),
+               (const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - 
ETH_HLEN));
        tx_q->m_table[len] = mbuf;
        len++;
        if (enable_stats) {
@@ -1143,8 +1143,8 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool 
*mbuf_pool)
                vq->used->ring[used_idx].len = 0;

                /* Setup dummy mbuf. This is copied to a real mbuf if 
transmitted out the physical port. */
-               m.pkt.data_len = desc->len;
-               m.pkt.data = (void*)(uintptr_t)buff_addr;
+               m.data_len = desc->len;
+               m.data = (void*)(uintptr_t)buff_addr;

                PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);

diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
index eafc0aa..2cf0029 100644
--- a/examples/vhost_xen/main.c
+++ b/examples/vhost_xen/main.c
@@ -677,7 +677,7 @@ virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf 
**pkts, uint32_t count)
                vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;

                /* Copy mbuf data to buffer */
-               rte_memcpy((void *)(uintptr_t)buff_addr, (const 
void*)buff->pkt.data, rte_pktmbuf_data_len(buff));
+               rte_memcpy((void *)(uintptr_t)buff_addr, (const 
void*)buff->data, rte_pktmbuf_data_len(buff));

                res_cur_idx++;
                packet_success++;
@@ -808,7 +808,7 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
        struct ether_hdr *pkt_hdr;
        uint64_t ret = 0;

-       pkt_hdr = (struct ether_hdr *)m->pkt.data;
+       pkt_hdr = (struct ether_hdr *)m->data;

        /*get the used devices list*/
        dev_ll = ll_root_used;
@@ -879,22 +879,22 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf 
*m, struct rte_mempool *
        if(!mbuf)
                return;

-       mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN;
-       mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+       mbuf->data_len = m->data_len + VLAN_HLEN;
+       mbuf->pkt_len = mbuf->data_len;

        /* Copy ethernet header to mbuf. */
-       rte_memcpy((void*)mbuf->pkt.data, (const void*)m->pkt.data, ETH_HLEN);
+       rte_memcpy((void*)mbuf->data, (const void*)m->data, ETH_HLEN);


        /* Setup vlan header. Bytes need to be re-ordered for network with 
htons()*/
-       vlan_hdr = (struct vlan_ethhdr *) mbuf->pkt.data;
+       vlan_hdr = (struct vlan_ethhdr *) mbuf->data;
        vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
        vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
        vlan_hdr->h_vlan_TCI = htons(vlan_tag);

        /* Copy the remaining packet contents to the mbuf. */
-       rte_memcpy((void*) ((uint8_t*)mbuf->pkt.data + VLAN_ETH_HLEN),
-               (const void*) ((uint8_t*)m->pkt.data + ETH_HLEN), 
(m->pkt.data_len - ETH_HLEN));
+       rte_memcpy((void*) ((uint8_t*)mbuf->data + VLAN_ETH_HLEN),
+               (const void*) ((uint8_t*)m->data + ETH_HLEN), (m->data_len - 
ETH_HLEN));
        tx_q->m_table[len] = mbuf;
        len++;
        if (enable_stats) {
@@ -980,9 +980,9 @@ virtio_dev_tx(struct virtio_net* dev, struct rte_mempool 
*mbuf_pool)
                rte_prefetch0((void*)(uintptr_t)buff_addr);

                /* Setup dummy mbuf. This is copied to a real mbuf if 
transmitted out the physical port. */
-               m.pkt.data_len = desc->len;
-               m.pkt.data = (void*)(uintptr_t)buff_addr;
-               m.pkt.nb_segs = 1; 
+               m.data_len = desc->len;
+               m.data = (void*)(uintptr_t)buff_addr;
+               m.nb_segs = 1;

                virtio_tx_route(dev, &m, mbuf_pool, 0);

diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index b2e2f0f..c229525 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -104,12 +104,12 @@ rte_pktmbuf_init(struct rte_mempool *mp,
        m->buf_len = (uint16_t)buf_len;

        /* keep some headroom between start of buffer and data */
-       m->pkt.data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, 
m->buf_len);
+       m->data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, 
m->buf_len);

        /* init some constant fields */
        m->pool = mp;
-       m->pkt.nb_segs = 1;
-       m->pkt.in_port = 0xff;
+       m->nb_segs = 1;
+       m->in_port = 0xff;
 }

 /* do some sanity checks on a mbuf: panic if it fails */
@@ -140,10 +140,10 @@ rte_mbuf_sanity_check(const struct rte_mbuf *m, int 
is_header)
        if (is_header == 0)
                return;

-       nb_segs = m->pkt.nb_segs;
+       nb_segs = m->nb_segs;
        m_seg = m;
        while (m_seg && nb_segs != 0) {
-               m_seg = m_seg->pkt.next;
+               m_seg = m_seg->next;
                nb_segs --;
        }
        if (nb_segs != 0)
@@ -162,22 +162,22 @@ rte_pktmbuf_dump(const struct rte_mbuf *m, unsigned 
dump_len)
        printf("dump mbuf at 0x%p, phys=%"PRIx64", buf_len=%u\n",
               m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
        printf("  pkt_len=%"PRIu32", ol_flags=%"PRIx16", nb_segs=%u, "
-              "in_port=%u\n", m->pkt.pkt_len, m->ol_flags,
-              (unsigned)m->pkt.nb_segs, (unsigned)m->pkt.in_port);
-       nb_segs = m->pkt.nb_segs;
+              "in_port=%u\n", m->pkt_len, m->ol_flags,
+              (unsigned)m->nb_segs, (unsigned)m->in_port);
+       nb_segs = m->nb_segs;

        while (m && nb_segs != 0) {
                __rte_mbuf_sanity_check(m, 0);

                printf("  segment at 0x%p, data=0x%p, data_len=%u\n",
-                      m, m->pkt.data, (unsigned)m->pkt.data_len);
+                      m, m->data, (unsigned)m->data_len);
                len = dump_len;
-               if (len > m->pkt.data_len)
-                       len = m->pkt.data_len;
+               if (len > m->data_len)
+                       len = m->data_len;
                if (len != 0)
-                       rte_hexdump(NULL, m->pkt.data, len);
+                       rte_hexdump(NULL, m->data, len);
                dump_len -= len;
-               m = m->pkt.next;
+               m = m->next;
                nb_segs --;
        }
 }
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 22e1ac1..803b223 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -125,32 +125,6 @@ union rte_vlan_macip {
 #define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)

 /**
- * A packet message buffer.
- */
-struct rte_pktmbuf {
-       /* valid for any segment */
-       struct rte_mbuf *next;  /**< Next segment of scattered packet. */
-       void* data;             /**< Start address of data in segment buffer. */
-       uint16_t data_len;      /**< Amount of data in segment buffer. */
-
-       /* these fields are valid for first segment only */
-       uint8_t nb_segs;        /**< Number of segments. */
-       uint8_t in_port;        /**< Input port. */
-       uint32_t pkt_len;       /**< Total pkt len: sum of all segment 
data_len. */
-
-       /* offload features */
-       union rte_vlan_macip vlan_macip;
-       union {
-               uint32_t rss;       /**< RSS hash result if RSS enabled */
-               struct {
-                       uint16_t hash;
-                       uint16_t id;
-               } fdir;             /**< Filter identifier if FDIR enabled */
-               uint32_t sched;     /**< Hierarchical scheduler */
-       } hash;                 /**< hash information */
-};
-
-/**
  * The generic rte_mbuf, containing a packet mbuf.
  */
 struct rte_mbuf {
@@ -177,7 +151,26 @@ struct rte_mbuf {
        uint16_t reserved;             /**< Unused field. Required for padding. 
*/
        uint16_t ol_flags;            /**< Offload features. */

-       struct rte_pktmbuf pkt;
+       /* valid for any segment */
+       struct rte_mbuf *next;  /**< Next segment of scattered packet. */
+       void* data;             /**< Start address of data in segment buffer. */
+       uint16_t data_len;      /**< Amount of data in segment buffer. */
+
+       /* these fields are valid for first segment only */
+       uint8_t nb_segs;        /**< Number of segments. */
+       uint8_t in_port;        /**< Input port. */
+       uint32_t pkt_len;       /**< Total pkt len: sum of all segment 
data_len. */
+
+       /* offload features, valid for first segment only */
+       union rte_vlan_macip vlan_macip;
+       union {
+               uint32_t rss;       /**< RSS hash result if RSS enabled */
+               struct {
+                       uint16_t hash;
+                       uint16_t id;
+               } fdir;             /**< Filter identifier if FDIR enabled */
+               uint32_t sched;     /**< Hierarchical scheduler */
+       } hash;                 /**< hash information */
 } __rte_cache_aligned;

 /**
@@ -444,18 +437,18 @@ static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
 {
        uint32_t buf_ofs;

-       m->pkt.next = NULL;
-       m->pkt.pkt_len = 0;
-       m->pkt.vlan_macip.data = 0;
-       m->pkt.nb_segs = 1;
-       m->pkt.in_port = 0xff;
+       m->next = NULL;
+       m->pkt_len = 0;
+       m->vlan_macip.data = 0;
+       m->nb_segs = 1;
+       m->in_port = 0xff;

        m->ol_flags = 0;
        buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
                        RTE_PKTMBUF_HEADROOM : m->buf_len;
-       m->pkt.data = (char*) m->buf_addr + buf_ofs;
+       m->data = (char*) m->buf_addr + buf_ofs;

-       m->pkt.data_len = 0;
+       m->data_len = 0;
        __rte_mbuf_sanity_check(m, 1);
 }

@@ -509,11 +502,16 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf 
*mi, struct rte_mbuf *md)
        mi->buf_addr = md->buf_addr;
        mi->buf_len = md->buf_len;

-       mi->pkt = md->pkt;
+       mi->next = md->next;
+       mi->data = md->data;
+       mi->data_len = md->data_len;
+       mi->in_port = md->in_port;
+       mi->vlan_macip = md->vlan_macip;
+       mi->hash = md->hash;

-       mi->pkt.next = NULL;
-       mi->pkt.pkt_len = mi->pkt.data_len;
-       mi->pkt.nb_segs = 1;
+       mi->next = NULL;
+       mi->pkt_len = mi->data_len;
+       mi->nb_segs = 1;
        mi->ol_flags = md->ol_flags;

        __rte_mbuf_sanity_check(mi, 1);
@@ -543,9 +541,9 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)

        buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
                        RTE_PKTMBUF_HEADROOM : m->buf_len;
-       m->pkt.data = (char*) m->buf_addr + buf_ofs;
+       m->data = (char*) m->buf_addr + buf_ofs;

-       m->pkt.data_len = 0;
+       m->data_len = 0;
 }

 #endif /* RTE_MBUF_REFCNT */
@@ -612,7 +610,7 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
        __rte_mbuf_sanity_check(m, 1);

        while (m != NULL) {
-               m_next = m->pkt.next;
+               m_next = m->next;
                rte_pktmbuf_free_seg(m);
                m = m_next;
        }
@@ -648,21 +646,21 @@ static inline struct rte_mbuf *rte_pktmbuf_clone(struct 
rte_mbuf *md,
                return (NULL);

        mi = mc;
-       prev = &mi->pkt.next;
-       pktlen = md->pkt.pkt_len;
+       prev = &mi->next;
+       pktlen = md->pkt_len;
        nseg = 0;

        do {
                nseg++;
                rte_pktmbuf_attach(mi, md);
                *prev = mi;
-               prev = &mi->pkt.next;
-       } while ((md = md->pkt.next) != NULL &&
+               prev = &mi->next;
+       } while ((md = md->next) != NULL &&
            (mi = rte_pktmbuf_alloc(mp)) != NULL);

        *prev = NULL;
-       mc->pkt.nb_segs = nseg;
-       mc->pkt.pkt_len = pktlen;
+       mc->nb_segs = nseg;
+       mc->pkt_len = pktlen;

        /* Allocation of new indirect segment failed */
        if (unlikely (mi == NULL)) {
@@ -691,7 +689,7 @@ static inline void rte_pktmbuf_refcnt_update(struct 
rte_mbuf *m, int16_t v)

        do {
                rte_mbuf_refcnt_update(m, v);
-       } while ((m = m->pkt.next) != NULL);
+       } while ((m = m->next) != NULL);
 }

 #endif /* RTE_MBUF_REFCNT */
@@ -707,7 +705,7 @@ static inline void rte_pktmbuf_refcnt_update(struct 
rte_mbuf *m, int16_t v)
 static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
 {
        __rte_mbuf_sanity_check(m, 1);
-       return (uint16_t) ((char*) m->pkt.data - (char*) m->buf_addr);
+       return (uint16_t) ((char*) m->data - (char*) m->buf_addr);
 }

 /**
@@ -722,7 +720,7 @@ static inline uint16_t rte_pktmbuf_tailroom(const struct 
rte_mbuf *m)
 {
        __rte_mbuf_sanity_check(m, 1);
        return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
-                         m->pkt.data_len);
+                         m->data_len);
 }

 /**
@@ -738,8 +736,8 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct 
rte_mbuf *m)
        struct rte_mbuf *m2 = (struct rte_mbuf *)m;

        __rte_mbuf_sanity_check(m, 1);
-       while (m2->pkt.next != NULL)
-               m2 = m2->pkt.next;
+       while (m2->next != NULL)
+               m2 = m2->next;
        return m2;
 }

@@ -755,7 +753,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct 
rte_mbuf *m)
  * @param t
  *   The type to cast the result into.
  */
-#define rte_pktmbuf_mtod(m, t) ((t)((m)->pkt.data))
+#define rte_pktmbuf_mtod(m, t) ((t)((m)->data))

 /**
  * A macro that returns the length of the packet.
@@ -765,7 +763,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct 
rte_mbuf *m)
  * @param m
  *   The packet mbuf.
  */
-#define rte_pktmbuf_pkt_len(m) ((m)->pkt.pkt_len)
+#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

 /**
  * A macro that returns the length of the segment.
@@ -775,7 +773,7 @@ static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct 
rte_mbuf *m)
  * @param m
  *   The packet mbuf.
  */
-#define rte_pktmbuf_data_len(m) ((m)->pkt.data_len)
+#define rte_pktmbuf_data_len(m) ((m)->data_len)

 /**
  * Prepend len bytes to an mbuf data area.
@@ -800,11 +798,11 @@ static inline char *rte_pktmbuf_prepend(struct rte_mbuf 
*m,
        if (unlikely(len > rte_pktmbuf_headroom(m)))
                return NULL;

-       m->pkt.data = (char*) m->pkt.data - len;
-       m->pkt.data_len = (uint16_t)(m->pkt.data_len + len);
-       m->pkt.pkt_len  = (m->pkt.pkt_len + len);
+       m->data = (char*) m->data - len;
+       m->data_len = (uint16_t)(m->data_len + len);
+       m->pkt_len  = (m->pkt_len + len);

-       return (char*) m->pkt.data;
+       return (char*) m->data;
 }

 /**
@@ -833,9 +831,9 @@ static inline char *rte_pktmbuf_append(struct rte_mbuf *m, 
uint16_t len)
        if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
                return NULL;

-       tail = (char*) m_last->pkt.data + m_last->pkt.data_len;
-       m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len + len);
-       m->pkt.pkt_len  = (m->pkt.pkt_len + len);
+       tail = (char*) m_last->data + m_last->data_len;
+       m_last->data_len = (uint16_t)(m_last->data_len + len);
+       m->pkt_len  = (m->pkt_len + len);
        return (char*) tail;
 }

@@ -857,13 +855,13 @@ static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, 
uint16_t len)
 {
        __rte_mbuf_sanity_check(m, 1);

-       if (unlikely(len > m->pkt.data_len))
+       if (unlikely(len > m->data_len))
                return NULL;

-       m->pkt.data_len = (uint16_t)(m->pkt.data_len - len);
-       m->pkt.data = ((char*) m->pkt.data + len);
-       m->pkt.pkt_len  = (m->pkt.pkt_len - len);
-       return (char*) m->pkt.data;
+       m->data_len = (uint16_t)(m->data_len - len);
+       m->data = ((char*) m->data + len);
+       m->pkt_len  = (m->pkt_len - len);
+       return (char*) m->data;
 }

 /**
@@ -887,11 +885,11 @@ static inline int rte_pktmbuf_trim(struct rte_mbuf *m, 
uint16_t len)
        __rte_mbuf_sanity_check(m, 1);

        m_last = rte_pktmbuf_lastseg(m);
-       if (unlikely(len > m_last->pkt.data_len))
+       if (unlikely(len > m_last->data_len))
                return -1;

-       m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len - len);
-       m->pkt.pkt_len  = (m->pkt.pkt_len - len);
+       m_last->data_len = (uint16_t)(m_last->data_len - len);
+       m->pkt_len  = (m->pkt_len - len);
        return 0;
 }

@@ -907,7 +905,7 @@ static inline int rte_pktmbuf_trim(struct rte_mbuf *m, 
uint16_t len)
 static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
 {
        __rte_mbuf_sanity_check(m, 1);
-       return !!(m->pkt.nb_segs == 1);
+       return !!(m->nb_segs == 1);
 }

 /**
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 31f480a..b9e66eb 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -91,7 +91,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)

 #define RTE_MBUF_DATA_DMA_ADDR(mb)             \
        (uint64_t) ((mb)->buf_physaddr +       \
-       (uint64_t) ((char *)((mb)->pkt.data) - (char *)(mb)->buf_addr))
+       (uint64_t) ((char *)((mb)->data) - (char *)(mb)->buf_addr))

 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
        (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
@@ -421,7 +421,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
                                                        PKT_TX_L4_MASK));
                if (tx_ol_req) {
-                       hdrlen = tx_pkt->pkt.vlan_macip;
+                       hdrlen = tx_pkt->vlan_macip;
                        /* If new context to be built or reuse the exist ctx. */
                        ctx = what_ctx_update(txq, tx_ol_req, hdrlen);

@@ -434,7 +434,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * This will always be the number of segments + the number of
                 * Context descriptors required to transmit the packet
                 */
-               nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+               nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);

                /*
                 * The number of descriptors that must be allocated for a
@@ -454,7 +454,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        " tx_first=%u tx_last=%u\n",
                        (unsigned) txq->port_id,
                        (unsigned) txq->queue_id,
-                       (unsigned) tx_pkt->pkt.pkt_len,
+                       (unsigned) tx_pkt->pkt_len,
                        (unsigned) tx_id,
                        (unsigned) tx_last);

@@ -516,7 +516,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                /* Set VLAN Tag offload fields. */
                if (ol_flags & PKT_TX_VLAN_PKT) {
                        cmd_type_len |= E1000_TXD_CMD_VLE;
-                       popts_spec = tx_pkt->pkt.vlan_macip.f.vlan_tci <<
+                       popts_spec = tx_pkt->vlan_macip.f.vlan_tci <<
                                E1000_TXD_VLAN_SHIFT;
                }

@@ -566,7 +566,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        /*
                         * Set up Transmit Data Descriptor.
                         */
-                       slen = m_seg->pkt.data_len;
+                       slen = m_seg->data_len;
                        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);

                        txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
@@ -576,7 +576,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        txe->last_id = tx_last;
                        tx_id = txe->next_id;
                        txe = txn;
-                       m_seg = m_seg->pkt.next;
+                       m_seg = m_seg->next;
                } while (m_seg != NULL);

                /*
@@ -771,20 +771,20 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
                                rxq->crc_len);
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-               rte_packet_prefetch(rxm->pkt.data);
-               rxm->pkt.nb_segs = 1;
-               rxm->pkt.next = NULL;
-               rxm->pkt.pkt_len = pkt_len;
-               rxm->pkt.data_len = pkt_len;
-               rxm->pkt.in_port = rxq->port_id;
+               rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rte_packet_prefetch(rxm->data);
+               rxm->nb_segs = 1;
+               rxm->next = NULL;
+               rxm->pkt_len = pkt_len;
+               rxm->data_len = pkt_len;
+               rxm->in_port = rxq->port_id;

                rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
                rxm->ol_flags = (uint16_t)(rxm->ol_flags |
                                rx_desc_error_to_pkt_flags(rxd.errors));

                /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-               rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+               rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);

                /*
                 * Store the mbuf address into the next entry of the array
@@ -940,8 +940,8 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * Set data length & data buffer address of mbuf.
                 */
                data_len = rte_le_to_cpu_16(rxd.length);
-               rxm->pkt.data_len = data_len;
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rxm->data_len = data_len;
+               rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;

                /*
                 * If this is the first buffer of the received packet,
@@ -953,12 +953,12 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                if (first_seg == NULL) {
                        first_seg = rxm;
-                       first_seg->pkt.pkt_len = data_len;
-                       first_seg->pkt.nb_segs = 1;
+                       first_seg->pkt_len = data_len;
+                       first_seg->nb_segs = 1;
                } else {
-                       first_seg->pkt.pkt_len += data_len;
-                       first_seg->pkt.nb_segs++;
-                       last_seg->pkt.next = rxm;
+                       first_seg->pkt_len += data_len;
+                       first_seg->nb_segs++;
+                       last_seg->next = rxm;
                }

                /*
@@ -981,18 +981,18 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *     mbuf, subtract the length of that CRC part from the
                 *     data length of the previous mbuf.
                 */
-               rxm->pkt.next = NULL;
+               rxm->next = NULL;
                if (unlikely(rxq->crc_len > 0)) {
-                       first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+                       first_seg->pkt_len -= ETHER_CRC_LEN;
                        if (data_len <= ETHER_CRC_LEN) {
                                rte_pktmbuf_free_seg(rxm);
-                               first_seg->pkt.nb_segs--;
-                               last_seg->pkt.data_len = (uint16_t)
-                                       (last_seg->pkt.data_len -
+                               first_seg->nb_segs--;
+                               last_seg->data_len = (uint16_t)
+                                       (last_seg->data_len -
                                         (ETHER_CRC_LEN - data_len));
-                               last_seg->pkt.next = NULL;
+                               last_seg->next = NULL;
                        } else
-                               rxm->pkt.data_len =
+                               rxm->data_len =
                                        (uint16_t) (data_len - ETHER_CRC_LEN);
                }

@@ -1003,17 +1003,17 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *      - IP checksum flag,
                 *      - error flags.
                 */
-               first_seg->pkt.in_port = rxq->port_id;
+               first_seg->in_port = rxq->port_id;

                first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
                first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
                                        rx_desc_error_to_pkt_flags(rxd.errors));

                /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-               rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+               rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);

                /* Prefetch data of first segment, if configured to do so. */
-               rte_packet_prefetch(first_seg->pkt.data);
+               rte_packet_prefetch(first_seg->data);

                /*
                 * Store the mbuf address into the next entry of the array
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index 62ff7bc..da33171 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -85,7 +85,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)

 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
        (uint64_t) ((mb)->buf_physaddr +                   \
-                       (uint64_t) ((char *)((mb)->pkt.data) -     \
+                       (uint64_t) ((char *)((mb)->data) -     \
                                (char *)(mb)->buf_addr))

 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
@@ -354,7 +354,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                tx_pkt = *tx_pkts++;
-               pkt_len = tx_pkt->pkt.pkt_len;
+               pkt_len = tx_pkt->pkt_len;

                RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

@@ -366,10 +366,10 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * for the packet, starting from the current position (tx_id)
                 * in the ring.
                 */
-               tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
+               tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);

                ol_flags = tx_pkt->ol_flags;
-               vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+               vlan_macip_lens = tx_pkt->vlan_macip.data;
                tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);

                /* If a Context Descriptor need be built . */
@@ -516,7 +516,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        /*
                         * Set up transmit descriptor.
                         */
-                       slen = (uint16_t) m_seg->pkt.data_len;
+                       slen = (uint16_t) m_seg->data_len;
                        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
                        txd->read.buffer_addr =
                                rte_cpu_to_le_64(buf_dma_addr);
@@ -527,7 +527,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        txe->last_id = tx_last;
                        tx_id = txe->next_id;
                        txe = txn;
-                       m_seg = m_seg->pkt.next;
+                       m_seg = m_seg->next;
                } while (m_seg != NULL);

                /*
@@ -742,18 +742,18 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
                                      rxq->crc_len);
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-               rte_packet_prefetch(rxm->pkt.data);
-               rxm->pkt.nb_segs = 1;
-               rxm->pkt.next = NULL;
-               rxm->pkt.pkt_len = pkt_len;
-               rxm->pkt.data_len = pkt_len;
-               rxm->pkt.in_port = rxq->port_id;
-
-               rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+               rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rte_packet_prefetch(rxm->data);
+               rxm->nb_segs = 1;
+               rxm->next = NULL;
+               rxm->pkt_len = pkt_len;
+               rxm->data_len = pkt_len;
+               rxm->in_port = rxq->port_id;
+
+               rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
                hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-               rxm->pkt.vlan_macip.f.vlan_tci =
+               rxm->vlan_macip.f.vlan_tci =
                        rte_le_to_cpu_16(rxd.wb.upper.vlan);

                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -918,8 +918,8 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * Set data length & data buffer address of mbuf.
                 */
                data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
-               rxm->pkt.data_len = data_len;
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rxm->data_len = data_len;
+               rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;

                /*
                 * If this is the first buffer of the received packet,
@@ -931,12 +931,12 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                if (first_seg == NULL) {
                        first_seg = rxm;
-                       first_seg->pkt.pkt_len = data_len;
-                       first_seg->pkt.nb_segs = 1;
+                       first_seg->pkt_len = data_len;
+                       first_seg->nb_segs = 1;
                } else {
-                       first_seg->pkt.pkt_len += data_len;
-                       first_seg->pkt.nb_segs++;
-                       last_seg->pkt.next = rxm;
+                       first_seg->pkt_len += data_len;
+                       first_seg->nb_segs++;
+                       last_seg->next = rxm;
                }

                /*
@@ -959,18 +959,18 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *     mbuf, subtract the length of that CRC part from the
                 *     data length of the previous mbuf.
                 */
-               rxm->pkt.next = NULL;
+               rxm->next = NULL;
                if (unlikely(rxq->crc_len > 0)) {
-                       first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+                       first_seg->pkt_len -= ETHER_CRC_LEN;
                        if (data_len <= ETHER_CRC_LEN) {
                                rte_pktmbuf_free_seg(rxm);
-                               first_seg->pkt.nb_segs--;
-                               last_seg->pkt.data_len = (uint16_t)
-                                       (last_seg->pkt.data_len -
+                               first_seg->nb_segs--;
+                               last_seg->data_len = (uint16_t)
+                                       (last_seg->data_len -
                                         (ETHER_CRC_LEN - data_len));
-                               last_seg->pkt.next = NULL;
+                               last_seg->next = NULL;
                        } else
-                               rxm->pkt.data_len =
+                               rxm->data_len =
                                        (uint16_t) (data_len - ETHER_CRC_LEN);
                }

@@ -983,14 +983,14 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *      - VLAN TCI, if any,
                 *      - error flags.
                 */
-               first_seg->pkt.in_port = rxq->port_id;
-               first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+               first_seg->in_port = rxq->port_id;
+               first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;

                /*
                 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
                 * set in the pkt_flags field.
                 */
-               first_seg->pkt.vlan_macip.f.vlan_tci =
+               first_seg->vlan_macip.f.vlan_tci =
                        rte_le_to_cpu_16(rxd.wb.upper.vlan);
                hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -1001,7 +1001,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                first_seg->ol_flags = pkt_flags;

                /* Prefetch data of first segment, if configured to do so. */
-               rte_packet_prefetch(first_seg->pkt.data);
+               rte_packet_prefetch(first_seg->data);

                /*
                 * Store the mbuf address into the next entry of the array
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 76448ab..6cb1640 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -170,7 +170,7 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)

        for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
                buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
-               pkt_len = (*pkts)->pkt.data_len;
+               pkt_len = (*pkts)->data_len;

                /* write data to descriptor */
                txdp->read.buffer_addr = buf_dma_addr;
@@ -189,7 +189,7 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
        uint32_t pkt_len;

        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(*pkts);
-       pkt_len = (*pkts)->pkt.data_len;
+       pkt_len = (*pkts)->data_len;

        /* write data to descriptor */
        txdp->read.buffer_addr = buf_dma_addr;
@@ -562,7 +562,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                new_ctx = 0;
                tx_pkt = *tx_pkts++;
-               pkt_len = tx_pkt->pkt.pkt_len;
+               pkt_len = tx_pkt->pkt_len;

                RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

@@ -571,7 +571,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * are needed for offload functionality.
                 */
                ol_flags = tx_pkt->ol_flags;
-               vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+               vlan_macip_lens = tx_pkt->vlan_macip.data;

                /* If hardware offload required */
                tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
@@ -589,7 +589,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * This will always be the number of segments + the number of
                 * Context descriptors required to transmit the packet
                 */
-               nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+               nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);

                /*
                 * The number of descriptors that must be allocated for a
@@ -749,7 +749,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        /*
                         * Set up Transmit Data Descriptor.
                         */
-                       slen = m_seg->pkt.data_len;
+                       slen = m_seg->data_len;
                        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
                        txd->read.buffer_addr =
                                rte_cpu_to_le_64(buf_dma_addr);
@@ -760,7 +760,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        txe->last_id = tx_last;
                        tx_id = txe->next_id;
                        txe = txn;
-                       m_seg = m_seg->pkt.next;
+                       m_seg = m_seg->next;
                } while (m_seg != NULL);

                /*
@@ -929,10 +929,10 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
                        mb = rxep[j].mbuf;
                        pkt_len = (uint16_t)(rxdp[j].wb.upper.length -
                                                        rxq->crc_len);
-                       mb->pkt.data_len = pkt_len;
-                       mb->pkt.pkt_len = pkt_len;
-                       mb->pkt.vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
-                       mb->pkt.hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+                       mb->data_len = pkt_len;
+                       mb->pkt_len = pkt_len;
+                       mb->vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
+                       mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;

                        /* convert descriptor fields to rte mbuf flags */
                        mb->ol_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
@@ -987,10 +987,10 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
                /* populate the static rte mbuf fields */
                mb = rxep[i].mbuf;
                rte_mbuf_refcnt_set(mb, 1);
-               mb->pkt.next = NULL;
-               mb->pkt.data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
-               mb->pkt.nb_segs = 1;
-               mb->pkt.in_port = rxq->port_id;
+               mb->next = NULL;
+               mb->data = (char *)mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+               mb->nb_segs = 1;
+               mb->in_port = rxq->port_id;

                /* populate the descriptors */
                dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
@@ -1239,17 +1239,17 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
                                      rxq->crc_len);
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-               rte_packet_prefetch(rxm->pkt.data);
-               rxm->pkt.nb_segs = 1;
-               rxm->pkt.next = NULL;
-               rxm->pkt.pkt_len = pkt_len;
-               rxm->pkt.data_len = pkt_len;
-               rxm->pkt.in_port = rxq->port_id;
+               rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rte_packet_prefetch(rxm->data);
+               rxm->nb_segs = 1;
+               rxm->next = NULL;
+               rxm->pkt_len = pkt_len;
+               rxm->data_len = pkt_len;
+               rxm->in_port = rxq->port_id;

                hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-               rxm->pkt.vlan_macip.f.vlan_tci =
+               rxm->vlan_macip.f.vlan_tci =
                        rte_le_to_cpu_16(rxd.wb.upper.vlan);

                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -1260,12 +1260,12 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm->ol_flags = pkt_flags;

                if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                       rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+                       rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
                else if (pkt_flags & PKT_RX_FDIR) {
-                       rxm->pkt.hash.fdir.hash =
+                       rxm->hash.fdir.hash =
                                (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
                                           & IXGBE_ATR_HASH_MASK);
-                       rxm->pkt.hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+                       rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
                }
                /*
                 * Store the mbuf address into the next entry of the array
@@ -1422,8 +1422,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * Set data length & data buffer address of mbuf.
                 */
                data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
-               rxm->pkt.data_len = data_len;
-               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rxm->data_len = data_len;
+               rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;

                /*
                 * If this is the first buffer of the received packet,
@@ -1435,13 +1435,13 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 */
                if (first_seg == NULL) {
                        first_seg = rxm;
-                       first_seg->pkt.pkt_len = data_len;
-                       first_seg->pkt.nb_segs = 1;
+                       first_seg->pkt_len = data_len;
+                       first_seg->nb_segs = 1;
                } else {
-                       first_seg->pkt.pkt_len = (uint16_t)(first_seg->pkt.pkt_len
+                       first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
                                        + data_len);
-                       first_seg->pkt.nb_segs++;
-                       last_seg->pkt.next = rxm;
+                       first_seg->nb_segs++;
+                       last_seg->next = rxm;
                }

                /*
@@ -1464,18 +1464,18 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *     mbuf, subtract the length of that CRC part from the
                 *     data length of the previous mbuf.
                 */
-               rxm->pkt.next = NULL;
+               rxm->next = NULL;
                if (unlikely(rxq->crc_len > 0)) {
-                       first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+                       first_seg->pkt_len -= ETHER_CRC_LEN;
                        if (data_len <= ETHER_CRC_LEN) {
                                rte_pktmbuf_free_seg(rxm);
-                               first_seg->pkt.nb_segs--;
-                               last_seg->pkt.data_len = (uint16_t)
-                                       (last_seg->pkt.data_len -
+                               first_seg->nb_segs--;
+                               last_seg->data_len = (uint16_t)
+                                       (last_seg->data_len -
                                         (ETHER_CRC_LEN - data_len));
-                               last_seg->pkt.next = NULL;
+                               last_seg->next = NULL;
                        } else
-                               rxm->pkt.data_len =
+                               rxm->data_len =
                                        (uint16_t) (data_len - ETHER_CRC_LEN);
                }

@@ -1488,13 +1488,13 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 *      - VLAN TCI, if any,
                 *      - error flags.
                 */
-               first_seg->pkt.in_port = rxq->port_id;
+               first_seg->in_port = rxq->port_id;

                /*
                 * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
                 * set in the pkt_flags field.
                 */
-               first_seg->pkt.vlan_macip.f.vlan_tci =
+               first_seg->vlan_macip.f.vlan_tci =
                                rte_le_to_cpu_16(rxd.wb.upper.vlan);
                hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
@@ -1505,17 +1505,17 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                first_seg->ol_flags = pkt_flags;

                if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                       first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+                       first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
                else if (pkt_flags & PKT_RX_FDIR) {
-                       first_seg->pkt.hash.fdir.hash =
+                       first_seg->hash.fdir.hash =
                                (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
                                           & IXGBE_ATR_HASH_MASK);
-                       first_seg->pkt.hash.fdir.id =
+                       first_seg->hash.fdir.id =
                                rxd.wb.lower.hi_dword.csum_ip.ip_id;
                }

                /* Prefetch data of first segment, if configured to do so. */
-               rte_packet_prefetch(first_seg->pkt.data);
+               rte_packet_prefetch(first_seg->data);

                /*
                 * Store the mbuf address into the next entry of the array
@@ -3083,10 +3083,10 @@ ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
                }

                rte_mbuf_refcnt_set(mbuf, 1);
-               mbuf->pkt.next = NULL;
-               mbuf->pkt.data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
-               mbuf->pkt.nb_segs = 1;
-               mbuf->pkt.in_port = rxq->port_id;
+               mbuf->next = NULL;
+               mbuf->data = (char *)mbuf->buf_addr + RTE_PKTMBUF_HEADROOM;
+               mbuf->nb_segs = 1;
+               mbuf->in_port = rxq->port_id;

                dma_addr =
                        rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index 446eeb7..e32a417 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -45,7 +45,7 @@
 #endif

 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-       (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+       (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
        (char *)(mb)->buf_addr))

 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
diff --git a/lib/librte_pmd_pcap/rte_eth_pcap.c b/lib/librte_pmd_pcap/rte_eth_pcap.c
index 680dfdc..aa80478 100644
--- a/lib/librte_pmd_pcap/rte_eth_pcap.c
+++ b/lib/librte_pmd_pcap/rte_eth_pcap.c
@@ -151,9 +151,9 @@ eth_pcap_rx(void *queue,

                if (header.len <= buf_size) {
                        /* pcap packet will fit in the mbuf, go ahead and copy */
-                       rte_memcpy(mbuf->pkt.data, packet, header.len);
-                       mbuf->pkt.data_len = (uint16_t)header.len;
-                       mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+                       rte_memcpy(mbuf->data, packet, header.len);
+                       mbuf->data_len = (uint16_t)header.len;
+                       mbuf->pkt_len = mbuf->data_len;
                        bufs[i] = mbuf;
                        num_rx++;
                } else {
@@ -200,9 +200,9 @@ eth_pcap_tx_dumper(void *queue,
        for (i = 0; i < nb_pkts; i++) {
                mbuf = bufs[i];
                calculate_timestamp(&header.ts);
-               header.len = mbuf->pkt.data_len;
+               header.len = mbuf->data_len;
                header.caplen = header.len;
-               pcap_dump((u_char*) dumper_q->dumper, &header, mbuf->pkt.data);
+               pcap_dump((u_char*) dumper_q->dumper, &header, mbuf->data);
                rte_pktmbuf_free(mbuf);
                num_tx++;
        }
@@ -237,8 +237,8 @@ eth_pcap_tx(void *queue,

        for (i = 0; i < nb_pkts; i++) {
                mbuf = bufs[i];
-               ret = pcap_sendpacket(tx_queue->pcap, (u_char*) mbuf->pkt.data,
-                               mbuf->pkt.data_len);
+               ret = pcap_sendpacket(tx_queue->pcap, (u_char*) mbuf->data,
+                               mbuf->data_len);
                if(likely(!ret))
                        num_tx++;
                rte_pktmbuf_free(mbuf);
diff --git a/lib/librte_pmd_virtio/virtio_rxtx.c b/lib/librte_pmd_virtio/virtio_rxtx.c
index 0db3ba0..7deaeb6 100644
--- a/lib/librte_pmd_virtio/virtio_rxtx.c
+++ b/lib/librte_pmd_virtio/virtio_rxtx.c
@@ -267,13 +267,13 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        hw->eth_stats.ierrors++;
                        continue;
                }
-               rxm->pkt.in_port = rxvq->port_id;
-               rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-               rxm->pkt.nb_segs = 1;
-               rxm->pkt.next = NULL;
-               rxm->pkt.pkt_len  = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
-               rxm->pkt.data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
-               VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);
+               rxm->in_port = rxvq->port_id;
+               rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rxm->nb_segs = 1;
+               rxm->next = NULL;
+               rxm->pkt_len  = (uint32_t)(len[i] - sizeof(struct virtio_net_hdr));
+               rxm->data_len = (uint16_t)(len[i] - sizeof(struct virtio_net_hdr));
+               VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
                rx_pkts[nb_rx++] = rxm;
                hw->eth_stats.ibytes += len[i] - sizeof(struct virtio_net_hdr);
        }
@@ -346,7 +346,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                break;
                        }
                        nb_tx++;
-                       hw->eth_stats.obytes += txm->pkt.data_len;
+                       hw->eth_stats.obytes += txm->data_len;
                } else {
                        PMD_TX_LOG(ERR, "No free tx descriptors to transmit\n");
                        break;
diff --git a/lib/librte_pmd_virtio/virtqueue.h b/lib/librte_pmd_virtio/virtqueue.h
index b67c223..210944e 100644
--- a/lib/librte_pmd_virtio/virtqueue.h
+++ b/lib/librte_pmd_virtio/virtqueue.h
@@ -59,7 +59,7 @@
 #define VIRTQUEUE_MAX_NAME_SZ 32

 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-       (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+       (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
        (char *)(mb)->buf_addr))

 #define VTNET_SQ_RQ_QUEUE_IDX 0
@@ -330,7 +330,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
        start_dp[idx].flags = VRING_DESC_F_NEXT;
        idx = start_dp[idx].next;
        start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
-       start_dp[idx].len   = cookie->pkt.data_len;
+       start_dp[idx].len   = cookie->data_len;
        start_dp[idx].flags = 0;
        idx = start_dp[idx].next;
        txvq->vq_desc_head_idx = idx;
@@ -363,7 +363,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint
                        break;
                }
                rte_prefetch0(cookie);
-               rte_packet_prefetch(cookie->pkt.data);
+               rte_packet_prefetch(cookie->data);
                rx_pkts[i]  = cookie;
                vq->vq_used_cons_idx++;
                vq_ring_free_chain(vq, desc_idx);
diff --git a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
index d91404a..60f26ba 100644
--- a/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
+++ b/lib/librte_pmd_vmxnet3/vmxnet3_rxtx.c
@@ -80,7 +80,7 @@


 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-       (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+       (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->data) - \
        (char *)(mb)->buf_addr))

 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
@@ -276,7 +276,7 @@ vmxnet3_xmit_pkts( void *tx_queue, struct rte_mbuf **tx_pkts,

                        txm = tx_pkts[nb_tx];
                        /* Don't support scatter packets yet, free them if met */
-                       if (txm->pkt.nb_segs != 1) {
+                       if (txm->nb_segs != 1) {
                                PMD_TX_LOG(DEBUG, "Don't support scatter packets yet, drop!\n");
                                rte_pktmbuf_free(tx_pkts[nb_tx]);
                                txq->stats.drop_total++;
@@ -286,7 +286,7 @@ vmxnet3_xmit_pkts( void *tx_queue, struct rte_mbuf 
**tx_pkts,
                        }

                        /* Needs to minus ether header len */
-                       if(txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
+                       if(txm->data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
                                PMD_TX_LOG(DEBUG, "Packet data_len higher than 
MTU\n");
                                rte_pktmbuf_free(tx_pkts[nb_tx]);
                                txq->stats.drop_total++;
@@ -301,7 +301,7 @@ vmxnet3_xmit_pkts( void *tx_queue, struct rte_mbuf 
**tx_pkts,
                        tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
                        tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
                        txd->addr = tbi->bufPA;
-                       txd->len = txm->pkt.data_len;
+                       txd->len = txm->data_len;

                        /* Mark the last descriptor as End of Packet. */
                        txd->cq = 1;
@@ -537,21 +537,20 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts, uint16_t nb_pkts)
                                        rte_pktmbuf_mtod(rxm, void *));
 #endif
                                //Copy vlan tag in packet buffer
-                               rxm->pkt.vlan_macip.f.vlan_tci =
+                               rxm->vlan_macip.f.vlan_tci =
                                        rte_le_to_cpu_16((uint16_t)rcd->tci);

                        } else
                                rxm->ol_flags = 0;

                        /* Initialize newly received packet buffer */
-                       rxm->pkt.in_port = rxq->port_id;
-                       rxm->pkt.nb_segs = 1;
-                       rxm->pkt.next = NULL;
-                       rxm->pkt.pkt_len = (uint16_t)rcd->len;
-                       rxm->pkt.data_len = (uint16_t)rcd->len;
-                       rxm->pkt.in_port = rxq->port_id;
-                       rxm->pkt.vlan_macip.f.vlan_tci = 0;
-                       rxm->pkt.data = (char *)rxm->buf_addr + 
RTE_PKTMBUF_HEADROOM;
+                       rxm->in_port = rxq->port_id;
+                       rxm->nb_segs = 1;
+                       rxm->next = NULL;
+                       rxm->pkt_len = (uint16_t)rcd->len;
+                       rxm->data_len = (uint16_t)rcd->len;
+                       rxm->vlan_macip.f.vlan_tci = 0;
+                       rxm->data = (char *)rxm->buf_addr + 
RTE_PKTMBUF_HEADROOM;

                        rx_pkts[nb_rx++] = rxm;

diff --git a/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c 
b/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c
index 5cd1cdb..40108c4 100644
--- a/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c
+++ b/lib/librte_pmd_xenvirt/rte_eth_xenvirt.c
@@ -108,12 +108,12 @@ eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, 
uint16_t nb_pkts)
        for (i = 0; i < num ; i ++) {
                rxm = rx_pkts[i];
                PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
-               rxm->pkt.next = NULL;
-               rxm->pkt.data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-               rxm->pkt.data_len = (uint16_t)(len[i] - sizeof(struct 
virtio_net_hdr));
-               rxm->pkt.nb_segs = 1;
-               rxm->pkt.in_port = pi->port_id;
-               rxm->pkt.pkt_len  = (uint32_t)(len[i] - sizeof(struct 
virtio_net_hdr));
+               rxm->next = NULL;
+               rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rxm->data_len = (uint16_t)(len[i] - sizeof(struct 
virtio_net_hdr));
+               rxm->nb_segs = 1;
+               rxm->in_port = pi->port_id;
+               rxm->pkt_len  = (uint32_t)(len[i] - sizeof(struct 
virtio_net_hdr));
        }
        /* allocate new mbuf for the used descriptor */
        while (likely(!virtqueue_full(rxvq))) {
diff --git a/lib/librte_pmd_xenvirt/virtqueue.h 
b/lib/librte_pmd_xenvirt/virtqueue.h
index 3844448..f36030f 100644
--- a/lib/librte_pmd_xenvirt/virtqueue.h
+++ b/lib/librte_pmd_xenvirt/virtqueue.h
@@ -54,7 +54,7 @@
  * rather than gpa<->hva in virito spec.
  */
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-       ((uint64_t)((mb)->pkt.data))
+       ((uint64_t)((mb)->data))

 enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };

@@ -238,7 +238,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct 
rte_mbuf *cookie)
        start_dp[idx].addr  = (uintptr_t)NULL;
        idx = start_dp[idx].next;
        start_dp[idx].addr  = RTE_MBUF_DATA_DMA_ADDR(cookie);
-       start_dp[idx].len   = cookie->pkt.data_len;
+       start_dp[idx].len   = cookie->data_len;
        start_dp[idx].flags = 0;
        idx = start_dp[idx].next;
        txvq->vq_desc_head_idx = idx;
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 24e8bdf..7f0bf67 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -1032,7 +1032,7 @@ rte_sched_port_update_subport_stats(struct rte_sched_port 
*port, uint32_t qindex
 {
        struct rte_sched_subport *s = port->subport + (qindex / 
rte_sched_port_queues_per_subport(port));
        uint32_t tc_index = (qindex >> 2) & 0x3;
-       uint32_t pkt_len = pkt->pkt.pkt_len;
+       uint32_t pkt_len = pkt->pkt_len;

        s->stats.n_pkts_tc[tc_index] += 1;
        s->stats.n_bytes_tc[tc_index] += pkt_len;
@@ -1043,7 +1043,7 @@ rte_sched_port_update_subport_stats_on_drop(struct 
rte_sched_port *port, uint32_
 {
        struct rte_sched_subport *s = port->subport + (qindex / 
rte_sched_port_queues_per_subport(port));
        uint32_t tc_index = (qindex >> 2) & 0x3;
-       uint32_t pkt_len = pkt->pkt.pkt_len;
+       uint32_t pkt_len = pkt->pkt_len;

        s->stats.n_pkts_tc_dropped[tc_index] += 1;
        s->stats.n_bytes_tc_dropped[tc_index] += pkt_len;
@@ -1053,7 +1053,7 @@ static inline void
 rte_sched_port_update_queue_stats(struct rte_sched_port *port, uint32_t 
qindex, struct rte_mbuf *pkt)
 {
        struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
-       uint32_t pkt_len = pkt->pkt.pkt_len;
+       uint32_t pkt_len = pkt->pkt_len;

        qe->stats.n_pkts += 1;
        qe->stats.n_bytes += pkt_len;
@@ -1063,7 +1063,7 @@ static inline void
 rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port, 
uint32_t qindex, struct rte_mbuf *pkt)
 {
        struct rte_sched_queue_extra *qe = port->queue_extra + qindex;
-       uint32_t pkt_len = pkt->pkt.pkt_len;
+       uint32_t pkt_len = pkt->pkt_len;

        qe->stats.n_pkts_dropped += 1;
        qe->stats.n_bytes_dropped += pkt_len;
@@ -1580,7 +1580,7 @@ grinder_credits_check(struct rte_sched_port *port, 
uint32_t pos)
        struct rte_sched_pipe *pipe = grinder->pipe;
        struct rte_mbuf *pkt = grinder->pkt;
        uint32_t tc_index = grinder->tc_index;
-       uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+       uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
        uint32_t subport_tb_credits = subport->tb_credits;
        uint32_t subport_tc_credits = subport->tc_credits[tc_index];
        uint32_t pipe_tb_credits = pipe->tb_credits;
@@ -1616,7 +1616,7 @@ grinder_credits_check(struct rte_sched_port *port, 
uint32_t pos)
        struct rte_sched_pipe *pipe = grinder->pipe;
        struct rte_mbuf *pkt = grinder->pkt;
        uint32_t tc_index = grinder->tc_index;
-       uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+       uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
        uint32_t subport_tb_credits = subport->tb_credits;
        uint32_t subport_tc_credits = subport->tc_credits[tc_index];
        uint32_t pipe_tb_credits = pipe->tb_credits;
@@ -1657,7 +1657,7 @@ grinder_schedule(struct rte_sched_port *port, uint32_t 
pos)
        struct rte_sched_grinder *grinder = port->grinder + pos;
        struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
        struct rte_mbuf *pkt = grinder->pkt;
-       uint32_t pkt_len = pkt->pkt.pkt_len + port->frame_overhead;
+       uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;

 #if RTE_SCHED_TS_CREDITS_CHECK
        if (!grinder_credits_check(port, pos)) {
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index 1c4ebc5..5c4c0cd 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -106,7 +106,7 @@ extern "C" {
    2. Start of Frame Delimiter (SFD):       1 byte;
    3. Frame Check Sequence (FCS):           4 bytes;
    4. Inter Frame Gap (IFG):               12 bytes.
-The FCS is considered overhead only if not included in the packet length 
(field pkt.pkt_len
+The FCS is considered overhead only if not included in the packet length 
(field pkt_len
 of struct rte_mbuf). */
 #ifndef RTE_SCHED_FRAME_OVERHEAD_DEFAULT
 #define RTE_SCHED_FRAME_OVERHEAD_DEFAULT      24
@@ -196,7 +196,7 @@ struct rte_sched_port_params {
 };

 /** Path through the scheduler hierarchy used by the scheduler enqueue 
operation to
-identify the destination queue for the current packet. Stored in the field 
pkt.hash.sched
+identify the destination queue for the current packet. Stored in the field 
hash.sched
 of struct rte_mbuf of each packet, typically written by the classification 
stage and read by 
 scheduler enqueue.*/
 struct rte_sched_port_hierarchy {
@@ -352,7 +352,7 @@ static inline void
 rte_sched_port_pkt_write(struct rte_mbuf *pkt, 
        uint32_t subport, uint32_t pipe, uint32_t traffic_class, uint32_t 
queue, enum rte_meter_color color)
 {
-       struct rte_sched_port_hierarchy *sched = (struct 
rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+       struct rte_sched_port_hierarchy *sched = (struct 
rte_sched_port_hierarchy *) &pkt->hash.sched;

        sched->color = (uint32_t) color;
        sched->subport = subport;
@@ -381,7 +381,7 @@ rte_sched_port_pkt_write(struct rte_mbuf *pkt,
 static inline void
 rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, uint32_t *subport, 
uint32_t *pipe, uint32_t *traffic_class, uint32_t *queue)
 {
-       struct rte_sched_port_hierarchy *sched = (struct 
rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+       struct rte_sched_port_hierarchy *sched = (struct 
rte_sched_port_hierarchy *) &pkt->hash.sched;

        *subport = sched->subport;
        *pipe = sched->pipe;
@@ -392,7 +392,7 @@ rte_sched_port_pkt_read_tree_path(struct rte_mbuf *pkt, 
uint32_t *subport, uint3
 static inline enum rte_meter_color
 rte_sched_port_pkt_read_color(struct rte_mbuf *pkt)
 {
-       struct rte_sched_port_hierarchy *sched = (struct 
rte_sched_port_hierarchy *) &pkt->pkt.hash.sched;
+       struct rte_sched_port_hierarchy *sched = (struct 
rte_sched_port_hierarchy *) &pkt->hash.sched;

        return (enum rte_meter_color) sched->color;
 }
-- 
1.9.2

Reply via email to