There is no room left to add new offload flags in the current 16-bit
field. Since there is spare room in the mbuf structure, we can widen
ol_flags to 32 bits.

A later commit will add support for TSO (TCP Segmentation Offload),
which requires a new ol_flags bit, justifying this change.
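
As an illustration of the room gained (the flag name and value below are
hypothetical, nothing in this series defines them), such a TSO request
could simply take one of the newly freed TX bits, e.g.:

  #define PKT_TX_TCP_SEG  0x00200000 /**< hypothetical: request TSO for this TX packet. */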

Thanks to this modification, another possible improvement (not part of
this series) would be to extend the checksum flags from:
  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD
to:
  PKT_RX_L4_CKSUM, PKT_RX_IP_CKSUM, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD
in order to detect whether the checksum has been verified by hardware
or not.
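
For illustration only (these flags are not defined by this series), that
would let an application tell "checksum verified as good by hw" apart
from "checksum not verified at all", e.g. with a hypothetical helper:

  /* hypothetical: PKT_RX_L4_CKSUM would mean "hw did check the L4 cksum" */
  static inline int rx_l4_cksum_known_good(uint32_t ol_flags)
  {
          return (ol_flags & PKT_RX_L4_CKSUM) &&
                  !(ol_flags & PKT_RX_L4_CKSUM_BAD);
  }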

Signed-off-by: Olivier Matz <olivier.matz at 6wind.com>
---
 app/test-pmd/cmdline.c                             | 13 +++-
 app/test-pmd/config.c                              | 10 +--
 app/test-pmd/csumonly.c                            | 26 ++++----
 app/test-pmd/rxonly.c                              |  4 +-
 app/test-pmd/testpmd.h                             | 11 +---
 app/test-pmd/txonly.c                              |  2 +-
 .../bsdapp/eal/include/exec-env/rte_kni_common.h   |  2 +-
 .../linuxapp/eal/include/exec-env/rte_kni_common.h |  2 +-
 lib/librte_mbuf/rte_mbuf.c                         |  2 +-
 lib/librte_mbuf/rte_mbuf.h                         | 52 +++++++--------
 lib/librte_pmd_e1000/em_rxtx.c                     | 35 +++++-----
 lib/librte_pmd_e1000/igb_rxtx.c                    | 71 ++++++++++----------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c                  | 77 +++++++++++-----------
 lib/librte_pmd_ixgbe/ixgbe_rxtx.h                  |  2 +-
 14 files changed, 157 insertions(+), 152 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index a51eee5..8f155e9 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -2257,8 +2257,17 @@ cmd_tx_cksum_set_parsed(void *parsed_result,
                       __attribute__((unused)) void *data)
 {
        struct cmd_tx_cksum_set_result *res = parsed_result;
-
-       tx_cksum_set(res->port_id, res->cksum_mask);
+       uint32_t ol_flags = 0;
+
+       if (res->cksum_mask & 0x1)
+               ol_flags |= PKT_TX_IP_CKSUM;
+       if (res->cksum_mask & 0x2)
+               ol_flags |= PKT_TX_TCP_CKSUM;
+       if (res->cksum_mask & 0x4)
+               ol_flags |= PKT_TX_UDP_CKSUM;
+       if (res->cksum_mask & 0x8)
+               ol_flags |= PKT_TX_SCTP_CKSUM;
+       tx_cksum_set(res->port_id, ol_flags);
 }

 cmdline_parse_token_string_t cmd_tx_cksum_set_tx_cksum =
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 20ad0a8..018a278 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -1532,14 +1532,16 @@ set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
 }

 void
-tx_cksum_set(portid_t port_id, uint8_t cksum_mask)
+tx_cksum_set(portid_t port_id, uint32_t ol_flags)
 {
-       uint16_t tx_ol_flags;
+       uint32_t cksum_mask = PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;
+
        if (port_id_is_invalid(port_id))
                return;
+
        /* Clear last 4 bits and then set L3/4 checksum mask again */
-       tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0);
-       ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | tx_ol_flags);
+       ports[port_id].tx_ol_flags &= ~cksum_mask;
+       ports[port_id].tx_ol_flags |= (ol_flags & cksum_mask);
 }

 void
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 3313b87..69b90a7 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -217,9 +217,9 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
        uint16_t nb_rx;
        uint16_t nb_tx;
        uint16_t i;
-       uint16_t ol_flags;
-       uint16_t pkt_ol_flags;
-       uint16_t tx_ol_flags;
+       uint32_t ol_flags;
+       uint32_t pkt_ol_flags;
+       uint32_t tx_ol_flags;
        uint16_t l4_proto;
        uint16_t eth_type;
        uint8_t  l2_len;
@@ -261,7 +261,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                mb = pkts_burst[i];
                l2_len  = sizeof(struct ether_hdr);
                pkt_ol_flags = mb->ol_flags;
-               ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));
+               ol_flags = (pkt_ol_flags & (~PKT_TX_L4_MASK));

                eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
                eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
@@ -274,8 +274,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                }

                /* Update the L3/L4 checksum error packet count  */
-               rx_bad_ip_csum += (uint16_t) ((pkt_ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
-               rx_bad_l4_csum += (uint16_t) ((pkt_ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);
+               rx_bad_ip_csum += ((pkt_ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
+               rx_bad_l4_csum += ((pkt_ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);

                /*
                 * Try to figure out L3 packet type by SW.
@@ -308,7 +308,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                        /* Do not delete, this is required by HW*/
                        ipv4_hdr->hdr_checksum = 0;

-                       if (tx_ol_flags & 0x1) {
+                       if (tx_ol_flags & PKT_TX_IP_CKSUM) {
                                /* HW checksum */
                                ol_flags |= PKT_TX_IP_CKSUM;
                        }
@@ -321,7 +321,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                        if (l4_proto == IPPROTO_UDP) {
                                udp_hdr = (struct udp_hdr*) (rte_pktmbuf_mtod(mb,
                                                unsigned char *) + l2_len + l3_len);
-                               if (tx_ol_flags & 0x2) {
+                               if (tx_ol_flags & PKT_TX_UDP_CKSUM) {
                                        /* HW Offload */
                                        ol_flags |= PKT_TX_UDP_CKSUM;
                                        /* Pseudo header sum need be set properly */
@@ -337,7 +337,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                        else if (l4_proto == IPPROTO_TCP){
                                tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
                                                unsigned char *) + l2_len + l3_len);
-                               if (tx_ol_flags & 0x4) {
+                               if (tx_ol_flags & PKT_TX_TCP_CKSUM) {
                                        ol_flags |= PKT_TX_TCP_CKSUM;
                                        tcp_hdr->cksum = get_ipv4_psd_sum(ipv4_hdr);
                                }
@@ -351,7 +351,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                                sctp_hdr = (struct sctp_hdr*) (rte_pktmbuf_mtod(mb,
                                                unsigned char *) + l2_len + l3_len);

-                               if (tx_ol_flags & 0x8) {
+                               if (tx_ol_flags & PKT_TX_SCTP_CKSUM) {
                                        ol_flags |= PKT_TX_SCTP_CKSUM;
                                        sctp_hdr->cksum = 0;

@@ -377,7 +377,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                        if (l4_proto == IPPROTO_UDP) {
                                udp_hdr = (struct udp_hdr*) (rte_pktmbuf_mtod(mb,
                                                unsigned char *) + l2_len + l3_len);
-                               if (tx_ol_flags & 0x2) {
+                               if (tx_ol_flags & PKT_TX_UDP_CKSUM) {
                                        /* HW Offload */
                                        ol_flags |= PKT_TX_UDP_CKSUM;
                                        udp_hdr->dgram_cksum = get_ipv6_psd_sum(ipv6_hdr);
@@ -393,7 +393,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                        else if (l4_proto == IPPROTO_TCP) {
                                tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
                                                unsigned char *) + l2_len + l3_len);
-                               if (tx_ol_flags & 0x4) {
+                               if (tx_ol_flags & PKT_TX_TCP_CKSUM) {
                                        ol_flags |= PKT_TX_TCP_CKSUM;
                                        tcp_hdr->cksum = get_ipv6_psd_sum(ipv6_hdr);
                                }
@@ -407,7 +407,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
                                sctp_hdr = (struct sctp_hdr*) (rte_pktmbuf_mtod(mb,
                                                unsigned char *) + l2_len + l3_len);

-                               if (tx_ol_flags & 0x8) {
+                               if (tx_ol_flags & PKT_TX_SCTP_CKSUM) {
                                        ol_flags |= PKT_TX_SCTP_CKSUM;
                                        sctp_hdr->cksum = 0;
                                        /* Sanity check, only number of 4 bytes supported by HW */
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index 94f71c7..0bf4440 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -92,7 +92,7 @@ pkt_burst_receive(struct fwd_stream *fs)
        struct rte_mbuf  *mb;
        struct ether_hdr *eth_hdr;
        uint16_t eth_type;
-       uint16_t ol_flags;
+       uint32_t ol_flags;
        uint16_t nb_rx;
        uint16_t i;
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
@@ -152,7 +152,7 @@ pkt_burst_receive(struct fwd_stream *fs)
                                mb->vlan_macip.f.vlan_tci);
                printf("\n");
                if (ol_flags != 0) {
-                       uint16_t rxf;
+                       uint32_t rxf;
                        const char *name;

                        for (rxf = 0; rxf < sizeof(mb->ol_flags) * 8; rxf++) {
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index c80ea09..68eccfa 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -123,12 +123,7 @@ struct fwd_stream {

 /**
  * The data structure associated with each port.
- * tx_ol_flags is slightly different from ol_flags of rte_mbuf.
- *   Bit  0: Insert IP checksum
- *   Bit  1: Insert UDP checksum
- *   Bit  2: Insert TCP checksum
- *   Bit  3: Insert SCTP checksum
- *   Bit 11: Insert VLAN Label
+ * tx_ol_flags uses the same flags as the ol_flags field of rte_mbuf.
  */
 struct rte_port {
        struct rte_eth_dev_info dev_info;   /**< PCI info + driver name */
@@ -139,7 +134,7 @@ struct rte_port {
        struct fwd_stream       *rx_stream; /**< Port RX stream, if unique */
        struct fwd_stream       *tx_stream; /**< Port TX stream, if unique */
        unsigned int            socket_id;  /**< For NUMA support */
-       uint16_t                tx_ol_flags;/**< Offload Flags of TX packets. */
+       uint32_t                tx_ol_flags;/**< Offload Flags of TX packets. */
        uint16_t                tx_vlan_id; /**< Tag Id. in TX VLAN packets. */
        void                    *fwd_ctx;   /**< Forwarding mode context */
         uint64_t                rx_bad_ip_csum; /**< rx pkts with bad ip checksum  */
@@ -490,7 +485,7 @@ void tx_vlan_reset(portid_t port_id);

 void set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value);

-void tx_cksum_set(portid_t port_id, uint8_t cksum_mask);
+void tx_cksum_set(portid_t port_id, uint32_t ol_flags);

 void set_verbose_level(uint16_t vb_level);
 void set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs);
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
index 9d6cffb..b2d8dbd 100644
--- a/app/test-pmd/txonly.c
+++ b/app/test-pmd/txonly.c
@@ -203,7 +203,7 @@ pkt_burst_transmit(struct fwd_stream *fs)
        uint16_t nb_tx;
        uint16_t nb_pkt;
        uint16_t vlan_tci;
-       uint16_t ol_flags;
+       uint32_t ol_flags;
        uint8_t  i;
 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
        uint64_t start_tsc;
diff --git a/lib/librte_eal/bsdapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/bsdapp/eal/include/exec-env/rte_kni_common.h
index e13afb2..66a32fe 100755
--- a/lib/librte_eal/bsdapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/bsdapp/eal/include/exec-env/rte_kni_common.h
@@ -111,7 +111,7 @@ struct rte_kni_mbuf {
        void *pool;
        void *buf_addr;
        char pad0[14];
-       uint16_t ol_flags;      /**< Offload features. */
+       uint32_t ol_flags;      /**< Offload features. */
        void *next;
        void *data;             /**< Start address of data in segment buffer. */
        uint16_t data_len;      /**< Amount of data in segment buffer. */
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index e13afb2..66a32fe 100755
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -111,7 +111,7 @@ struct rte_kni_mbuf {
        void *pool;
        void *buf_addr;
        char pad0[14];
-       uint16_t ol_flags;      /**< Offload features. */
+       uint32_t ol_flags;      /**< Offload features. */
        void *next;
        void *data;             /**< Start address of data in segment buffer. */
        uint16_t data_len;      /**< Amount of data in segment buffer. */
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index e791aa6..5519c76 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -161,7 +161,7 @@ rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)

        fprintf(f, "dump mbuf at 0x%p, phys=%"PRIx64", buf_len=%u\n",
               m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
-       fprintf(f, "  pkt_len=%"PRIu32", ol_flags=%"PRIx16", nb_segs=%u, "
+       fprintf(f, "  pkt_len=%"PRIu32", ol_flags=%"PRIx32", nb_segs=%u, "
               "in_port=%u\n", m->pkt_len, m->ol_flags,
               (unsigned)m->nb_segs, (unsigned)m->in_port);
        nb_segs = m->nb_segs;
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 7556a86..540a62c 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -69,34 +69,33 @@ extern "C" {
  * Packet Offload Features Flags. It also carry packet type information.
  * Critical resources. Both rx/tx shared these bits. Be cautious on any change
  */
-#define PKT_RX_VLAN_PKT      0x0001 /**< RX packet is a 802.1q VLAN packet. */
-#define PKT_RX_RSS_HASH      0x0002 /**< RX packet with RSS hash result. */
-#define PKT_RX_FDIR          0x0004 /**< RX packet with FDIR infos. */
-#define PKT_RX_L4_CKSUM_BAD  0x0008 /**< L4 cksum of RX pkt. is not OK. */
-#define PKT_RX_IP_CKSUM_BAD  0x0010 /**< IP cksum of RX pkt. is not OK. */
-#define PKT_RX_IPV4_HDR      0x0020 /**< RX packet with IPv4 header. */
-#define PKT_RX_IPV4_HDR_EXT  0x0040 /**< RX packet with extended IPv4 header. */
-#define PKT_RX_IPV6_HDR      0x0080 /**< RX packet with IPv6 header. */
-#define PKT_RX_IPV6_HDR_EXT  0x0100 /**< RX packet with extended IPv6 header. */
-#define PKT_RX_IEEE1588_PTP  0x0200 /**< RX IEEE1588 L2 Ethernet PT Packet. */
-#define PKT_RX_IEEE1588_TMST 0x0400 /**< RX IEEE1588 L2/L4 timestamped packet.*/
-
-#define PKT_TX_VLAN_PKT      0x0800 /**< TX packet is a 802.1q VLAN packet. */
-#define PKT_TX_IP_CKSUM      0x1000 /**< IP cksum of TX pkt. computed by NIC. */
+#define PKT_RX_VLAN_PKT      0x00000001 /**< RX packet is a 802.1q VLAN packet. */
+#define PKT_RX_RSS_HASH      0x00000002 /**< RX packet with RSS hash result. */
+#define PKT_RX_FDIR          0x00000004 /**< RX packet with FDIR infos. */
+#define PKT_RX_L4_CKSUM_BAD  0x00000008 /**< L4 cksum of RX pkt. is not OK. */
+#define PKT_RX_IP_CKSUM_BAD  0x00000010 /**< IP cksum of RX pkt. is not OK. */
+#define PKT_RX_IPV4_HDR      0x00000020 /**< RX packet with IPv4 header. */
+#define PKT_RX_IPV4_HDR_EXT  0x00000040 /**< RX packet with extended IPv4 header. */
+#define PKT_RX_IPV6_HDR      0x00000080 /**< RX packet with IPv6 header. */
+#define PKT_RX_IPV6_HDR_EXT  0x00000100 /**< RX packet with extended IPv6 header. */
+#define PKT_RX_IEEE1588_PTP  0x00000200 /**< RX IEEE1588 L2 Ethernet PT Packet. */
+#define PKT_RX_IEEE1588_TMST 0x00000400 /**< RX IEEE1588 L2/L4 timestamped packet.*/
+
+#define PKT_TX_VLAN_PKT      0x00010000 /**< TX packet is a 802.1q VLAN packet. */
+#define PKT_TX_IP_CKSUM      0x00020000 /**< IP cksum of TX pkt. computed by NIC. */
 /*
- * Bit 14~13 used for L4 packet type with checksum enabled.
+ * Bits used for L4 packet type with checksum enabled.
  *     00: Reserved
  *     01: TCP checksum
  *     10: SCTP checksum
  *     11: UDP checksum
  */
-#define PKT_TX_L4_MASK       0x6000 /**< Mask bits for L4 checksum offload request. */
-#define PKT_TX_L4_NO_CKSUM   0x0000 /**< Disable L4 cksum of TX pkt. */
-#define PKT_TX_TCP_CKSUM     0x2000 /**< TCP cksum of TX pkt. computed by NIC. */
-#define PKT_TX_SCTP_CKSUM    0x4000 /**< SCTP cksum of TX pkt. computed by NIC. */
-#define PKT_TX_UDP_CKSUM     0x6000 /**< UDP cksum of TX pkt. computed by NIC. */
-/* Bit 15 */
-#define PKT_TX_IEEE1588_TMST 0x8000 /**< TX IEEE1588 packet to timestamp. */
+#define PKT_TX_L4_MASK       0x000C0000 /**< Mask bits for L4 checksum offload request. */
+#define PKT_TX_L4_NO_CKSUM   0x00000000 /**< Disable L4 cksum of TX pkt. */
+#define PKT_TX_TCP_CKSUM     0x00040000 /**< TCP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_SCTP_CKSUM    0x00080000 /**< SCTP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_UDP_CKSUM     0x000C0000 /**< UDP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_IEEE1588_TMST 0x00100000 /**< TX IEEE1588 packet to timestamp. */

 /**
  * Get the name of a RX offload flag
@@ -108,7 +107,7 @@ extern "C" {
  * @return
  *   The name of this flag, or NULL if it's not a valid RX flag.
  */
-static inline const char *rte_get_rx_ol_flag_name(uint16_t mask)
+static inline const char *rte_get_rx_ol_flag_name(uint32_t mask)
 {
        switch (mask) {
        case PKT_RX_VLAN_PKT: return "PKT_RX_VLAN_PKT";
@@ -136,7 +135,7 @@ static inline const char *rte_get_rx_ol_flag_name(uint16_t mask)
  * @return
  *   The name of this flag, or NULL if it's not a valid TX flag.
  */
-static inline const char *rte_get_tx_ol_flag_name(uint16_t mask)
+static inline const char *rte_get_tx_ol_flag_name(uint32_t mask)
 {
        switch (mask) {
        case PKT_TX_VLAN_PKT: return "PKT_TX_VLAN_PKT";
@@ -205,8 +204,7 @@ struct rte_mbuf {
        /* these fields are valid for first segment only */
        uint8_t nb_segs;          /**< Number of segments. */
        uint8_t in_port;          /**< Input port. */
-       uint16_t ol_flags;        /**< Offload features. */
-       uint16_t reserved;        /**< Unused field. Required for padding. */
+       uint32_t ol_flags;        /**< Offload features. */

        /* offload features, valid for first segment only */
        union rte_vlan_macip vlan_macip;
@@ -218,7 +216,7 @@ struct rte_mbuf {
                } fdir;           /**< Filter identifier if FDIR enabled */
                uint32_t sched;   /**< Hierarchical scheduler */
        } hash;                   /**< hash information */
-       uint64_t reserved2;       /**< Unused field. Required for padding. */
+       uint64_t reserved;        /**< Unused field. Required for padding. */
 } __rte_cache_aligned;

 /**
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 8c302b6..1a34340 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -147,7 +147,7 @@ enum {
  * Structure to check if new context need be built
  */
 struct em_ctx_info {
-       uint16_t flags;               /**< ol_flags related to context build. */
+       uint32_t flags;               /**< ol_flags related to context build. */
        uint32_t cmp_mask;            /**< compare mask */
        union rte_vlan_macip hdrlen;  /**< L2 and L3 header lenghts */
 };
@@ -217,7 +217,7 @@ struct em_tx_queue {
 static inline void
 em_set_xmit_ctx(struct em_tx_queue* txq,
                volatile struct e1000_context_desc *ctx_txd,
-               uint16_t flags,
+               uint32_t flags,
                union rte_vlan_macip hdrlen)
 {
        uint32_t cmp_mask, cmd_len;
@@ -283,7 +283,7 @@ em_set_xmit_ctx(struct em_tx_queue* txq,
  * or create a new context descriptor.
  */
 static inline uint32_t
-what_ctx_update(struct em_tx_queue *txq, uint16_t flags,
+what_ctx_update(struct em_tx_queue *txq, uint32_t flags,
                union rte_vlan_macip hdrlen)
 {
        /* If match with the current context */
@@ -356,7 +356,7 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 }

 static inline uint32_t
-tx_desc_cksum_flags_to_upper(uint16_t ol_flags)
+tx_desc_cksum_flags_to_upper(uint32_t ol_flags)
 {
        static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
        static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
@@ -382,12 +382,12 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint32_t popts_spec;
        uint32_t cmd_type_len;
        uint16_t slen;
-       uint16_t ol_flags;
+       uint32_t ol_flags;
        uint16_t tx_id;
        uint16_t tx_last;
        uint16_t nb_tx;
        uint16_t nb_used;
-       uint16_t tx_ol_req;
+       uint32_t tx_ol_req;
        uint32_t ctx;
        uint32_t new_ctx;
        union rte_vlan_macip hdrlen;
@@ -417,8 +417,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                ol_flags = tx_pkt->ol_flags;

                /* If hardware offload required */
-               tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
-                                                       PKT_TX_L4_MASK));
+               tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
                if (tx_ol_req) {
                        hdrlen = tx_pkt->vlan_macip;
                        /* If new context to be built or reuse the exist ctx. */
@@ -620,22 +619,22 @@ end_of_tx:
  *
  **********************************************************************/

-static inline uint16_t
+static inline uint32_t
 rx_desc_status_to_pkt_flags(uint32_t rx_status)
 {
-       uint16_t pkt_flags;
+       uint32_t pkt_flags;

        /* Check if VLAN present */
-       pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
-                                               PKT_RX_VLAN_PKT : 0);
+       pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
+               PKT_RX_VLAN_PKT : 0);

        return pkt_flags;
 }

-static inline uint16_t
+static inline uint32_t
 rx_desc_error_to_pkt_flags(uint32_t rx_error)
 {
-       uint16_t pkt_flags = 0;
+       uint32_t pkt_flags = 0;

        if (rx_error & E1000_RXD_ERR_IPE)
                pkt_flags |= PKT_RX_IP_CKSUM_BAD;
@@ -779,8 +778,8 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm->in_port = rxq->port_id;

                rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
-               rxm->ol_flags = (uint16_t)(rxm->ol_flags |
-                               rx_desc_error_to_pkt_flags(rxd.errors));
+               rxm->ol_flags = rxm->ol_flags |
+                       rx_desc_error_to_pkt_flags(rxd.errors);

                /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
                rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
@@ -1005,8 +1004,8 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                first_seg->in_port = rxq->port_id;

                first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
-               first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
-                                       rx_desc_error_to_pkt_flags(rxd.errors));
+               first_seg->ol_flags = first_seg->ol_flags |
+                       rx_desc_error_to_pkt_flags(rxd.errors);

                /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
                rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
diff --git a/lib/librte_pmd_e1000/igb_rxtx.c b/lib/librte_pmd_e1000/igb_rxtx.c
index ab0ff01..322dfa0 100644
--- a/lib/librte_pmd_e1000/igb_rxtx.c
+++ b/lib/librte_pmd_e1000/igb_rxtx.c
@@ -144,7 +144,7 @@ enum igb_advctx_num {
  * Strucutre to check if new context need be built
  */
 struct igb_advctx_info {
-       uint16_t flags;           /**< ol_flags related to context build. */
+       uint32_t flags;           /**< ol_flags related to context build. */
        uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
        union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
 };
@@ -212,7 +212,7 @@ struct igb_tx_queue {
 static inline void
 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
                volatile struct e1000_adv_tx_context_desc *ctx_txd,
-               uint16_t ol_flags, uint32_t vlan_macip_lens)
+               uint32_t ol_flags, uint32_t vlan_macip_lens)
 {
        uint32_t type_tucmd_mlhl;
        uint32_t mss_l4len_idx;
@@ -277,7 +277,7 @@ igbe_set_xmit_ctx(struct igb_tx_queue* txq,
  * or create a new context descriptor.
  */
 static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
+what_advctx_update(struct igb_tx_queue *txq, uint32_t flags,
                uint32_t vlan_macip_lens)
 {
        /* If match with the current context */
@@ -300,7 +300,7 @@ what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
 }

 static inline uint32_t
-tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
+tx_desc_cksum_flags_to_olinfo(uint32_t ol_flags)
 {
        static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
        static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
@@ -312,7 +312,7 @@ tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
 }

 static inline uint32_t
-tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
+tx_desc_vlan_flags_to_cmdtype(uint32_t ol_flags)
 {
        static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
        return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
@@ -334,12 +334,12 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint32_t cmd_type_len;
        uint32_t pkt_len;
        uint16_t slen;
-       uint16_t ol_flags;
+       uint32_t ol_flags;
        uint16_t tx_end;
        uint16_t tx_id;
        uint16_t tx_last;
        uint16_t nb_tx;
-       uint16_t tx_ol_req;
+       uint32_t tx_ol_req;
        uint32_t new_ctx = 0;
        uint32_t ctx = 0;
        uint32_t vlan_macip_lens;
@@ -368,7 +368,8 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

                ol_flags = tx_pkt->ol_flags;
                vlan_macip_lens = tx_pkt->vlan_macip.data;
-               tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
+               tx_ol_req = ol_flags &
+                       (PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK);

                /* If a Context Descriptor need be built . */
                if (tx_ol_req) {
@@ -555,12 +556,12 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
  *  RX functions
  *
  **********************************************************************/
-static inline uint16_t
+static inline uint32_t
 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
 {
-       uint16_t pkt_flags;
+       uint32_t pkt_flags;

-       static uint16_t ip_pkt_types_map[16] = {
+       static uint32_t ip_pkt_types_map[16] = {
                0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
                PKT_RX_IPV6_HDR, 0, 0, 0,
                PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
@@ -573,34 +574,34 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
                0, 0, 0, 0,
        };

-       pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
-                               ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
-                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
+       pkt_flags = ((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
+               ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
+               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
 #else
-       pkt_flags = (uint16_t)((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
-                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
+       pkt_flags = ((hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
+               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
 #endif
-       return (uint16_t)(pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
-                                               0 : PKT_RX_RSS_HASH));
+       return (pkt_flags | (((hl_tp_rs & 0x0F) == 0) ?
+                       0 : PKT_RX_RSS_HASH));
 }

-static inline uint16_t
+static inline uint32_t
 rx_desc_status_to_pkt_flags(uint32_t rx_status)
 {
-       uint16_t pkt_flags;
+       uint32_t pkt_flags;

        /* Check if VLAN present */
-       pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
-                                               PKT_RX_VLAN_PKT : 0);
+       pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
+               PKT_RX_VLAN_PKT : 0);

 #if defined(RTE_LIBRTE_IEEE1588)
        if (rx_status & E1000_RXD_STAT_TMST)
-               pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
+               pkt_flags = (pkt_flags | PKT_RX_IEEE1588_TMST);
 #endif
        return pkt_flags;
 }

-static inline uint16_t
+static inline uint32_t
 rx_desc_error_to_pkt_flags(uint32_t rx_status)
 {
        /*
@@ -608,7 +609,7 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
         * Bit 29: L4I, L4I integrity error
         */

-       static uint16_t error_to_pkt_flags_map[4] = {
+       static uint32_t error_to_pkt_flags_map[4] = {
                0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
                PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
        };
@@ -635,7 +636,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t rx_id;
        uint16_t nb_rx;
        uint16_t nb_hold;
-       uint16_t pkt_flags;
+       uint32_t pkt_flags;

        nb_rx = 0;
        nb_hold = 0;
@@ -755,10 +756,10 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        rte_le_to_cpu_16(rxd.wb.upper.vlan);

                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-               pkt_flags = (uint16_t)(pkt_flags |
-                               rx_desc_status_to_pkt_flags(staterr));
-               pkt_flags = (uint16_t)(pkt_flags |
-                               rx_desc_error_to_pkt_flags(staterr));
+               pkt_flags = pkt_flags |
+                       rx_desc_status_to_pkt_flags(staterr);
+               pkt_flags = pkt_flags |
+                       rx_desc_error_to_pkt_flags(staterr);
                rxm->ol_flags = pkt_flags;

                /*
@@ -815,7 +816,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_rx;
        uint16_t nb_hold;
        uint16_t data_len;
-       uint16_t pkt_flags;
+       uint32_t pkt_flags;

        nb_rx = 0;
        nb_hold = 0;
@@ -992,10 +993,10 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        rte_le_to_cpu_16(rxd.wb.upper.vlan);
                hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-               pkt_flags = (uint16_t)(pkt_flags |
-                               rx_desc_status_to_pkt_flags(staterr));
-               pkt_flags = (uint16_t)(pkt_flags |
-                               rx_desc_error_to_pkt_flags(staterr));
+               pkt_flags = pkt_flags |
+                       rx_desc_status_to_pkt_flags(staterr);
+               pkt_flags = pkt_flags |
+                       rx_desc_error_to_pkt_flags(staterr);
                first_seg->ol_flags = pkt_flags;

                /* Prefetch data of first segment, if configured to do so. */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index acde64b..0ff1a07 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -350,7 +350,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 static inline void
 ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
                volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
-               uint16_t ol_flags, uint32_t vlan_macip_lens)
+               uint32_t ol_flags, uint32_t vlan_macip_lens)
 {
        uint32_t type_tucmd_mlhl;
        uint32_t mss_l4len_idx;
@@ -413,7 +413,7 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
  * or create a new context descriptor.
  */
 static inline uint32_t
-what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
+what_advctx_update(struct igb_tx_queue *txq, uint32_t flags,
                uint32_t vlan_macip_lens)
 {
        /* If match with the current used context */
@@ -436,7 +436,7 @@ what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
 }

 static inline uint32_t
-tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
+tx_desc_cksum_flags_to_olinfo(uint32_t ol_flags)
 {
        static const uint32_t l4_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_TXSM};
        static const uint32_t l3_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_IXSM};
@@ -448,7 +448,7 @@ tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
 }

 static inline uint32_t
-tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
+tx_desc_vlan_flags_to_cmdtype(uint32_t ol_flags)
 {
        static const uint32_t vlan_cmd[2] = {0, IXGBE_ADVTXD_DCMD_VLE};
        return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
@@ -537,12 +537,12 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        uint32_t cmd_type_len;
        uint32_t pkt_len;
        uint16_t slen;
-       uint16_t ol_flags;
+       uint32_t ol_flags;
        uint16_t tx_id;
        uint16_t tx_last;
        uint16_t nb_tx;
        uint16_t nb_used;
-       uint16_t tx_ol_req;
+       uint32_t tx_ol_req;
        uint32_t vlan_macip_lens;
        uint32_t ctx = 0;
        uint32_t new_ctx;
@@ -574,7 +574,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                vlan_macip_lens = tx_pkt->vlan_macip.data;

                /* If hardware offload required */
-               tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
+               tx_ol_req = ol_flags &
+                       (PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK);
                if (tx_ol_req) {
                        /* If new context need be built or reuse the exist ctx. */
                        ctx = what_advctx_update(txq, tx_ol_req,
@@ -804,19 +805,19 @@ end_of_tx:
  *  RX functions
  *
  **********************************************************************/
-static inline uint16_t
+static inline uint32_t
 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
 {
-       uint16_t pkt_flags;
+       uint32_t pkt_flags;

-       static uint16_t ip_pkt_types_map[16] = {
+       static uint32_t ip_pkt_types_map[16] = {
                0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
                PKT_RX_IPV6_HDR, 0, 0, 0,
                PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
                PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
        };

-       static uint16_t ip_rss_types_map[16] = {
+       static uint32_t ip_rss_types_map[16] = {
                0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
                0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
                PKT_RX_RSS_HASH, 0, 0, 0,
@@ -829,45 +830,45 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
                0, 0, 0, 0,
        };

-       pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
-                               ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
-                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
+       pkt_flags = ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
+               ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
+               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
 #else
-       pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
-                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
+       pkt_flags = ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
+               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);

 #endif
-       return (uint16_t)(pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]);
+       return (pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]);
 }

-static inline uint16_t
+static inline uint32_t
 rx_desc_status_to_pkt_flags(uint32_t rx_status)
 {
-       uint16_t pkt_flags;
+       uint32_t pkt_flags;

        /*
         * Check if VLAN present only.
         * Do not check whether L3/L4 rx checksum done by NIC or not,
         * That can be found from rte_eth_rxmode.hw_ip_checksum flag
         */
-       pkt_flags = (uint16_t)((rx_status & IXGBE_RXD_STAT_VP) ?
-                                               PKT_RX_VLAN_PKT : 0);
+       pkt_flags = ((rx_status & IXGBE_RXD_STAT_VP) ?
+               PKT_RX_VLAN_PKT : 0);

 #ifdef RTE_LIBRTE_IEEE1588
        if (rx_status & IXGBE_RXD_STAT_TMST)
-               pkt_flags = (uint16_t)(pkt_flags | PKT_RX_IEEE1588_TMST);
+               pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
 #endif
        return pkt_flags;
 }

-static inline uint16_t
+static inline uint32_t
 rx_desc_error_to_pkt_flags(uint32_t rx_status)
 {
        /*
         * Bit 31: IPE, IPv4 checksum error
         * Bit 30: L4I, L4I integrity error
         */
-       static uint16_t error_to_pkt_flags_map[4] = {
+       static uint32_t error_to_pkt_flags_map[4] = {
                0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
                PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
        };
@@ -938,10 +939,10 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
                        mb->ol_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
                                        rxdp[j].wb.lower.lo_dword.data);
                        /* reuse status field from scan list */
-                       mb->ol_flags = (uint16_t)(mb->ol_flags |
-                                       rx_desc_status_to_pkt_flags(s[j]));
-                       mb->ol_flags = (uint16_t)(mb->ol_flags |
-                                       rx_desc_error_to_pkt_flags(s[j]));
+                       mb->ol_flags = mb->ol_flags |
+                               rx_desc_status_to_pkt_flags(s[j]);
+                       mb->ol_flags = mb->ol_flags |
+                               rx_desc_error_to_pkt_flags(s[j]);
                }

                /* Move mbuf pointers from the S/W ring to the stage */
@@ -1134,7 +1135,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t rx_id;
        uint16_t nb_rx;
        uint16_t nb_hold;
-       uint16_t pkt_flags;
+       uint32_t pkt_flags;

        nb_rx = 0;
        nb_hold = 0;
@@ -1253,10 +1254,10 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                        rte_le_to_cpu_16(rxd.wb.upper.vlan);

                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-               pkt_flags = (uint16_t)(pkt_flags |
-                               rx_desc_status_to_pkt_flags(staterr));
-               pkt_flags = (uint16_t)(pkt_flags |
-                               rx_desc_error_to_pkt_flags(staterr));
+               pkt_flags = pkt_flags |
+                       rx_desc_status_to_pkt_flags(staterr);
+               pkt_flags = pkt_flags |
+                       rx_desc_error_to_pkt_flags(staterr);
                rxm->ol_flags = pkt_flags;

                if (likely(pkt_flags & PKT_RX_RSS_HASH))
@@ -1321,7 +1322,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_rx;
        uint16_t nb_hold;
        uint16_t data_len;
-       uint16_t pkt_flags;
+       uint32_t pkt_flags;

        nb_rx = 0;
        nb_hold = 0;
@@ -1498,10 +1499,10 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                rte_le_to_cpu_16(rxd.wb.upper.vlan);
                hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-               pkt_flags = (uint16_t)(pkt_flags |
-                               rx_desc_status_to_pkt_flags(staterr));
-               pkt_flags = (uint16_t)(pkt_flags |
-                               rx_desc_error_to_pkt_flags(staterr));
+               pkt_flags = pkt_flags |
+                       rx_desc_status_to_pkt_flags(staterr);
+               pkt_flags = pkt_flags |
+                       rx_desc_error_to_pkt_flags(staterr);
                first_seg->ol_flags = pkt_flags;

                if (likely(pkt_flags & PKT_RX_RSS_HASH))
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
index 8cf66bf..75f8239 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.h
@@ -150,7 +150,7 @@ enum ixgbe_advctx_num {
  */

 struct ixgbe_advctx_info {
-       uint16_t flags;           /**< ol_flags for context build. */
+       uint32_t flags;           /**< ol_flags for context build. */
        uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
        union rte_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
 };
-- 
1.9.2
