Replace the use of the RTE_MARKER<x> pseudo-field typedefs (RTE_MARKER,
RTE_MARKER8/16/32/64) with C11 anonymous unions to improve code
portability between toolchains.

Update the use of the rte_mbuf rearm_data field in net/ionic, net/sfc and
net/virtio, which were accessing the field as a zero-length array.

Signed-off-by: Tyler Retzlaff <roretzla@linux.microsoft.com>
---
 drivers/net/ionic/ionic_lif.c               |   8 +-
 drivers/net/ionic/ionic_rxtx_sg.c           |   4 +-
 drivers/net/ionic/ionic_rxtx_simple.c       |   2 +-
 drivers/net/sfc/sfc_ef100_rx.c              |   8 +-
 drivers/net/sfc/sfc_ef10_rx.c               |  12 +--
 drivers/net/virtio/virtio_rxtx_packed_avx.h |   8 +-
 lib/mbuf/rte_mbuf_core.h                    | 135 +++++++++++++++-------------
 7 files changed, 94 insertions(+), 83 deletions(-)

diff --git a/drivers/net/ionic/ionic_lif.c b/drivers/net/ionic/ionic_lif.c
index 25b490d..fd99f39 100644
--- a/drivers/net/ionic/ionic_lif.c
+++ b/drivers/net/ionic/ionic_lif.c
@@ -725,8 +725,8 @@
 
        rte_compiler_barrier();
 
-       RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
-       return rxm.rearm_data[0];
+       RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data) != sizeof(uint64_t));
+       return rxm.rearm_data;
 }
 
 static uint64_t
@@ -743,8 +743,8 @@
 
        rte_compiler_barrier();
 
-       RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data[0]) != sizeof(uint64_t));
-       return rxm.rearm_data[0];
+       RTE_BUILD_BUG_ON(sizeof(rxm.rearm_data) != sizeof(uint64_t));
+       return rxm.rearm_data;
 }
 
 int
diff --git a/drivers/net/ionic/ionic_rxtx_sg.c 
b/drivers/net/ionic/ionic_rxtx_sg.c
index ab8e56e..a569dd1 100644
--- a/drivers/net/ionic/ionic_rxtx_sg.c
+++ b/drivers/net/ionic/ionic_rxtx_sg.c
@@ -285,7 +285,7 @@
        info[0] = NULL;
 
        /* Set the mbuf metadata based on the cq entry */
-       rxm->rearm_data[0] = rxq->rearm_data;
+       rxm->rearm_data = rxq->rearm_data;
        rxm->pkt_len = cq_desc_len;
        rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
        left = cq_desc_len - rxm->data_len;
@@ -298,7 +298,7 @@
                info[i] = NULL;
 
                /* Set the chained mbuf metadata */
-               rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
+               rxm_seg->rearm_data = rxq->rearm_seg_data;
                rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
                left -= rxm_seg->data_len;
 
diff --git a/drivers/net/ionic/ionic_rxtx_simple.c 
b/drivers/net/ionic/ionic_rxtx_simple.c
index 5f81856..1978610 100644
--- a/drivers/net/ionic/ionic_rxtx_simple.c
+++ b/drivers/net/ionic/ionic_rxtx_simple.c
@@ -256,7 +256,7 @@
        info[0] = NULL;
 
        /* Set the mbuf metadata based on the cq entry */
-       rxm->rearm_data[0] = rxq->rearm_data;
+       rxm->rearm_data = rxq->rearm_data;
        rxm->pkt_len = cq_desc_len;
        rxm->data_len = cq_desc_len;
 
diff --git a/drivers/net/sfc/sfc_ef100_rx.c b/drivers/net/sfc/sfc_ef100_rx.c
index 2677003..23918d5 100644
--- a/drivers/net/sfc/sfc_ef100_rx.c
+++ b/drivers/net/sfc/sfc_ef100_rx.c
@@ -553,9 +553,9 @@ struct sfc_ef100_rxq {
                pkt = sfc_ef100_rx_next_mbuf(rxq);
                __rte_mbuf_raw_sanity_check(pkt);
 
-               RTE_BUILD_BUG_ON(sizeof(pkt->rearm_data[0]) !=
+               RTE_BUILD_BUG_ON(sizeof(pkt->rearm_data) !=
                                 sizeof(rxq->rearm_data));
-               pkt->rearm_data[0] = rxq->rearm_data;
+               pkt->rearm_data = rxq->rearm_data;
 
                /* data_off already moved past Rx prefix */
                rx_prefix = (const efx_xword_t *)sfc_ef100_rx_pkt_prefix(pkt);
@@ -759,8 +759,8 @@ struct sfc_ef100_rxq {
 
        /* rearm_data covers structure members filled in above */
        rte_compiler_barrier();
-       RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
-       return m.rearm_data[0];
+       RTE_BUILD_BUG_ON(sizeof(m.rearm_data) != sizeof(uint64_t));
+       return m.rearm_data;
 }
 
 static sfc_dp_rx_qcreate_t sfc_ef100_rx_qcreate;
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 30a320d..60bc098 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -322,8 +322,8 @@ struct sfc_ef10_rxq {
 
        m = rxd->mbuf;
 
-       RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
-       m->rearm_data[0] = rxq->rearm_data;
+       RTE_BUILD_BUG_ON(sizeof(m->rearm_data) != sizeof(rxq->rearm_data));
+       m->rearm_data = rxq->rearm_data;
 
        /* Classify packet based on Rx event */
        /* Mask RSS hash offload flag if RSS is not enabled */
@@ -377,9 +377,9 @@ struct sfc_ef10_rxq {
                        rxq->completed = pending;
                }
 
-               RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
+               RTE_BUILD_BUG_ON(sizeof(m->rearm_data) !=
                                 sizeof(rxq->rearm_data));
-               m->rearm_data[0] = rxq->rearm_data;
+               m->rearm_data = rxq->rearm_data;
 
                /* Event-dependent information is the same */
                m->ol_flags = m0->ol_flags;
@@ -633,8 +633,8 @@ struct sfc_ef10_rxq {
 
        /* rearm_data covers structure members filled in above */
        rte_compiler_barrier();
-       RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
-       return m.rearm_data[0];
+       RTE_BUILD_BUG_ON(sizeof(m.rearm_data) != sizeof(uint64_t));
+       return m.rearm_data;
 }
 
 static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.h 
b/drivers/net/virtio/virtio_rxtx_packed_avx.h
index 584ac72..a9ce53f 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_avx.h
+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.h
@@ -36,10 +36,10 @@
        /* Load four mbufs rearm data */
        RTE_BUILD_BUG_ON(REFCNT_BITS_OFFSET >= 64);
        RTE_BUILD_BUG_ON(SEG_NUM_BITS_OFFSET >= 64);
-       __m256i mbufs = _mm256_set_epi64x(*tx_pkts[3]->rearm_data,
-                                         *tx_pkts[2]->rearm_data,
-                                         *tx_pkts[1]->rearm_data,
-                                         *tx_pkts[0]->rearm_data);
+       __m256i mbufs = _mm256_set_epi64x(tx_pkts[3]->rearm_data,
+                                         tx_pkts[2]->rearm_data,
+                                         tx_pkts[1]->rearm_data,
+                                         tx_pkts[0]->rearm_data);
 
        /* refcnt=1 and nb_segs=1 */
        __m256i mbuf_ref = _mm256_set1_epi64x(DEFAULT_REARM_DATA);
diff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h
index 5688683..d731ea0 100644
--- a/lib/mbuf/rte_mbuf_core.h
+++ b/lib/mbuf/rte_mbuf_core.h
@@ -464,9 +464,10 @@ enum {
  * The generic rte_mbuf, containing a packet mbuf.
  */
 struct rte_mbuf {
-       RTE_MARKER cacheline0;
-
-       void *buf_addr;           /**< Virtual address of segment buffer. */
+       union {
+           void *cacheline0;
+           void *buf_addr;           /**< Virtual address of segment buffer. */
+       };
 #if RTE_IOVA_IN_MBUF
        /**
         * Physical address of segment buffer.
@@ -487,69 +488,77 @@ struct rte_mbuf {
 #endif
 
        /* next 8 bytes are initialised on RX descriptor rearm */
-       RTE_MARKER64 rearm_data;
-       uint16_t data_off;
-
-       /**
-        * Reference counter. Its size should at least equal to the size
-        * of port field (16 bits), to support zero-copy broadcast.
-        * It should only be accessed using the following functions:
-        * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
-        * rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
-        * or non-atomic) is controlled by the RTE_MBUF_REFCNT_ATOMIC flag.
-        */
-       RTE_ATOMIC(uint16_t) refcnt;
+       union {
+               uint64_t rearm_data;
+               struct {
+                       uint16_t data_off;
+
+                       /**
+                        * Reference counter. Its size should at least equal to 
the size
+                        * of port field (16 bits), to support zero-copy 
broadcast.
+                        * It should only be accessed using the following 
functions:
+                        * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
+                        * rte_mbuf_refcnt_set(). The functionality of these 
functions (atomic,
+                        * or non-atomic) is controlled by the 
RTE_MBUF_REFCNT_ATOMIC flag.
+                        */
+                       RTE_ATOMIC(uint16_t) refcnt;
 
-       /**
-        * Number of segments. Only valid for the first segment of an mbuf
-        * chain.
-        */
-       uint16_t nb_segs;
+                       /**
+                        * Number of segments. Only valid for the first segment 
of an mbuf
+                        * chain.
+                        */
+                       uint16_t nb_segs;
 
-       /** Input port (16 bits to support more than 256 virtual ports).
-        * The event eth Tx adapter uses this field to specify the output port.
-        */
-       uint16_t port;
+                       /** Input port (16 bits to support more than 256 
virtual ports).
+                        * The event eth Tx adapter uses this field to specify 
the output port.
+                        */
+                       uint16_t port;
 
-       uint64_t ol_flags;        /**< Offload features. */
+                       uint64_t ol_flags;        /**< Offload features. */
+               };
+       };
 
        /* remaining bytes are set on RX when pulling packet from descriptor */
-       RTE_MARKER rx_descriptor_fields1;
-
-       /*
-        * The packet type, which is the combination of outer/inner L2, L3, L4
-        * and tunnel types. The packet_type is about data really present in the
-        * mbuf. Example: if vlan stripping is enabled, a received vlan packet
-        * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
-        * vlan is stripped from the data.
-        */
        union {
-               uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
-               __extension__
+               void *rx_descriptor_fields1;
+
+               /*
+                * The packet type, which is the combination of outer/inner L2, 
L3, L4
+                * and tunnel types. The packet_type is about data really 
present in the
+                * mbuf. Example: if vlan stripping is enabled, a received vlan 
packet
+                * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN 
because the
+                * vlan is stripped from the data.
+                */
                struct {
-                       uint8_t l2_type:4;   /**< (Outer) L2 type. */
-                       uint8_t l3_type:4;   /**< (Outer) L3 type. */
-                       uint8_t l4_type:4;   /**< (Outer) L4 type. */
-                       uint8_t tun_type:4;  /**< Tunnel type. */
                        union {
-                               uint8_t inner_esp_next_proto;
-                               /**< ESP next protocol type, valid if
-                                * RTE_PTYPE_TUNNEL_ESP tunnel type is set
-                                * on both Tx and Rx.
-                                */
+                               uint32_t packet_type; /**< L2/L3/L4 and tunnel 
information. */
                                __extension__
                                struct {
-                                       uint8_t inner_l2_type:4;
-                                       /**< Inner L2 type. */
-                                       uint8_t inner_l3_type:4;
-                                       /**< Inner L3 type. */
+                                       uint8_t l2_type:4;   /**< (Outer) L2 
type. */
+                                       uint8_t l3_type:4;   /**< (Outer) L3 
type. */
+                                       uint8_t l4_type:4;   /**< (Outer) L4 
type. */
+                                       uint8_t tun_type:4;  /**< Tunnel type. 
*/
+                                       union {
+                                               uint8_t inner_esp_next_proto;
+                                               /**< ESP next protocol type, 
valid if
+                                                * RTE_PTYPE_TUNNEL_ESP tunnel 
type is set
+                                                * on both Tx and Rx.
+                                                */
+                                               __extension__
+                                               struct {
+                                                       uint8_t inner_l2_type:4;
+                                                       /**< Inner L2 type. */
+                                                       uint8_t inner_l3_type:4;
+                                                       /**< Inner L3 type. */
+                                               };
+                                       };
+                                       uint8_t inner_l4_type:4; /**< Inner L4 
type. */
                                };
                        };
-                       uint8_t inner_l4_type:4; /**< Inner L4 type. */
+                       uint32_t pkt_len;         /**< Total pkt len: sum of 
all segments. */
                };
        };
 
-       uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
        uint16_t data_len;        /**< Amount of data in segment buffer. */
        /** VLAN TCI (CPU order), valid if RTE_MBUF_F_RX_VLAN is set. */
        uint16_t vlan_tci;
@@ -595,21 +604,23 @@ struct rte_mbuf {
        struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
 
        /* second cache line - fields only used in slow path or on TX */
-       RTE_MARKER cacheline1 __rte_cache_min_aligned;
+       union {
+               void *cacheline1;
 
 #if RTE_IOVA_IN_MBUF
-       /**
-        * Next segment of scattered packet. Must be NULL in the last
-        * segment or in case of non-segmented packet.
-        */
-       struct rte_mbuf *next;
+               /**
+                * Next segment of scattered packet. Must be NULL in the last
+                * segment or in case of non-segmented packet.
+                */
+               struct rte_mbuf *next;
 #else
-       /**
-        * Reserved for dynamic fields
-        * when the next pointer is in first cache line (i.e. RTE_IOVA_IN_MBUF 
is 0).
-        */
-       uint64_t dynfield2;
+               /**
+                * Reserved for dynamic fields
+                * when the next pointer is in first cache line (i.e. 
RTE_IOVA_IN_MBUF is 0).
+                */
+               uint64_t dynfield2;
 #endif
+       };
 
        /* fields to support TX offloads */
        union {
-- 
1.8.3.1

Reply via email to