RTE_MARKER typedefs are a GCC extension that is not supported by MSVC. Use the new rte_mbuf_rearm_data and rte_mbuf_rx_descriptor_fields1 accessors, which provide compatibly typed pointers without referencing the marker fields.
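For context, the two accessors are expected to behave like the sketch below: each returns a pointer to the first real field that the corresponding RTE_MARKER used to label, so the vector stores keep writing to the same byte offsets. This is only an illustrative sketch, assuming rearm_data starts at data_off and rx_descriptor_fields1 starts at packet_type in struct rte_mbuf; the authoritative definitions live in the mbuf library.

    #include <rte_mbuf_core.h>

    /* Sketch only: assumes the marker layout described above,
     * not the exact upstream implementation.
     */
    static inline uint64_t *
    rte_mbuf_rearm_data(struct rte_mbuf *m)
    {
        return (uint64_t *)&m->data_off;
    }

    static inline void *
    rte_mbuf_rx_descriptor_fields1(struct rte_mbuf *m)
    {
        return &m->packet_type;
    }

With such accessors, a marker-based store such as
    _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
becomes
    _mm_store_si128((__m128i *)rte_mbuf_rearm_data(rx_pkts[0]), rearm0);
and compiles on both GCC and MSVC.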
Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 drivers/net/ice/ice_rxtx_vec_avx2.c   | 36 +++++++---------------------
 drivers/net/ice/ice_rxtx_vec_avx512.c | 37 +++++++----------------------
 drivers/net/ice/ice_rxtx_vec_common.h |  4 +---
 drivers/net/ice/ice_rxtx_vec_sse.c    | 44 +++++++----------------------------
 4 files changed, 25 insertions(+), 96 deletions(-)

diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c
index 6f6d790..853f99b 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx2.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx2.c
@@ -114,19 +114,6 @@
 			0xFF, 0xFF,	/* pkt_type set as unknown */
 			0xFF, 0xFF	/*pkt_type set as unknown */
 			);
-	/**
-	 * compile-time check the above crc and shuffle layout is correct.
-	 * NOTE: the first field (lowest address) is given last in set_epi
-	 * calls above.
-	 */
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
 
 	/* Status/Error flag masks */
 	/**
@@ -570,13 +557,6 @@
 		 * add in the previously computed rx_descriptor fields to
 		 * make a single 256-bit write per mbuf
 		 */
-		/* check the structure matches expectations */
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
-				offsetof(struct rte_mbuf, rearm_data) + 8);
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
-				RTE_ALIGN(offsetof(struct rte_mbuf,
-						rearm_data),
-					16));
 
 		/* build up data and do writes */
 		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, rearm6, rearm7;
@@ -596,13 +576,13 @@
 		rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20);
 		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
 		/* write to mbuf */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 6]),
 				    rearm6);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 4]),
 				    rearm4);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 2]),
 				    rearm2);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 0]),
 				    rearm0);
 
 		/* repeat for the odd mbufs */
@@ -625,13 +605,13 @@
 		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
 		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
 		/* again write to mbufs */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 7]),
 				    rearm7);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 5]),
 				    rearm5);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 3]),
 				    rearm3);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 1]),
 				    rearm1);
 
 		/* extract and record EOP bit */
diff --git a/drivers/net/ice/ice_rxtx_vec_avx512.c b/drivers/net/ice/ice_rxtx_vec_avx512.c
index 04148e8..6d04bf7 100644
--- a/drivers/net/ice/ice_rxtx_vec_avx512.c
+++ b/drivers/net/ice/ice_rxtx_vec_avx512.c
@@ -100,20 +100,6 @@
 			0xFFFFFFFF
 			);
 
-	/**
-	 * compile-time check the above crc and shuffle layout is correct.
-	 * NOTE: the first field (lowest address) is given last in set_epi
-	 * calls above.
-	 */
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
-
 	/* following code block is for Rx Checksum Offload */
 	/* Status/Error flag masks */
 	/**
@@ -568,13 +554,6 @@
 		 * add in the previously computed rx_descriptor fields to
 		 * make a single 256-bit write per mbuf
 		 */
-		/* check the structure matches expectations */
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
-				offsetof(struct rte_mbuf, rearm_data) + 8);
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
-				RTE_ALIGN(offsetof(struct rte_mbuf,
-						rearm_data),
-					16));
 
 		/* build up data and do writes */
 		__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, rearm6, rearm7;
@@ -597,13 +576,13 @@
 		rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20);
 
 		/* write to mbuf */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 6]),
 				    rearm6);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 4]),
 				    rearm4);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 2]),
 				    rearm2);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 0]),
 				    rearm0);
 
 		/* repeat for the odd mbufs */
@@ -627,13 +606,13 @@
 		rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0);
 		rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0);
 		/* again write to mbufs */
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 7]),
 				    rearm7);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 5]),
 				    rearm5);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 3]),
 				    rearm3);
-		_mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data,
+		_mm256_storeu_si256((__m256i *)rte_mbuf_rearm_data(rx_pkts[i + 1]),
 				    rearm1);
 
 		/* extract and record EOP bit */
diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 4b73465..c284d2d 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -232,7 +232,6 @@
 static inline int
 ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 {
-	uintptr_t p;
 	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
 
 	mb_def.nb_segs = 1;
@@ -242,8 +241,7 @@
 
 	/* prevent compiler reordering: rearm_data covers previous fields */
 	rte_compiler_barrier();
-	p = (uintptr_t)&mb_def.rearm_data;
-	rxq->mbuf_initializer = *(uint64_t *)p;
+	rxq->mbuf_initializer = *rte_mbuf_rearm_data(&mb_def);
 	return 0;
 }
diff --git a/drivers/net/ice/ice_rxtx_vec_sse.c b/drivers/net/ice/ice_rxtx_vec_sse.c
index 9a1b7e3..ed9928b 100644
--- a/drivers/net/ice/ice_rxtx_vec_sse.c
+++ b/drivers/net/ice/ice_rxtx_vec_sse.c
@@ -267,14 +267,10 @@
 	rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x30);
 
 	/* write the rearm data and the olflags in one write */
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
-			offsetof(struct rte_mbuf, rearm_data) + 8);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) !=
-			RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16));
-	_mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
-	_mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
-	_mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
-	_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+	_mm_store_si128((__m128i *)rte_mbuf_rearm_data(rx_pkts[0]), rearm0);
+	_mm_store_si128((__m128i *)rte_mbuf_rearm_data(rx_pkts[1]), rearm1);
+	_mm_store_si128((__m128i *)rte_mbuf_rearm_data(rx_pkts[2]), rearm2);
+	_mm_store_si128((__m128i *)rte_mbuf_rearm_data(rx_pkts[3]), rearm3);
 }
 
 static inline void
@@ -342,16 +338,6 @@
 			0x04, 0x0C,
 			0x00, 0x08);
 
-	/**
-	 * compile-time check the above crc_adjust layout is correct.
-	 * NOTE: the first field (lowest address) is given last in set_epi16
-	 * call above.
-	 */
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
-
 	/* 4 packets DD mask */
 	const __m128i dd_check = _mm_set_epi64x(0x0000000100000001LL,
 						0x0000000100000001LL);
@@ -382,20 +368,6 @@
 			rte_cpu_to_le_32(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S)))
 		return 0;
 
-	/**
-	 * Compile-time verify the shuffle mask
-	 * NOTE: some field positions already verified above, but duplicated
-	 * here for completeness in case of future modifications.
-	 */
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10);
-	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) !=
-			offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12);
-
 	/* Cache is empty -> need to scan the buffer rings, but first move
 	 * the next 'n' mbufs into the cache
 	 */
@@ -542,10 +514,10 @@
 
 		/* D.3 copy final 3,4 data to rx_pkts */
 		_mm_storeu_si128
-			((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+			(rte_mbuf_rx_descriptor_fields1(rx_pkts[pos + 3]),
 			 pkt_mb3);
 		_mm_storeu_si128
-			((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+			(rte_mbuf_rx_descriptor_fields1(rx_pkts[pos + 2]),
 			 pkt_mb2);
 
 		/* C* extract and record EOP bit */
@@ -569,9 +541,9 @@
 
 		/* D.3 copy final 1,2 data to rx_pkts */
 		_mm_storeu_si128
-			((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+			(rte_mbuf_rx_descriptor_fields1(rx_pkts[pos + 1]),
 			 pkt_mb1);
-		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+		_mm_storeu_si128(rte_mbuf_rx_descriptor_fields1(rx_pkts[pos]),
 			 pkt_mb0);
 		ice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
 		/* C.4 calc available number of desc */
-- 
1.8.3.1