* Expand __rte_aligned(a) to __declspec(align(a)) when building with MSVC.

* Move __rte_aligned from the end of {struct,union} definitions to
  between {struct,union} and the tag. Placing it between {struct,union}
  and the tag imparts the desired alignment on the type with all of
  GCC, LLVM and MSVC, when building either C or C++.

* Replace use of __rte_aligned(a) on variables/fields with alignas(a).

Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
---
 lib/eal/arm/include/rte_vect.h       | 4 ++--
 lib/eal/include/generic/rte_atomic.h | 4 ++--
 lib/eal/include/rte_common.h         | 2 +-
 lib/eal/loongarch/include/rte_vect.h | 8 ++++----
 lib/eal/ppc/include/rte_vect.h       | 4 ++--
 lib/eal/riscv/include/rte_vect.h     | 4 ++--
 lib/eal/x86/include/rte_vect.h       | 4 ++--
 7 files changed, 15 insertions(+), 15 deletions(-)
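For reviewers, a minimal sketch of why the relocated specifier works
across toolchains. The macro definitions mirror rte_common.h after this
patch; demo_xmm and scratch_buf are hypothetical names used only for
illustration:

#include <stdint.h>
#include <stdalign.h>

#ifdef RTE_TOOLCHAIN_MSVC
#define __rte_aligned(a) __declspec(align(a))
#else
#define __rte_aligned(a) __attribute__((__aligned__(a)))
#endif

/* Between the {struct,union} keyword and the tag, the specifier is
 * accepted by GCC, LLVM and MSVC in both C and C++; the old trailing
 * placement is a GNU attribute position that __declspec does not
 * support. */
typedef union __rte_aligned(16) demo_xmm {
	uint8_t u8[16];
	uint64_t u64[2];
} demo_xmm_t;

/* On variables and fields, standard C11 alignas() replaces
 * __rte_aligned(); illustrative only. */
alignas(16) uint8_t scratch_buf[16];

The same placement also covers tag-less definitions such as the
rte_int128_t typedef below, since the specifier precedes the (omitted)
tag rather than trailing the closing brace.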
diff --git a/lib/eal/arm/include/rte_vect.h b/lib/eal/arm/include/rte_vect.h
index 8cfe4bd..c97d299 100644
--- a/lib/eal/arm/include/rte_vect.h
+++ b/lib/eal/arm/include/rte_vect.h
@@ -24,14 +24,14 @@
 #define XMM_SIZE (sizeof(xmm_t))
 #define XMM_MASK (XMM_SIZE - 1)
 
-typedef union rte_xmm {
+typedef union __rte_aligned(16) rte_xmm {
 	xmm_t x;
 	uint8_t u8[XMM_SIZE / sizeof(uint8_t)];
 	uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
 	uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
 	uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
 	double pd[XMM_SIZE / sizeof(double)];
-} __rte_aligned(16) rte_xmm_t;
+} rte_xmm_t;
 
 #if defined(RTE_ARCH_ARM) && defined(RTE_ARCH_32)
 /* NEON intrinsic vqtbl1q_u8() is not supported in ARMv7-A(AArch32) */
diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
index 0e639da..f859707 100644
--- a/lib/eal/include/generic/rte_atomic.h
+++ b/lib/eal/include/generic/rte_atomic.h
@@ -1094,7 +1094,7 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
 /**
  * 128-bit integer structure.
  */
-typedef struct {
+typedef struct __rte_aligned(16) {
 	union {
 		uint64_t val[2];
 #ifdef RTE_ARCH_64
@@ -1103,7 +1103,7 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
 #endif
 #endif
 	};
-} __rte_aligned(16) rte_int128_t;
+} rte_int128_t;
 
 #ifdef __DOXYGEN__
diff --git a/lib/eal/include/rte_common.h b/lib/eal/include/rte_common.h
index d7d6390..8367b96 100644
--- a/lib/eal/include/rte_common.h
+++ b/lib/eal/include/rte_common.h
@@ -65,7 +65,7 @@
  * Force alignment
  */
 #ifdef RTE_TOOLCHAIN_MSVC
-#define __rte_aligned(a)
+#define __rte_aligned(a) __declspec(align(a))
 #else
 #define __rte_aligned(a) __attribute__((__aligned__(a)))
 #endif
diff --git a/lib/eal/loongarch/include/rte_vect.h b/lib/eal/loongarch/include/rte_vect.h
index 1546515..aa334e8 100644
--- a/lib/eal/loongarch/include/rte_vect.h
+++ b/lib/eal/loongarch/include/rte_vect.h
@@ -15,7 +15,7 @@
 
 #define RTE_VECT_DEFAULT_SIMD_BITWIDTH RTE_VECT_SIMD_DISABLED
 
-typedef union xmm {
+typedef union __rte_aligned(16) xmm {
 	int8_t i8[16];
 	int16_t i16[8];
 	int32_t i32[4];
@@ -25,19 +25,19 @@
 	uint32_t u32[4];
 	uint64_t u64[2];
 	double pd[2];
-} __rte_aligned(16) xmm_t;
+} xmm_t;
 
 #define XMM_SIZE (sizeof(xmm_t))
 #define XMM_MASK (XMM_SIZE - 1)
 
-typedef union rte_xmm {
+typedef union __rte_aligned(16) rte_xmm {
 	xmm_t x;
 	uint8_t u8[XMM_SIZE / sizeof(uint8_t)];
 	uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
 	uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
 	uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
 	double pd[XMM_SIZE / sizeof(double)];
-} __rte_aligned(16) rte_xmm_t;
+} rte_xmm_t;
 
 static inline xmm_t
 vect_load_128(void *p)
diff --git a/lib/eal/ppc/include/rte_vect.h b/lib/eal/ppc/include/rte_vect.h
index a5f009b..c8bace2 100644
--- a/lib/eal/ppc/include/rte_vect.h
+++ b/lib/eal/ppc/include/rte_vect.h
@@ -22,14 +22,14 @@
 #define XMM_SIZE (sizeof(xmm_t))
 #define XMM_MASK (XMM_SIZE - 1)
 
-typedef union rte_xmm {
+typedef union __rte_aligned(16) rte_xmm {
 	xmm_t x;
 	uint8_t u8[XMM_SIZE / sizeof(uint8_t)];
 	uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
 	uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
 	uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
 	double pd[XMM_SIZE / sizeof(double)];
-} __rte_aligned(16) rte_xmm_t;
+} rte_xmm_t;
 
 #ifdef __cplusplus
 }
diff --git a/lib/eal/riscv/include/rte_vect.h b/lib/eal/riscv/include/rte_vect.h
index da9092a..6df10fa 100644
--- a/lib/eal/riscv/include/rte_vect.h
+++ b/lib/eal/riscv/include/rte_vect.h
@@ -22,14 +22,14 @@
 #define XMM_SIZE (sizeof(xmm_t))
 #define XMM_MASK (XMM_SIZE - 1)
 
-typedef union rte_xmm {
+typedef union __rte_aligned(16) rte_xmm {
 	xmm_t x;
 	uint8_t u8[XMM_SIZE / sizeof(uint8_t)];
 	uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
 	uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
 	uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
 	double pd[XMM_SIZE / sizeof(double)];
-} __rte_aligned(16) rte_xmm_t;
+} rte_xmm_t;
 
 static inline xmm_t
 vect_load_128(void *p)
diff --git a/lib/eal/x86/include/rte_vect.h b/lib/eal/x86/include/rte_vect.h
index 560f9e4..a1a537e 100644
--- a/lib/eal/x86/include/rte_vect.h
+++ b/lib/eal/x86/include/rte_vect.h
@@ -91,7 +91,7 @@
 #define RTE_X86_ZMM_SIZE (sizeof(__m512i))
 #define RTE_X86_ZMM_MASK (RTE_X86_ZMM_SIZE - 1)
 
-typedef union __rte_x86_zmm {
+typedef union __rte_aligned(RTE_X86_ZMM_SIZE) __rte_x86_zmm {
 	__m512i z;
 	ymm_t y[RTE_X86_ZMM_SIZE / sizeof(ymm_t)];
 	xmm_t x[RTE_X86_ZMM_SIZE / sizeof(xmm_t)];
@@ -100,7 +100,7 @@
 	uint32_t u32[RTE_X86_ZMM_SIZE / sizeof(uint32_t)];
 	uint64_t u64[RTE_X86_ZMM_SIZE / sizeof(uint64_t)];
 	double pd[RTE_X86_ZMM_SIZE / sizeof(double)];
-} __rte_aligned(RTE_X86_ZMM_SIZE) __rte_x86_zmm_t;
+} __rte_x86_zmm_t;
 
 #endif /* __AVX512F__ */
-- 
1.8.3.1