Inline assembly is not supported for MSVC x64; instead, expand rte_compiler_barrier as the _ReadWriteBarrier intrinsic and rte_smp_mb as the _mm_mfence intrinsic.
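For readers outside the DPDK tree, a minimal standalone sketch of the selection pattern this patch applies follows; _MSC_VER stands in here for DPDK's RTE_TOOLCHAIN_MSVC, and the compiler_barrier/smp_mb names are hypothetical illustrations, not DPDK API:

    /* Pick barrier implementations per toolchain. A compiler barrier
     * only stops the compiler from reordering memory accesses across
     * it; a full memory barrier (mfence, or a locked add) also orders
     * the accesses on the CPU.
     */
    #ifdef _MSC_VER
    #include <intrin.h>     /* _ReadWriteBarrier() */
    #include <emmintrin.h>  /* _mm_mfence() */
    #define compiler_barrier() _ReadWriteBarrier()
    #define smp_mb()           _mm_mfence()
    #else
    #define compiler_barrier() asm volatile("" ::: "memory")
    #define smp_mb() \
        asm volatile("lock addl $0, -128(%%rsp)" ::: "memory")
    #endif

The MSVC branch must use intrinsics because cl.exe rejects inline asm on x64; the GNU branch keeps the extended-asm forms that the existing DPDK code uses.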
Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
Acked-by: Bruce Richardson <bruce.richard...@intel.com>
Acked-by: Konstantin Ananyev <konstantin.v.anan...@yandex.ru>
Acked-by: Morten Brørup <m...@smartsharesystems.com>
---
 lib/eal/include/generic/rte_atomic.h | 4 ++++
 lib/eal/x86/include/rte_atomic.h     | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
index 58df843..6a6c394 100644
--- a/lib/eal/include/generic/rte_atomic.h
+++ b/lib/eal/include/generic/rte_atomic.h
@@ -116,9 +116,13 @@
  * Guarantees that operation reordering does not occur at compile time
  * for operations directly before and after the barrier.
  */
+#ifdef RTE_TOOLCHAIN_MSVC
+#define rte_compiler_barrier() _ReadWriteBarrier()
+#else
 #define rte_compiler_barrier() do {             \
         asm volatile ("" : : : "memory");       \
 } while(0)
+#endif
 
 /**
  * Synchronization fence between threads based on the specified memory order.
diff --git a/lib/eal/x86/include/rte_atomic.h b/lib/eal/x86/include/rte_atomic.h
index f2ee1a9..7aba1c3 100644
--- a/lib/eal/x86/include/rte_atomic.h
+++ b/lib/eal/x86/include/rte_atomic.h
@@ -66,11 +66,15 @@
 static __rte_always_inline void
 rte_smp_mb(void)
 {
+#ifdef RTE_TOOLCHAIN_MSVC
+	_mm_mfence();
+#else
 #ifdef RTE_ARCH_I686
 	asm volatile("lock addl $0, -128(%%esp); " ::: "memory");
 #else
 	asm volatile("lock addl $0, -128(%%rsp); " ::: "memory");
 #endif
+#endif
 }
 
 #define rte_io_mb() rte_mb()
--
1.8.3.1