> Inline assembly is not supported for MSVC x64; use the
> _mm_{s,l,m}fence() intrinsics instead.
> 
> Signed-off-by: Tyler Retzlaff <roret...@linux.microsoft.com>
> ---
>  lib/eal/include/generic/rte_atomic.h |  4 ++++
>  lib/eal/x86/include/rte_atomic.h     | 10 +++++++++-
>  2 files changed, 13 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
> index 234b268..e973184 100644
> --- a/lib/eal/include/generic/rte_atomic.h
> +++ b/lib/eal/include/generic/rte_atomic.h
> @@ -116,9 +116,13 @@
>   * Guarantees that operation reordering does not occur at compile time
>   * for operations directly before and after the barrier.
>   */
> +#ifndef RTE_TOOLCHAIN_MSVC
>  #define      rte_compiler_barrier() do {             \
>       asm volatile ("" : : : "memory");       \
>  } while(0)
> +#else
> +#define rte_compiler_barrier() _ReadWriteBarrier()
> +#endif
> 
>  /**
>   * Synchronization fence between threads based on the specified memory order.
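
As an aside (not part of the patch): a compiler-only barrier can also
be expressed portably in C11, which would sidestep the toolchain split
entirely. A minimal sketch, assuming <stdatomic.h> is available (which
may not hold for older MSVC versions):

/* Illustrative only: a portable compiler barrier via C11.
 * atomic_signal_fence() constrains compiler reordering but emits no
 * fence instruction, matching both the asm ("" ::: "memory") and the
 * _ReadWriteBarrier() branches above. */
#include <stdatomic.h>

#define portable_compiler_barrier() \
	atomic_signal_fence(memory_order_seq_cst)
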
> diff --git a/lib/eal/x86/include/rte_atomic.h b/lib/eal/x86/include/rte_atomic.h
> index f2ee1a9..7ae3a41 100644
> --- a/lib/eal/x86/include/rte_atomic.h
> +++ b/lib/eal/x86/include/rte_atomic.h
> @@ -27,9 +27,13 @@
> 
>  #define      rte_rmb() _mm_lfence()
> 
> +#ifndef RTE_TOOLCHAIN_MSVC
>  #define rte_smp_wmb() rte_compiler_barrier()
> -
>  #define rte_smp_rmb() rte_compiler_barrier()
> +#else
> +#define rte_smp_wmb() _mm_sfence()
> +#define rte_smp_rmb() _mm_lfence()

With the x86 memory model the CPU doesn't reorder reads with older
reads or writes with older writes (there are a few exceptions for
writes: NT stores, fast string ops, but I think they can be skipped
here).
For more info please refer to: IA Software Developer's Manual, Vol. 3,
Section 8.2, Memory Ordering.
That's why DPDK uses compiler_barrier() as the expansion of smp_wmb()
and smp_rmb() on x86 platforms.
There is nothing wrong with using sfence and lfence here, except that
it is probably overkill.
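
To illustrate the point, here is a minimal single-producer/
single-consumer sketch (the names are mine, not from the patch): on
x86 the two stores in publish() are not reordered with each other, and
the two loads in consume() are not reordered with each other, so
stopping the compiler from reordering them is all that is required.

/* Hypothetical SPSC example; assumes rte_atomic.h is included. */
#include <stdint.h>

static uint32_t data;
static volatile int ready;

static void
publish(uint32_t v)
{
	data = v;
	rte_smp_wmb();	/* stores are not reordered with older stores */
	ready = 1;
}

static int
consume(uint32_t *out)
{
	if (!ready)
		return 0;
	rte_smp_rmb();	/* loads are not reordered with older loads */
	*out = data;
	return 1;
}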

> +#endif
> 
>  /*
>   * From Intel Software Development Manual; Vol 3;
> @@ -66,11 +70,15 @@
>  static __rte_always_inline void
>  rte_smp_mb(void)
>  {
> +#ifndef RTE_TOOLCHAIN_MSVC
>  #ifdef RTE_ARCH_I686
>       asm volatile("lock addl $0, -128(%%esp); " ::: "memory");
>  #else
>       asm volatile("lock addl $0, -128(%%rsp); " ::: "memory");
>  #endif
> +#else
> +     _mm_mfence();
> +#endif
>  }
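
A side note on this hunk: the GCC branch deliberately uses a locked
RMW on a dummy stack slot because any locked instruction is a full
memory barrier on x86 and is usually cheaper than mfence. If the same
trick were wanted under MSVC, something like the sketch below could
work (my illustration, not part of the patch; _InterlockedOr is the
documented MSVC intrinsic):

/* Illustrative alternative to _mm_mfence() under MSVC: a locked
 * read-modify-write on a private stack location is a full memory
 * barrier on x86, mirroring the "lock addl" asm in the GCC branch. */
#include <intrin.h>

static __forceinline void
smp_mb_locked(void)
{
	long dummy = 0;
	_InterlockedOr(&dummy, 0);
}
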
> 
>  #define rte_io_mb() rte_mb()
> --
> 1.8.3.1
