On 2021-10-27 09:28, Aman Kumar wrote:
> This patch provides a rte_memcpy* call with temporal stores.
> Use -Dcpu_instruction_set=znverX with build to enable this API.
>
> Signed-off-by: Aman Kumar <aman.ku...@vvdntech.in>
> ---
>  config/x86/meson.build           |   2 +
>  lib/eal/x86/include/rte_memcpy.h | 114 +++++++++++++++++++++++++++++++
>  2 files changed, 116 insertions(+)
>
> diff --git a/config/x86/meson.build b/config/x86/meson.build
> index 21cda6fd33..56dae4aca7 100644
> --- a/config/x86/meson.build
> +++ b/config/x86/meson.build
> @@ -78,6 +78,8 @@ if get_option('cpu_instruction_set') == 'znver1'
>      dpdk_conf.set('RTE_MAX_LCORE', 256)
>  elif get_option('cpu_instruction_set') == 'znver2'
>      dpdk_conf.set('RTE_MAX_LCORE', 512)
> +    dpdk_conf.set('RTE_MEMCPY_AMDEPYC', 1)
>  elif get_option('cpu_instruction_set') == 'znver3'
>      dpdk_conf.set('RTE_MAX_LCORE', 512)
> +    dpdk_conf.set('RTE_MEMCPY_AMDEPYC', 1)
>  endif
> diff --git a/lib/eal/x86/include/rte_memcpy.h b/lib/eal/x86/include/rte_memcpy.h
> index 1b6c6e585f..8fe7822cb4 100644
> --- a/lib/eal/x86/include/rte_memcpy.h
> +++ b/lib/eal/x86/include/rte_memcpy.h
> @@ -376,6 +376,120 @@ rte_mov128blocks(uint8_t *dst, const uint8_t *src, size_t n)
>  	}
>  }
>
> +#if defined RTE_MEMCPY_AMDEPYC
> +
> +/**
> + * Copy 16 bytes from one location to another,
> + * with temporal stores
> + */
> +static __rte_always_inline void
> +rte_copy16_ts(uint8_t *dst, uint8_t *src)
> +{
> +	__m128i var128;
> +
> +	var128 = _mm_stream_load_si128((__m128i *)src);
> +	_mm_storeu_si128((__m128i *)dst, var128);
> +}
> +
> +/**
> + * Copy 32 bytes from one location to another,
> + * with temporal stores
> + */
> +static __rte_always_inline void
> +rte_copy32_ts(uint8_t *dst, uint8_t *src)
> +{
> +	__m256i ymm0;
> +
> +	ymm0 = _mm256_stream_load_si256((const __m256i *)src);
> +	_mm256_storeu_si256((__m256i *)dst, ymm0);
> +}
> +
> +/**
> + * Copy 64 bytes from one location to another,
> + * with temporal stores
> + */
> +static __rte_always_inline void
> +rte_copy64_ts(uint8_t *dst, uint8_t *src)
> +{
> +	rte_copy32_ts(dst + 0 * 32, src + 0 * 32);
> +	rte_copy32_ts(dst + 1 * 32, src + 1 * 32);
> +}
> +
> +/**
> + * Copy 128 bytes from one location to another,
> + * with temporal stores
> + */
> +static __rte_always_inline void
> +rte_copy128_ts(uint8_t *dst, uint8_t *src)
> +{
> +	rte_copy32_ts(dst + 0 * 32, src + 0 * 32);
> +	rte_copy32_ts(dst + 1 * 32, src + 1 * 32);
> +	rte_copy32_ts(dst + 2 * 32, src + 2 * 32);
> +	rte_copy32_ts(dst + 3 * 32, src + 3 * 32);
> +}
> +
> +/**
> + * Copy len bytes from one location to another,
> + * with temporal stores 16B aligned
> + */
> +static __rte_always_inline void *
> +rte_memcpy_aligned_tstore16_generic(void *dst, void *src, int len)
> +{
> +	void *dest = dst;
> +
> +	while (len >= 128) {
> +		rte_copy128_ts((uint8_t *)dst, (uint8_t *)src);
> +		dst = (uint8_t *)dst + 128;
> +		src = (uint8_t *)src + 128;
> +		len -= 128;
> +	}
> +	while (len >= 64) {
> +		rte_copy64_ts((uint8_t *)dst, (uint8_t *)src);
> +		dst = (uint8_t *)dst + 64;
> +		src = (uint8_t *)src + 64;
> +		len -= 64;
> +	}
> +	while (len >= 32) {
> +		rte_copy32_ts((uint8_t *)dst, (uint8_t *)src);
> +		dst = (uint8_t *)dst + 32;
> +		src = (uint8_t *)src + 32;
> +		len -= 32;
> +	}
> +	if (len >= 16) {
> +		rte_copy16_ts((uint8_t *)dst, (uint8_t *)src);
> +		dst = (uint8_t *)dst + 16;
> +		src = (uint8_t *)src + 16;
> +		len -= 16;
> +	}
> +	if (len >= 8) {
> +		*(uint64_t *)dst = *(const uint64_t *)src;
> +		dst = (uint8_t *)dst + 8;
> +		src = (uint8_t *)src + 8;
> +		len -= 8;
> +	}
> +	if (len >= 4) {
> +		*(uint32_t *)dst = *(const uint32_t *)src;
> +		dst = (uint8_t *)dst + 4;
> +		src = (uint8_t *)src + 4;
> +		len -= 4;
> +	}
> +	if (len != 0) {
> +		dst = (uint8_t *)dst - (4 - len);
> +		src = (uint8_t *)src - (4 - len);
> +		*(uint32_t *)dst = *(const uint32_t *)src;
> +	}
> +
> +	return dest;
Don't you need a _mm_sfence() after the NT stores, to avoid surprises
(e.g., if you use this NT memcpy() in combination with DPDK rings)? NT
stores are weakly ordered on x86_64, from what I understand. There is a
sketch of what I mean at the end of this mail.

> +}
> +
> +static __rte_always_inline void *
> +rte_memcpy_aligned_tstore16(void *dst, void *src, int len)

Shouldn't both dst and src be marked __restrict? Goes for all these
functions. (Second sketch below.)

> +{
> +	return rte_memcpy_aligned_tstore16_generic(dst, src, len);
> +}
> +
> +#endif /* RTE_MEMCPY_AMDEPYC */

What do x86_64 NT stores have to do with EPYC? Nothing in these
routines is EPYC-specific. (Third sketch below.)

> +
>  static __rte_always_inline void *
>  rte_memcpy_generic(void *dst, const void *src, size_t n)
>  {
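To make the fence point concrete, here is a rough, untested sketch.
nt_memcpy_release() is a made-up name, and I am assuming the API
otherwise stays exactly as in the patch:

#include <immintrin.h> /* _mm_sfence() */

/* Hypothetical wrapper: do the copy, then drain the write-combining
 * buffers, so that a consumer which observes a subsequent ring
 * enqueue also observes the copied data. */
static __rte_always_inline void *
nt_memcpy_release(void *dst, void *src, int len)
{
	void *ret = rte_memcpy_aligned_tstore16(dst, src, len);

	/* NT stores are not ordered with respect to the ordinary
	 * (or locked) stores an rte_ring enqueue uses, so if the copy
	 * used NT stores, fence before publishing the buffer. */
	_mm_sfence();

	return ret;
}

Either that, or the ordering requirement should at least be spelled out
in the API documentation, so callers know to fence themselves.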
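On __restrict: rte_common.h has an __rte_restrict wrapper, if I
remember correctly (plain __restrict would also do). Taking
rte_copy16_ts() as an example, this is what I had in mind:

static __rte_always_inline void
rte_copy16_ts(uint8_t *__rte_restrict dst, uint8_t *__rte_restrict src)
{
	__m128i var128;

	/* restrict lets the compiler assume dst and src do not
	 * alias, which it cannot prove by itself through the casts */
	var128 = _mm_stream_load_si128((__m128i *)src);
	_mm_storeu_si128((__m128i *)dst, var128);
}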
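And on the naming: the stream loads/stores above are plain SSE4.1/AVX2
and run on any x86_64 CPU advertising those ISAs, so the meson gate
could use a vendor-neutral define. RTE_MEMCPY_NT below is just a
placeholder name:

# hypothetical alternative to the znver2/znver3-only lines in the
# patch: any x86 build that wants NT-store copies could set this
dpdk_conf.set('RTE_MEMCPY_NT', 1)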