Hi Xiaoyun, > This patch dynamically selects functions of memcpy at run-time based > on CPU flags that current machine supports. This patch uses function > pointers which are bind to the relative functions at constrctor time. > In addition, AVX512 instructions set would be compiled only if users > config it enabled and the compiler supports it. > > Signed-off-by: Xiaoyun Li <xiaoyun...@intel.com> > --- > v2 > * Use gcc function multi-versioning to avoid compilation issues. > * Add macros for AVX512 and AVX2. Only if users enable AVX512 and the > compiler supports it, the AVX512 codes would be compiled. Only if the > compiler supports AVX2, the AVX2 codes would be compiled. > > v3 > * Reduce function calls via only keep rte_memcpy_xxx. > * Add conditions that when copy size is small, use inline code path. > Otherwise, use dynamic code path. > * To support attribute target, clang version must be greater than 3.7. > Otherwise, would choose SSE/AVX code path, the same as before. > * Move two mocro functions to the top of the code since they would be > used in inline SSE/AVX and dynamic SSE/AVX codes. > > .../common/include/arch/x86/rte_memcpy.h | 1232 > ++++++++++++++++++-- > 1 file changed, 1135 insertions(+), 97 deletions(-) > > diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h > b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h > index 74c280c..ed6c412 100644 > --- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h > +++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h > @@ -45,6 +45,8 @@ > #include <string.h> > #include <rte_vect.h> > #include <rte_common.h> > +#include <rte_cpuflags.h> > +#include <rte_log.h> > > #ifdef __cplusplus > extern "C" { > @@ -68,6 +70,100 @@ extern "C" { > static __rte_always_inline void * > rte_memcpy(void *dst, const void *src, size_t n); > > +/** > + * Macro for copying unaligned block from one location to another with > constant load offset, > + * 47 bytes leftover maximum, > + * locations should not overlap. 
> + * Requirements: > + * - Store is aligned > + * - Load offset is <offset>, which must be immediate value within [1, 15] > + * - For <src>, make sure <offset> bit backwards & <16 - offset> bit > forwards are available for loading > + * - <dst>, <src>, <len> must be variables > + * - __m128i <xmm0> ~ <xmm8> must be pre-defined > + */ > +#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) > \ > +__extension__ ({ > \ > + int tmp; > \ > + while (len >= 128 + 16 - offset) { > \ > + xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 0 * 16)); \ > + len -= 128; > \ > + xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 1 * 16)); \ > + xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 2 * 16)); \ > + xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 3 * 16)); \ > + xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 4 * 16)); \ > + xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 5 * 16)); \ > + xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 6 * 16)); \ > + xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 7 * 16)); \ > + xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 8 * 16)); \ > + src = (const uint8_t *)src + 128; > \ > + _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), > _mm_alignr_epi8(xmm1, xmm0, offset)); \ > + _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), > _mm_alignr_epi8(xmm2, xmm1, offset)); \ > + _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), > _mm_alignr_epi8(xmm3, xmm2, offset)); \ > + _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), > _mm_alignr_epi8(xmm4, xmm3, offset)); \ > + _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), > _mm_alignr_epi8(xmm5, xmm4, offset)); \ > + _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), > _mm_alignr_epi8(xmm6, xmm5, offset)); \ > + _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), > _mm_alignr_epi8(xmm7, xmm6, offset)); \ > + _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), > _mm_alignr_epi8(xmm8, xmm7, offset)); \ > + dst = (uint8_t *)dst + 128; > \ > + } > \ > + tmp = len; > \ > + len = ((len - 16 + offset) & 127) + 16 - offset; > \ > + tmp -= len; > \ > + src = (const uint8_t *)src + tmp; > \ > + dst = (uint8_t *)dst + tmp; > \ > + if (len >= 32 + 16 - offset) { > \ > + while (len >= 32 + 16 - offset) { > \ > + xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 0 * 16)); \ > + len -= 32; > \ > + xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 1 * 16)); \ > + xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 2 * 16)); \ > + src = (const uint8_t *)src + 32; > \ > + _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), > _mm_alignr_epi8(xmm1, xmm0, offset)); \ > + _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), > _mm_alignr_epi8(xmm2, xmm1, offset)); \ > + dst = (uint8_t *)dst + 32; > \ > + } > \ > + tmp = len; > \ > + len = ((len - 16 + offset) & 31) + 16 - offset; > \ > + tmp -= len; > \ > + src = (const uint8_t *)src + tmp; > \ > + dst = (uint8_t *)dst + tmp; > \ > + } > \ > +}) > + > +/** > + * Macro for copying unaligned block from one location to another, > + * 47 bytes leftover maximum, > + * locations should not overlap. > + * Use switch here because the aligning instruction requires immediate value > for shift count. 
> + * Requirements: > + * - Store is aligned > + * - Load offset is <offset>, which must be within [1, 15] > + * - For <src>, make sure <offset> bit backwards & <16 - offset> bit > forwards are available for loading > + * - <dst>, <src>, <len> must be variables > + * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be > pre-defined > + */ > +#define MOVEUNALIGNED_LEFT47(dst, src, len, offset) \ > +__extension__ ({ \ > + switch (offset) { \ > + case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x01); break; \ > + case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x02); break; \ > + case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x03); break; \ > + case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x04); break; \ > + case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x05); break; \ > + case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x06); break; \ > + case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x07); break; \ > + case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x08); break; \ > + case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x09); break; \ > + case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0A); break; \ > + case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0B); break; \ > + case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0C); break; \ > + case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0D); break; \ > + case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0E); break; \ > + case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0F); break; \ > + default:; \ > + } \ > +}) > + > #ifdef RTE_MACHINE_CPUFLAG_AVX512F > > #define ALIGNMENT_MASK 0x3F > @@ -589,100 +685,6 @@ rte_mov256(uint8_t *dst, const uint8_t *src) > rte_mov16((uint8_t *)dst + 15 * 16, (const uint8_t *)src + 15 * 16); > } > > -/** > - * Macro for copying unaligned block from one location to another with > constant load offset, > - * 47 bytes leftover maximum, > - * locations should not overlap. 
> - * Requirements: > - * - Store is aligned > - * - Load offset is <offset>, which must be immediate value within [1, 15] > - * - For <src>, make sure <offset> bit backwards & <16 - offset> bit > forwards are available for loading > - * - <dst>, <src>, <len> must be variables > - * - __m128i <xmm0> ~ <xmm8> must be pre-defined > - */ > -#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) > \ > -__extension__ ({ > \ > - int tmp; > \ > - while (len >= 128 + 16 - offset) { > \ > - xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 0 * 16)); \ > - len -= 128; > \ > - xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 1 * 16)); \ > - xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 2 * 16)); \ > - xmm3 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 3 * 16)); \ > - xmm4 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 4 * 16)); \ > - xmm5 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 5 * 16)); \ > - xmm6 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 6 * 16)); \ > - xmm7 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 7 * 16)); \ > - xmm8 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 8 * 16)); \ > - src = (const uint8_t *)src + 128; > \ > - _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), > _mm_alignr_epi8(xmm1, xmm0, offset)); \ > - _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), > _mm_alignr_epi8(xmm2, xmm1, offset)); \ > - _mm_storeu_si128((__m128i *)((uint8_t *)dst + 2 * 16), > _mm_alignr_epi8(xmm3, xmm2, offset)); \ > - _mm_storeu_si128((__m128i *)((uint8_t *)dst + 3 * 16), > _mm_alignr_epi8(xmm4, xmm3, offset)); \ > - _mm_storeu_si128((__m128i *)((uint8_t *)dst + 4 * 16), > _mm_alignr_epi8(xmm5, xmm4, offset)); \ > - _mm_storeu_si128((__m128i *)((uint8_t *)dst + 5 * 16), > _mm_alignr_epi8(xmm6, xmm5, offset)); \ > - _mm_storeu_si128((__m128i *)((uint8_t *)dst + 6 * 16), > _mm_alignr_epi8(xmm7, xmm6, offset)); \ > - _mm_storeu_si128((__m128i *)((uint8_t *)dst + 7 * 16), > _mm_alignr_epi8(xmm8, xmm7, offset)); \ > - dst = (uint8_t *)dst + 128; > \ > - } > \ > - tmp = len; > \ > - len = ((len - 16 + offset) & 127) + 16 - offset; > \ > - tmp -= len; > \ > - src = (const uint8_t *)src + tmp; > \ > - dst = (uint8_t *)dst + tmp; > \ > - if (len >= 32 + 16 - offset) { > \ > - while (len >= 32 + 16 - offset) { > \ > - xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 0 * 16)); \ > - len -= 32; > \ > - xmm1 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 1 * 16)); \ > - xmm2 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - > offset + 2 * 16)); \ > - src = (const uint8_t *)src + 32; > \ > - _mm_storeu_si128((__m128i *)((uint8_t *)dst + 0 * 16), > _mm_alignr_epi8(xmm1, xmm0, offset)); \ > - _mm_storeu_si128((__m128i *)((uint8_t *)dst + 1 * 16), > _mm_alignr_epi8(xmm2, xmm1, offset)); \ > - dst = (uint8_t *)dst + 32; > \ > - } > \ > - tmp = len; > \ > - len = ((len - 16 + offset) & 31) + 16 - offset; > \ > - tmp -= len; > \ > - src = (const uint8_t *)src + tmp; > \ > - dst = (uint8_t *)dst + tmp; > \ > - } > \ > -}) > - > -/** > - * Macro for copying unaligned block from one location to another, > - * 47 bytes leftover maximum, > - * locations should not overlap. > - * Use switch here because the aligning instruction requires immediate value > for shift count. 
> - * Requirements: > - * - Store is aligned > - * - Load offset is <offset>, which must be within [1, 15] > - * - For <src>, make sure <offset> bit backwards & <16 - offset> bit > forwards are available for loading > - * - <dst>, <src>, <len> must be variables > - * - __m128i <xmm0> ~ <xmm8> used in MOVEUNALIGNED_LEFT47_IMM must be > pre-defined > - */ > -#define MOVEUNALIGNED_LEFT47(dst, src, len, offset) \ > -__extension__ ({ \ > - switch (offset) { \ > - case 0x01: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x01); break; \ > - case 0x02: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x02); break; \ > - case 0x03: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x03); break; \ > - case 0x04: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x04); break; \ > - case 0x05: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x05); break; \ > - case 0x06: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x06); break; \ > - case 0x07: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x07); break; \ > - case 0x08: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x08); break; \ > - case 0x09: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x09); break; \ > - case 0x0A: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0A); break; \ > - case 0x0B: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0B); break; \ > - case 0x0C: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0C); break; \ > - case 0x0D: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0D); break; \ > - case 0x0E: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0E); break; \ > - case 0x0F: MOVEUNALIGNED_LEFT47_IMM(dst, src, n, 0x0F); break; \ > - default:; \ > - } \ > -}) > - > static inline void * > rte_memcpy_generic(void *dst, const void *src, size_t n) > { > @@ -888,13 +890,1049 @@ rte_memcpy_aligned(void *dst, const void *src, size_t > n) > return ret; > } > > +/* > + * Run-time dispatch impementation of memcpy. > + */ > + > +typedef void * (*rte_memcpy_t)(void *dst, const void *src, size_t n); > +static rte_memcpy_t rte_memcpy_ptr; > + > +/** > + * AVX512 implementation below > + */ > +#ifdef CC_SUPPORT_AVX512 > +__attribute__((target("avx512f"))) > +static inline void * > +rte_memcpy_AVX512F(void *dst, const void *src, size_t n) > +{ > + if (!(((uintptr_t)dst | (uintptr_t)src) & 0x3F)) { > + void *ret = dst; > + > + /* Copy size <= 16 bytes */ > + if (n < 16) { > + if (n & 0x01) { > + *(uint8_t *)dst = *(const uint8_t *)src; > + src = (const uint8_t *)src + 1; > + dst = (uint8_t *)dst + 1; > + } > + if (n & 0x02) { > + *(uint16_t *)dst = *(const uint16_t *)src; > + src = (const uint16_t *)src + 1; > + dst = (uint16_t *)dst + 1; > + } > + if (n & 0x04) { > + *(uint32_t *)dst = *(const uint32_t *)src; > + src = (const uint32_t *)src + 1; > + dst = (uint32_t *)dst + 1; > + } > + if (n & 0x08) > + *(uint64_t *)dst = *(const uint64_t *)src; > + > + return ret; > + } > + > + /* Copy 16 <= size <= 32 bytes */ > + if (n <= 32) { > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm1); > + > + return ret; > + } > + > + /* Copy 32 < size <= 64 bytes */ > + if (n <= 64) { > + __m256i ymm0, ymm1; > + ymm0 = _mm256_loadu_si256((const __m256i *)src); > + ymm1 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src - 32 + n)); > + _mm256_storeu_si256((__m256i *)dst, ymm0); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst - 32 + n), ymm1); > + > + return ret; > + } > + > + /* Copy 64 bytes blocks */ > + for (; n >= 64; n -= 64) { > + __m512i 
zmm0; > + zmm0 = _mm512_loadu_si512((const void *)src); > + _mm512_storeu_si512((void *)dst, zmm0); > + dst = (uint8_t *)dst + 64; > + src = (const uint8_t *)src + 64; > + } > + > + /* Copy whatever left */ > + __m512i zmm0; > + zmm0 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src - 64 + n)); > + _mm512_storeu_si512((void *)((uint8_t *)dst - 64 + n), zmm0); > + > + return ret; > + } else { > + uintptr_t dstu = (uintptr_t)dst; > + uintptr_t srcu = (uintptr_t)src; > + void *ret = dst; > + size_t dstofss; > + size_t bits; > + > + /** > + * Copy less than 16 bytes > + */ > + if (n < 16) { > + if (n & 0x01) { > + *(uint8_t *)dstu = *(const uint8_t *)srcu; > + srcu = (uintptr_t)((const uint8_t *)srcu + 1); > + dstu = (uintptr_t)((uint8_t *)dstu + 1); > + } > + if (n & 0x02) { > + *(uint16_t *)dstu = *(const uint16_t *)srcu; > + srcu = (uintptr_t)((const uint16_t *)srcu + 1); > + dstu = (uintptr_t)((uint16_t *)dstu + 1); > + } > + if (n & 0x04) { > + *(uint32_t *)dstu = *(const uint32_t *)srcu; > + srcu = (uintptr_t)((const uint32_t *)srcu + 1); > + dstu = (uintptr_t)((uint32_t *)dstu + 1); > + } > + if (n & 0x08) > + *(uint64_t *)dstu = *(const uint64_t *)srcu; > + return ret; > + } > + > + /** > + * Fast way when copy size doesn't exceed 512 bytes > + */ > + if (n <= 32) { > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm1); > + return ret; > + } > + if (n <= 64) { > + __m256i ymm0, ymm1; > + ymm0 = _mm256_loadu_si256((const __m256i *)src); > + ymm1 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src - 32 + n)); > + _mm256_storeu_si256((__m256i *)dst, ymm0); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst - 32 + n), ymm1); > + return ret; > + } > + if (n <= 512) { > + if (n >= 256) { > + n -= 256; > + __m512i zmm0, zmm1, zmm2, zmm3; > + zmm0 = _mm512_loadu_si512((const void *)src); > + zmm1 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 64)); > + zmm2 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 2*64)); > + zmm3 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 3*64)); > + _mm512_storeu_si512((void *)dst, zmm0); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 64), zmm1); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 2*64), zmm2); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 3*64), zmm3); > + src = (const uint8_t *)src + 256; > + dst = (uint8_t *)dst + 256; > + } > + if (n >= 128) { > + n -= 128; > + __m512i zmm0, zmm1; > + zmm0 = _mm512_loadu_si512((const void *)src); > + zmm1 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 64)); > + _mm512_storeu_si512((void *)dst, zmm0); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 64), zmm1); > + src = (const uint8_t *)src + 128; > + dst = (uint8_t *)dst + 128; > + } > +COPY_BLOCK_128_BACK63: > + if (n > 64) { > + __m512i zmm0, zmm1; > + zmm0 = _mm512_loadu_si512((const void *)src); > + zmm1 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src - 64 + n)); > + _mm512_storeu_si512((void *)dst, zmm0); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst - 64 + n), zmm1); > + return ret; > + } > + if (n > 0) { > + __m512i zmm0; > + zmm0 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src - 64 + n)); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst - 64 + n), zmm0); > + } > + return ret; > + 
} > + > + /** > + * Make store aligned when copy size exceeds 512 bytes > + */ > + dstofss = ((uintptr_t)dst & 0x3F); > + if (dstofss > 0) { > + dstofss = 64 - dstofss; > + n -= dstofss; > + __m512i zmm0; > + zmm0 = _mm512_loadu_si512((const void *)src); > + _mm512_storeu_si512((void *)dst, zmm0); > + src = (const uint8_t *)src + dstofss; > + dst = (uint8_t *)dst + dstofss; > + } > + > + /** > + * Copy 512-byte blocks. > + * Use copy block function for better instruction order control, > + * which is important when load is unaligned. > + */ > + __m512i zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, zmm7; > + > + while (n >= 512) { > + zmm0 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 0 * 64)); > + n -= 512; > + zmm1 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 1 * 64)); > + zmm2 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 2 * 64)); > + zmm3 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 3 * 64)); > + zmm4 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 4 * 64)); > + zmm5 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 5 * 64)); > + zmm6 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 6 * 64)); > + zmm7 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 7 * 64)); > + src = (const uint8_t *)src + 512; > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 0 * 64), zmm0); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 1 * 64), zmm1); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 2 * 64), zmm2); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 3 * 64), zmm3); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 4 * 64), zmm4); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 5 * 64), zmm5); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 6 * 64), zmm6); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 7 * 64), zmm7); > + dst = (uint8_t *)dst + 512; > + } > + bits = n; > + n = n & 511; > + bits -= n; > + src = (const uint8_t *)src + bits; > + dst = (uint8_t *)dst + bits; > + > + /** > + * Copy 128-byte blocks. > + * Use copy block function for better instruction order control, > + * which is important when load is unaligned. 
> + */ > + if (n >= 128) { > + __m512i zmm0, zmm1; > + > + while (n >= 128) { > + zmm0 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 0 * 64)); > + n -= 128; > + zmm1 = _mm512_loadu_si512((const void *) > + ((const uint8_t *)src + 1 * 64)); > + src = (const uint8_t *)src + 128; > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 0 * 64), zmm0); > + _mm512_storeu_si512((void *) > + ((uint8_t *)dst + 1 * 64), zmm1); > + dst = (uint8_t *)dst + 128; > + } > + bits = n; > + n = n & 127; > + bits -= n; > + src = (const uint8_t *)src + bits; > + dst = (uint8_t *)dst + bits; > + } > + > + /** > + * Copy whatever left > + */ > + goto COPY_BLOCK_128_BACK63; > + } > +} > +#endif > + > +/** > + * AVX2 implementation below > + */ > +#ifdef CC_SUPPORT_AVX2 > +__attribute__((target("avx2"))) > +static inline void * > +rte_memcpy_AVX2(void *dst, const void *src, size_t n) > +{ > + if (!(((uintptr_t)dst | (uintptr_t)src) & 0x1F)) { > + void *ret = dst; > + > + /* Copy size <= 16 bytes */ > + if (n < 16) { > + if (n & 0x01) { > + *(uint8_t *)dst = *(const uint8_t *)src; > + src = (const uint8_t *)src + 1; > + dst = (uint8_t *)dst + 1; > + } > + if (n & 0x02) { > + *(uint16_t *)dst = *(const uint16_t *)src; > + src = (const uint16_t *)src + 1; > + dst = (uint16_t *)dst + 1; > + } > + if (n & 0x04) { > + *(uint32_t *)dst = *(const uint32_t *)src; > + src = (const uint32_t *)src + 1; > + dst = (uint32_t *)dst + 1; > + } > + if (n & 0x08) > + *(uint64_t *)dst = *(const uint64_t *)src; > + > + return ret; > + } > + > + /* Copy 16 <= size <= 32 bytes */ > + if (n <= 32) { > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm1); > + > + return ret; > + } > + > + /* Copy 32 < size <= 64 bytes */ > + if (n <= 64) { > + __m256i ymm0, ymm1; > + ymm0 = _mm256_loadu_si256((const __m256i *)src); > + ymm1 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src - 32 + n)); > + _mm256_storeu_si256((__m256i *)dst, ymm0); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst - 32 + n), ymm1); > + > + return ret; > + } > + > + /* Copy 64 bytes blocks */ > + for (; n >= 64; n -= 64) { > + __m256i ymm0, ymm1; > + ymm0 = _mm256_loadu_si256((const __m256i *)src); > + ymm1 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src + 32)); > + _mm256_storeu_si256((__m256i *)dst, ymm0); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst + 32), ymm1); > + dst = (uint8_t *)dst + 64; > + src = (const uint8_t *)src + 64; > + } > + > + /* Copy whatever left */ > + __m256i ymm0, ymm1; > + ymm0 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src - 64 + n)); > + ymm1 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src - 32 + n)); > + _mm256_storeu_si256((__m256i *)((uint8_t *)dst - 64 + n), ymm0); > + _mm256_storeu_si256((__m256i *)((uint8_t *)dst - 32 + n), ymm1); > + > + return ret; > + } else { > + uintptr_t dstu = (uintptr_t)dst; > + uintptr_t srcu = (uintptr_t)src; > + void *ret = dst; > + size_t dstofss; > + size_t bits; > + > + /** > + * Copy less than 16 bytes > + */ > + if (n < 16) { > + if (n & 0x01) { > + *(uint8_t *)dstu = *(const uint8_t *)srcu; > + srcu = (uintptr_t)((const uint8_t *)srcu + 1); > + dstu = (uintptr_t)((uint8_t *)dstu + 1); > + } > + if (n & 0x02) { > + *(uint16_t *)dstu = *(const uint16_t *)srcu; > + srcu = (uintptr_t)((const uint16_t 
*)srcu + 1); > + dstu = (uintptr_t)((uint16_t *)dstu + 1); > + } > + if (n & 0x04) { > + *(uint32_t *)dstu = *(const uint32_t *)srcu; > + srcu = (uintptr_t)((const uint32_t *)srcu + 1); > + dstu = (uintptr_t)((uint32_t *)dstu + 1); > + } > + if (n & 0x08) > + *(uint64_t *)dstu = *(const uint64_t *)srcu; > + return ret; > + } > + > + /** > + * Fast way when copy size doesn't exceed 256 bytes > + */ > + if (n <= 32) { > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm1); > + return ret; > + } > + if (n <= 48) { > + __m128i xmm0, xmm1, xmm2; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + xmm2 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm2); > + return ret; > + } > + if (n <= 64) { > + __m256i ymm0, ymm1; > + ymm0 = _mm256_loadu_si256((const __m256i *)src); > + ymm1 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src - 32 + n)); > + _mm256_storeu_si256((__m256i *)dst, ymm0); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst - 32 + n), ymm1); > + return ret; > + } > + if (n <= 256) { > + if (n >= 128) { > + n -= 128; > + __m256i ymm0, ymm1, ymm2, ymm3; > + ymm0 = _mm256_loadu_si256((const __m256i *)src); > + ymm1 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src + 32)); > + ymm2 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src + 2*32)); > + ymm3 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src + 3*32)); > + _mm256_storeu_si256((__m256i *)dst, ymm0); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst + 32), ymm1); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst + 2*32), ymm2); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst + 3*32), ymm3); > + src = (const uint8_t *)src + 128; > + dst = (uint8_t *)dst + 128; > + } > +COPY_BLOCK_128_BACK31: > + if (n >= 64) { > + n -= 64; > + __m256i ymm0, ymm1; > + ymm0 = _mm256_loadu_si256((const __m256i *)src); > + ymm1 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src + 32)); > + _mm256_storeu_si256((__m256i *)dst, ymm0); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst + 32), ymm1); > + src = (const uint8_t *)src + 64; > + dst = (uint8_t *)dst + 64; > + } > + if (n > 32) { > + __m256i ymm0, ymm1; > + ymm0 = _mm256_loadu_si256((const __m256i *)src); > + ymm1 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src - 32 + n)); > + _mm256_storeu_si256((__m256i *)dst, ymm0); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst - 32 + n), ymm1); > + return ret; > + } > + if (n > 0) { > + __m256i ymm0; > + ymm0 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src - 32 + n)); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst - 32 + n), ymm0); > + } > + return ret; > + } > + > + /** > + * Make store aligned when copy size exceeds 256 bytes > + */ > + dstofss = (uintptr_t)dst & 0x1F; > + if (dstofss > 0) { > + dstofss = 32 - dstofss; > + n -= dstofss; > + __m256i ymm0; > + ymm0 = _mm256_loadu_si256((const __m256i *)src); > + _mm256_storeu_si256((__m256i *)dst, ymm0); > + src = (const uint8_t *)src + dstofss; > + dst = (uint8_t 
*)dst + dstofss; > + } > + > + /** > + * Copy 128-byte blocks > + */ > + __m256i ymm0, ymm1, ymm2, ymm3; > + > + while (n >= 128) { > + ymm0 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src + 0 * 32)); > + n -= 128; > + ymm1 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src + 1 * 32)); > + ymm2 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src + 2 * 32)); > + ymm3 = _mm256_loadu_si256((const __m256i *) > + ((const uint8_t *)src + 3 * 32)); > + src = (const uint8_t *)src + 128; > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst + 0 * 32), ymm0); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst + 1 * 32), ymm1); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst + 2 * 32), ymm2); > + _mm256_storeu_si256((__m256i *) > + ((uint8_t *)dst + 3 * 32), ymm3); > + dst = (uint8_t *)dst + 128; > + } > + bits = n; > + n = n & 127; > + bits -= n; > + src = (const uint8_t *)src + bits; > + dst = (uint8_t *)dst + bits; > + > + /** > + * Copy whatever left > + */ > + goto COPY_BLOCK_128_BACK31; > + } > +} > +#endif > + > +/** > + * SSE & AVX implementation below > + */ > +static inline void * > +rte_memcpy_DEFAULT(void *dst, const void *src, size_t n) > +{ > + if (!(((uintptr_t)dst | (uintptr_t)src) & 0x0F)) { > + void *ret = dst; > + > + /* Copy size <= 16 bytes */ > + if (n < 16) { > + if (n & 0x01) { > + *(uint8_t *)dst = *(const uint8_t *)src; > + src = (const uint8_t *)src + 1; > + dst = (uint8_t *)dst + 1; > + } > + if (n & 0x02) { > + *(uint16_t *)dst = *(const uint16_t *)src; > + src = (const uint16_t *)src + 1; > + dst = (uint16_t *)dst + 1; > + } > + if (n & 0x04) { > + *(uint32_t *)dst = *(const uint32_t *)src; > + src = (const uint32_t *)src + 1; > + dst = (uint32_t *)dst + 1; > + } > + if (n & 0x08) > + *(uint64_t *)dst = *(const uint64_t *)src; > + > + return ret; > + } > + > + /* Copy 16 <= size <= 32 bytes */ > + if (n <= 32) { > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm1); > + > + return ret; > + } > + > + /* Copy 32 < size <= 64 bytes */ > + if (n <= 64) { > + __m128i xmm0, xmm1, xmm2, xmm3; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + xmm2 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 32 + n)); > + xmm3 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 32 + n), xmm2); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm3); > + > + return ret; > + } > + > + /* Copy 64 bytes blocks */ > + for (; n >= 64; n -= 64) { > + __m128i xmm0, xmm1, xmm2, xmm3; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + xmm2 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 2*16)); > + xmm3 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 3*16)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 2*16), xmm2); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 3*16), xmm3); > + dst = (uint8_t 
*)dst + 64; > + src = (const uint8_t *)src + 64; > + } > + > + /* Copy whatever left */ > + __m128i xmm0, xmm1, xmm2, xmm3; > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 64 + n)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 48 + n)); > + xmm2 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 32 + n)); > + xmm3 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)((uint8_t *)dst - 64 + n), xmm0); > + _mm_storeu_si128((__m128i *)((uint8_t *)dst - 48 + n), xmm1); > + _mm_storeu_si128((__m128i *)((uint8_t *)dst - 32 + n), xmm2); > + _mm_storeu_si128((__m128i *)((uint8_t *)dst - 16 + n), xmm3); > + > + return ret; > + } else { > + __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8; > + uintptr_t dstu = (uintptr_t)dst; > + uintptr_t srcu = (uintptr_t)src; > + void *ret = dst; > + size_t dstofss; > + size_t srcofs; > + > + /** > + * Copy less than 16 bytes > + */ > + if (n < 16) { > + if (n & 0x01) { > + *(uint8_t *)dstu = *(const uint8_t *)srcu; > + srcu = (uintptr_t)((const uint8_t *)srcu + 1); > + dstu = (uintptr_t)((uint8_t *)dstu + 1); > + } > + if (n & 0x02) { > + *(uint16_t *)dstu = *(const uint16_t *)srcu; > + srcu = (uintptr_t)((const uint16_t *)srcu + 1); > + dstu = (uintptr_t)((uint16_t *)dstu + 1); > + } > + if (n & 0x04) { > + *(uint32_t *)dstu = *(const uint32_t *)srcu; > + srcu = (uintptr_t)((const uint32_t *)srcu + 1); > + dstu = (uintptr_t)((uint32_t *)dstu + 1); > + } > + if (n & 0x08) > + *(uint64_t *)dstu = *(const uint64_t *)srcu; > + return ret; > + } > + > + /** > + * Fast way when copy size doesn't exceed 512 bytes > + */ > + if (n <= 32) { > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm1); > + return ret; > + } > + if (n <= 48) { > + __m128i xmm0, xmm1, xmm2; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + xmm2 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm2); > + return ret; > + } > + if (n <= 64) { > + __m128i xmm0, xmm1, xmm2, xmm3; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + xmm2 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 32)); > + xmm3 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 32), xmm2); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm3); > + return ret; > + } > + if (n <= 128) > + goto COPY_BLOCK_128_BACK15; > + if (n <= 512) { > + if (n >= 256) { > + n -= 256; > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 2*16)); > + xmm1 = 
_mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 3*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 2*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 3*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 4*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 5*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 4*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 5*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 6*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 7*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 6*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 7*16), xmm1); > + > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 128)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 128 + 16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 128), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 128 + 16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 128 + 2*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 128 + 3*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 128 + 2*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 128 + 3*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 128 + 4*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 128 + 5*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 128 + 4*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 128 + 5*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 128 + 6*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 128 + 7*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 128 + 6*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 128 + 7*16), xmm1); > + src = (const uint8_t *)src + 256; > + dst = (uint8_t *)dst + 256; > + } > +COPY_BLOCK_255_BACK15: > + if (n >= 128) { > + n -= 128; > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 2*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 3*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 2*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 3*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 4*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 5*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 4*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 5*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 6*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 7*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 6*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 7*16), xmm1); > + src = (const uint8_t *)src + 128; > + dst = (uint8_t *)dst + 128; > + } > +COPY_BLOCK_128_BACK15: > + if (n >= 64) { > + n -= 64; > + __m128i xmm0, xmm1; > + 
xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 2*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 3*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 2*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 3*16), xmm1); > + src = (const uint8_t *)src + 64; > + dst = (uint8_t *)dst + 64; > + } > +COPY_BLOCK_64_BACK15: > + if (n >= 32) { > + n -= 32; > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + src = (const uint8_t *)src + 32; > + dst = (uint8_t *)dst + 32; > + } > + if (n > 16) { > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm1); > + return ret; > + } > + if (n > 0) { > + __m128i xmm0; > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src - 16 + n)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst - 16 + n), xmm0); > + } > + return ret; > + } > + > + /** > + * Make store aligned when copy size exceeds 512 bytes, > + * and make sure the first 15 bytes are copied, because > + * unaligned copy functions require up to 15 bytes > + * backwards access. > + */ > + dstofss = (uintptr_t)dst & 0x0F; > + if (dstofss > 0) { > + dstofss = 16 - dstofss + 16; > + n -= dstofss; > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + src = (const uint8_t *)src + dstofss; > + dst = (uint8_t *)dst + dstofss; > + } > + srcofs = ((uintptr_t)src & 0x0F); > + > + /** > + * For aligned copy > + */ > + if (srcofs == 0) { > + /** > + * Copy 256-byte blocks > + */ > + for (; n >= 256; n -= 256) { > + __m128i xmm0, xmm1; > + xmm0 = _mm_loadu_si128((const __m128i *)src); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 16)); > + _mm_storeu_si128((__m128i *)dst, xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 2*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 3*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 2*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 3*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 4*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 5*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 4*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 5*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 6*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 7*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 6*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 7*16), xmm1); 
> + > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 8*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 9*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 8*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 9*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 10*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 11*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 10*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 11*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 12*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 13*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 12*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 13*16), xmm1); > + xmm0 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 14*16)); > + xmm1 = _mm_loadu_si128((const __m128i *) > + ((const uint8_t *)src + 15*16)); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 14*16), xmm0); > + _mm_storeu_si128((__m128i *) > + ((uint8_t *)dst + 15*16), xmm1); > + dst = (uint8_t *)dst + 256; > + src = (const uint8_t *)src + 256; > + } > + > + /** > + * Copy whatever left > + */ > + goto COPY_BLOCK_255_BACK15; > + } > + > + /** > + * For copy with unaligned load > + */ > + MOVEUNALIGNED_LEFT47(dst, src, n, srcofs); > + > + /** > + * Copy whatever left > + */ > + goto COPY_BLOCK_64_BACK15; > + } > +} > + > +static void __attribute__((constructor))
That means that each file with '#include <rte_memcpy.h>' will have its own copy of that function:

$ objdump -d x86_64-native-linuxapp-gcc/app/testpmd | grep '<rte_memcpy_init>:' | sort -u | wc -l
233

Same story for rte_memcpy_ptr and rte_memcpy_DEFAULT, etc...
Obviously we need (and want) only one copy of that stuff per binary.

> +rte_memcpy_init(void)
> +{
> +#ifdef CC_SUPPORT_AVX512
> +	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) {
> +		rte_memcpy_ptr = rte_memcpy_AVX512F;
> +		RTE_LOG(DEBUG, EAL, "AVX512 is using!\n");
> +	} else
> +#endif
> +#ifdef CC_SUPPORT_AVX2

Why do you assume this macro will be defined? By whom?
There is no such macro with gcc:

$ gcc -march=native -dM -E - </dev/null 2>&1 | grep AVX2
#define __AVX2__ 1

and you don't define it yourself.
When building with '-march=native' on BDW, only rte_memcpy_DEFAULT gets compiled.

To summarize: as I understand it, the goal of this patch was
(assuming that our current rte_memcpy() implementation is good in terms of both performance and functionality):

1. Based on the current rte_memcpy() implementation, define 3 x86 arch-specific rte_memcpy flavors:
   a) rte_memcpy_SSE
   b) rte_memcpy_AVX2
   c) rte_memcpy_AVX512
2. Select the appropriate flavor based on the current HW at runtime,
   i.e. all 3 flavors should be present in the binary and the selection should be made
   at program startup.

As far as I can see, neither goal was achieved with the current patch;
instead, a lot of redundant code was introduced.
So I think it is a NACK for the current version.
What I think needs to be done instead:

1. mv lib/librte_eal/common/include/arch/x86/rte_memcpy.h
      lib/librte_eal/common/include/arch/x86/rte_memcpy_internal.h

2. Inside rte_memcpy_internal.h, rename rte_memcpy() into rte_memcpy_internal().

3. Create 3 files:
      rte_memcpy_sse.c
      rte_memcpy_avx2.c
      rte_memcpy_avx512.c

   Inside each of these files we define the corresponding rte_memcpy_xxx() function,
   e.g. rte_memcpy_avx2.c:

   ....
   #ifndef RTE_MACHINE_CPUFLAG_AVX2
   #error "no avx2 support"
   #endif

   #include "rte_memcpy_internal.h"
   ...

   void *
   rte_memcpy_avx2(void *dst, const void *src, size_t n)
   {
           return rte_memcpy_internal(dst, src, n);
   }

4. Make changes inside lib/librte_eal/Makefile to ensure that each rte_memcpy_xxx()
   gets built with the appropriate -march flags (e.g. avx2 with -mavx2, etc.).
   You can use librte_acl/Makefile as a reference.

5. Create rte_memcpy.c and put the rte_memcpy_ptr/rte_memcpy_init() definitions in that file
   (a minimal sketch is given after this list).

6. Create a new rte_memcpy.h and define rte_memcpy() in it:

   ...
   #include <rte_memcpy_internal.h>
   ...

   #define RTE_X86_MEMCPY_THRESH 128
   static inline void *
   rte_memcpy(void *dst, const void *src, size_t n)
   {
           if (n <= RTE_X86_MEMCPY_THRESH)
                   return rte_memcpy_internal(dst, src, n);
           else
                   return (*rte_memcpy_ptr)(dst, src, n);
   }

7. Test it properly - i.e. build DPDK with the default target and make sure each of the 3 flavors
   can be selected properly at runtime based on the underlying arch.

8. As a possible future improvement - with such changes we don't need the generic inline
   implementation. We could think about creating a faster version for copies of <= 128B.
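To make step 5 a bit more concrete, here is a minimal sketch of what rte_memcpy.c could look like. The function and flag names simply follow the plan above; the exact prototypes, headers, and handling of compilers that cannot build a given flavor are assumptions and would need to be filled in:

   #include <rte_cpuflags.h>

   /* flavor prototypes - in the real patch these would come from the
    * headers created in steps 1-3; declared here only to keep the
    * sketch self-contained */
   void *rte_memcpy_sse(void *dst, const void *src, size_t n);
   void *rte_memcpy_avx2(void *dst, const void *src, size_t n);
   void *rte_memcpy_avx512(void *dst, const void *src, size_t n);

   /* the single out-of-line copy of the function pointer used by the
    * inline rte_memcpy() wrapper from step 6 */
   void *(*rte_memcpy_ptr)(void *dst, const void *src, size_t n);

   /* runs once at program startup and picks the widest flavor the
    * current CPU supports */
   static void __attribute__((constructor))
   rte_memcpy_init(void)
   {
           if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
                   rte_memcpy_ptr = rte_memcpy_avx512;
           else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                   rte_memcpy_ptr = rte_memcpy_avx2;
           else
                   rte_memcpy_ptr = rte_memcpy_sse;
   }

Since the constructor and the pointer then live in exactly one object file, the objdump check above should report a single rte_memcpy_init, and the inline wrapper only pays the indirect call for copies larger than RTE_X86_MEMCPY_THRESH.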
Konstantin

> +	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
> +		rte_memcpy_ptr = rte_memcpy_AVX2;
> +		RTE_LOG(DEBUG, EAL, "AVX2 is using!\n");
> +	} else
> +#endif
> +	{
> +		rte_memcpy_ptr = rte_memcpy_DEFAULT;
> +		RTE_LOG(DEBUG, EAL, "Default SSE/AVX is using!\n");
> +	}
> +}
> +
> +#define MEMCPY_THRESH 128
>  static inline void *
>  rte_memcpy(void *dst, const void *src, size_t n)
>  {
> -	if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
> -		return rte_memcpy_aligned(dst, src, n);
> +	if (n <= MEMCPY_THRESH) {
> +		if (!(((uintptr_t)dst | (uintptr_t)src) & ALIGNMENT_MASK))
> +			return rte_memcpy_aligned(dst, src, n);
> +		else
> +			return rte_memcpy_generic(dst, src, n);
> +	}
>  	else
> -		return rte_memcpy_generic(dst, src, n);
> +		return (*rte_memcpy_ptr)(dst, src, n);
>  }
> 
>  #ifdef __cplusplus
> --
> 2.7.4