From: Benjamin Boren <ben.bo...@intel.com>

Signed-off-by: Benjamin Boren <Ben.Boren at intel.com>
Signed-off-by: David Hunt <david.hunt at intel.com>
---
 .../common/include/arch/arm64/rte_vect.h           | 102 +++++++++++++++++++++
 1 file changed, 102 insertions(+)
 create mode 100644 lib/librte_eal/common/include/arch/arm64/rte_vect.h
diff --git a/lib/librte_eal/common/include/arch/arm64/rte_vect.h b/lib/librte_eal/common/include/arch/arm64/rte_vect.h
new file mode 100644
index 0000000..ceae710
--- /dev/null
+++ b/lib/librte_eal/common/include/arch/arm64/rte_vect.h
@@ -0,0 +1,102 @@
+/*
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_VECT_ARM64_H_
+#define _RTE_VECT_ARM64_H_
+
+/**
+ * @file
+ *
+ * Minimal SSE/AVX (__m128i) compatibility layer implemented with ARM NEON.
+ */
+
+#include <stdint.h>
+#include <arm_neon.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef float32x4_t __m128;
+
+typedef int32x4_t __m128i;
+
+typedef __m128i xmm_t;
+
+#define XMM_SIZE (sizeof(xmm_t))
+#define XMM_MASK (XMM_SIZE - 1)
+
+typedef union rte_xmm {
+	xmm_t    x;
+	uint8_t  u8[XMM_SIZE / sizeof(uint8_t)];
+	uint16_t u16[XMM_SIZE / sizeof(uint16_t)];
+	uint32_t u32[XMM_SIZE / sizeof(uint32_t)];
+	uint64_t u64[XMM_SIZE / sizeof(uint64_t)];
+	double   pd[XMM_SIZE / sizeof(double)];
+} rte_xmm_t __attribute__((aligned(16)));
+
+/* logical right shift of each 32-bit lane */
+#define _mm_srli_epi32(a, imm) ((__m128i)vshrq_n_u32((uint32x4_t)(a), (imm)))
+
+/* byte-wise right shift of the whole 128-bit register */
+#define _mm_srli_si128(a, imm) ((__m128i)vextq_s8((int8x16_t)(a), \
+	vdupq_n_s8(0), (imm)))
+
+static inline __m128i
+_mm_set_epi32(int i3, int i2, int i1, int i0);
+static inline int64_t
+_mm_cvtsi128_si64(__m128i a);
+
+static inline __m128i
+_mm_set_epi32(int i3, int i2, int i1, int i0)
+{
+	int32_t data[4] __attribute__((aligned(16))) = { i0, i1, i2, i3 };
+	return vld1q_s32(data);
+}
+
+static inline int64_t
+_mm_cvtsi128_si64(__m128i a)
+{
+	return vgetq_lane_s64(vreinterpretq_s64_s32(a), 0);
+}
+
+static inline __m128i
+_mm_and_si128(__m128i a, __m128i b)
+{
+	return vandq_s32(a, b);
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_VECT_ARM64_H_ */
+
--
2.1.4
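For reference, below is a minimal usage sketch of the shims added above, not
part of the patch. It assumes an aarch64 build where this header is picked up
as rte_vect.h and that _mm_cvtsi128_si64() returns the low 64 bits as an
int64_t, as in the header:

	#include <stdio.h>
	#include <inttypes.h>
	#include <rte_vect.h>

	int main(void)
	{
		/* last argument is lane 0, so lanes 0..3 hold 1, 2, 3, 4 */
		__m128i v = _mm_set_epi32(4, 3, 2, 1);
		/* mask that keeps only the two low 32-bit lanes */
		__m128i m = _mm_set_epi32(0, 0, -1, -1);
		/* shift each lane right by one, then apply the mask */
		__m128i r = _mm_and_si128(_mm_srli_epi32(v, 1), m);

		/* low 64 bits (lanes 0 and 1): prints 0x100000000 */
		printf("0x%" PRIx64 "\n", (uint64_t)_mm_cvtsi128_si64(r));
		return 0;
	}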