Here is a new patch set that applies on top of v9-0001 in the json_lex_string patch set [0] and v3 of the is_valid_ascii patch [1].
[0] https://postgr.es/m/CAFBsxsFV4v802idV0-Bo%3DV7wLMHRbOZ4er0hgposhyGCikmVGA%40mail.gmail.com
[1] https://postgr.es/m/CAFBsxsFFAZ6acUfyUALiem4DpCW%3DApXbF02zrc0G0oT9CPof0Q%40mail.gmail.com

-- 
Nathan Bossart
Amazon Web Services: https://aws.amazon.com
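For anyone following along without the surrounding context: pg_lfind32() simply reports whether a 32-bit key occurs in an array, so a caller looks roughly like the sketch below (minimal and for illustration only; the contains_xid() wrapper is made up, not something in the tree):

#include "postgres.h"

#include "port/pg_lfind.h"

/* hypothetical helper: true when 'key' occurs in the first 'nxids' entries */
static bool
contains_xid(uint32 key, uint32 *xids, uint32 nxids)
{
	return pg_lfind32(key, xids, nxids);
}

Neither patch changes that interface; they only swap out the implementation underneath it.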
>From 5f973a39d67a744d514ee80e05a1c7f40bc0ebc6 Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nathandboss...@gmail.com>
Date: Thu, 25 Aug 2022 22:18:30 -0700
Subject: [PATCH v3 1/2] abstract architecture-specific implementation details
 from pg_lfind32()

---
 src/include/port/pg_lfind.h | 55 ++++++++++++++++++----------------
 src/include/port/simd.h     | 60 +++++++++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+), 25 deletions(-)

diff --git a/src/include/port/pg_lfind.h b/src/include/port/pg_lfind.h
index a4e13dffec..7a851ea42c 100644
--- a/src/include/port/pg_lfind.h
+++ b/src/include/port/pg_lfind.h
@@ -91,16 +91,19 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 {
 	uint32		i = 0;
 
-#ifdef USE_SSE2
+#ifndef USE_NO_SIMD
 
 	/*
-	 * A 16-byte register only has four 4-byte lanes. For better
-	 * instruction-level parallelism, each loop iteration operates on a block
-	 * of four registers. Testing has showed this is ~40% faster than using a
-	 * block of two registers.
+	 * For better instruction-level parallelism, each loop iteration operates
+	 * on a block of four registers. Testing for SSE2 has showed this is ~40%
+	 * faster than using a block of two registers.
 	 */
-	const __m128i keys = _mm_set1_epi32(key); /* load 4 copies of key */
-	uint32		iterations = nelem & ~0xF;	/* round down to multiple of 16 */
+	const Vector32 keys = vector32_broadcast(key);	/* load copies of key */
+	uint32		nelem_per_vector = sizeof(Vector32) / sizeof(uint32);
+	uint32		nelem_per_iteration = 4 * nelem_per_vector;
+
+	/* round down to multiple of elements per iteration */
+	uint32		tail_idx = nelem & ~(nelem_per_iteration - 1);
 
 #if defined(USE_ASSERT_CHECKING)
 	bool		assert_result = false;
@@ -116,31 +119,33 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 	}
 #endif
 
-	for (i = 0; i < iterations; i += 16)
+	for (i = 0; i < tail_idx; i += nelem_per_iteration)
 	{
-		/* load the next block into 4 registers holding 4 values each */
-		const __m128i vals1 = _mm_loadu_si128((__m128i *) & base[i]);
-		const __m128i vals2 = _mm_loadu_si128((__m128i *) & base[i + 4]);
-		const __m128i vals3 = _mm_loadu_si128((__m128i *) & base[i + 8]);
-		const __m128i vals4 = _mm_loadu_si128((__m128i *) & base[i + 12]);
+		Vector32	vals1, vals2, vals3, vals4,
+					result1, result2, result3, result4,
+					tmp1, tmp2, result;
+
+		/* load the next block into 4 registers */
+		vector32_load(&vals1, &base[i]);
+		vector32_load(&vals2, &base[i + nelem_per_vector]);
+		vector32_load(&vals3, &base[i + nelem_per_vector * 2]);
+		vector32_load(&vals4, &base[i + nelem_per_vector * 3]);
 
 		/* compare each value to the key */
-		const __m128i result1 = _mm_cmpeq_epi32(keys, vals1);
-		const __m128i result2 = _mm_cmpeq_epi32(keys, vals2);
-		const __m128i result3 = _mm_cmpeq_epi32(keys, vals3);
-		const __m128i result4 = _mm_cmpeq_epi32(keys, vals4);
+		result1 = vector32_eq(keys, vals1);
+		result2 = vector32_eq(keys, vals2);
+		result3 = vector32_eq(keys, vals3);
+		result4 = vector32_eq(keys, vals4);
 
 		/* combine the results into a single variable */
-		const __m128i tmp1 = _mm_or_si128(result1, result2);
-		const __m128i tmp2 = _mm_or_si128(result3, result4);
-		const __m128i result = _mm_or_si128(tmp1, tmp2);
+		tmp1 = vector32_or(result1, result2);
+		tmp2 = vector32_or(result3, result4);
+		result = vector32_or(tmp1, tmp2);
 
 		/* see if there was a match */
-		if (_mm_movemask_epi8(result) != 0)
+		if (vector32_any_lane_set(result))
 		{
-#if defined(USE_ASSERT_CHECKING)
 			Assert(assert_result == true);
-#endif
 			return true;
 		}
 	}
@@ -151,14 +156,14 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 	{
 		if (key == base[i])
 		{
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 			Assert(assert_result == true);
 #endif
 			return true;
 		}
 	}
 
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 	Assert(assert_result == false);
 #endif
 	return false;
diff --git a/src/include/port/simd.h b/src/include/port/simd.h
index 8f85153110..bd4f1a3f39 100644
--- a/src/include/port/simd.h
+++ b/src/include/port/simd.h
@@ -32,6 +32,7 @@
 #include <emmintrin.h>
 #define USE_SSE2
 typedef __m128i Vector8;
+typedef __m128i Vector32;
 
 #else
 
@@ -40,18 +41,24 @@ typedef __m128i Vector8;
  */
 #define USE_NO_SIMD
 typedef uint64 Vector8;
+typedef uint64 Vector32;
 
 #endif
 
 static inline void vector8_load(Vector8 *v, const uint8 *s);
+static inline void vector32_load(Vector32 *v, const uint32 *s);
 static inline Vector8 vector8_broadcast(const uint8 c);
+static inline Vector32 vector32_broadcast(const uint32 c);
 static inline bool vector8_has_zero(const Vector8 v);
 static inline bool vector8_has(const Vector8 v, const uint8 c);
 static inline bool vector8_has_le(const Vector8 v, const uint8 c);
 static inline bool vector8_is_highbit_set(const Vector8 v);
 static inline Vector8 vector8_or(const Vector8 v1, const Vector8 v2);
+static inline Vector32 vector32_or(const Vector32 v1, const Vector32 v2);
 #ifndef USE_NO_SIMD
 static inline Vector8 vector8_eq(const Vector8 v1, const Vector8 v2);
+static inline Vector32 vector32_eq(const Vector32 v1, const Vector32 v2);
+static inline bool vector32_any_lane_set(const Vector32 v);
 #endif
 
 /*
@@ -68,6 +75,16 @@ vector8_load(Vector8 *v, const uint8 *s)
 #endif
 }
 
+static inline void
+vector32_load(Vector32 *v, const uint32 *s)
+{
+#ifdef USE_SSE2
+	*v = _mm_loadu_si128((const __m128i *) s);
+#else
+	memcpy(v, s, sizeof(Vector32));
+#endif
+}
+
 
 /*
  * Functions for creating a vector with all elements set to the same value.
@@ -83,6 +100,16 @@ vector8_broadcast(const uint8 c)
 #endif
 }
 
+static inline Vector32
+vector32_broadcast(const uint32 c)
+{
+#ifdef USE_SSE2
+	return _mm_set1_epi32(c);
+#else
+	return ~UINT64CONST(0) / 0xFFFFFFFF * c;
+#endif
+}
+
 
 /*
  * Functions for comparing vector elements to a given value.
@@ -196,6 +223,21 @@ vector8_is_highbit_set(const Vector8 v)
 #endif
 }
 
+/*
+ * vector32_any_lane_set() assumes that each lane in the given vector is set to
+ * either all ones or all zeroes.  If this is not true, its behavior is
+ * undefined.
+ */
+#ifndef USE_NO_SIMD
+static inline bool
+vector32_any_lane_set(const Vector32 v)
+{
+#ifdef USE_SSE2
+	return _mm_movemask_epi8(v) != 0;
+#endif
+}
+#endif
+
 
 /* comparisons between vectors */
 #ifndef USE_NO_SIMD
@@ -206,6 +248,14 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 	return _mm_cmpeq_epi8(v1, v2);
 #endif
 }
+
+static inline Vector32
+vector32_eq(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_cmpeq_epi32(v1, v2);
+#endif
+}
 #endif
 
 /* bitwise operations */
@@ -220,4 +270,14 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 #endif
 }
 
+static inline Vector32
+vector32_or(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_or_si128(v1, v2);
+#else
+	return v1 | v2;
+#endif
+}
+
 #endif							/* SIMD_H */
-- 
2.25.1
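In case the uint64 fallback in vector32_broadcast() looks cryptic, here is a small standalone sketch (plain C, not part of the patch) of why ~0 / 0xFFFFFFFF * c replicates c into both 32-bit lanes of the emulated vector:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t	c = 0xDEADBEEF;

	/* 0xFFFFFFFFFFFFFFFF / 0xFFFFFFFF == 0x0000000100000001 */
	uint64_t	lane_ones = ~UINT64_C(0) / 0xFFFFFFFF;

	/* multiplying by c puts a copy of c in each 32-bit lane */
	uint64_t	v = lane_ones * c;

	printf("%016llx\n", (unsigned long long) v);	/* prints deadbeefdeadbeef */
	return 0;
}

vector8_broadcast() already uses the same trick with 0xFF for byte-sized lanes.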
>From be7a5be891050330556f950731889d5c352ce58c Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nathandboss...@gmail.com>
Date: Thu, 25 Aug 2022 22:59:12 -0700
Subject: [PATCH v3 2/2] use ARM Advanced SIMD intrinsic functions where
 available

---
 src/include/port/simd.h | 46 +++++++++++++++++++++++++++++++++++++++--
 1 file changed, 44 insertions(+), 2 deletions(-)

diff --git a/src/include/port/simd.h b/src/include/port/simd.h
index bd4f1a3f39..839c0f25db 100644
--- a/src/include/port/simd.h
+++ b/src/include/port/simd.h
@@ -34,6 +34,19 @@ typedef __m128i Vector8;
 typedef __m128i Vector32;
 
+/*
+ * Include arm_neon.h if the compiler is targeting an architecture that
+ * supports ARM Advanced SIMD (Neon) intrinsics.  While Neon support is
+ * technically optional for aarch64, we assume it's unlikely that anyone will
+ * run PostgreSQL on specialized hardware lacking this feature, and we assume
+ * that compilers targeting this architecture understand Neon intrinsics.
+ */
+#elif defined(__aarch64__)
+#include <arm_neon.h>
+#define USE_NEON
+typedef uint8x16_t Vector8;
+typedef uint32x4_t Vector32;
+
 #else
 
 /*
  * If no SIMD instructions are available, we can in some cases emulate vector
@@ -70,6 +83,8 @@ vector8_load(Vector8 *v, const uint8 *s)
 {
 #ifdef USE_SSE2
 	*v = _mm_loadu_si128((const __m128i *) s);
+#elif USE_NEON
+	*v = vld1q_u8(s);
 #else
 	memcpy(v, s, sizeof(Vector8));
 #endif
@@ -80,6 +95,8 @@ vector32_load(Vector32 *v, const uint32 *s)
 {
 #ifdef USE_SSE2
 	*v = _mm_loadu_si128((const __m128i *) s);
+#elif USE_NEON
+	*v = vld1q_u32(s);
 #else
 	memcpy(v, s, sizeof(Vector32));
 #endif
@@ -95,6 +112,8 @@ vector8_broadcast(const uint8 c)
 {
 #ifdef USE_SSE2
 	return _mm_set1_epi8(c);
+#elif USE_NEON
+	return vdupq_n_u8(c);
 #else
 	return ~UINT64CONST(0) / 0xFF * c;
 #endif
@@ -105,6 +124,8 @@ vector32_broadcast(const uint32 c)
 {
 #ifdef USE_SSE2
 	return _mm_set1_epi32(c);
+#elif USE_NEON
+	return vdupq_n_u32(c);
 #else
 	return ~UINT64CONST(0) / 0xFFFFFFFF * c;
 #endif
@@ -120,6 +141,8 @@ vector8_has_zero(const Vector8 v)
 {
 #ifdef USE_SSE2
 	return _mm_movemask_epi8(_mm_cmpeq_epi8(v, _mm_setzero_si128()));
+#elif USE_NEON
+	return vmaxvq_u8(vceqzq_u8(v)) != 0;
 #else
 	return vector8_has_le(v, 0);
 #endif
@@ -146,6 +169,8 @@ vector8_has(const Vector8 v, const uint8 c)
 
 #ifdef USE_SSE2
 	result = _mm_movemask_epi8(_mm_cmpeq_epi8(v, vector8_broadcast(c)));
+#elif USE_NEON
+	result = vmaxvq_u8(vceqq_u8(v, vector8_broadcast(c))) != 0;
 #else
 	/* any bytes in v equal to c will evaluate to zero via XOR */
 	result = vector8_has_zero(v ^ vector8_broadcast(c));
@@ -159,8 +184,8 @@ static inline bool
 vector8_has_le(const Vector8 v, const uint8 c)
 {
 	bool		result = false;
-#ifdef USE_SSE2
-	__m128i		sub;
+#ifndef USE_NO_SIMD
+	Vector8		sub;
 #endif
 
 	/* pre-compute the result for assert checking */
@@ -185,6 +210,11 @@ vector8_has_le(const Vector8 v, const uint8 c)
 	 */
 	sub = _mm_subs_epu8(v, vector8_broadcast(c));
 	result = vector8_has_zero(sub);
+#elif USE_NEON
+
+	/* use the same approach as the USE_SSE2 block above */
+	sub = vqsubq_u8(v, vector8_broadcast(c));
+	result = vector8_has_zero(sub);
 #else
 
 	/*
@@ -218,6 +248,8 @@ vector8_is_highbit_set(const Vector8 v)
 {
 #ifdef USE_SSE2
 	return _mm_movemask_epi8(v) != 0;
+#elif USE_NEON
+	return vmaxvq_u8(vandq_u8(v, vector8_broadcast(0x80))) != 0;
 #else
 	return v & vector8_broadcast(0x80);
 #endif
@@ -234,6 +266,8 @@ vector32_any_lane_set(const Vector32 v)
 {
 #ifdef USE_SSE2
 	return _mm_movemask_epi8(v) != 0;
+#elif USE_NEON
+	return vmaxvq_u32(v) != 0;
 #endif
 }
 #endif
@@ -246,6 +280,8 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_cmpeq_epi8(v1, v2);
+#elif USE_NEON
+	return vceqq_u8(v1, v2);
 #endif
 }
 
@@ -254,6 +290,8 @@ vector32_eq(const Vector32 v1, const Vector32 v2)
 {
 #ifdef USE_SSE2
 	return _mm_cmpeq_epi32(v1, v2);
+#elif USE_NEON
+	return vceqq_u32(v1, v2);
 #endif
 }
 #endif
@@ -265,6 +303,8 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_or_si128(v1, v2);
+#elif USE_NEON
+	return vorrq_u8(v1, v2);
 #else
 	return v1 | v2;
 #endif
@@ -275,6 +315,8 @@ vector32_or(const Vector32 v1, const Vector32 v2)
 {
 #ifdef USE_SSE2
 	return _mm_or_si128(v1, v2);
+#elif USE_NEON
+	return vorrq_u32(v1, v2);
 #else
 	return v1 | v2;
 #endif
-- 
2.25.1
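For anyone unfamiliar with the Neon idiom: there is no direct counterpart to _mm_movemask_epi8(), so the patch uses a horizontal max (vmaxvq_*) over the comparison result, which is nonzero iff at least one lane matched. Here is a standalone sketch of the vector32_any_lane_set() pattern (aarch64 only, not part of the patch):

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t	base[4] = {1, 2, 3, 4};
	uint32x4_t	vals = vld1q_u32(base);	/* load four elements */
	uint32x4_t	keys = vdupq_n_u32(3);	/* broadcast the key */

	/* each lane becomes all ones on a match, all zeroes otherwise */
	uint32x4_t	cmp = vceqq_u32(keys, vals);

	/* horizontal max is nonzero iff some lane matched */
	printf("match: %d\n", vmaxvq_u32(cmp) != 0);
	return 0;
}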