On Mon, Aug 29, 2022 at 11:25:50AM +0700, John Naylor wrote:
> + uint32 nelem_per_vector = sizeof(Vector32) / sizeof(uint32);
> + uint32 nelem_per_iteration = 4 * nelem_per_vector;
> 
> Using local #defines would be my style. I don't have a reason to
> object to this way, but adding const makes these vars more clear.

I added const.

> Speaking of const:
> 
> - const __m128i tmp1 = _mm_or_si128(result1, result2);
> - const __m128i tmp2 = _mm_or_si128(result3, result4);
> - const __m128i result = _mm_or_si128(tmp1, tmp2);
> + tmp1 = vector32_or(result1, result2);
> + tmp2 = vector32_or(result3, result4);
> + result = vector32_or(tmp1, tmp2);
> 
> Any reason to throw away the const declarations?

The only reason is that I had to move the declarations before the
vector32_load() calls.
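
To illustrate (a sketch of mine, not from the patch): vector32_load() fills
its destination through a pointer, so the variable has to be declared
separately and can no longer be const-qualified at its declaration:

	Vector32	vals1;					/* can't be const here */
	vector32_load(&vals1, &base[i]);

whereas a hypothetical value-returning variant could have kept the qualifier:

	const Vector32 vals1 = vector32_load_val(&base[i]);	/* not in the patch */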

> +static inline bool
> +vector32_is_highbit_set(const Vector32 v)
> +{
> +#ifdef USE_SSE2
> + return (_mm_movemask_epi8(v) & 0x8888) != 0;
> +#endif
> +}
> 
> I'm not sure why we need this function -- AFAICS it just adds more
> work on x86 for zero benefit. For our present application, can we just
> cast to Vector8 (for Arm's sake) and call the 8-bit version?

Good idea.
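
For reference, the call site in pg_lfind32() now looks like this (SSE2
shown, where both Vector8 and Vector32 are __m128i, so the cast costs
nothing):

	result = vector32_or(tmp1, tmp2);
	if (vector8_is_highbit_set((Vector8) result))
		return true;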

> - * operations using bitwise operations on unsigned integers.
> + * operations using bitwise operations on unsigned integers.  Note that many
> + * of the functions in this file presently do not have non-SIMD
> + * implementations.
> 
> It's unclear to the reader whether this is a matter of 'round-to-its'.
> I'd like to document what I asserted in this thread, that it's likely
> not worthwhile to do anything with a uint64 representing two 32-bit
> ints. (It *is* demonstrably worth it for handling 8 byte-values at a
> time)

Done.

>   * Use saturating subtraction to find bytes <= c, which will present as
> - * NUL bytes in 'sub'.
> + * NUL bytes.
> 
> I'd like to point out that the reason to do it this way is to
> work around SIMD architectures' frequent lack of unsigned comparison.

Done.
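
For anyone following along, here's a scalar model of the trick (mine, not
from the patch): saturating subtraction clamps at zero instead of wrapping,
so a byte compares <= c exactly when the saturating difference is zero.

	/* per-byte equivalent of what the SIMD code does (uint8 from c.h) */
	static inline bool
	byte_is_le(uint8 v, uint8 c)
	{
		uint8		sub = (v > c) ? (v - c) : 0;	/* saturating v - c */

		return sub == 0;		/* true exactly when v <= c */
	}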

> + * Return the result of subtracting the respective elements of the input
> + * vectors using saturation.
> 
> I wonder if we should explain briefly what saturating arithmetic is. I
> had never encountered it outside of a SIMD programming context.

Done.
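
In case a concrete example helps: with unsigned bytes, 100 - 200 would
normally wrap around to 156, but saturating subtraction clamps the result
to 0.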

-- 
Nathan Bossart
Amazon Web Services: https://aws.amazon.com
From 567c5309f3caa87c8cd7fe2de62309eea429d8c5 Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nathandboss...@gmail.com>
Date: Thu, 25 Aug 2022 22:18:30 -0700
Subject: [PATCH v6 1/2] abstract architecture-specific implementation details
 from pg_lfind32()

---
 src/include/port/pg_lfind.h | 55 ++++++++++++-----------
 src/include/port/simd.h     | 88 +++++++++++++++++++++++++++++++------
 2 files changed, 104 insertions(+), 39 deletions(-)

diff --git a/src/include/port/pg_lfind.h b/src/include/port/pg_lfind.h
index a4e13dffec..1d9be4eb36 100644
--- a/src/include/port/pg_lfind.h
+++ b/src/include/port/pg_lfind.h
@@ -91,16 +91,19 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 {
 	uint32		i = 0;
 
-#ifdef USE_SSE2
+#ifndef USE_NO_SIMD
 
 	/*
-	 * A 16-byte register only has four 4-byte lanes. For better
-	 * instruction-level parallelism, each loop iteration operates on a block
-	 * of four registers. Testing has showed this is ~40% faster than using a
-	 * block of two registers.
+	 * For better instruction-level parallelism, each loop iteration operates
+	 * on a block of four registers.  Testing for SSE2 has shown this is ~40%
+	 * faster than using a block of two registers.
 	 */
-	const		__m128i keys = _mm_set1_epi32(key); /* load 4 copies of key */
-	uint32		iterations = nelem & ~0xF;	/* round down to multiple of 16 */
+	const Vector32 keys = vector32_broadcast(key);	/* load copies of key */
+	const uint32 nelem_per_vector = sizeof(Vector32) / sizeof(uint32);
+	const uint32 nelem_per_iteration = 4 * nelem_per_vector;
+
+	/* round down to multiple of elements per iteration */
+	const uint32 tail_idx = nelem & ~(nelem_per_iteration - 1);
 
 #if defined(USE_ASSERT_CHECKING)
 	bool		assert_result = false;
@@ -116,31 +119,33 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 	}
 #endif
 
-	for (i = 0; i < iterations; i += 16)
+	for (i = 0; i < tail_idx; i += nelem_per_iteration)
 	{
-		/* load the next block into 4 registers holding 4 values each */
-		const		__m128i vals1 = _mm_loadu_si128((__m128i *) & base[i]);
-		const		__m128i vals2 = _mm_loadu_si128((__m128i *) & base[i + 4]);
-		const		__m128i vals3 = _mm_loadu_si128((__m128i *) & base[i + 8]);
-		const		__m128i vals4 = _mm_loadu_si128((__m128i *) & base[i + 12]);
+		Vector32	vals1, vals2, vals3, vals4,
+					result1, result2, result3, result4,
+					tmp1, tmp2, result;
+
+		/* load the next block into 4 registers */
+		vector32_load(&vals1, &base[i]);
+		vector32_load(&vals2, &base[i + nelem_per_vector]);
+		vector32_load(&vals3, &base[i + nelem_per_vector * 2]);
+		vector32_load(&vals4, &base[i + nelem_per_vector * 3]);
 
 		/* compare each value to the key */
-		const		__m128i result1 = _mm_cmpeq_epi32(keys, vals1);
-		const		__m128i result2 = _mm_cmpeq_epi32(keys, vals2);
-		const		__m128i result3 = _mm_cmpeq_epi32(keys, vals3);
-		const		__m128i result4 = _mm_cmpeq_epi32(keys, vals4);
+		result1 = vector32_eq(keys, vals1);
+		result2 = vector32_eq(keys, vals2);
+		result3 = vector32_eq(keys, vals3);
+		result4 = vector32_eq(keys, vals4);
 
 		/* combine the results into a single variable */
-		const		__m128i tmp1 = _mm_or_si128(result1, result2);
-		const		__m128i tmp2 = _mm_or_si128(result3, result4);
-		const		__m128i result = _mm_or_si128(tmp1, tmp2);
+		tmp1 = vector32_or(result1, result2);
+		tmp2 = vector32_or(result3, result4);
+		result = vector32_or(tmp1, tmp2);
 
 		/* see if there was a match */
-		if (_mm_movemask_epi8(result) != 0)
+		if (vector8_is_highbit_set((Vector8) result))
 		{
-#if defined(USE_ASSERT_CHECKING)
 			Assert(assert_result == true);
-#endif
 			return true;
 		}
 	}
@@ -151,14 +156,14 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 	{
 		if (key == base[i])
 		{
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 			Assert(assert_result == true);
 #endif
 			return true;
 		}
 	}
 
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 	Assert(assert_result == false);
 #endif
 	return false;
diff --git a/src/include/port/simd.h b/src/include/port/simd.h
index a425cd887b..709408d8c6 100644
--- a/src/include/port/simd.h
+++ b/src/include/port/simd.h
@@ -31,22 +31,32 @@
 #include <emmintrin.h>
 #define USE_SSE2
 typedef __m128i Vector8;
+typedef __m128i Vector32;
 
 #else
 /*
  * If no SIMD instructions are available, we can in some cases emulate vector
- * operations using bitwise operations on unsigned integers.
+ * operations using bitwise operations on unsigned integers.  Note that many
+ * of the functions in this file presently do not have non-SIMD
+ * implementations.  In particular, none of the functions involving Vector32
+ * are implemented without SIMD since it's likely not worthwhile to do anything
+ * with a uint64 representing two 32-bit integers.
  */
 #define USE_NO_SIMD
 typedef uint64 Vector8;
 #endif
 
-
 /* load/store operations */
 static inline void vector8_load(Vector8 *v, const uint8 *s);
+#ifndef USE_NO_SIMD
+static inline void vector32_load(Vector32 *v, const uint32 *s);
+#endif
 
 /* assignment operations */
 static inline Vector8 vector8_broadcast(const uint8 c);
+#ifndef USE_NO_SIMD
+static inline Vector32 vector32_broadcast(const uint32 c);
+#endif
 
 /* element-wise comparisons to a scalar */
 static inline bool vector8_has(const Vector8 v, const uint8 c);
@@ -56,14 +66,16 @@ static inline bool vector8_is_highbit_set(const Vector8 v);
 
 /* arithmetic operations */
 static inline Vector8 vector8_or(const Vector8 v1, const Vector8 v2);
-
-/* Different semantics for SIMD architectures. */
 #ifndef USE_NO_SIMD
+static inline Vector32 vector32_or(const Vector32 v1, const Vector32 v2);
+static inline Vector8 vector8_ssub(const Vector8 v1, const Vector8 v2);
+#endif
 
 /* comparisons between vectors */
+#ifndef USE_NO_SIMD
 static inline Vector8 vector8_eq(const Vector8 v1, const Vector8 v2);
-
-#endif							/* ! USE_NO_SIMD */
+static inline Vector32 vector32_eq(const Vector32 v1, const Vector32 v2);
+#endif
 
 /*
  * Load a chunk of memory into the given vector.
@@ -78,6 +90,15 @@ vector8_load(Vector8 *v, const uint8 *s)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline void
+vector32_load(Vector32 *v, const uint32 *s)
+{
+#ifdef USE_SSE2
+	*v = _mm_loadu_si128((const __m128i *) s);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
 
 /*
  * Create a vector with all elements set to the same value.
@@ -92,6 +113,16 @@ vector8_broadcast(const uint8 c)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_broadcast(const uint32 c)
+{
+#ifdef USE_SSE2
+	return _mm_set1_epi32(c);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
+
 /*
  * Return true if any elements in the vector are equal to the given scalar.
  */
@@ -118,7 +149,7 @@ vector8_has(const Vector8 v, const uint8 c)
 	/* any bytes in v equal to c will evaluate to zero via XOR */
 	result = vector8_has_zero(v ^ vector8_broadcast(c));
 #elif defined(USE_SSE2)
-	result = _mm_movemask_epi8(_mm_cmpeq_epi8(v, vector8_broadcast(c)));
+	result = vector8_is_highbit_set(vector8_eq(v, vector8_broadcast(c)));
 #endif
 
 	Assert(assert_result == result);
@@ -150,9 +181,6 @@ static inline bool
 vector8_has_le(const Vector8 v, const uint8 c)
 {
 	bool		result = false;
-#if defined(USE_SSE2)
-	__m128i		sub;
-#endif
 
 	/* pre-compute the result for assert checking */
 #ifdef USE_ASSERT_CHECKING
@@ -194,10 +222,10 @@ vector8_has_le(const Vector8 v, const uint8 c)
 
 	/*
 	 * Use saturating subtraction to find bytes <= c, which will present as
-	 * NUL bytes in 'sub'.
+	 * NUL bytes.  This approach is a workaround for the lack of unsigned
+	 * comparison instructions on some architectures.
 	 */
-	sub = _mm_subs_epu8(v, vector8_broadcast(c));
-	result = vector8_has_zero(sub);
+	result = vector8_has_zero(vector8_ssub(v, vector8_broadcast(c)));
 #endif
 
 	Assert(assert_result == result);
@@ -230,14 +258,37 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_or(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_or_si128(v1, v2);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
 
-/* Different semantics for SIMD architectures. */
+/*
+ * Return the result of subtracting the respective elements of the input
+ * vectors using saturation (i.e., if the operation would yield a value less
+ * than zero, zero is returned instead).  For more information on saturation
+ * arithmetic, see https://en.wikipedia.org/wiki/Saturation_arithmetic.
+ */
 #ifndef USE_NO_SIMD
+static inline Vector8
+vector8_ssub(const Vector8 v1, const Vector8 v2)
+{
+#ifdef USE_SSE2
+	return _mm_subs_epu8(v1, v2);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
 
 /*
 * Return a vector with all bits set in each lane where the corresponding
  * lanes in the inputs are equal.
  */
+#ifndef USE_NO_SIMD
 static inline Vector8
 vector8_eq(const Vector8 v1, const Vector8 v2)
 {
@@ -245,7 +296,16 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 	return _mm_cmpeq_epi8(v1, v2);
 #endif
 }
+#endif							/* ! USE_NO_SIMD */
 
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_eq(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_cmpeq_epi32(v1, v2);
+#endif
+}
 #endif							/* ! USE_NO_SIMD */
 
 #endif							/* SIMD_H */
-- 
2.32.1 (Apple Git-133)

From 080a0223ab3c869de80f11272ffb25e940d26eff Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nathandboss...@gmail.com>
Date: Fri, 26 Aug 2022 10:47:35 -0700
Subject: [PATCH v6 2/2] use ARM Advanced SIMD intrinsic functions where
 available

---
 src/include/port/simd.h | 40 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 37 insertions(+), 3 deletions(-)

diff --git a/src/include/port/simd.h b/src/include/port/simd.h
index 709408d8c6..aeba240e2e 100644
--- a/src/include/port/simd.h
+++ b/src/include/port/simd.h
@@ -33,6 +33,20 @@
 typedef __m128i Vector8;
 typedef __m128i Vector32;
 
+#elif defined(__aarch64__) && defined(__ARM_NEON)
+/*
+ * We use the Neon instructions if the compiler provides access to them (as
+ * indicated by __ARM_NEON) and we are on aarch64.  While Neon support is
+ * technically optional for aarch64, it appears that all available 64-bit
+ * hardware does have it.  Neon exists in some 32-bit hardware too, but we
+ * could not realistically use it there without a run-time check, which seems
+ * not worth the trouble for now.
+ */
+#include <arm_neon.h>
+#define USE_NEON
+typedef uint8x16_t Vector8;
+typedef uint32x4_t Vector32;
+
 #else
 /*
  * If no SIMD instructions are available, we can in some cases emulate vector
@@ -85,6 +99,8 @@ vector8_load(Vector8 *v, const uint8 *s)
 {
 #if defined(USE_SSE2)
 	*v = _mm_loadu_si128((const __m128i *) s);
+#elif defined(USE_NEON)
+	*v = vld1q_u8(s);
 #else
 	memcpy(v, s, sizeof(Vector8));
 #endif
@@ -96,6 +112,8 @@ vector32_load(Vector32 *v, const uint32 *s)
 {
 #ifdef USE_SSE2
 	*v = _mm_loadu_si128((const __m128i *) s);
+#elif defined(USE_NEON)
+	*v = vld1q_u32(s);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -108,6 +126,8 @@ vector8_broadcast(const uint8 c)
 {
 #if defined(USE_SSE2)
 	return _mm_set1_epi8(c);
+#elif defined(USE_NEON)
+	return vdupq_n_u8(c);
 #else
 	return ~UINT64CONST(0) / 0xFF * c;
 #endif
@@ -119,6 +139,8 @@ vector32_broadcast(const uint32 c)
 {
 #ifdef USE_SSE2
 	return _mm_set1_epi32(c);
+#elif defined(USE_NEON)
+	return vdupq_n_u32(c);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -148,7 +170,7 @@ vector8_has(const Vector8 v, const uint8 c)
 #if defined(USE_NO_SIMD)
 	/* any bytes in v equal to c will evaluate to zero via XOR */
 	result = vector8_has_zero(v ^ vector8_broadcast(c));
-#elif defined(USE_SSE2)
+#else
 	result = vector8_is_highbit_set(vector8_eq(v, vector8_broadcast(c)));
 #endif
 
@@ -168,7 +190,7 @@ vector8_has_zero(const Vector8 v)
 	 * definition.
 	 */
 	return vector8_has_le(v, 0);
-#elif defined(USE_SSE2)
+#else
 	return vector8_has(v, 0);
 #endif
 }
@@ -218,7 +240,7 @@ vector8_has_le(const Vector8 v, const uint8 c)
 			}
 		}
 	}
-#elif defined(USE_SSE2)
+#else
 
 	/*
 	 * Use saturating subtraction to find bytes <= c, which will present as
@@ -240,6 +262,8 @@ vector8_is_highbit_set(const Vector8 v)
 {
 #ifdef USE_SSE2
 	return _mm_movemask_epi8(v) != 0;
+#elif defined(USE_NEON)
+	return vmaxvq_u8(v) > 0x7F;
 #else
 	return v & vector8_broadcast(0x80);
 #endif
@@ -253,6 +277,8 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_or_si128(v1, v2);
+#elif defined(USE_NEON)
+	return vorrq_u8(v1, v2);
 #else
 	return v1 | v2;
 #endif
@@ -264,6 +290,8 @@ vector32_or(const Vector32 v1, const Vector32 v2)
 {
 #ifdef USE_SSE2
 	return _mm_or_si128(v1, v2);
+#elif defined(USE_NEON)
+	return vorrq_u32(v1, v2);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -280,6 +308,8 @@ vector8_ssub(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_subs_epu8(v1, v2);
+#elif defined(USE_NEON)
+	return vqsubq_u8(v1, v2);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -294,6 +324,8 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_cmpeq_epi8(v1, v2);
+#elif defined(USE_NEON)
+	return vceqq_u8(v1, v2);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -304,6 +336,8 @@ vector32_eq(const Vector32 v1, const Vector32 v2)
 {
 #ifdef USE_SSE2
 	return _mm_cmpeq_epi32(v1, v2);
+#elif defined(USE_NEON)
+	return vceqq_u32(v1, v2);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
-- 
2.32.1 (Apple Git-133)
