Here is a new patch set in which I've attempted to address all feedback.

-- 
Nathan Bossart
Amazon Web Services: https://aws.amazon.com
>From a5f381097819db05b6e47418597cd56bab411fad Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nathandboss...@gmail.com>
Date: Thu, 25 Aug 2022 22:18:30 -0700
Subject: [PATCH v5 1/2] abstract architecture-specific implementation details
 from pg_lfind32()

---
 src/include/port/pg_lfind.h | 55 +++++++++++----------
 src/include/port/simd.h     | 96 +++++++++++++++++++++++++++++++------
 2 files changed, 112 insertions(+), 39 deletions(-)

diff --git a/src/include/port/pg_lfind.h b/src/include/port/pg_lfind.h
index a4e13dffec..2f8413b59e 100644
--- a/src/include/port/pg_lfind.h
+++ b/src/include/port/pg_lfind.h
@@ -91,16 +91,19 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 {
 	uint32		i = 0;
 
-#ifdef USE_SSE2
+#ifndef USE_NO_SIMD
 
 	/*
-	 * A 16-byte register only has four 4-byte lanes. For better
-	 * instruction-level parallelism, each loop iteration operates on a block
-	 * of four registers. Testing has showed this is ~40% faster than using a
-	 * block of two registers.
+	 * For better instruction-level parallelism, each loop iteration operates
+	 * on a block of four registers.  Testing for SSE2 has shown this is ~40%
+	 * faster than using a block of two registers.
 	 */
-	const		__m128i keys = _mm_set1_epi32(key); /* load 4 copies of key */
-	uint32		iterations = nelem & ~0xF;	/* round down to multiple of 16 */
+	const Vector32 keys = vector32_broadcast(key);	/* load copies of key */
+	uint32		nelem_per_vector = sizeof(Vector32) / sizeof(uint32);
+	uint32		nelem_per_iteration = 4 * nelem_per_vector;
+
+	/* round down to multiple of elements per iteration */
+	uint32		tail_idx = nelem & ~(nelem_per_iteration - 1);
 
 #if defined(USE_ASSERT_CHECKING)
 	bool		assert_result = false;
@@ -116,31 +119,33 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 	}
 #endif
 
-	for (i = 0; i < iterations; i += 16)
+	for (i = 0; i < tail_idx; i += nelem_per_iteration)
 	{
-		/* load the next block into 4 registers holding 4 values each */
-		const		__m128i vals1 = _mm_loadu_si128((__m128i *) & base[i]);
-		const		__m128i vals2 = _mm_loadu_si128((__m128i *) & base[i + 4]);
-		const		__m128i vals3 = _mm_loadu_si128((__m128i *) & base[i + 8]);
-		const		__m128i vals4 = _mm_loadu_si128((__m128i *) & base[i + 12]);
+		Vector32	vals1, vals2, vals3, vals4,
+					result1, result2, result3, result4,
+					tmp1, tmp2, result;
+
+		/* load the next block into 4 registers */
+		vector32_load(&vals1, &base[i]);
+		vector32_load(&vals2, &base[i + nelem_per_vector]);
+		vector32_load(&vals3, &base[i + nelem_per_vector * 2]);
+		vector32_load(&vals4, &base[i + nelem_per_vector * 3]);
 
 		/* compare each value to the key */
-		const		__m128i result1 = _mm_cmpeq_epi32(keys, vals1);
-		const		__m128i result2 = _mm_cmpeq_epi32(keys, vals2);
-		const		__m128i result3 = _mm_cmpeq_epi32(keys, vals3);
-		const		__m128i result4 = _mm_cmpeq_epi32(keys, vals4);
+		result1 = vector32_eq(keys, vals1);
+		result2 = vector32_eq(keys, vals2);
+		result3 = vector32_eq(keys, vals3);
+		result4 = vector32_eq(keys, vals4);
 
 		/* combine the results into a single variable */
-		const		__m128i tmp1 = _mm_or_si128(result1, result2);
-		const		__m128i tmp2 = _mm_or_si128(result3, result4);
-		const		__m128i result = _mm_or_si128(tmp1, tmp2);
+		tmp1 = vector32_or(result1, result2);
+		tmp2 = vector32_or(result3, result4);
+		result = vector32_or(tmp1, tmp2);
 
 		/* see if there was a match */
-		if (_mm_movemask_epi8(result) != 0)
+		if (vector32_is_highbit_set(result))
 		{
-#if defined(USE_ASSERT_CHECKING)
 			Assert(assert_result == true);
-#endif
 			return true;
 		}
 	}
@@ -151,14 +156,14 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem)
 	{
 		if (key == base[i])
 		{
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 			Assert(assert_result == true);
 #endif
 			return true;
 		}
 	}
 
-#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING)
+#ifndef USE_NO_SIMD
 	Assert(assert_result == false);
 #endif
 	return false;
diff --git a/src/include/port/simd.h b/src/include/port/simd.h
index a425cd887b..58b5f5ed86 100644
--- a/src/include/port/simd.h
+++ b/src/include/port/simd.h
@@ -31,39 +31,52 @@
 #include <emmintrin.h>
 #define USE_SSE2
 typedef __m128i Vector8;
+typedef __m128i Vector32;
 
 #else
 /*
  * If no SIMD instructions are available, we can in some cases emulate vector
- * operations using bitwise operations on unsigned integers.
+ * operations using bitwise operations on unsigned integers.  Note that many
+ * of the functions in this file presently do not have non-SIMD
+ * implementations.
  */
 #define USE_NO_SIMD
 typedef uint64 Vector8;
 #endif
 
-
 /* load/store operations */
 static inline void vector8_load(Vector8 *v, const uint8 *s);
+#ifndef USE_NO_SIMD
+static inline void vector32_load(Vector32 *v, const uint32 *s);
+#endif
 
 /* assignment operations */
 static inline Vector8 vector8_broadcast(const uint8 c);
+#ifndef USE_NO_SIMD
+static inline Vector32 vector32_broadcast(const uint32 c);
+#endif
 
 /* element-wise comparisons to a scalar */
 static inline bool vector8_has(const Vector8 v, const uint8 c);
 static inline bool vector8_has_zero(const Vector8 v);
 static inline bool vector8_has_le(const Vector8 v, const uint8 c);
 static inline bool vector8_is_highbit_set(const Vector8 v);
+#ifndef USE_NO_SIMD
+static inline bool vector32_is_highbit_set(const Vector32 v);
+#endif
 
 /* arithmetic operations */
 static inline Vector8 vector8_or(const Vector8 v1, const Vector8 v2);
-
-/* Different semantics for SIMD architectures. */
 #ifndef USE_NO_SIMD
+static inline Vector32 vector32_or(const Vector32 v1, const Vector32 v2);
+static inline Vector8 vector8_ssub(const Vector8 v1, const Vector8 v2);
+#endif
 
 /* comparisons between vectors */
+#ifndef USE_NO_SIMD
 static inline Vector8 vector8_eq(const Vector8 v1, const Vector8 v2);
-
-#endif							/* ! USE_NO_SIMD */
+static inline Vector32 vector32_eq(const Vector32 v1, const Vector32 v2);
+#endif
 
 /*
  * Load a chunk of memory into the given vector.
@@ -78,6 +91,15 @@ vector8_load(Vector8 *v, const uint8 *s)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline void
+vector32_load(Vector32 *v, const uint32 *s)
+{
+#ifdef USE_SSE2
+	*v = _mm_loadu_si128((const __m128i *) s);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
 
 /*
  * Create a vector with all elements set to the same value.
@@ -92,6 +114,16 @@ vector8_broadcast(const uint8 c)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_broadcast(const uint32 c)
+{
+#ifdef USE_SSE2
+	return _mm_set1_epi32(c);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
+
 /*
  * Return true if any elements in the vector are equal to the given scalar.
  */
@@ -118,7 +150,7 @@ vector8_has(const Vector8 v, const uint8 c)
 	/* any bytes in v equal to c will evaluate to zero via XOR */
 	result = vector8_has_zero(v ^ vector8_broadcast(c));
 #elif defined(USE_SSE2)
-	result = _mm_movemask_epi8(_mm_cmpeq_epi8(v, vector8_broadcast(c)));
+	result = vector8_is_highbit_set(vector8_eq(v, vector8_broadcast(c)));
 #endif
 
 	Assert(assert_result == result);
@@ -150,9 +182,6 @@ static inline bool
 vector8_has_le(const Vector8 v, const uint8 c)
 {
 	bool		result = false;
-#if defined(USE_SSE2)
-	__m128i		sub;
-#endif
 
 	/* pre-compute the result for assert checking */
 #ifdef USE_ASSERT_CHECKING
@@ -194,10 +223,9 @@ vector8_has_le(const Vector8 v, const uint8 c)
 
 	/*
 	 * Use saturating subtraction to find bytes <= c, which will present as
-	 * NUL bytes in 'sub'.
+	 * NUL bytes.
 	 */
-	sub = _mm_subs_epu8(v, vector8_broadcast(c));
-	result = vector8_has_zero(sub);
+	result = vector8_has_zero(vector8_ssub(v, vector8_broadcast(c)));
 #endif
 
 	Assert(assert_result == result);
@@ -217,6 +245,16 @@ vector8_is_highbit_set(const Vector8 v)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline bool
+vector32_is_highbit_set(const Vector32 v)
+{
+#ifdef USE_SSE2
+	return (_mm_movemask_epi8(v) & 0x8888) != 0;
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
+
 /*
  * Return the bitwise OR of the inputs
  */
@@ -230,14 +268,35 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 #endif
 }
 
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_or(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_or_si128(v1, v2);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
 
-/* Different semantics for SIMD architectures. */
+/*
+ * Return the result of subtracting the respective elements of the input
+ * vectors using saturation.
+ */
 #ifndef USE_NO_SIMD
+static inline Vector8
+vector8_ssub(const Vector8 v1, const Vector8 v2)
+{
+#ifdef USE_SSE2
+	return _mm_subs_epu8(v1, v2);
+#endif
+}
+#endif							/* ! USE_NO_SIMD */
 
 /*
  * Return a vector with all bits set in each lane where the the corresponding
  * lanes in the inputs are equal.
  */
+#ifndef USE_NO_SIMD
 static inline Vector8
 vector8_eq(const Vector8 v1, const Vector8 v2)
 {
@@ -245,7 +304,16 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 	return _mm_cmpeq_epi8(v1, v2);
 #endif
 }
+#endif							/* ! USE_NO_SIMD */
 
+#ifndef USE_NO_SIMD
+static inline Vector32
+vector32_eq(const Vector32 v1, const Vector32 v2)
+{
+#ifdef USE_SSE2
+	return _mm_cmpeq_epi32(v1, v2);
+#endif
+}
 #endif							/* ! USE_NO_SIMD */
 
 #endif							/* SIMD_H */
-- 
2.25.1

>From ded777318b1e3508be7ab8d4e9d89ab9e77eeba6 Mon Sep 17 00:00:00 2001
From: Nathan Bossart <nathandboss...@gmail.com>
Date: Fri, 26 Aug 2022 10:47:35 -0700
Subject: [PATCH v5 2/2] use ARM Advanced SIMD intrinsic functions where
 available

---
 src/include/port/simd.h | 42 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 3 deletions(-)

diff --git a/src/include/port/simd.h b/src/include/port/simd.h
index 58b5f5ed86..d7e41f5162 100644
--- a/src/include/port/simd.h
+++ b/src/include/port/simd.h
@@ -33,6 +33,20 @@
 typedef __m128i Vector8;
 typedef __m128i Vector32;
 
+#elif defined(__aarch64__) && defined(__ARM_NEON)
+/*
+ * We use the Neon instructions if the compiler provides access to them (as
+ * indicated by __ARM_NEON) and we are on aarch64.  While Neon support is
+ * technically optional for aarch64, it appears that all available 64-bit
+ * hardware does have it.  Neon exists in some 32-bit hardware too, but we
+ * could not realistically use it there without a run-time check, which seems
+ * not worth the trouble for now.
+ */
+#include <arm_neon.h>
+#define USE_NEON
+typedef uint8x16_t Vector8;
+typedef uint32x4_t Vector32;
+
 #else
 /*
  * If no SIMD instructions are available, we can in some cases emulate vector
@@ -86,6 +100,8 @@ vector8_load(Vector8 *v, const uint8 *s)
 {
 #if defined(USE_SSE2)
 	*v = _mm_loadu_si128((const __m128i *) s);
+#elif defined(USE_NEON)
+	*v = vld1q_u8(s);
 #else
 	memcpy(v, s, sizeof(Vector8));
 #endif
@@ -97,6 +113,8 @@ vector32_load(Vector32 *v, const uint32 *s)
 {
 #ifdef USE_SSE2
 	*v = _mm_loadu_si128((const __m128i *) s);
+#elif defined(USE_NEON)
+	*v = vld1q_u32(s);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -109,6 +127,8 @@ vector8_broadcast(const uint8 c)
 {
 #if defined(USE_SSE2)
 	return _mm_set1_epi8(c);
+#elif defined(USE_NEON)
+	return vdupq_n_u8(c);
 #else
 	return ~UINT64CONST(0) / 0xFF * c;
 #endif
@@ -120,6 +140,8 @@ vector32_broadcast(const uint32 c)
 {
 #ifdef USE_SSE2
 	return _mm_set1_epi32(c);
+#elif defined(USE_NEON)
+	return vdupq_n_u32(c);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -149,7 +171,7 @@ vector8_has(const Vector8 v, const uint8 c)
 #if defined(USE_NO_SIMD)
 	/* any bytes in v equal to c will evaluate to zero via XOR */
 	result = vector8_has_zero(v ^ vector8_broadcast(c));
-#elif defined(USE_SSE2)
+#else
 	result = vector8_is_highbit_set(vector8_eq(v, vector8_broadcast(c)));
 #endif
 
@@ -169,7 +191,7 @@ vector8_has_zero(const Vector8 v)
 	 * definition.
 	 */
 	return vector8_has_le(v, 0);
-#elif defined(USE_SSE2)
+#else
 	return vector8_has(v, 0);
 #endif
 }
@@ -219,7 +241,7 @@ vector8_has_le(const Vector8 v, const uint8 c)
 			}
 		}
 	}
-#elif defined(USE_SSE2)
+#else
 
 	/*
 	 * Use saturating subtraction to find bytes <= c, which will present as
@@ -240,6 +262,8 @@ vector8_is_highbit_set(const Vector8 v)
 {
 #ifdef USE_SSE2
 	return _mm_movemask_epi8(v) != 0;
+#elif defined(USE_NEON)
+	return vmaxvq_u8(v) > 0x7F;
 #else
 	return v & vector8_broadcast(0x80);
 #endif
@@ -251,6 +275,8 @@ vector32_is_highbit_set(const Vector32 v)
 {
 #ifdef USE_SSE2
 	return (_mm_movemask_epi8(v) & 0x8888) != 0;
+#elif defined(USE_NEON)
+	return vmaxvq_u32(v) > 0x7FFFFFFFU;
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -263,6 +289,8 @@ vector8_or(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_or_si128(v1, v2);
+#elif defined(USE_NEON)
+	return vorrq_u8(v1, v2);
 #else
 	return v1 | v2;
 #endif
@@ -274,6 +302,8 @@ vector32_or(const Vector32 v1, const Vector32 v2)
 {
 #ifdef USE_SSE2
 	return _mm_or_si128(v1, v2);
+#elif defined(USE_NEON)
+	return vorrq_u32(v1, v2);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -288,6 +318,8 @@ vector8_ssub(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_subs_epu8(v1, v2);
+#elif defined(USE_NEON)
+	return vqsubq_u8(v1, v2);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -302,6 +334,8 @@ vector8_eq(const Vector8 v1, const Vector8 v2)
 {
 #ifdef USE_SSE2
 	return _mm_cmpeq_epi8(v1, v2);
+#elif defined(USE_NEON)
+	return vceqq_u8(v1, v2);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
@@ -312,6 +346,8 @@ vector32_eq(const Vector32 v1, const Vector32 v2)
 {
 #ifdef USE_SSE2
 	return _mm_cmpeq_epi32(v1, v2);
+#elif defined(USE_NEON)
+	return vceqq_u32(v1, v2);
 #endif
 }
 #endif							/* ! USE_NO_SIMD */
-- 
2.25.1

Reply via email to