On Fri, Aug 19, 2022 at 01:42:15PM -0700, Nathan Bossart wrote: > On Fri, Aug 19, 2022 at 03:11:36PM +0700, John Naylor wrote: >> This is done. Also: >> - a complete overhaul of the pg_lfind8* tests >> - using a typedef for the vector type >> - some refactoring, name changes and other cleanups (a few of these >> could also be applied to the 32-byte element path, but that is left >> for future work) >> >> TODO: json-specific tests of the new path > > This looks pretty good to me. Should we rename vector_broadcast() and > vector_has_zero() to indicate that they are working with bytes (e.g., > vector_broadcast_byte())? We might be able to use vector_broadcast_int() > in the 32-bit functions, and your other vector functions already have a > _byte suffix. > > In general, the approach you've taken seems like a decent readability > improvement. I'd be happy to try my hand at adjusting the 32-bit path and > adding ARM versions of all this stuff.
I spent some more time looking at this one, and I had a few ideas that I thought I'd share. 0001 is your v6 patch with a few additional changes, including simplifying the assertions for readability, splitting out the Vector type into Vector8 and Vector32 (needed for ARM), and adjusting pg_lfind32() to use the new tools in simd.h. 0002 adds ARM versions of everything, which obsoletes the other thread I started [0]. This is still a little rough around the edges (e.g., this should probably be more than 2 patches), but I think it helps demonstrate a more comprehensive design than what I've proposed in the pg_lfind32-for-ARM thread [0]. Apologies if I'm stepping on your toes a bit here. [0] https://postgr.es/m/20220819200829.GA395728%40nathanxps13 -- Nathan Bossart Amazon Web Services: https://aws.amazon.com
>From 7dd35c8ffe8e42885586fb16a77b6c3e792c6a6d Mon Sep 17 00:00:00 2001 From: Nathan Bossart <nathandboss...@gmail.com> Date: Sat, 20 Aug 2022 21:14:01 -0700 Subject: [PATCH 1/2] json_lex_string() SIMD --- src/common/jsonapi.c | 11 +- src/include/port/pg_lfind.h | 132 ++++++---- src/include/port/simd.h | 227 ++++++++++++++++++ .../test_lfind/expected/test_lfind.out | 18 +- .../modules/test_lfind/sql/test_lfind.sql | 4 +- .../modules/test_lfind/test_lfind--1.0.sql | 10 +- src/test/modules/test_lfind/test_lfind.c | 91 ++++++- 7 files changed, 443 insertions(+), 50 deletions(-) diff --git a/src/common/jsonapi.c b/src/common/jsonapi.c index fefd1d24d9..87e1d0b192 100644 --- a/src/common/jsonapi.c +++ b/src/common/jsonapi.c @@ -19,6 +19,7 @@ #include "common/jsonapi.h" #include "mb/pg_wchar.h" +#include "port/pg_lfind.h" #ifndef FRONTEND #include "miscadmin.h" @@ -844,7 +845,7 @@ json_lex_string(JsonLexContext *lex) } else { - char *p; + char *p = s; if (hi_surrogate != -1) return JSON_UNICODE_LOW_SURROGATE; @@ -853,7 +854,13 @@ json_lex_string(JsonLexContext *lex) * Skip to the first byte that requires special handling, so we * can batch calls to appendBinaryStringInfo. */ - for (p = s; p < end; p++) + while (p < end - sizeof(Vector8) && + !pg_lfind8('\\', (uint8 *) p, sizeof(Vector8)) && + !pg_lfind8('"', (uint8 *) p, sizeof(Vector8)) && + !pg_lfind8_le(0x1F, (uint8 *) p, sizeof(Vector8))) + p += sizeof(Vector8); + + for (; p < end; p++) { if (*p == '\\' || *p == '"') break; diff --git a/src/include/port/pg_lfind.h b/src/include/port/pg_lfind.h index fb125977b2..def858cbe1 100644 --- a/src/include/port/pg_lfind.h +++ b/src/include/port/pg_lfind.h @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * * pg_lfind.h - * Optimized linear search routines. 
+ * Optimized linear search routines using SIMD intrinsics where available * * Copyright (c) 2022, PostgreSQL Global Development Group * @@ -15,6 +15,68 @@ #include "port/simd.h" +/* + * pg_lfind8 + * + * Return true if there is an element in 'base' that equals 'key', otherwise + * return false. + */ +static inline bool +pg_lfind8(uint8 key, uint8 *base, uint32 nelem) +{ + uint32 i; + /* round down to multiple of vector length */ + uint32 tail_idx = nelem & ~(sizeof(Vector8) - 1); + Vector8 chunk; + + for (i = 0; i < tail_idx; i += sizeof(Vector8)) + { + vector8_load(&chunk, &base[i]); + if (vector8_eq(chunk, key)) + return true; + } + + /* Process the remaining elements one at a time. */ + for (; i < nelem; i++) + { + if (key == base[i]) + return true; + } + + return false; +} + +/* + * pg_lfind8_le + * + * Return true if there is an element in 'base' that is less than or equal to + * 'key', otherwise return false. + */ +static inline bool +pg_lfind8_le(uint8 key, uint8 *base, uint32 nelem) +{ + uint32 i; + /* round down to multiple of vector length */ + uint32 tail_idx = nelem & ~(sizeof(Vector8) - 1); + Vector8 chunk; + + for (i = 0; i < tail_idx; i += sizeof(Vector8)) + { + vector8_load(&chunk, &base[i]); + if (vector8_le(chunk, key)) + return true; + } + + /* Process the remaining elements one at a time. */ + for (; i < nelem; i++) + { + if (base[i] <= key) + return true; + } + + return false; +} + /* * pg_lfind32 * @@ -24,59 +86,50 @@ static inline bool pg_lfind32(uint32 key, uint32 *base, uint32 nelem) { + bool result = false; uint32 i = 0; +#ifdef USE_ASSERT_CHECKING + size_t nelem_for_asserts = nelem; +#endif - /* Use SIMD intrinsics where available. */ #ifdef USE_SSE2 - /* * A 16-byte register only has four 4-byte lanes. For better * instruction-level parallelism, each loop iteration operates on a block * of four registers. Testing has showed this is ~40% faster than using a * block of two registers. 
*/ - const __m128i keys = _mm_set1_epi32(key); /* load 4 copies of key */ - uint32 iterations = nelem & ~0xF; /* round down to multiple of 16 */ + const Vector32 keys = vector32_broadcast(key); /* load 4 copies of key */ + uint32 tail_idx = nelem & ~0xF; /* round down to multiple of 16 */ -#if defined(USE_ASSERT_CHECKING) - bool assert_result = false; - - /* pre-compute the result for assert checking */ - for (i = 0; i < nelem; i++) + for (i = 0; i < tail_idx; i += 16) { - if (key == base[i]) - { - assert_result = true; - break; - } - } -#endif + Vector32 vals1, vals2, vals3, vals4, + result1, result2, result3, result4, + tmp1, tmp2, result; - for (i = 0; i < iterations; i += 16) - { /* load the next block into 4 registers holding 4 values each */ - const __m128i vals1 = _mm_loadu_si128((__m128i *) & base[i]); - const __m128i vals2 = _mm_loadu_si128((__m128i *) & base[i + 4]); - const __m128i vals3 = _mm_loadu_si128((__m128i *) & base[i + 8]); - const __m128i vals4 = _mm_loadu_si128((__m128i *) & base[i + 12]); + vector32_load(&vals1, &base[i]); + vector32_load(&vals2, &base[i + 4]); + vector32_load(&vals3, &base[i + 8]); + vector32_load(&vals4, &base[i + 12]); /* compare each value to the key */ - const __m128i result1 = _mm_cmpeq_epi32(keys, vals1); - const __m128i result2 = _mm_cmpeq_epi32(keys, vals2); - const __m128i result3 = _mm_cmpeq_epi32(keys, vals3); - const __m128i result4 = _mm_cmpeq_epi32(keys, vals4); + result1 = vector32_veq(keys, vals1); + result2 = vector32_veq(keys, vals2); + result3 = vector32_veq(keys, vals3); + result4 = vector32_veq(keys, vals4); /* combine the results into a single variable */ - const __m128i tmp1 = _mm_or_si128(result1, result2); - const __m128i tmp2 = _mm_or_si128(result3, result4); - const __m128i result = _mm_or_si128(tmp1, tmp2); + tmp1 = vector32_vor(result1, result2); + tmp2 = vector32_vor(result3, result4); + result = vector32_vor(tmp1, tmp2); /* see if there was a match */ if (_mm_movemask_epi8(result) != 0) { -#if 
defined(USE_ASSERT_CHECKING) - Assert(assert_result == true); -#endif + Assert(lfind(&key, base, &nelem_for_asserts, sizeof(uint32), + uint32_cmp_eq)); return true; } } @@ -87,17 +140,14 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem) { if (key == base[i]) { -#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING) - Assert(assert_result == true); -#endif - return true; + result = true; + break; } } -#if defined(USE_SSE2) && defined(USE_ASSERT_CHECKING) - Assert(assert_result == false); -#endif - return false; + Assert(result == (lfind(&key, base, &nelem_for_asserts, sizeof(uint32), + uint32_cmp_eq) != NULL)); + return result; } #endif /* PG_LFIND_H */ diff --git a/src/include/port/simd.h b/src/include/port/simd.h index a571e79f57..4dda87f3dd 100644 --- a/src/include/port/simd.h +++ b/src/include/port/simd.h @@ -13,6 +13,8 @@ #ifndef SIMD_H #define SIMD_H +#include "utils/elog.h" + /* * SSE2 instructions are part of the spec for the 64-bit x86 ISA. We assume * that compilers targeting this architecture understand SSE2 intrinsics. @@ -25,6 +27,231 @@ #if (defined(__x86_64__) || defined(_M_AMD64)) #include <emmintrin.h> #define USE_SSE2 +typedef __m128i Vector8; +typedef __m128i Vector32; + +/* + * If no SIMD instructions are available, we emulate specialized vector + * operations using uint64. 
+ */ +#else +typedef uint64 Vector8; +typedef uint64 Vector32; +#endif + + +static inline void vector8_load(Vector8 *v, const uint8 *s); +static inline void vector32_load(Vector32 *v, const uint32 *s); +static inline Vector8 vector8_broadcast(const uint8 c); +static inline Vector32 vector32_broadcast(const uint32 c); +static inline bool vector8_has_zero(const Vector8 v); +static inline bool vector8_le(const Vector8 v, const uint8 c); +static inline bool vector8_eq(const Vector8 v, const uint8 c); +static inline Vector32 vector32_veq(const Vector32 v1, const Vector32 v2); +static inline Vector32 vector32_vor(const Vector32 v1, const Vector32 v2); + + +/* + * Stuff for assert-enabled builds. + */ +#ifdef USE_ASSERT_CHECKING + +#include <search.h> + +static size_t nelem_vector8 = sizeof(Vector8) / sizeof(uint8); + +static int +uint8_cmp_eq(const void *key, const void *elem) +{ + uint8 k = *((const uint8 *) key); + uint8 e = *((const uint8 *) elem); + + if (k < e) + return -1; + if (k > e) + return 1; + return 0; +} + +static int +uint32_cmp_eq(const void *key, const void *elem) +{ + uint32 k = *((const uint32 *) key); + uint32 e = *((const uint32 *) elem); + + if (k < e) + return -1; + if (k > e) + return 1; + return 0; +} + +static int +uint8_cmp_le(const void *key, const void *elem) +{ + uint8 k = *((const uint8 *) key); + uint8 e = *((const uint8 *) elem); + + /* + * This is counterintuitive. We want lfind() to report success if it finds + * an element <= the key, so we need to return 0 any time the key is >= the + * current element. + */ + if (k >= e) + return 0; + return -1; +} + +#endif /* USE_ASSERT_CHECKING */ + + +/* + * Functions for loading a chunk of memory into a vector. 
+ */ + +static inline void +vector8_load(Vector8 *v, const uint8 *s) +{ +#ifdef USE_SSE2 + *v = _mm_loadu_si128((const __m128i *) s); +#else + memcpy(v, s, sizeof(Vector8)); +#endif +} + +static inline void +vector32_load(Vector32 *v, const uint32 *s) +{ +#ifdef USE_SSE2 + *v = _mm_loadu_si128((const __m128i *) s); +#else + elog(ERROR, "vector32() without SIMD not implemented"); + pg_unreachable(); +#endif +} + + +/* + * Functions for creating a vector with all elements set to the same value. + */ + +static inline Vector8 +vector8_broadcast(const uint8 c) +{ +#ifdef USE_SSE2 + return _mm_set1_epi8(c); +#else + return ~UINT64CONST(0) / 0xFF * c; +#endif +} + +static inline Vector32 +vector32_broadcast(const uint32 c) +{ +#ifdef USE_SSE2 + return _mm_set1_epi32(c); +#else + elog(ERROR, "vector32_broadcast() without SIMD not implemented"); + pg_unreachable(); +#endif +} + + +/* + * Functions for comparing vector elements to a given value. + */ + +static inline bool +vector8_has_zero(const Vector8 v) +{ +#ifdef USE_SSE2 + return _mm_movemask_epi8(_mm_cmpeq_epi8(v, _mm_setzero_si128())); +#else + return vector8_le(v, 0); +#endif +} + +static inline bool +vector8_eq(const Vector8 v, const uint8 c) +{ + bool result; + +#ifdef USE_SSE2 + result = _mm_movemask_epi8(_mm_cmpeq_epi8(v, vector8_broadcast(c))); +#else + /* any bytes in v equal to c will evaluate to zero via XOR */ + result = vector8_has_zero(v ^ vector8_broadcast(c)); +#endif /* USE_SSE2 */ + + Assert(result == (lfind(&c, &v, &nelem_vector8, sizeof(uint8), + uint8_cmp_eq) != NULL)); + return result; +} + +static inline Vector32 +vector32_veq(const Vector32 v1, const Vector32 v2) +{ +#ifdef USE_SSE2 + return _mm_cmpeq_epi32(v1, v2); +#else + elog(ERROR, "vector32_veq() without SIMD not implemented"); + pg_unreachable(); +#endif +} + +static inline bool +vector8_le(const Vector8 v, const uint8 c) +{ + bool result = false; + +#ifdef USE_SSE2 + /* + * Use saturating subtraction to find bytes <= c, which will present 
as + * NUL bytes in 'sub'. + */ + __m128i sub = _mm_subs_epu8(v, vector8_broadcast(c)); + result = vector8_has_zero(sub); +#else + /* + * To find bytes <= c, we can use bitwise operations to find bytes < c + 1, + * but it only works if c + 1 <= 128 and if the highest bit in v is not set + * (from https://graphics.stanford.edu/~seander/bithacks.html). + */ + if ((int64) v >= 0 && c < 0x80) + result = (v - vector8_broadcast(c + 1)) & ~v & vector8_broadcast(0x80); + else + { + /* one byte at a time */ + for (int i = 0; i < sizeof(Vector8); i++) + { + if (((const uint8 *) &v)[i] <= c) + { + result = true; + break; + } + } + } +#endif + + Assert(result == (lfind(&c, &v, &nelem_vector8, sizeof(uint8), + uint8_cmp_le) != NULL)); + return result; +} + + +/* + * Functions for bitwise operations on vectors. + */ + +static inline Vector32 +vector32_vor(const Vector32 v1, const Vector32 v2) +{ +#ifdef USE_SSE2 + return _mm_or_si128(v1, v2); +#else + elog(ERROR, "vector32_vor() without SIMD not implemented"); + pg_unreachable(); #endif +} #endif /* SIMD_H */ diff --git a/src/test/modules/test_lfind/expected/test_lfind.out b/src/test/modules/test_lfind/expected/test_lfind.out index 222c8fd7ff..1d4b14e703 100644 --- a/src/test/modules/test_lfind/expected/test_lfind.out +++ b/src/test/modules/test_lfind/expected/test_lfind.out @@ -4,9 +4,21 @@ CREATE EXTENSION test_lfind; -- the operations complete without crashing or hanging and that none of their -- internal sanity tests fail. 
-- -SELECT test_lfind(); - test_lfind ------------- +SELECT test_lfind8(); + test_lfind8 +------------- + +(1 row) + +SELECT test_lfind8_le(); + test_lfind8_le +---------------- + +(1 row) + +SELECT test_lfind32(); + test_lfind32 +-------------- (1 row) diff --git a/src/test/modules/test_lfind/sql/test_lfind.sql b/src/test/modules/test_lfind/sql/test_lfind.sql index 899f1dd49b..766c640831 100644 --- a/src/test/modules/test_lfind/sql/test_lfind.sql +++ b/src/test/modules/test_lfind/sql/test_lfind.sql @@ -5,4 +5,6 @@ CREATE EXTENSION test_lfind; -- the operations complete without crashing or hanging and that none of their -- internal sanity tests fail. -- -SELECT test_lfind(); +SELECT test_lfind8(); +SELECT test_lfind8_le(); +SELECT test_lfind32(); diff --git a/src/test/modules/test_lfind/test_lfind--1.0.sql b/src/test/modules/test_lfind/test_lfind--1.0.sql index d82ab0567e..81801926ae 100644 --- a/src/test/modules/test_lfind/test_lfind--1.0.sql +++ b/src/test/modules/test_lfind/test_lfind--1.0.sql @@ -3,6 +3,14 @@ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION test_lfind" to load this file. 
\quit -CREATE FUNCTION test_lfind() +CREATE FUNCTION test_lfind32() + RETURNS pg_catalog.void + AS 'MODULE_PATHNAME' LANGUAGE C; + +CREATE FUNCTION test_lfind8() + RETURNS pg_catalog.void + AS 'MODULE_PATHNAME' LANGUAGE C; + +CREATE FUNCTION test_lfind8_le() RETURNS pg_catalog.void AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/src/test/modules/test_lfind/test_lfind.c b/src/test/modules/test_lfind/test_lfind.c index a000746fb8..efe6b60bc5 100644 --- a/src/test/modules/test_lfind/test_lfind.c +++ b/src/test/modules/test_lfind/test_lfind.c @@ -18,10 +18,97 @@ PG_MODULE_MAGIC; -PG_FUNCTION_INFO_V1(test_lfind); +/* workhorse for test_lfind8 */ +static void +test_lfind8_internal(uint8 key) +{ + /* The byte searched for shouldn't be in the first vector-sized chunk, to make sure iteration works */ +#define LEN_NO_TAIL (2 * sizeof(Vector8)) +#define LEN_WITH_TAIL (LEN_NO_TAIL + 3) + + uint8 charbuf[LEN_WITH_TAIL]; + + memset(charbuf, 0xFF, LEN_WITH_TAIL); + /* search tail to test one-byte-at-a-time path */ + charbuf[LEN_WITH_TAIL - 1] = key; + if (key > 0x00 && pg_lfind8(key - 1, charbuf, LEN_WITH_TAIL)) + elog(ERROR, "pg_lfind8() found nonexistent element <= '0x%x'", key - 1); + if (key < 0xFF && !pg_lfind8(key, charbuf, LEN_WITH_TAIL)) + elog(ERROR, "pg_lfind8() did not find existing element <= '0x%x'", key); + if (key < 0xFE && pg_lfind8(key + 1, charbuf, LEN_WITH_TAIL)) + elog(ERROR, "pg_lfind8() found nonexistent element <= '0x%x'", key + 1); + + memset(charbuf, 0xFF, LEN_WITH_TAIL); + /* search with vector operations */ + charbuf[LEN_NO_TAIL - 1] = key; + if (key > 0x00 && pg_lfind8(key - 1, charbuf, LEN_NO_TAIL)) + elog(ERROR, "pg_lfind8() found nonexistent element <= '0x%x'", key - 1); + if (key < 0xFF && !pg_lfind8(key, charbuf, LEN_NO_TAIL)) + elog(ERROR, "pg_lfind8() did not find existing element <= '0x%x'", key); + if (key < 0xFE && pg_lfind8(key + 1, charbuf, LEN_NO_TAIL)) + elog(ERROR, "pg_lfind8() found nonexistent element <= '0x%x'", key + 1); +} + 
+PG_FUNCTION_INFO_V1(test_lfind8); +Datum +test_lfind8(PG_FUNCTION_ARGS) +{ + test_lfind8_internal(0); + test_lfind8_internal(1); + test_lfind8_internal(0x7F); + test_lfind8_internal(0x80); + test_lfind8_internal(0xFD); + + PG_RETURN_VOID(); +} + +/* workhorse for test_lfind8_le */ +static void +test_lfind8_le_internal(uint8 key) +{ + /* The byte searched for shouldn't be in the first vector-sized chunk, to make sure iteration works */ +#define LEN_NO_TAIL (2 * sizeof(Vector8)) +#define LEN_WITH_TAIL (LEN_NO_TAIL + 3) + + uint8 charbuf[LEN_WITH_TAIL]; + + memset(charbuf, 0xFF, LEN_WITH_TAIL); + /* search tail to test one-byte-at-a-time path */ + charbuf[LEN_WITH_TAIL - 1] = key; + if (key > 0x00 && pg_lfind8_le(key - 1, charbuf, LEN_WITH_TAIL)) + elog(ERROR, "pg_lfind8_le() found nonexistent element <= '0x%x'", key - 1); + if (key < 0xFF && !pg_lfind8_le(key, charbuf, LEN_WITH_TAIL)) + elog(ERROR, "pg_lfind8_le() did not find existing element <= '0x%x'", key); + if (key < 0xFE && !pg_lfind8_le(key + 1, charbuf, LEN_WITH_TAIL)) + elog(ERROR, "pg_lfind8_le() did not find existing element <= '0x%x'", key + 1); + + memset(charbuf, 0xFF, LEN_WITH_TAIL); + /* search with vector operations */ + charbuf[LEN_NO_TAIL - 1] = key; + if (key > 0x00 && pg_lfind8_le(key - 1, charbuf, LEN_NO_TAIL)) + elog(ERROR, "pg_lfind8_le() found nonexistent element <= '0x%x'", key - 1); + if (key < 0xFF && !pg_lfind8_le(key, charbuf, LEN_NO_TAIL)) + elog(ERROR, "pg_lfind8_le() did not find existing element <= '0x%x'", key); + if (key < 0xFE && !pg_lfind8_le(key + 1, charbuf, LEN_NO_TAIL)) + elog(ERROR, "pg_lfind8_le() did not find existing element <= '0x%x'", key + 1); +} + +PG_FUNCTION_INFO_V1(test_lfind8_le); +Datum +test_lfind8_le(PG_FUNCTION_ARGS) +{ + test_lfind8_le_internal(0); + test_lfind8_le_internal(1); + test_lfind8_le_internal(0x7F); + test_lfind8_le_internal(0x80); + test_lfind8_le_internal(0xFD); + + PG_RETURN_VOID(); +} +PG_FUNCTION_INFO_V1(test_lfind32); Datum 
-test_lfind(PG_FUNCTION_ARGS) +test_lfind32(PG_FUNCTION_ARGS) { #define TEST_ARRAY_SIZE 135 uint32 test_array[TEST_ARRAY_SIZE] = {0}; -- 2.25.1
>From e2fa356adfc41a13ea18da839b53353b774739cc Mon Sep 17 00:00:00 2001 From: Nathan Bossart <nathandboss...@gmail.com> Date: Sat, 20 Aug 2022 21:44:21 -0700 Subject: [PATCH 2/2] ARM SIMD support --- src/include/port/pg_lfind.h | 6 +++++- src/include/port/simd.h | 28 ++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/src/include/port/pg_lfind.h b/src/include/port/pg_lfind.h index def858cbe1..04f09200b4 100644 --- a/src/include/port/pg_lfind.h +++ b/src/include/port/pg_lfind.h @@ -92,7 +92,7 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem) size_t nelem_for_asserts = nelem; #endif -#ifdef USE_SSE2 +#if defined(USE_SSE2) || defined(__ARM_NEON) /* * A 16-byte register only has four 4-byte lanes. For better * instruction-level parallelism, each loop iteration operates on a block @@ -126,7 +126,11 @@ pg_lfind32(uint32 key, uint32 *base, uint32 nelem) result = vector32_vor(tmp1, tmp2); /* see if there was a match */ +#ifdef USE_SSE2 if (_mm_movemask_epi8(result) != 0) +#elif defined(__ARM_NEON) + if (vmaxvq_u32(result) != 0) +#endif { Assert(lfind(&key, base, &nelem_for_asserts, sizeof(uint32), uint32_cmp_eq)); diff --git a/src/include/port/simd.h b/src/include/port/simd.h index 4dda87f3dd..4f40b31e2a 100644 --- a/src/include/port/simd.h +++ b/src/include/port/simd.h @@ -30,6 +30,15 @@ typedef __m128i Vector8; typedef __m128i Vector32; +/* + * Include arm_neon.h if the compiler is targeting an architecture that + * supports ARM Advanced SIMD (Neon) intrinsics. + */ +#elif defined(__ARM_NEON) +#include <arm_neon.h> +typedef uint8x16_t Vector8; +typedef uint32x4_t Vector32; + /* * If no SIMD instructions are available, we emulate specialized vector * operations using uint64. 
@@ -114,6 +123,8 @@ vector8_load(Vector8 *v, const uint8 *s) { #ifdef USE_SSE2 *v = _mm_loadu_si128((const __m128i *) s); +#elif defined(__ARM_NEON) + *v = vld1q_u8(s); #else memcpy(v, s, sizeof(Vector8)); #endif @@ -124,6 +135,8 @@ vector32_load(Vector32 *v, const uint32 *s) { #ifdef USE_SSE2 *v = _mm_loadu_si128((const __m128i *) s); +#elif defined(__ARM_NEON) + *v = vld1q_u32(s); #else elog(ERROR, "vector32() without SIMD not implemented"); pg_unreachable(); @@ -140,6 +153,8 @@ vector8_broadcast(const uint8 c) { #ifdef USE_SSE2 return _mm_set1_epi8(c); +#elif defined(__ARM_NEON) + return vdupq_n_u8(c); #else return ~UINT64CONST(0) / 0xFF * c; #endif @@ -150,6 +165,8 @@ vector32_broadcast(const uint32 c) { #ifdef USE_SSE2 return _mm_set1_epi32(c); +#elif defined(__ARM_NEON) + return vdupq_n_u32(c); #else elog(ERROR, "vector32_broadcast() without SIMD not implemented"); pg_unreachable(); @@ -166,6 +183,8 @@ vector8_has_zero(const Vector8 v) { #ifdef USE_SSE2 return _mm_movemask_epi8(_mm_cmpeq_epi8(v, _mm_setzero_si128())); +#elif defined(__ARM_NEON) + return vmaxvq_u8(vceqzq_u8(v)); #else return vector8_le(v, 0); #endif @@ -178,6 +197,8 @@ vector8_eq(const Vector8 v, const uint8 c) #ifdef USE_SSE2 result = _mm_movemask_epi8(_mm_cmpeq_epi8(v, vector8_broadcast(c))); +#elif defined(__ARM_NEON) + result = vmaxvq_u8(vceqq_u8(v, vector8_broadcast(c))); #else /* any bytes in v equal to c will evaluate to zero via XOR */ result = vector8_has_zero(v ^ vector8_broadcast(c)); @@ -193,6 +214,8 @@ vector32_veq(const Vector32 v1, const Vector32 v2) { #ifdef USE_SSE2 return _mm_cmpeq_epi32(v1, v2); +#elif defined(__ARM_NEON) + return vceqq_u32(v1, v2); #else elog(ERROR, "vector32_veq() without SIMD not implemented"); pg_unreachable(); @@ -211,6 +234,9 @@ vector8_le(const Vector8 v, const uint8 c) */ __m128i sub = _mm_subs_epu8(v, vector8_broadcast(c)); result = vector8_has_zero(sub); +#elif __ARM_NEON + uint8x16_t sub = vqsubq_u8(v, vector8_broadcast(c)); + result = 
vector8_has_zero(sub); #else /* * To find bytes <= c, we can use bitwise operations to find bytes < c + 1, @@ -248,6 +274,8 @@ vector32_vor(const Vector32 v1, const Vector32 v2) { #ifdef USE_SSE2 return _mm_or_si128(v1, v2); +#elif defined(__ARM_NEON) + return vorrq_u32(v1, v2); #else elog(ERROR, "vector32_vor() without SIMD not implemented"); pg_unreachable(); -- 2.25.1