From 0661774349f2a96c7aa4aaac328aca4118857f4a Mon Sep 17 00:00:00 2001
From: davecramer <davecramer@gmail.com>
Date: Tue, 30 Jan 2024 09:47:25 -0500
Subject: [PATCH 2/2] Naive patch to fix spinlock support for ARM64 on Windows
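
On an ARM64 Windows toolchain the ARM spinlock section of s_lock.h is
never selected: _M_ARM64 is not among the recognized ARM macros and
HAVE_GCC__SYNC_INT32_TAS is not defined there.  Widen the ARM
test-and-set section and the ISB-based spin_delay() so they are also
chosen when _M_ARM64 is defined, and skip the x86-specific spin_delay()
bodies (_mm_pause and "rep nop") when targeting ARM64, since those
instructions do not exist on that architecture.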

---
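Notes (not part of the commit message):

For reference, this is roughly what the ARM section should reduce to on an
ARM64 build once the patch is applied.  The tas() body is not visible in the
hunks below and is reproduced from the surrounding file only as a sketch, and
the whole section still assumes a compiler that provides the GCC-style
__sync builtins and __asm__ syntax (e.g. clang); MSVC proper has neither.

	#define HAS_TEST_AND_SET
	#define TAS(lock) tas(lock)

	typedef int slock_t;

	static __inline__ int
	tas(volatile slock_t *lock)
	{
		/* atomically set *lock to 1 and return its previous value */
		return __sync_lock_test_and_set(lock, 1);
	}

	#define SPIN_DELAY() spin_delay()

	static __inline__ void
	spin_delay(void)
	{
		/* ISB gives the core a short breather inside spin loops */
		__asm__ __volatile__(
			" isb;				\n");
	}
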
 src/include/storage/s_lock.h | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
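
If MSVC proper is the target, the GCC-style section above will not compile,
so a native alternative would be needed.  A hypothetical MSVC-only
spin_delay() for ARM64 (not part of this patch, shown only to illustrate the
idea) could use the __isb intrinsic from <intrin.h> in place of _mm_pause /
"rep nop", which are x86-only:

	#include <intrin.h>

	#if defined(_M_ARM64)
	static __forceinline void
	spin_delay(void)
	{
		/* ISB plays the same "brief pause" role that PAUSE does on x86 */
		__isb(_ARM64_BARRIER_SY);
	}
	#endif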

diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index aa06e49da2..61f365d0f5 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -256,8 +256,8 @@ spin_delay(void)
  * We use the int-width variant of the builtin because it works on more chips
  * than other widths.
  */
-#if defined(__arm__) || defined(__arm) || defined(__aarch64__)
-#ifdef HAVE_GCC__SYNC_INT32_TAS
+#if defined(__arm__) || defined(__arm) || defined(__aarch64__) || defined(_M_ARM64)
+#if defined(HAVE_GCC__SYNC_INT32_TAS) || defined(_M_ARM64)
 #define HAS_TEST_AND_SET
 
 #define TAS(lock) tas(lock)
@@ -277,7 +277,7 @@ tas(volatile slock_t *lock)
  * high-core-count ARM64 processors.  It seems mostly a wash for smaller gear,
  * and ISB doesn't exist at all on pre-v7 ARM chips.
  */
-#if defined(__aarch64__)
+#if defined(__aarch64__) || defined(_M_ARM64)
 
 #define SPIN_DELAY() spin_delay()
 
@@ -288,9 +288,9 @@ spin_delay(void)
 		" isb;				\n");
 }
 
-#endif	 /* __aarch64__ */
-#endif	 /* HAVE_GCC__SYNC_INT32_TAS */
-#endif	 /* __arm__ || __arm || __aarch64__ */
+#endif	 /* __aarch64__ || _M_ARM64 */
+#endif	 /* HAVE_GCC__SYNC_INT32_TAS || _M_ARM64 */
+#endif	 /* __arm__ || __arm || __aarch64__ || _M_ARM64 */
 
 
 /* S/390 and S/390x Linux (32- and 64-bit zSeries) */
@@ -710,6 +710,7 @@ typedef LONG slock_t;
 /* If using Visual C++ on Win64, inline assembly is unavailable.
  * Use a _mm_pause intrinsic instead of rep nop.
  */
+#if !defined(_M_ARM64)
 #if defined(_WIN64)
 static __forceinline void
 spin_delay(void)
@@ -724,6 +725,7 @@ spin_delay(void)
 	__asm rep nop;
 }
 #endif
+#endif	 /* !defined(_M_ARM64) */
 
 #include <intrin.h>
 #pragma intrinsic(_ReadWriteBarrier)
-- 
2.43.0.windows.1

