From 02b4d45c48e12a4fcd622f6703a3d422b2186cc0 Mon Sep 17 00:00:00 2001
From: Thomas Munro <thomas.munro@gmail.com>
Date: Tue, 27 Aug 2024 08:58:48 +1200
Subject: XXX LLVM ARM relocation bug mitigation.

This takes the patched code from
https://github.com/llvm/llvm-project/pull/71968, moves it into a new
class SafeSectionMemoryManager, adjusts it to work on LLVM < 16, and
uses it in place of the regular memory manager.
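
For context: on AArch64, JIT-compiled code reaches other sections
through ADRP/ADD sequences, and relocations such as
R_AARCH64_ADR_PREL_PG_HI21 encode a signed 21-bit page offset, so the
target has to lie within

    +/- 2^20 pages * 4kB/page = +/- 4GB

of the referencing instruction.  The stock SectionMemoryManager maps
code, read-only data and read-write data separately, so the mappings
can end up farther apart than that and relocation fails at JIT time.
The replacement class implements needsToReserveAllocationSpace() and
reserveAllocationSpace(), so that RuntimeDyld asks for all of a
module's sections up front and they are carved out of a single
contiguous mapping.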

XXX experimental
XXX this may be a terrible idea
XXX several details, including #include directives, would need adjustment
for prehistoric LLVM versions
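
To sketch the reservation logic (using the names from the new file):
each region is sized the same way allocateSection() will later carve
it up, rounded to whole pages, and allocated as one block:

    RequiredCodeSize = alignTo(CodeSize, CodeAlign) + CodeAlign.value();
    /* likewise for ROData and RWData; round each up to PageSize, then */
    RequiredSize = RequiredCodeSize + RequiredRODataSize + RequiredRWDataSize;

The block is split into code, rodata and rwdata free lists, and the old
free lists are cleared first so that later allocations cannot fall back
to older, possibly distant mappings.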

Reported-by: Anthonin Bonnefoy <anthonin.bonnefoy@datadoghq.com>
Discussion: https://postgr.es/m/CAO6_Xqr63qj%3DSx7HY6ZiiQ6R_JbX%2B-p6sTPwDYwTWZjUmjsYBg%40mail.gmail.com
Signed-off-by: Anthonin Bonnefoy <anthonin.bonnefoy@datadoghq.com>
---
 src/backend/jit/llvm/Makefile                 |   3 +-
 .../jit/llvm/SafeSectionMemoryManager.cpp     | 386 ++++++++++++++++++
 src/backend/jit/llvm/llvmjit.c                |   6 +
 src/backend/jit/llvm/llvmjit_wrap.cpp         |  15 +
 src/backend/jit/llvm/meson.build              |   1 +
 src/include/jit/SafeSectionMemoryManager.h    | 225 ++++++++++
 src/include/jit/llvmjit.h                     |   2 +
 7 files changed, 637 insertions(+), 1 deletion(-)
 create mode 100644 src/backend/jit/llvm/SafeSectionMemoryManager.cpp
 create mode 100644 src/include/jit/SafeSectionMemoryManager.h

diff --git a/src/backend/jit/llvm/Makefile b/src/backend/jit/llvm/Makefile
index bfe5c207a2b..d7aeaff4472 100644
--- a/src/backend/jit/llvm/Makefile
+++ b/src/backend/jit/llvm/Makefile
@@ -47,7 +47,8 @@ OBJS += \
 	llvmjit.o \
 	llvmjit_error.o \
 	llvmjit_inline.o \
-	llvmjit_wrap.o
+	llvmjit_wrap.o \
+	SafeSectionMemoryManager.o
 
 # Code generation
 OBJS += \
diff --git a/src/backend/jit/llvm/SafeSectionMemoryManager.cpp b/src/backend/jit/llvm/SafeSectionMemoryManager.cpp
new file mode 100644
index 00000000000..88d7f43f621
--- /dev/null
+++ b/src/backend/jit/llvm/SafeSectionMemoryManager.cpp
@@ -0,0 +1,386 @@
+/*
+ * This file is taken from https://github.com/llvm/llvm-project/pull/71968,
+ * with the class renamed to SafeSectionMemoryManager, so that we can support
+ * the ARM memory model on broken LLVM versions.
+ */
+
+//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the section-based memory manager used by the MCJIT
+// execution engine and RuntimeDyld.
+//
+//===----------------------------------------------------------------------===//
+
+#include "jit/SafeSectionMemoryManager.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Process.h"
+
+namespace llvm {
+
+bool SafeSectionMemoryManager::hasSpace(const MemoryGroup &MemGroup,
+                                        uintptr_t Size) const {
+  for (const FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+    if (FreeMB.Free.allocatedSize() >= Size)
+      return true;
+  }
+  return false;
+}
+
+#if LLVM_VERSION_MAJOR < 16
+// Before LLVM 16, the reserveAllocationSpace() callback received its
+// alignments as plain uint32_t values.  Promote them to llvm::Align here so
+// the rest of the function is shared with the LLVM >= 16 variant below.
+void SafeSectionMemoryManager::reserveAllocationSpace(
+    uintptr_t CodeSize, uint32_t CodeAlign_i, uintptr_t RODataSize,
+    uint32_t RODataAlign_i, uintptr_t RWDataSize, uint32_t RWDataAlign_i) {
+  Align CodeAlign(CodeAlign_i);
+  Align RODataAlign(RODataAlign_i);
+  Align RWDataAlign(RWDataAlign_i);
+#else
+void SafeSectionMemoryManager::reserveAllocationSpace(
+    uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
+    Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
+#endif
+  if (CodeSize == 0 && RODataSize == 0 && RWDataSize == 0)
+    return;
+
+  static const size_t PageSize = sys::Process::getPageSizeEstimate();
+
+  // Code alignment needs to be at least the stub alignment - however, we
+  // don't have an easy way to get that here so as a workaround, we assume
+  // it's 8, which is the largest value I observed across all platforms.
+  constexpr uint64_t StubAlign = 8;
+  CodeAlign = Align(std::max(CodeAlign.value(), StubAlign));
+  RODataAlign = Align(std::max(RODataAlign.value(), StubAlign));
+  RWDataAlign = Align(std::max(RWDataAlign.value(), StubAlign));
+
+  // Get space required for each section. Use the same calculation as
+  // allocateSection because we need to be able to satisfy it.
+  uint64_t RequiredCodeSize = alignTo(CodeSize, CodeAlign) + CodeAlign.value();
+  uint64_t RequiredRODataSize =
+      alignTo(RODataSize, RODataAlign) + RODataAlign.value();
+  uint64_t RequiredRWDataSize =
+      alignTo(RWDataSize, RWDataAlign) + RWDataAlign.value();
+
+  if (hasSpace(CodeMem, RequiredCodeSize) &&
+      hasSpace(RODataMem, RequiredRODataSize) &&
+      hasSpace(RWDataMem, RequiredRWDataSize)) {
+    // Sufficient space in contiguous block already available.
+    return;
+  }
+
+  // MemoryManager does not have functions for releasing memory after it's
+  // allocated. Normally it tries to use any excess blocks that were allocated
+  // due to page alignment, but if we have insufficient free memory for the
+  // request this can lead to allocating disparate memory that can violate the
+  // ARM ABI. Clear free memory so only the new allocations are used, but do
+  // not release allocated memory as it may still be in-use.
+  CodeMem.FreeMem.clear();
+  RODataMem.FreeMem.clear();
+  RWDataMem.FreeMem.clear();
+
+  // Round up to the nearest page size. Blocks must be page-aligned.
+  RequiredCodeSize = alignTo(RequiredCodeSize, PageSize);
+  RequiredRODataSize = alignTo(RequiredRODataSize, PageSize);
+  RequiredRWDataSize = alignTo(RequiredRWDataSize, PageSize);
+  uint64_t RequiredSize =
+      RequiredCodeSize + RequiredRODataSize + RequiredRWDataSize;
+
+  std::error_code ec;
+  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
+      AllocationPurpose::RWData, RequiredSize, nullptr,
+      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
+  if (ec) {
+    return;
+  }
+  // CodeMem will arbitrarily own this MemoryBlock to handle cleanup.
+  CodeMem.AllocatedMem.push_back(MB);
+  uintptr_t Addr = (uintptr_t)MB.base();
+  FreeMemBlock FreeMB;
+  FreeMB.PendingPrefixIndex = (unsigned)-1;
+
+  if (CodeSize > 0) {
+    assert(isAddrAligned(CodeAlign, (void *)Addr));
+    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredCodeSize);
+    CodeMem.FreeMem.push_back(FreeMB);
+    Addr += RequiredCodeSize;
+  }
+
+  if (RODataSize > 0) {
+    assert(isAddrAligned(RODataAlign, (void *)Addr));
+    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRODataSize);
+    RODataMem.FreeMem.push_back(FreeMB);
+    Addr += RequiredRODataSize;
+  }
+
+  if (RWDataSize > 0) {
+    assert(isAddrAligned(RWDataAlign, (void *)Addr));
+    FreeMB.Free = sys::MemoryBlock((void *)Addr, RequiredRWDataSize);
+    RWDataMem.FreeMem.push_back(FreeMB);
+  }
+}
+
+uint8_t *SafeSectionMemoryManager::allocateDataSection(uintptr_t Size,
+                                                       unsigned Alignment,
+                                                       unsigned SectionID,
+                                                       StringRef SectionName,
+                                                       bool IsReadOnly) {
+  if (IsReadOnly)
+    return allocateSection(SafeSectionMemoryManager::AllocationPurpose::ROData,
+                           Size, Alignment);
+  return allocateSection(SafeSectionMemoryManager::AllocationPurpose::RWData,
+                         Size, Alignment);
+}
+
+uint8_t *SafeSectionMemoryManager::allocateCodeSection(uintptr_t Size,
+                                                       unsigned Alignment,
+                                                       unsigned SectionID,
+                                                       StringRef SectionName) {
+  return allocateSection(SafeSectionMemoryManager::AllocationPurpose::Code,
+                         Size, Alignment);
+}
+
+uint8_t *SafeSectionMemoryManager::allocateSection(
+    SafeSectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
+    unsigned Alignment) {
+  if (!Alignment)
+    Alignment = 16;
+
+  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");
+
+  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
+  uintptr_t Addr = 0;
+
+  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
+    switch (Purpose) {
+    case AllocationPurpose::Code:
+      return CodeMem;
+    case AllocationPurpose::ROData:
+      return RODataMem;
+    case AllocationPurpose::RWData:
+      return RWDataMem;
+    }
+    llvm_unreachable("Unknown SafeSectionMemoryManager::AllocationPurpose");
+  }();
+
+  // Look in the list of free memory regions and use a block there if one
+  // is available.
+  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+    if (FreeMB.Free.allocatedSize() >= RequiredSize) {
+      Addr = (uintptr_t)FreeMB.Free.base();
+      uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
+      // Align the address.
+      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
+
+      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
+        // The part of the block we're giving out to the user is now pending
+        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
+
+        // Remember this pending block, such that future allocations can just
+        // modify it rather than creating a new one
+        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
+      } else {
+        sys::MemoryBlock &PendingMB =
+            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
+        PendingMB = sys::MemoryBlock(PendingMB.base(),
+                                     Addr + Size - (uintptr_t)PendingMB.base());
+      }
+
+      // Remember how much free space is now left in this block
+      FreeMB.Free =
+          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
+      return (uint8_t *)Addr;
+    }
+  }
+
+  // No pre-allocated free block was large enough. Allocate a new memory region.
+  // Note that all sections get allocated as read-write.  The permissions will
+  // be updated later based on memory group.
+  //
+  // FIXME: It would be useful to define a default allocation size (or add
+  // it as a constructor parameter) to minimize the number of allocations.
+  //
+  // FIXME: Initialize the Near member for each memory group to avoid
+  // interleaving.
+  std::error_code ec;
+  sys::MemoryBlock MB = MMapper->allocateMappedMemory(
+      Purpose, RequiredSize, &MemGroup.Near,
+      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
+  if (ec) {
+    // FIXME: Add error propagation to the interface.
+    return nullptr;
+  }
+
+  // Save this address as the basis for our next request
+  MemGroup.Near = MB;
+
+  // Copy the address to all the other groups, if they have not
+  // been initialized.
+  if (CodeMem.Near.base() == nullptr)
+    CodeMem.Near = MB;
+  if (RODataMem.Near.base() == nullptr)
+    RODataMem.Near = MB;
+  if (RWDataMem.Near.base() == nullptr)
+    RWDataMem.Near = MB;
+
+  // Remember that we allocated this memory
+  MemGroup.AllocatedMem.push_back(MB);
+  Addr = (uintptr_t)MB.base();
+  uintptr_t EndOfBlock = Addr + MB.allocatedSize();
+
+  // Align the address.
+  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
+
+  // The part of the block we're giving out to the user is now pending
+  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));
+
+  // The allocateMappedMemory may allocate much more memory than we need. In
+  // this case, we store the unused memory as a free memory block.
+  unsigned FreeSize = EndOfBlock - Addr - Size;
+  if (FreeSize > 16) {
+    FreeMemBlock FreeMB;
+    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
+    FreeMB.PendingPrefixIndex = (unsigned)-1;
+    MemGroup.FreeMem.push_back(FreeMB);
+  }
+
+  // Return aligned address
+  return (uint8_t *)Addr;
+}
+
+bool SafeSectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
+  // FIXME: Should in-progress permissions be reverted if an error occurs?
+  std::error_code ec;
+
+  // Make code memory executable.
+  ec = applyMemoryGroupPermissions(CodeMem,
+                                   sys::Memory::MF_READ | sys::Memory::MF_EXEC);
+  if (ec) {
+    if (ErrMsg) {
+      *ErrMsg = ec.message();
+    }
+    return true;
+  }
+
+  // Make read-only data memory read-only.
+  ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
+  if (ec) {
+    if (ErrMsg) {
+      *ErrMsg = ec.message();
+    }
+    return true;
+  }
+
+  // Read-write data memory already has the correct permissions
+
+  // Some platforms with separate data cache and instruction cache require
+  // explicit cache flush, otherwise JIT code manipulations (like resolved
+  // relocations) will get to the data cache but not to the instruction cache.
+  invalidateInstructionCache();
+
+  return false;
+}
+
+static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
+  static const size_t PageSize = sys::Process::getPageSizeEstimate();
+
+  size_t StartOverlap =
+      (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
+
+  size_t TrimmedSize = M.allocatedSize();
+  TrimmedSize -= StartOverlap;
+  TrimmedSize -= TrimmedSize % PageSize;
+
+  sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
+                           TrimmedSize);
+
+  assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
+  assert((Trimmed.allocatedSize() % PageSize) == 0);
+  assert(M.base() <= Trimmed.base() &&
+         Trimmed.allocatedSize() <= M.allocatedSize());
+
+  return Trimmed;
+}
+
+std::error_code
+SafeSectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
+                                                      unsigned Permissions) {
+  for (sys::MemoryBlock &MB : MemGroup.PendingMem)
+    if (std::error_code EC = MMapper->protectMappedMemory(MB, Permissions))
+      return EC;
+
+  MemGroup.PendingMem.clear();
+
+  // Now go through free blocks and trim any of them that don't span the entire
+  // page because one of the pending blocks may have overlapped it.
+  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+    FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
+    // We cleared the PendingMem list, so all these pointers are now invalid
+    FreeMB.PendingPrefixIndex = (unsigned)-1;
+  }
+
+  // Remove all blocks which are now empty
+  erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
+    return FreeMB.Free.allocatedSize() == 0;
+  });
+
+  return std::error_code();
+}
+
+void SafeSectionMemoryManager::invalidateInstructionCache() {
+  for (sys::MemoryBlock &Block : CodeMem.PendingMem)
+    sys::Memory::InvalidateInstructionCache(Block.base(),
+                                            Block.allocatedSize());
+}
+
+SafeSectionMemoryManager::~SafeSectionMemoryManager() {
+  for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
+    for (sys::MemoryBlock &Block : Group->AllocatedMem)
+      MMapper->releaseMappedMemory(Block);
+  }
+}
+
+SafeSectionMemoryManager::MemoryMapper::~MemoryMapper() = default;
+
+void SafeSectionMemoryManager::anchor() {}
+
+namespace {
+// Trivial implementation of SafeSectionMemoryManager::MemoryMapper that
+// just calls into sys::Memory.
+class DefaultMMapper final : public SafeSectionMemoryManager::MemoryMapper {
+public:
+  sys::MemoryBlock
+  allocateMappedMemory(SafeSectionMemoryManager::AllocationPurpose Purpose,
+                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
+                       unsigned Flags, std::error_code &EC) override {
+    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
+  }
+
+  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
+                                      unsigned Flags) override {
+    return sys::Memory::protectMappedMemory(Block, Flags);
+  }
+
+  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
+    return sys::Memory::releaseMappedMemory(M);
+  }
+};
+} // namespace
+
+SafeSectionMemoryManager::SafeSectionMemoryManager(MemoryMapper *UnownedMM,
+                                                   bool ReserveAlloc)
+    : MMapper(UnownedMM), OwnedMMapper(nullptr),
+      ReserveAllocation(ReserveAlloc) {
+  if (!MMapper) {
+    OwnedMMapper = std::make_unique<DefaultMMapper>();
+    MMapper = OwnedMMapper.get();
+  }
+}
+
+} // namespace llvm
diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c
index 0f6cec52496..a4ea47bcfb6 100644
--- a/src/backend/jit/llvm/llvmjit.c
+++ b/src/backend/jit/llvm/llvmjit.c
@@ -1278,8 +1278,14 @@ llvm_log_jit_error(void *ctx, LLVMErrorRef error)
 static LLVMOrcObjectLayerRef
 llvm_create_object_layer(void *Ctx, LLVMOrcExecutionSessionRef ES, const char *Triple)
 {
+#if defined(__aarch64__)
+	LLVMOrcObjectLayerRef objlayer =
+		LLVMOrcCreateRTDyldObjectLinkingLayerWithSafeSectionMemoryManager(ES);
+#else
 	LLVMOrcObjectLayerRef objlayer =
 		LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES);
+#endif
+
 
 #if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER
 	if (jit_debugging_support)
diff --git a/src/backend/jit/llvm/llvmjit_wrap.cpp b/src/backend/jit/llvm/llvmjit_wrap.cpp
index 641c8841ca3..6c722c431a7 100644
--- a/src/backend/jit/llvm/llvmjit_wrap.cpp
+++ b/src/backend/jit/llvm/llvmjit_wrap.cpp
@@ -17,13 +17,18 @@ extern "C"
 }
 
 #include <llvm-c/Core.h>
+#include <llvm-c/OrcEE.h>
 
 /* Avoid macro clash with LLVM's C++ headers */
 #undef Min
 
+#include <llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h>
+#include <llvm/ExecutionEngine/SectionMemoryManager.h>
 #include <llvm/IR/Function.h>
+#include <llvm/Support/CBindingWrapping.h>
 
 #include "jit/llvmjit.h"
+#include "jit/SafeSectionMemoryManager.h"
 
 
 /*
@@ -41,3 +46,13 @@ LLVMGetFunctionType(LLVMValueRef r)
 {
 	return llvm::wrap(llvm::unwrap<llvm::Function>(r)->getFunctionType());
 }
+
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(llvm::orc::ExecutionSession, LLVMOrcExecutionSessionRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(llvm::orc::ObjectLayer, LLVMOrcObjectLayerRef)
+
+LLVMOrcObjectLayerRef
+LLVMOrcCreateRTDyldObjectLinkingLayerWithSafeSectionMemoryManager(LLVMOrcExecutionSessionRef ES)
+{
+	return wrap(new llvm::orc::RTDyldObjectLinkingLayer(
+		*unwrap(ES), [] { return std::make_unique<llvm::SafeSectionMemoryManager>(nullptr, true); }));
+}
diff --git a/src/backend/jit/llvm/meson.build b/src/backend/jit/llvm/meson.build
index 4a4232661ba..87e0feba32b 100644
--- a/src/backend/jit/llvm/meson.build
+++ b/src/backend/jit/llvm/meson.build
@@ -14,6 +14,7 @@ llvmjit_sources += files(
   'llvmjit_error.cpp',
   'llvmjit_inline.cpp',
   'llvmjit_wrap.cpp',
+  'SafeSectionMemoryManager.cpp',
 )
 
 # Code generation
diff --git a/src/include/jit/SafeSectionMemoryManager.h b/src/include/jit/SafeSectionMemoryManager.h
new file mode 100644
index 00000000000..985e85440aa
--- /dev/null
+++ b/src/include/jit/SafeSectionMemoryManager.h
@@ -0,0 +1,225 @@
+/*
+ * This file is taken from https://github.com/llvm/llvm-project/pull/71968,
+ * with the class renamed to SafeSectionMemoryManager, so that we can support
+ * the ARM memory model on broken LLVM versions.
+ */
+
+//===- SectionMemoryManager.h - Memory manager for MCJIT/RtDyld -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of a section-based memory manager used by
+// the MCJIT execution engine and RuntimeDyld.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_SAFESECTIONMEMORYMANAGER_H
+#define LLVM_EXECUTIONENGINE_SAFESECTIONMEMORYMANAGER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/Memory.h"
+#include <cstdint>
+#include <string>
+#include <system_error>
+
+namespace llvm {
+
+/// This is a simple memory manager which implements the methods called by
+/// the RuntimeDyld class to allocate memory for section-based loading of
+/// objects, usually those generated by the MCJIT execution engine.
+///
+/// This memory manager allocates all section memory as read-write.  The
+/// RuntimeDyld will copy JITed section memory into these allocated blocks
+/// and perform any necessary linking and relocations.
+///
+/// Any client using this memory manager MUST ensure that section-specific
+/// page permissions have been applied before attempting to execute functions
+/// in the JITed object.  Permissions can be applied either by calling
+/// MCJIT::finalizeObject or SafeSectionMemoryManager::finalizeMemory
+/// directly.  Clients of MCJIT should call MCJIT::finalizeObject.
+class SafeSectionMemoryManager : public RTDyldMemoryManager {
+public:
+  /// This enum describes the various reasons to allocate pages from
+  /// allocateMappedMemory.
+  enum class AllocationPurpose {
+    Code,
+    ROData,
+    RWData,
+  };
+
+  /// Implementations of this interface are used by SafeSectionMemoryManager to
+  /// request pages from the operating system.
+  class MemoryMapper {
+  public:
+    /// This method attempts to allocate \p NumBytes bytes of virtual memory for
+    /// \p Purpose.  \p NearBlock may point to an existing allocation, in which
+    /// case an attempt is made to allocate more memory near the existing block.
+    /// The actual allocated address is not guaranteed to be near the requested
+    /// address.  \p Flags is used to set the initial protection flags for the
+    /// block of the memory.  \p EC [out] returns an object describing any error
+    /// that occurs.
+    ///
+    /// This method may allocate more than the number of bytes requested.  The
+    /// actual number of bytes allocated is indicated in the returned
+    /// MemoryBlock.
+    ///
+    /// The start of the allocated block must be aligned with the system
+    /// allocation granularity (64K on Windows, page size on Linux).  If the
+    /// address following \p NearBlock is not so aligned, it will be rounded up
+    /// to the next allocation granularity boundary.
+    ///
+    /// \returns a non-null MemoryBlock if the function was successful,
+    /// otherwise a null MemoryBlock with \p EC describing the error.
+    virtual sys::MemoryBlock
+    allocateMappedMemory(AllocationPurpose Purpose, size_t NumBytes,
+                         const sys::MemoryBlock *const NearBlock,
+                         unsigned Flags, std::error_code &EC) = 0;
+
+    /// This method sets the protection flags for a block of memory to the state
+    /// specified by \p Flags.  The behavior is not specified if the memory was
+    /// not allocated using the allocateMappedMemory method.
+    /// \p Block describes the memory block to be protected.
+    /// \p Flags specifies the new protection state to be assigned to the block.
+    ///
+    /// If \p Flags is MF_WRITE, the actual behavior varies with the operating
+    /// system (i.e. MF_READ | MF_WRITE on Windows) and the target architecture
+    /// (i.e. MF_WRITE -> MF_READ | MF_WRITE on i386).
+    ///
+    /// \returns error_success if the function was successful, or an
+    /// error_code describing the failure if an error occurred.
+    virtual std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
+                                                unsigned Flags) = 0;
+
+    /// This method releases a block of memory that was allocated with the
+    /// allocateMappedMemory method. It should not be used to release any memory
+    /// block allocated any other way.
+    /// \p M describes the memory block to be released.
+    ///
+    /// \returns error_success if the function was successful, or an
+    /// error_code describing the failure if an error occurred.
+    virtual std::error_code releaseMappedMemory(sys::MemoryBlock &M) = 0;
+
+    virtual ~MemoryMapper();
+  };
+
+  /// Creates a SafeSectionMemoryManager instance with \p MM as the associated
+  /// memory mapper.  If \p MM is nullptr then a default memory mapper is
+  /// used that directly calls into the operating system.
+  ///
+  /// If \p ReserveAlloc is true all memory will be pre-allocated, and any
+  /// attempts to allocate beyond pre-allocated memory will fail.
+  SafeSectionMemoryManager(MemoryMapper *MM = nullptr, bool ReserveAlloc = false);
+  SafeSectionMemoryManager(const SafeSectionMemoryManager &) = delete;
+  void operator=(const SafeSectionMemoryManager &) = delete;
+  ~SafeSectionMemoryManager() override;
+
+  /// Enable reserveAllocationSpace when requested.
+  bool needsToReserveAllocationSpace() override { return ReserveAllocation; }
+
+  /// Implements allocating all memory in a single block. This is required to
+  /// limit memory offsets to fit the ARM ABI; large memory systems may
+  /// otherwise allocate separate sections too far apart.
+#if LLVM_VERSION_MAJOR < 16
+  virtual void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
+                                      uintptr_t RODataSize,
+                                      uint32_t RODataAlign,
+                                      uintptr_t RWDataSize,
+                                      uint32_t RWDataAlign) override;
+#else
+  void reserveAllocationSpace(uintptr_t CodeSize, Align CodeAlign,
+                              uintptr_t RODataSize, Align RODataAlign,
+                              uintptr_t RWDataSize, Align RWDataAlign) override;
+#endif
+
+  /// Allocates a memory block of (at least) the given size suitable for
+  /// executable code.
+  ///
+  /// The value of \p Alignment must be a power of two.  If \p Alignment is zero
+  /// a default alignment of 16 will be used.
+  uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+                               unsigned SectionID,
+                               StringRef SectionName) override;
+
+  /// Allocates a memory block of (at least) the given size suitable for
+  /// read-only or read-write data.
+  ///
+  /// The value of \p Alignment must be a power of two.  If \p Alignment is zero
+  /// a default alignment of 16 will be used.
+  uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+                               unsigned SectionID, StringRef SectionName,
+                               bool isReadOnly) override;
+
+  /// Update section-specific memory permissions and other attributes.
+  ///
+  /// This method is called when object loading is complete and section page
+  /// permissions can be applied.  It is up to the memory manager implementation
+  /// to decide whether or not to act on this method.  The memory manager will
+  /// typically allocate all sections as read-write and then apply specific
+  /// permissions when this method is called.  Code sections cannot be executed
+  /// until this function has been called.  In addition, any cache coherency
+  /// operations needed to reliably use the memory are also performed.
+  ///
+  /// \returns true if an error occurred, false otherwise.
+  bool finalizeMemory(std::string *ErrMsg = nullptr) override;
+
+  /// Invalidate instruction cache for code sections.
+  ///
+  /// Some platforms with separate data cache and instruction cache require
+  /// explicit cache flush, otherwise JIT code manipulations (like resolved
+  /// relocations) will get to the data cache but not to the instruction cache.
+  ///
+  /// This method is called from finalizeMemory.
+  virtual void invalidateInstructionCache();
+
+private:
+  struct FreeMemBlock {
+    // The actual block of free memory
+    sys::MemoryBlock Free;
+    // If there is a pending allocation from the same reservation right before
+    // this block, store its index in PendingMem, to be able to update the
+    // pending region if part of this block is allocated, rather than having to
+    // create a new one
+    unsigned PendingPrefixIndex;
+  };
+
+  struct MemoryGroup {
+    // PendingMem contains all blocks of memory (subblocks of AllocatedMem)
+    // which have not yet had their permissions applied, but have been given
+    // out to the user.  FreeMem contains all blocks of memory that have
+    // neither had their permissions applied, nor been given out to the user.
+    SmallVector<sys::MemoryBlock, 16> PendingMem;
+    SmallVector<FreeMemBlock, 16> FreeMem;
+
+    // All memory blocks that have been requested from the system
+    SmallVector<sys::MemoryBlock, 16> AllocatedMem;
+
+    sys::MemoryBlock Near;
+  };
+
+  uint8_t *allocateSection(AllocationPurpose Purpose, uintptr_t Size,
+                           unsigned Alignment);
+
+  std::error_code applyMemoryGroupPermissions(MemoryGroup &MemGroup,
+                                              unsigned Permissions);
+
+  bool hasSpace(const MemoryGroup &MemGroup, uintptr_t Size) const;
+
+  void anchor() override;
+
+  MemoryGroup CodeMem;
+  MemoryGroup RWDataMem;
+  MemoryGroup RODataMem;
+  MemoryMapper *MMapper;
+  std::unique_ptr<MemoryMapper> OwnedMMapper;
+  bool ReserveAllocation;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_SAFESECTIONMEMORYMANAGER_H
diff --git a/src/include/jit/llvmjit.h b/src/include/jit/llvmjit.h
index 420775b1899..5c600e35604 100644
--- a/src/include/jit/llvmjit.h
+++ b/src/include/jit/llvmjit.h
@@ -18,6 +18,7 @@
 #ifdef USE_LLVM
 
 #include <llvm-c/Types.h>
+#include <llvm-c/OrcEE.h>
 
 
 /*
@@ -135,6 +136,7 @@ extern LLVMValueRef slot_compile_deform(struct LLVMJitContext *context, TupleDes
  */
 extern LLVMTypeRef LLVMGetFunctionReturnType(LLVMValueRef r);
 extern LLVMTypeRef LLVMGetFunctionType(LLVMValueRef r);
+extern LLVMOrcObjectLayerRef LLVMOrcCreateRTDyldObjectLinkingLayerWithSafeSectionMemoryManager(LLVMOrcExecutionSessionRef ES);
 
 #ifdef __cplusplus
 } /* extern "C" */
-- 
2.39.3 (Apple Git-146)

