Title: [257812] trunk/Source/JavaScriptCore

Revision: 257812
Author: sbar...@apple.com
Date: 2020-03-03 16:18:30 -0800 (Tue, 03 Mar 2020)

Log Message

Refactor FixedVMPoolExecutableAllocator to not have member functions which are really just helper functions
https://bugs.webkit.org/show_bug.cgi?id=208537

Reviewed by Mark Lam.

There were a few member functions in FixedVMPoolExecutableAllocator that were
essentially free-standing helper functions: they made no use of the allocator's
member state. I've factored them out into file-scope static functions and made
FixedVMPoolExecutableAllocator call them directly. This refactoring is needed
for the upcoming 1GB executable pool on arm64, since that work will introduce
split implementations of something like FixedVMPoolExecutableAllocator.

* jit/ExecutableAllocator.cpp:
(JSC::jitWriteThunkGenerator):
(JSC::genericWriteToJITRegion):
(JSC::initializeSeparatedWXHeaps):
(JSC::initializeJITPageReservation):
(JSC::ExecutableAllocator::isValid const):
(JSC::ExecutableAllocator::underMemoryPressure):
(JSC::ExecutableAllocator::memoryPressureMultiplier):
(JSC::ExecutableAllocator::allocate):
(JSC::ExecutableAllocator::isValidExecutableMemory):
(JSC::ExecutableAllocator::getLock const):
(JSC::ExecutableAllocator::committedByteCount):
(JSC::ExecutableAllocator::dumpProfile):
(JSC::startOfFixedExecutableMemoryPoolImpl):
(JSC::endOfFixedExecutableMemoryPoolImpl):
(JSC::isJITPC):
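
For context, the shape of the change reduces to a standard pattern: a member
function that never touches instance state becomes a file-local static
function, which a second allocator implementation can later call without
sharing a base class. A toy sketch of the pattern (hypothetical names, not
the actual JSC code):

    #include <cstddef>
    #include <cstdlib>

    // Before, the reservation logic was a private member function:
    //
    //     class PoolAllocator {
    //     private:
    //         void* createReservation(std::size_t); // never used `this`
    //     };
    //
    // After, it is a file-scope static helper that the class simply calls.
    static void* createReservation(std::size_t size)
    {
        return std::malloc(size); // stand-in for the real PageReservation
    }

    class PoolAllocator {
    public:
        PoolAllocator() { m_base = createReservation(m_size); }
        ~PoolAllocator() { std::free(m_base); }
    private:
        void* m_base { nullptr };
        std::size_t m_size { 16 * 1024 * 1024 };
    };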

Modified Paths

trunk/Source/JavaScriptCore/ChangeLog
trunk/Source/JavaScriptCore/jit/ExecutableAllocator.cpp

Diff

Modified: trunk/Source/JavaScriptCore/ChangeLog (257811 => 257812)


--- trunk/Source/JavaScriptCore/ChangeLog	2020-03-04 00:06:15 UTC (rev 257811)
+++ trunk/Source/JavaScriptCore/ChangeLog	2020-03-04 00:18:30 UTC (rev 257812)
@@ -1,3 +1,33 @@
+2020-03-03  Saam Barati  <sbar...@apple.com>
+
+        Refactor FixedVMPoolExecutableAllocator to not have member functions which are really just helper functions
+        https://bugs.webkit.org/show_bug.cgi?id=208537
+
+        Reviewed by Mark Lam.
+
+        There were a few member functions in FixedVMPoolExecutableAllocator that were
+        essentially free-standing helper functions: they made no use of the allocator's
+        member state. I've factored them out into file-scope static functions and made
+        FixedVMPoolExecutableAllocator call them directly. This refactoring is needed
+        for the upcoming 1GB executable pool on arm64, since that work will introduce
+        split implementations of something like FixedVMPoolExecutableAllocator.
+
+        * jit/ExecutableAllocator.cpp:
+        (JSC::jitWriteThunkGenerator):
+        (JSC::genericWriteToJITRegion):
+        (JSC::initializeSeparatedWXHeaps):
+        (JSC::initializeJITPageReservation):
+        (JSC::ExecutableAllocator::isValid const):
+        (JSC::ExecutableAllocator::underMemoryPressure):
+        (JSC::ExecutableAllocator::memoryPressureMultiplier):
+        (JSC::ExecutableAllocator::allocate):
+        (JSC::ExecutableAllocator::isValidExecutableMemory):
+        (JSC::ExecutableAllocator::getLock const):
+        (JSC::ExecutableAllocator::committedByteCount):
+        (JSC::ExecutableAllocator::dumpProfile):
+        (JSC::startOfFixedExecutableMemoryPoolImpl):
+        (JSC::endOfFixedExecutableMemoryPoolImpl):
+        (JSC::isJITPC):
+
 2020-03-03  Ross Kirsling  <ross.kirsl...@sony.com>
 
         Introduce JSRemoteInspectorServerStart API for socket-based RWI.

Modified: trunk/Source/JavaScriptCore/jit/ExecutableAllocator.cpp (257811 => 257812)


--- trunk/Source/JavaScriptCore/jit/ExecutableAllocator.cpp	2020-03-04 00:06:15 UTC (rev 257811)
+++ trunk/Source/JavaScriptCore/jit/ExecutableAllocator.cpp	2020-03-04 00:18:30 UTC (rev 257812)
@@ -154,256 +154,251 @@
 #endif
 }
 
-class FixedVMPoolExecutableAllocator final : public MetaAllocator {
-    WTF_MAKE_FAST_ALLOCATED;
-public:
-    FixedVMPoolExecutableAllocator()
-        : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
-    {
-        if (!isJITEnabled())
-            return;
+#if OS(DARWIN) && HAVE(REMAP_JIT)
 
-        size_t reservationSize;
-        if (Options::jitMemoryReservationSize())
-            reservationSize = Options::jitMemoryReservationSize();
-        else
-            reservationSize = fixedExecutableMemoryPoolSize;
-        reservationSize = std::max(roundUpToMultipleOf(pageSize(), reservationSize), pageSize() * 2);
+#if CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
+static ALWAYS_INLINE MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* writableAddr, void* stubBase, size_t stubSize)
+{
+    using namespace ARM64Registers;
+    using TrustedImm32 = MacroAssembler::TrustedImm32;
 
-        auto tryCreatePageReservation = [] (size_t reservationSize) {
-#if OS(LINUX)
-            // If we use uncommitted reservation, mmap operation is recorded with small page size in perf command's output.
-            // This makes the following JIT code logging broken and some of JIT code is not recorded correctly.
-            // To avoid this problem, we use committed reservation if we need perf JITDump logging.
-            if (Options::logJITCodeForPerf())
-                return PageReservation::reserveAndCommitWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
-#endif
-            return PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
-        };
+    MacroAssembler jit;
 
-        m_reservation = tryCreatePageReservation(reservationSize);
-        if (m_reservation) {
-            ASSERT(m_reservation.size() == reservationSize);
-            void* reservationBase = m_reservation.base();
+    jit.tagReturnAddress();
+    jit.move(MacroAssembler::TrustedImmPtr(writableAddr), x7);
+    jit.addPtr(x7, x0);
 
-#if ENABLE(FAST_JIT_PERMISSIONS) && !ENABLE(SEPARATED_WX_HEAP)
-            RELEASE_ASSERT(os_thread_self_restrict_rwx_is_supported());
-            os_thread_self_restrict_rwx_to_rx();
+    jit.move(x0, x3);
+    MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64));
 
-#else // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)
-#if ENABLE(FAST_JIT_PERMISSIONS)
-            if (os_thread_self_restrict_rwx_is_supported()) {
-                g_jscConfig.useFastPermisionsJITCopy = true;
-                os_thread_self_restrict_rwx_to_rx();
-            } else
-#endif
-            if (Options::useSeparatedWXHeap()) {
-                // First page of our JIT allocation is reserved.
-                ASSERT(reservationSize >= pageSize() * 2);
-                reservationBase = (void*)((uintptr_t)reservationBase + pageSize());
-                reservationSize -= pageSize();
-                initializeSeparatedWXHeaps(m_reservation.base(), pageSize(), reservationBase, reservationSize);
-            }
-#endif // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)
+    jit.add64(TrustedImm32(32), x3);
+    jit.and64(TrustedImm32(-32), x3);
+    jit.loadPair64(x1, x12, x13);
+    jit.loadPair64(x1, TrustedImm32(16), x14, x15);
+    jit.sub64(x3, x0, x5);
+    jit.addPtr(x5, x1);
 
-            addFreshFreeSpace(reservationBase, reservationSize);
+    jit.loadPair64(x1, x8, x9);
+    jit.loadPair64(x1, TrustedImm32(16), x10, x11);
+    jit.add64(TrustedImm32(32), x1);
+    jit.sub64(x5, x2);
+    jit.storePair64(x12, x13, x0);
+    jit.storePair64(x14, x15, x0, TrustedImm32(16));
+    MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2);
 
-            ASSERT(bytesReserved() == reservationSize); // Since our executable memory is fixed-sized, bytesReserved is never changed after initialization.
+    MacroAssembler::Label copyLoop = jit.label();
+    jit.storePair64WithNonTemporalAccess(x8, x9, x3);
+    jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16));
+    jit.add64(TrustedImm32(32), x3);
+    jit.loadPair64WithNonTemporalAccess(x1, x8, x9);
+    jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11);
+    jit.add64(TrustedImm32(32), x1);
+    jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit);
 
-            void* reservationEnd = reinterpret_cast<uint8_t*>(reservationBase) + reservationSize;
+    cleanup.link(&jit);
+    jit.add64(x2, x1);
+    jit.loadPair64(x1, x12, x13);
+    jit.loadPair64(x1, TrustedImm32(16), x14, x15);
+    jit.storePair64(x8, x9, x3);
+    jit.storePair64(x10, x11, x3, TrustedImm32(16));
+    jit.addPtr(x2, x3);
+    jit.storePair64(x12, x13, x3, TrustedImm32(32));
+    jit.storePair64(x14, x15, x3, TrustedImm32(48));
+    jit.ret();
 
-            g_jscConfig.startExecutableMemory = tagCodePtr<ExecutableMemoryPtrTag>(reservationBase);
-            g_jscConfig.endExecutableMemory = tagCodePtr<ExecutableMemoryPtrTag>(reservationEnd);
-        }
-    }
+    MacroAssembler::Label local0 = jit.label();
+    jit.load64(x1, PostIndex(8), x6);
+    jit.store64(x6, x3, PostIndex(8));
+    smallCopy.link(&jit);
+    jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit);
+    MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2);
+    MacroAssembler::Label local1 = jit.label();
+    jit.load8(x1, PostIndex(1), x6);
+    jit.store8(x6, x3, PostIndex(1));
+    jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit);
+    local2.link(&jit);
+    jit.ret();
 
-    virtual ~FixedVMPoolExecutableAllocator();
+    auto stubBaseCodePtr = MacroAssemblerCodePtr<LinkBufferPtrTag>(tagCodePtr<LinkBufferPtrTag>(stubBase));
+    LinkBuffer linkBuffer(jit, stubBaseCodePtr, stubSize);
+    // We don't use FINALIZE_CODE() for two reasons.
+    // The first is that we don't want the writeable address, as disassembled instructions,
+    // to appear in the console or anywhere in memory, via the PrintStream buffer.
+    // The second is we can't guarantee that the code is readable when using the
+    // asyncDisassembly option as our caller will set our pages execute only.
+    return linkBuffer.finalizeCodeWithoutDisassembly<JITThunkPtrTag>();
+}
+#else // not CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
+static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize)
+{
+    memcpy((void*)(g_jscConfig.startOfFixedWritableMemoryPool + offset), data, dataSize);
+}
 
-    void* memoryStart() { return untagCodePtr<ExecutableMemoryPtrTag>(g_jscConfig.startExecutableMemory); }
-    void* memoryEnd() { return untagCodePtr<ExecutableMemoryPtrTag>(g_jscConfig.endExecutableMemory); }
-    bool isJITPC(void* pc) { return memoryStart() <= pc && pc < memoryEnd(); }
-
-protected:
-    FreeSpacePtr allocateNewSpace(size_t&) override
-    {
-        // We're operating in a fixed pool, so new allocation is always prohibited.
-        return nullptr;
-    }
-
-    void notifyNeedPage(void* page, size_t count) override
-    {
-#if USE(MADV_FREE_FOR_JIT_MEMORY)
-        UNUSED_PARAM(page);
-        UNUSED_PARAM(count);
-#else
-        m_reservation.commit(page, pageSize() * count);
+static MacroAssemblerCodeRef<JITThunkPtrTag> ALWAYS_INLINE jitWriteThunkGenerator(void* address, void*, size_t)
+{
+    g_jscConfig.startOfFixedWritableMemoryPool = reinterpret_cast<uintptr_t>(address);
+    void* function = reinterpret_cast<void*>(&genericWriteToJITRegion);
+#if CPU(ARM_THUMB2)
+    // Handle thumb offset
+    uintptr_t functionAsInt = reinterpret_cast<uintptr_t>(function);
+    functionAsInt -= 1;
+    function = reinterpret_cast<void*>(functionAsInt);
 #endif
-    }
+    auto codePtr = MacroAssemblerCodePtr<JITThunkPtrTag>(tagCFunctionPtr<JITThunkPtrTag>(function));
+    return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(codePtr);
+}
+#endif // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
 
-    void notifyPageIsFree(void* page, size_t count) override
-    {
-#if USE(MADV_FREE_FOR_JIT_MEMORY)
-        for (;;) {
-            int result = madvise(page, pageSize() * count, MADV_FREE);
-            if (!result)
-                return;
-            ASSERT(result == -1);
-            if (errno != EAGAIN) {
-                RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
-                break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
-            }
-        }
-#else
-        m_reservation.decommit(page, pageSize() * count);
-#endif
-    }
+static ALWAYS_INLINE void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize)
+{
+    mach_vm_address_t writableAddr = 0;
 
-private:
-#if OS(DARWIN) && HAVE(REMAP_JIT)
-    void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize)
-    {
-        mach_vm_address_t writableAddr = 0;
-
-        // Create a second mapping of the JIT region at a random address.
-        vm_prot_t cur, max;
-        int remapFlags = VM_FLAGS_ANYWHERE;
+    // Create a second mapping of the JIT region at a random address.
+    vm_prot_t cur, max;
+    int remapFlags = VM_FLAGS_ANYWHERE;
 #if defined(VM_FLAGS_RANDOM_ADDR)
-        remapFlags |= VM_FLAGS_RANDOM_ADDR;
+    remapFlags |= VM_FLAGS_RANDOM_ADDR;
 #endif
-        kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
-            remapFlags,
-            mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
-            &cur, &max, VM_INHERIT_DEFAULT);
+    kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
+        remapFlags,
+        mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
+        &cur, &max, VM_INHERIT_DEFAULT);
 
-        bool remapSucceeded = (ret == KERN_SUCCESS);
-        if (!remapSucceeded)
-            return;
+    bool remapSucceeded = (ret == KERN_SUCCESS);
+    if (!remapSucceeded)
+        return;
 
-        // Assemble a thunk that will serve as the means for writing into the JIT region.
-        MacroAssemblerCodeRef<JITThunkPtrTag> writeThunk = jitWriteThunkGenerator(reinterpret_cast<void*>(writableAddr), stubBase, stubSize);
+    // Assemble a thunk that will serve as the means for writing into the JIT region.
+    MacroAssemblerCodeRef<JITThunkPtrTag> writeThunk = jitWriteThunkGenerator(reinterpret_cast<void*>(writableAddr), stubBase, stubSize);
 
-        int result = 0;
+    int result = 0;
 
 #if USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
-        // Prevent reading the write thunk code.
-        result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(stubBase), stubSize, true, VM_PROT_EXECUTE);
-        RELEASE_ASSERT(!result);
+    // Prevent reading the write thunk code.
+    result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(stubBase), stubSize, true, VM_PROT_EXECUTE);
+    RELEASE_ASSERT(!result);
 #endif
 
-        // Prevent writing into the executable JIT mapping.
-        result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(jitBase), jitSize, true, VM_PROT_READ | VM_PROT_EXECUTE);
-        RELEASE_ASSERT(!result);
+    // Prevent writing into the executable JIT mapping.
+    result = vm_protect(mach_task_self(), reinterpret_cast<vm_address_t>(jitBase), jitSize, true, VM_PROT_READ | VM_PROT_EXECUTE);
+    RELEASE_ASSERT(!result);
 
-        // Prevent execution in the writable JIT mapping.
-        result = vm_protect(mach_task_self(), static_cast<vm_address_t>(writableAddr), jitSize, true, VM_PROT_READ | VM_PROT_WRITE);
-        RELEASE_ASSERT(!result);
+    // Prevent execution in the writable JIT mapping.
+    result = vm_protect(mach_task_self(), static_cast<vm_address_t>(writableAddr), jitSize, true, VM_PROT_READ | VM_PROT_WRITE);
+    RELEASE_ASSERT(!result);
 
-        // Zero out writableAddr to avoid leaking the address of the writable mapping.
-        memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr));
+    // Zero out writableAddr to avoid leaking the address of the writable mapping.
+    memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr));
 
 #if ENABLE(SEPARATED_WX_HEAP)
-        g_jscConfig.jitWriteSeparateHeaps = reinterpret_cast<JITWriteSeparateHeapsFunction>(writeThunk.code().executableAddress());
+    g_jscConfig.jitWriteSeparateHeaps = reinterpret_cast<JITWriteSeparateHeapsFunction>(writeThunk.code().executableAddress());
 #endif
-    }
+}
 
-#if CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
-    MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* writableAddr, void* stubBase, size_t stubSize)
-    {
-        using namespace ARM64Registers;
-        using TrustedImm32 = MacroAssembler::TrustedImm32;
+#else // OS(DARWIN) && HAVE(REMAP_JIT)
+static ALWAYS_INLINE void initializeSeparatedWXHeaps(void*, size_t, void*, size_t)
+{
+}
+#endif
 
-        MacroAssembler jit;
+struct JITReservation {
+    PageReservation pageReservation;
+    void* base { nullptr };
+    size_t size { 0 };
+};
 
-        jit.tagReturnAddress();
-        jit.move(MacroAssembler::TrustedImmPtr(writableAddr), x7);
-        jit.addPtr(x7, x0);
+static ALWAYS_INLINE JITReservation initializeJITPageReservation()
+{
+    JITReservation reservation;
+    if (!isJITEnabled())
+        return reservation;
 
-        jit.move(x0, x3);
-        MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64));
+    if (Options::jitMemoryReservationSize())
+        reservation.size = Options::jitMemoryReservationSize();
+    else
+        reservation.size = fixedExecutableMemoryPoolSize;
+    reservation.size = std::max(roundUpToMultipleOf(pageSize(), reservation.size), pageSize() * 2);
 
-        jit.add64(TrustedImm32(32), x3);
-        jit.and64(TrustedImm32(-32), x3);
-        jit.loadPair64(x1, x12, x13);
-        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
-        jit.sub64(x3, x0, x5);
-        jit.addPtr(x5, x1);
+    auto tryCreatePageReservation = [] (size_t reservationSize) {
+#if OS(LINUX)
+        // If we use uncommitted reservation, mmap operation is recorded with small page size in perf command's output.
+        // This makes the following JIT code logging broken and some of JIT code is not recorded correctly.
+        // To avoid this problem, we use committed reservation if we need perf JITDump logging.
+        if (Options::logJITCodeForPerf())
+            return PageReservation::reserveAndCommitWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+#endif
+        return PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+    };
 
-        jit.loadPair64(x1, x8, x9);
-        jit.loadPair64(x1, TrustedImm32(16), x10, x11);
-        jit.add64(TrustedImm32(32), x1);
-        jit.sub64(x5, x2);
-        jit.storePair64(x12, x13, x0);
-        jit.storePair64(x14, x15, x0, TrustedImm32(16));
-        MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2);
+    reservation.pageReservation = tryCreatePageReservation(reservation.size);
+    if (reservation.pageReservation) {
+        ASSERT(reservation.pageReservation.size() == reservation.size);
+        reservation.base = reservation.pageReservation.base();
 
-        MacroAssembler::Label copyLoop = jit.label();
-        jit.storePair64WithNonTemporalAccess(x8, x9, x3);
-        jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16));
-        jit.add64(TrustedImm32(32), x3);
-        jit.loadPair64WithNonTemporalAccess(x1, x8, x9);
-        jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11);
-        jit.add64(TrustedImm32(32), x1);
-        jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit);
+#if ENABLE(FAST_JIT_PERMISSIONS) && !ENABLE(SEPARATED_WX_HEAP)
+        RELEASE_ASSERT(os_thread_self_restrict_rwx_is_supported());
+        os_thread_self_restrict_rwx_to_rx();
 
-        cleanup.link(&jit);
-        jit.add64(x2, x1);
-        jit.loadPair64(x1, x12, x13);
-        jit.loadPair64(x1, TrustedImm32(16), x14, x15);
-        jit.storePair64(x8, x9, x3);
-        jit.storePair64(x10, x11, x3, TrustedImm32(16));
-        jit.addPtr(x2, x3);
-        jit.storePair64(x12, x13, x3, TrustedImm32(32));
-        jit.storePair64(x14, x15, x3, TrustedImm32(48));
-        jit.ret();
+#else // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)
+#if ENABLE(FAST_JIT_PERMISSIONS)
+        if (os_thread_self_restrict_rwx_is_supported()) {
+            g_jscConfig.useFastPermisionsJITCopy = true;
+            os_thread_self_restrict_rwx_to_rx();
+        } else
+#endif
+        if (Options::useSeparatedWXHeap()) {
+            // First page of our JIT allocation is reserved.
+            ASSERT(reservation.size >= pageSize() * 2);
+            reservation.base = (void*)((uintptr_t)(reservation.base) + pageSize());
+            reservation.size -= pageSize();
+            initializeSeparatedWXHeaps(reservation.pageReservation.base(), pageSize(), reservation.base, reservation.size);
+        }
+#endif // not ENABLE(FAST_JIT_PERMISSIONS) or ENABLE(SEPARATED_WX_HEAP)
+        void* reservationEnd = reinterpret_cast<uint8_t*>(reservation.base) + reservation.size;
 
-        MacroAssembler::Label local0 = jit.label();
-        jit.load64(x1, PostIndex(8), x6);
-        jit.store64(x6, x3, PostIndex(8));
-        smallCopy.link(&jit);
-        jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit);
-        MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2);
-        MacroAssembler::Label local1 = jit.label();
-        jit.load8(x1, PostIndex(1), x6);
-        jit.store8(x6, x3, PostIndex(1));
-        jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit);
-        local2.link(&jit);
-        jit.ret();
+        g_jscConfig.startExecutableMemory = tagCodePtr<ExecutableMemoryPtrTag>(reservation.base);
+        g_jscConfig.endExecutableMemory = tagCodePtr<ExecutableMemoryPtrTag>(reservationEnd);
+    }
 
-        auto stubBaseCodePtr = MacroAssemblerCodePtr<LinkBufferPtrTag>(tagCodePtr<LinkBufferPtrTag>(stubBase));
-        LinkBuffer linkBuffer(jit, stubBaseCodePtr, stubSize);
-        // We don't use FINALIZE_CODE() for two reasons.
-        // The first is that we don't want the writeable address, as disassembled instructions,
-        // to appear in the console or anywhere in memory, via the PrintStream buffer.
-        // The second is we can't guarantee that the code is readable when using the
-        // asyncDisassembly option as our caller will set our pages execute only.
-        return linkBuffer.finalizeCodeWithoutDisassembly<JITThunkPtrTag>();
+    return reservation;
+}
+
+class FixedVMPoolExecutableAllocator final : public MetaAllocator {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    FixedVMPoolExecutableAllocator()
+        : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
+    {
+        JITReservation reservation = initializeJITPageReservation();
+        m_reservation = WTFMove(reservation.pageReservation);
+        if (m_reservation) {
+            addFreshFreeSpace(reservation.base, reservation.size);
+            ASSERT(bytesReserved() == reservation.size); // Since our executable memory is fixed-sized, bytesReserved is never changed after initialization.
+        }
     }
-#else // not CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
-    static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize)
+
+    virtual ~FixedVMPoolExecutableAllocator();
+
+    void* memoryStart() { return untagCodePtr<ExecutableMemoryPtrTag>(g_jscConfig.startExecutableMemory); }
+    void* memoryEnd() { return untagCodePtr<ExecutableMemoryPtrTag>(g_jscConfig.endExecutableMemory); }
+    bool isJITPC(void* pc) { return memoryStart() <= pc && pc < memoryEnd(); }
+
+protected:
+    FreeSpacePtr allocateNewSpace(size_t&) override
     {
-        memcpy((void*)(g_jscConfig.startOfFixedWritableMemoryPool + offset), data, dataSize);
+        // We're operating in a fixed pool, so new allocation is always prohibited.
+        return nullptr;
     }
 
-    MacroAssemblerCodeRef<JITThunkPtrTag> jitWriteThunkGenerator(void* address, void*, size_t)
+    void notifyNeedPage(void* page, size_t count) override
     {
-        g_jscConfig.startOfFixedWritableMemoryPool = reinterpret_cast<uintptr_t>(address);
-        void* function = reinterpret_cast<void*>(&genericWriteToJITRegion);
-#if CPU(ARM_THUMB2)
-        // Handle thumb offset
-        uintptr_t functionAsInt = reinterpret_cast<uintptr_t>(function);
-        functionAsInt -= 1;
-        function = reinterpret_cast<void*>(functionAsInt);
-#endif
-        auto codePtr = MacroAssemblerCodePtr<JITThunkPtrTag>(tagCFunctionPtr<JITThunkPtrTag>(function));
-        return MacroAssemblerCodeRef<JITThunkPtrTag>::createSelfManagedCodeRef(codePtr);
+        m_reservation.commit(page, pageSize() * count);
     }
-#endif // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
 
-#else // OS(DARWIN) && HAVE(REMAP_JIT)
-    void initializeSeparatedWXHeaps(void*, size_t, void*, size_t)
+    void notifyPageIsFree(void* page, size_t count) override
     {
+        m_reservation.decommit(page, pageSize() * count);
     }
-#endif
 
 private:
     PageReservation m_reservation;
@@ -427,7 +422,7 @@
 
 bool ExecutableAllocator::isValid() const
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     if (!allocator)
         return Base::isValid();
     return !!allocator->bytesReserved();
@@ -435,7 +430,7 @@
 
 bool ExecutableAllocator::underMemoryPressure()
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     if (!allocator)
         return Base::underMemoryPressure();
     return allocator->bytesAllocated() > allocator->bytesReserved() / 2;
@@ -443,7 +438,7 @@
 
 double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     if (!allocator)
         return Base::memoryPressureMultiplier(addedMemoryUsage);
     ASSERT(allocator->bytesAllocated() <= allocator->bytesReserved());
@@ -463,7 +458,7 @@
 
 RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     if (!allocator)
         return Base::allocate(sizeInBytes, ownerUID, effort);
     if (Options::logExecutableAllocation()) {
@@ -512,7 +507,7 @@
 
 bool ExecutableAllocator::isValidExecutableMemory(const AbstractLocker& locker, void* address)
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     if (!allocator)
         return Base::isValidExecutableMemory(locker, address);
     return allocator->isInAllocatedMemory(locker, address);
@@ -520,7 +515,7 @@
 
 Lock& ExecutableAllocator::getLock() const
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     if (!allocator)
         return Base::getLock();
     return allocator->getLock();
@@ -528,7 +523,7 @@
 
 size_t ExecutableAllocator::committedByteCount()
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     if (!allocator)
         return Base::committedByteCount();
     return allocator->bytesCommitted();
@@ -537,7 +532,7 @@
 #if ENABLE(META_ALLOCATOR_PROFILE)
 void ExecutableAllocator::dumpProfile()
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     if (!allocator)
         return;
     allocator->dumpProfile();
@@ -546,7 +541,7 @@
 
 void* startOfFixedExecutableMemoryPoolImpl()
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     if (!allocator)
         return nullptr;
     return allocator->memoryStart();
@@ -554,7 +549,7 @@
 
 void* endOfFixedExecutableMemoryPoolImpl()
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     if (!allocator)
         return nullptr;
     return allocator->memoryEnd();
@@ -562,7 +557,7 @@
 
 bool isJITPC(void* pc)
 {
-    auto* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
+    FixedVMPoolExecutableAllocator* allocator = g_jscConfig.fixedVMPoolExecutableAllocator;
     return allocator && allocator->isJITPC(pc);
 }
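
For context on the W^X scheme that initializeSeparatedWXHeaps sets up above:
the JIT region is mapped twice, an RX (or execute-only) alias that the CPU
runs and an RW alias that the JIT writes through, so no single mapping is
ever writable and executable at once. Below is a minimal sketch of just the
dual-mapping step, distilled from the code above (Darwin-only; the
execute-only write thunk and the scrubbing of the writable address are
omitted, and error handling is reduced):

    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <cstddef>

    // Returns a writable alias of an existing JIT region, or nullptr.
    static void* remapWritableAlias(void* jitBase, std::size_t jitSize)
    {
        mach_vm_address_t writableAddr = 0;
        vm_prot_t cur, max;
        int remapFlags = VM_FLAGS_ANYWHERE;
    #if defined(VM_FLAGS_RANDOM_ADDR)
        remapFlags |= VM_FLAGS_RANDOM_ADDR; // randomize the alias address
    #endif
        // Create a second mapping of the same physical pages.
        kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
            remapFlags,
            mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
            &cur, &max, VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return nullptr;

        // Executable alias: readable + executable, never writable.
        vm_protect(mach_task_self(), (vm_address_t)jitBase, jitSize, true,
            VM_PROT_READ | VM_PROT_EXECUTE);
        // Writable alias: readable + writable, never executable.
        vm_protect(mach_task_self(), (vm_address_t)writableAddr, jitSize, true,
            VM_PROT_READ | VM_PROT_WRITE);
        return (void*)writableAddr;
    }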
 