[227951] trunk/Source

Revision: 227951
Author: sbar...@apple.com
Date: 2018-01-31 21:36:40 -0800 (Wed, 31 Jan 2018)

Log Message

Replace tryLargeMemalignVirtual with tryLargeZeroedMemalignVirtual and use it to allocate large zeroed memory in Wasm
https://bugs.webkit.org/show_bug.cgi?id=182064
<rdar://problem/36840132>

Reviewed by Geoffrey Garen.

Source/bmalloc:

This patch replaces the tryLargeMemalignVirtual API with tryLargeZeroedMemalignVirtual.
By doing that, we're able to remove the AllocationKind enum. To zero the memory,
tryLargeZeroedMemalignVirtual uses mmap(... MAP_ANON ...) over previously mmapped
memory. This both purges any resident memory in the virtual range and ensures
that the pages in the range are zeroed. Most OSes should implement this by taking a
page fault and zero-filling on first access. Therefore, this API returns pages
that will fault on first access; hence the 'Virtual' in the API's name.
This API differs from the old one in that its users need not call madvise
themselves: the memory is ready to go.
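
A minimal sketch of the zero-and-purge trick described above, assuming a
POSIX system; the real implementation is bmalloc's vmZeroAndPurge in
bmalloc/VMAllocate.h (see the diff below), which additionally passes
BMALLOC_NORESERVE and bmalloc's VM tag:

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    // Remap MAP_ANON | MAP_FIXED over an existing anonymous mapping. This
    // atomically replaces its pages: resident memory is returned to the OS,
    // and any later access faults in a fresh zero-filled page.
    void zeroAndPurge(void* p, std::size_t size)
    {
        void* result = mmap(p, size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
        assert(result == p);
    }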

* bmalloc.xcodeproj/project.pbxproj:
* bmalloc/AllocationKind.h: Removed.
* bmalloc/DebugHeap.cpp:
(bmalloc::DebugHeap::memalignLarge):
(bmalloc::DebugHeap::freeLarge):
* bmalloc/DebugHeap.h:
* bmalloc/Heap.cpp:
(bmalloc::Heap::splitAndAllocate):
(bmalloc::Heap::tryAllocateLarge):
(bmalloc::Heap::allocateLarge):
(bmalloc::Heap::shrinkLarge):
(bmalloc::Heap::deallocateLarge):
* bmalloc/Heap.h:
* bmalloc/IsoPage.cpp:
(bmalloc::IsoPageBase::allocatePageMemory):
* bmalloc/VMAllocate.h:
(bmalloc::vmZeroAndPurge):
* bmalloc/VMHeap.cpp:
(bmalloc::VMHeap::tryAllocateLargeChunk):
* bmalloc/VMHeap.h:
* bmalloc/bmalloc.cpp:
(bmalloc::api::tryLargeZeroedMemalignVirtual):
(bmalloc::api::freeLargeVirtual):
(bmalloc::api::tryLargeMemalignVirtual): Deleted.
* bmalloc/bmalloc.h:

Source/JavaScriptCore:

This patch switches WebAssembly Memory to always use bmalloc's
zeroed virtual allocation API. This way, we no longer dirty the
memory just to zero it. It's a huge compile-time speedup on
WasmBench on iOS.
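
Concretely, the slow-memory (BoundsChecking) path now gets already-zeroed
pages from a single call, so the explicit memset is gone. A simplified
excerpt of the pattern in the WasmMemory.cpp diff below (error handling
around the memory manager elided):

    // tryAllocateZeroedVirtualPages returns pages that are already zero,
    // so the old memset(slowMemory, 0, initialBytes) is no longer needed.
    void* slowMemory = Gigacage::tryAllocateZeroedVirtualPages(
        Gigacage::Primitive, initialBytes);
    if (!slowMemory)
        return nullptr; // out of memory; caller may reclaim and retry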

* wasm/WasmMemory.cpp:
(JSC::Wasm::Memory::create):
(JSC::Wasm::Memory::~Memory):
(JSC::Wasm::Memory::addressIsInActiveFastMemory):
(JSC::Wasm::Memory::grow):
(JSC::Wasm::commitZeroPages): Deleted.

Source/WTF:

* wtf/Gigacage.cpp:
(Gigacage::tryAllocateZeroedVirtualPages):
(Gigacage::freeVirtualPages):
(Gigacage::tryAllocateVirtualPages): Deleted.
* wtf/Gigacage.h:
* wtf/OSAllocator.h:

Modified Paths

trunk/Source/JavaScriptCore/ChangeLog
trunk/Source/JavaScriptCore/wasm/WasmMemory.cpp
trunk/Source/WTF/ChangeLog
trunk/Source/WTF/wtf/Gigacage.cpp
trunk/Source/WTF/wtf/Gigacage.h
trunk/Source/WTF/wtf/OSAllocator.h
trunk/Source/bmalloc/ChangeLog
trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj
trunk/Source/bmalloc/bmalloc/DebugHeap.cpp
trunk/Source/bmalloc/bmalloc/DebugHeap.h
trunk/Source/bmalloc/bmalloc/Heap.cpp
trunk/Source/bmalloc/bmalloc/Heap.h
trunk/Source/bmalloc/bmalloc/IsoPage.cpp
trunk/Source/bmalloc/bmalloc/VMAllocate.h
trunk/Source/bmalloc/bmalloc/VMHeap.cpp
trunk/Source/bmalloc/bmalloc/VMHeap.h
trunk/Source/bmalloc/bmalloc/bmalloc.cpp
trunk/Source/bmalloc/bmalloc/bmalloc.h

Removed Paths

trunk/Source/bmalloc/bmalloc/AllocationKind.h

Diff

Modified: trunk/Source/JavaScriptCore/ChangeLog (227950 => 227951)


--- trunk/Source/JavaScriptCore/ChangeLog	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/JavaScriptCore/ChangeLog	2018-02-01 05:36:40 UTC (rev 227951)
@@ -1,3 +1,23 @@
+2018-01-31  Saam Barati  <sbar...@apple.com>
+
+        Replace tryLargeMemalignVirtual with tryLargeZeroedMemalignVirtual and use it to allocate large zeroed memory in Wasm
+        https://bugs.webkit.org/show_bug.cgi?id=182064
+        <rdar://problem/36840132>
+
+        Reviewed by Geoffrey Garen.
+
+        This patch switches WebAssembly Memory to always use bmalloc's
+        zeroed virtual allocation API. This way, we no longer dirty the
+        memory just to zero it. It's a huge compile-time speedup on
+        WasmBench on iOS.
+
+        * wasm/WasmMemory.cpp:
+        (JSC::Wasm::Memory::create):
+        (JSC::Wasm::Memory::~Memory):
+        (JSC::Wasm::Memory::addressIsInActiveFastMemory):
+        (JSC::Wasm::Memory::grow):
+        (JSC::Wasm::commitZeroPages): Deleted.
+
 2018-01-31  Mark Lam  <mark....@apple.com>
 
         Build fix for CLoop after r227874.

Modified: trunk/Source/JavaScriptCore/wasm/WasmMemory.cpp (227950 => 227951)


--- trunk/Source/JavaScriptCore/wasm/WasmMemory.cpp	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/JavaScriptCore/wasm/WasmMemory.cpp	2018-02-01 05:36:40 UTC (rev 227951)
@@ -95,26 +95,26 @@
 class MemoryManager {
 public:
     MemoryManager()
-        : m_maxCount(Options::maxNumWebAssemblyFastMemories())
+        : m_maxFastMemoryCount(Options::maxNumWebAssemblyFastMemories())
     {
     }
     
-    MemoryResult tryAllocateVirtualPages()
+    MemoryResult tryAllocateFastMemory()
     {
         MemoryResult result = [&] {
             auto holder = holdLock(m_lock);
-            if (m_memories.size() >= m_maxCount)
+            if (m_fastMemories.size() >= m_maxFastMemoryCount)
                 return MemoryResult(nullptr, MemoryResult::SyncTryToReclaimMemory);
             
-            void* result = Gigacage::tryAllocateVirtualPages(Gigacage::Primitive, Memory::fastMappedBytes());
+            void* result = Gigacage::tryAllocateZeroedVirtualPages(Gigacage::Primitive, Memory::fastMappedBytes());
             if (!result)
                 return MemoryResult(nullptr, MemoryResult::SyncTryToReclaimMemory);
             
-            m_memories.append(result);
+            m_fastMemories.append(result);
             
             return MemoryResult(
                 result,
-                m_memories.size() >= m_maxCount / 2 ? MemoryResult::SuccessAndNotifyMemoryPressure : MemoryResult::Success);
+                m_fastMemories.size() >= m_maxFastMemoryCount / 2 ? MemoryResult::SuccessAndNotifyMemoryPressure : MemoryResult::Success);
         }();
         
         if (Options::logWebAssemblyMemory())
@@ -123,12 +123,12 @@
         return result;
     }
     
-    void freeVirtualPages(void* basePtr)
+    void freeFastMemory(void* basePtr)
     {
         {
             auto holder = holdLock(m_lock);
             Gigacage::freeVirtualPages(Gigacage::Primitive, basePtr, Memory::fastMappedBytes());
-            m_memories.removeFirst(basePtr);
+            m_fastMemories.removeFirst(basePtr);
         }
         
         if (Options::logWebAssemblyMemory())
@@ -135,11 +135,11 @@
             dataLog("Freed virtual; state: ", *this, "\n");
     }
     
-    bool containsAddress(void* address)
+    bool isAddressInFastMemory(void* address)
     {
         // NOTE: This can be called from a signal handler, but only after we proved that we're in JIT code.
         auto holder = holdLock(m_lock);
-        for (void* memory : m_memories) {
+        for (void* memory : m_fastMemories) {
             char* start = static_cast<char*>(memory);
             if (start <= address && address <= start + Memory::fastMappedBytes())
                 return true;
@@ -188,13 +188,13 @@
     
     void dump(PrintStream& out) const
     {
-        out.print("virtual memories =  ", m_memories.size(), "/", m_maxCount, ", bytes = ", m_physicalBytes, "/", memoryLimit());
+        out.print("fast memories =  ", m_fastMemories.size(), "/", m_maxFastMemoryCount, ", bytes = ", m_physicalBytes, "/", memoryLimit());
     }
     
 private:
     Lock m_lock;
-    unsigned m_maxCount { 0 };
-    Vector<void*> m_memories;
+    unsigned m_maxFastMemoryCount { 0 };
+    Vector<void*> m_fastMemories;
     size_t m_physicalBytes { 0 };
 };
 
@@ -269,21 +269,6 @@
     dataLogLnIf(verbose, "Memory::Memory allocating ", *this);
 }
 
-static void commitZeroPages(void* startAddress, size_t sizeInBytes)
-{
-    bool writable = true;
-    bool executable = false;
-#if OS(LINUX)
-    // In Linux, MADV_DONTNEED clears backing pages with zero. Be Careful that MADV_DONTNEED shows different semantics in different OSes.
-    // For example, FreeBSD does not clear backing pages immediately.
-    while (madvise(startAddress, sizeInBytes, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
-    OSAllocator::commit(startAddress, sizeInBytes, writable, executable);
-#else
-    OSAllocator::commit(startAddress, sizeInBytes, writable, executable);
-    memset(startAddress, 0, sizeInBytes);
-#endif
-}
-
 RefPtr<Memory> Memory::create()
 {
     return adoptRef(new Memory());
@@ -314,7 +299,7 @@
     if (Options::useWebAssemblyFastMemory()) {
         tryAllocate(
             [&] () -> MemoryResult::Kind {
-                auto result = memoryManager().tryAllocateVirtualPages();
+                auto result = memoryManager().tryAllocateFastMemory();
                 fastMemory = bitwise_cast<char*>(result.basePtr);
                 return result.kind;
             }, notifyMemoryPressure, syncTryToReclaimMemory);
@@ -327,8 +312,6 @@
             RELEASE_ASSERT_NOT_REACHED();
         }
 
-        commitZeroPages(fastMemory, initialBytes);
-
         return adoptRef(new Memory(fastMemory, initial, maximum, Memory::fastMappedBytes(), MemoryMode::Signaling, WTFMove(notifyMemoryPressure), WTFMove(syncTryToReclaimMemory), WTFMove(growSuccessCallback)));
     }
     
@@ -338,12 +321,11 @@
     if (!initialBytes)
         return adoptRef(new Memory(initial, maximum, WTFMove(notifyMemoryPressure), WTFMove(syncTryToReclaimMemory), WTFMove(growSuccessCallback)));
     
-    void* slowMemory = Gigacage::tryAlignedMalloc(Gigacage::Primitive, WTF::pageSize(), initialBytes);
+    void* slowMemory = Gigacage::tryAllocateZeroedVirtualPages(Gigacage::Primitive, initialBytes);
     if (!slowMemory) {
         memoryManager().freePhysicalBytes(initialBytes);
         return nullptr;
     }
-    memset(slowMemory, 0, initialBytes);
     return adoptRef(new Memory(slowMemory, initial, maximum, initialBytes, MemoryMode::BoundsChecking, WTFMove(notifyMemoryPressure), WTFMove(syncTryToReclaimMemory), WTFMove(growSuccessCallback)));
 }
 
@@ -357,10 +339,10 @@
                 dataLog("mprotect failed: ", strerror(errno), "\n");
                 RELEASE_ASSERT_NOT_REACHED();
             }
-            memoryManager().freeVirtualPages(m_memory);
+            memoryManager().freeFastMemory(m_memory);
             break;
         case MemoryMode::BoundsChecking:
-            Gigacage::alignedFree(Gigacage::Primitive, m_memory);
+            Gigacage::freeVirtualPages(Gigacage::Primitive, m_memory, m_size);
             break;
         }
     }
@@ -379,7 +361,7 @@
 
 bool Memory::addressIsInActiveFastMemory(void* address)
 {
-    return memoryManager().containsAddress(address);
+    return memoryManager().isAddressInFastMemory(address);
 }
 
 Expected<PageCount, Memory::GrowFailReason> Memory::grow(PageCount delta)
@@ -422,14 +404,13 @@
     case MemoryMode::BoundsChecking: {
         RELEASE_ASSERT(maximum().bytes() != 0);
 
-        void* newMemory = Gigacage::tryAlignedMalloc(Gigacage::Primitive, WTF::pageSize(), desiredSize);
+        void* newMemory = Gigacage::tryAllocateZeroedVirtualPages(Gigacage::Primitive, desiredSize);
         if (!newMemory)
             return makeUnexpected(GrowFailReason::OutOfMemory);
 
         memcpy(newMemory, m_memory, m_size);
-        memset(static_cast<char*>(newMemory) + m_size, 0, desiredSize - m_size);
         if (m_memory)
-            Gigacage::alignedFree(Gigacage::Primitive, m_memory);
+            Gigacage::freeVirtualPages(Gigacage::Primitive, m_memory, m_size);
         m_memory = newMemory;
         m_mappedCapacity = desiredSize;
         m_size = desiredSize;
@@ -446,7 +427,6 @@
             dataLog("mprotect failed: ", strerror(errno), "\n");
             RELEASE_ASSERT_NOT_REACHED();
         }
-        commitZeroPages(startAddress, extraBytes);
         m_size = desiredSize;
         m_indexingMask = WTF::computeIndexingMask(desiredSize);
         return success();

Modified: trunk/Source/WTF/ChangeLog (227950 => 227951)


--- trunk/Source/WTF/ChangeLog	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/WTF/ChangeLog	2018-02-01 05:36:40 UTC (rev 227951)
@@ -1,3 +1,18 @@
+2018-01-31  Saam Barati  <sbar...@apple.com>
+
+        Replace tryLargeMemalignVirtual with tryLargeZeroedMemalignVirtual and use it to allocate large zeroed memory in Wasm
+        https://bugs.webkit.org/show_bug.cgi?id=182064
+        <rdar://problem/36840132>
+
+        Reviewed by Geoffrey Garen.
+
+        * wtf/Gigacage.cpp:
+        (Gigacage::tryAllocateZeroedVirtualPages):
+        (Gigacage::freeVirtualPages):
+        (Gigacage::tryAllocateVirtualPages): Deleted.
+        * wtf/Gigacage.h:
+        * wtf/OSAllocator.h:
+
 2018-01-31  Mark Lam  <mark....@apple.com>
 
         Fix some ARM64_32 build failures.

Modified: trunk/Source/WTF/wtf/Gigacage.cpp (227950 => 227951)


--- trunk/Source/WTF/wtf/Gigacage.cpp	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/WTF/wtf/Gigacage.cpp	2018-02-01 05:36:40 UTC (rev 227951)
@@ -41,14 +41,22 @@
     return FastMalloc::tryMalloc(size);
 }
 
-void* tryAllocateVirtualPages(Kind, size_t size)
+void* tryAllocateZeroedVirtualPages(Kind, size_t size)
 {
-    return OSAllocator::reserveUncommitted(size);
+    size = roundUpToMultipleOf(WTF::pageSize(), size);
+    void* result = OSAllocator::reserveAndCommit(size);
+#if !ASSERT_DISABLED
+    if (result) {
+        for (size_t i = 0; i < size / sizeof(uintptr_t); ++i)
+            ASSERT(static_cast<uintptr_t*>(result)[i] == 0);
+    }
+#endif
+    return result;
 }
 
 void freeVirtualPages(Kind, void* basePtr, size_t size)
 {
-    OSAllocator::releaseDecommitted(basePtr, size);
+    OSAllocator::decommitAndRelease(basePtr, size);
 }
 
 } // namespace Gigacage
@@ -93,9 +101,9 @@
     WTF::compilerFence();
 }
 
-void* tryAllocateVirtualPages(Kind kind, size_t size)
+void* tryAllocateZeroedVirtualPages(Kind kind, size_t size)
 {
-    void* result = bmalloc::api::tryLargeMemalignVirtual(WTF::pageSize(), size, bmalloc::heapKind(kind));
+    void* result = bmalloc::api::tryLargeZeroedMemalignVirtual(WTF::pageSize(), size, bmalloc::heapKind(kind));
     WTF::compilerFence();
     return result;
 }

Modified: trunk/Source/WTF/wtf/Gigacage.h (227950 => 227951)


--- trunk/Source/WTF/wtf/Gigacage.h	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/WTF/wtf/Gigacage.h	2018-02-01 05:36:40 UTC (rev 227951)
@@ -119,7 +119,7 @@
 WTF_EXPORT_PRIVATE void* tryMalloc(Kind, size_t size);
 inline void free(Kind, void* p) { fastFree(p); }
 
-WTF_EXPORT_PRIVATE void* tryAllocateVirtualPages(Kind, size_t size);
+WTF_EXPORT_PRIVATE void* tryAllocateZeroedVirtualPages(Kind, size_t size);
 WTF_EXPORT_PRIVATE void freeVirtualPages(Kind, void* basePtr, size_t size);
 
 } // namespace Gigacage
@@ -133,7 +133,7 @@
 WTF_EXPORT_PRIVATE void* tryMalloc(Kind, size_t);
 WTF_EXPORT_PRIVATE void free(Kind, void*);
 
-WTF_EXPORT_PRIVATE void* tryAllocateVirtualPages(Kind, size_t size);
+WTF_EXPORT_PRIVATE void* tryAllocateZeroedVirtualPages(Kind, size_t size);
 WTF_EXPORT_PRIVATE void freeVirtualPages(Kind, void* basePtr, size_t size);
 
 } // namespace Gigacage

Modified: trunk/Source/WTF/wtf/OSAllocator.h (227950 => 227951)


--- trunk/Source/WTF/wtf/OSAllocator.h	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/WTF/wtf/OSAllocator.h	2018-02-01 05:36:40 UTC (rev 227951)
@@ -42,7 +42,8 @@
 
     // These methods are symmetric; reserveUncommitted allocates VM in an uncommitted state,
     // releaseDecommitted should be called on a region of VM allocated by a single reservation,
-    // the memory must all currently be in a decommitted state.
+    // the memory must all currently be in a decommitted state. reserveUncommitted
+    // returns zeroed memory.
     WTF_EXPORT_PRIVATE static void* reserveUncommitted(size_t, Usage = UnknownUsage, bool writable = true, bool executable = false, bool includesGuardPages = false);
     WTF_EXPORT_PRIVATE static void releaseDecommitted(void*, size_t);
 

Modified: trunk/Source/bmalloc/ChangeLog (227950 => 227951)


--- trunk/Source/bmalloc/ChangeLog	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/ChangeLog	2018-02-01 05:36:40 UTC (rev 227951)
@@ -1,3 +1,47 @@
+2018-01-31  Saam Barati  <sbar...@apple.com>
+
+        Replace tryLargeMemalignVirtual with tryLargeZeroedMemalignVirtual and use it to allocate large zeroed memory in Wasm
+        https://bugs.webkit.org/show_bug.cgi?id=182064
+        <rdar://problem/36840132>
+
+        Reviewed by Geoffrey Garen.
+
+        This patch replaces the tryLargeMemalignVirtual API with tryLargeZeroedMemalignVirtual.
+        By doing that, we're able to remove the AllocationKind enum. To zero the memory,
+        tryLargeZeroedMemalignVirtual uses mmap(... MAP_ANON ...) over previously mmapped
+        memory. This both purges any resident memory in the virtual range and ensures
+        that the pages in the range are zeroed. Most OSes should implement this by taking a
+        page fault and zero-filling on first access. Therefore, this API returns pages
+        that will fault on first access; hence the 'Virtual' in the API's name.
+        This API differs from the old one in that its users need not call madvise
+        themselves: the memory is ready to go.
+
+        * bmalloc.xcodeproj/project.pbxproj:
+        * bmalloc/AllocationKind.h: Removed.
+        * bmalloc/DebugHeap.cpp:
+        (bmalloc::DebugHeap::memalignLarge):
+        (bmalloc::DebugHeap::freeLarge):
+        * bmalloc/DebugHeap.h:
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::splitAndAllocate):
+        (bmalloc::Heap::tryAllocateLarge):
+        (bmalloc::Heap::allocateLarge):
+        (bmalloc::Heap::shrinkLarge):
+        (bmalloc::Heap::deallocateLarge):
+        * bmalloc/Heap.h:
+        * bmalloc/IsoPage.cpp:
+        (bmalloc::IsoPageBase::allocatePageMemory):
+        * bmalloc/VMAllocate.h:
+        (bmalloc::vmZeroAndPurge):
+        * bmalloc/VMHeap.cpp:
+        (bmalloc::VMHeap::tryAllocateLargeChunk):
+        * bmalloc/VMHeap.h:
+        * bmalloc/bmalloc.cpp:
+        (bmalloc::api::tryLargeZeroedMemalignVirtual):
+        (bmalloc::api::freeLargeVirtual):
+        (bmalloc::api::tryLargeMemalignVirtual): Deleted.
+        * bmalloc/bmalloc.h:
+
 2018-01-19  Keith Miller  <keith_mil...@apple.com>
 
         HaveInternalSDK includes should be "#include?"

Deleted: trunk/Source/bmalloc/bmalloc/AllocationKind.h (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/AllocationKind.h	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/AllocationKind.h	2018-02-01 05:36:40 UTC (rev 227951)
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2017 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#pragma once
-
-namespace bmalloc {
-
-enum class AllocationKind {
-    Physical,
-    Virtual
-};
-
-} // namespace bmalloc
-

Modified: trunk/Source/bmalloc/bmalloc/DebugHeap.cpp (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/DebugHeap.cpp	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/DebugHeap.cpp	2018-02-01 05:36:40 UTC (rev 227951)
@@ -115,7 +115,7 @@
 // FIXME: This looks an awful lot like the code in wtf/Gigacage.cpp for large allocation.
 // https://bugs.webkit.org/show_bug.cgi?id=175086
 
-void* DebugHeap::memalignLarge(size_t alignment, size_t size, AllocationKind allocationKind)
+void* DebugHeap::memalignLarge(size_t alignment, size_t size)
 {
     alignment = roundUpToMultipleOf(m_pageSize, alignment);
     size = roundUpToMultipleOf(m_pageSize, size);
@@ -122,8 +122,6 @@
     void* result = tryVMAllocate(alignment, size);
     if (!result)
         return nullptr;
-    if (allocationKind == AllocationKind::Virtual)
-        vmDeallocatePhysicalPages(result, size);
     {
         std::lock_guard<std::mutex> locker(m_lock);
         m_sizeMap[result] = size;
@@ -131,7 +129,7 @@
     return result;
 }
 
-void DebugHeap::freeLarge(void* base, AllocationKind)
+void DebugHeap::freeLarge(void* base)
 {
     if (!base)
         return;

Modified: trunk/Source/bmalloc/bmalloc/DebugHeap.h (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/DebugHeap.h	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/DebugHeap.h	2018-02-01 05:36:40 UTC (rev 227951)
@@ -25,7 +25,6 @@
 
 #pragma once
 
-#include "AllocationKind.h"
 #include "StaticMutex.h"
 #include <mutex>
 #include <unordered_map>
@@ -45,8 +44,8 @@
     void* realloc(void*, size_t);
     void free(void*);
     
-    void* memalignLarge(size_t alignment, size_t, AllocationKind);
-    void freeLarge(void* base, AllocationKind);
+    void* memalignLarge(size_t alignment, size_t);
+    void freeLarge(void* base);
 
 private:
 #if BOS(DARWIN)

Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/Heap.cpp	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp	2018-02-01 05:36:40 UTC (rev 227951)
@@ -420,7 +420,7 @@
     }
 }
 
-LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t size, AllocationKind allocationKind)
+LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t size)
 {
     RELEASE_BASSERT(isActiveHeapKind(m_kind));
 
@@ -441,20 +441,10 @@
         next = pair.second;
     }
     
-    switch (allocationKind) {
-    case AllocationKind::Virtual:
-        if (range.physicalSize())
-            vmDeallocatePhysicalPagesSloppy(range.begin(), range.size());
-        break;
-        
-    case AllocationKind::Physical:
-        if (range.physicalSize() < range.size()) {
-            m_scavenger->scheduleIfUnderMemoryPressure(range.size());
-            
-            vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize());
-            range.setPhysicalSize(range.size());
-        }
-        break;
+    if (range.physicalSize() < range.size()) {
+        m_scavenger->scheduleIfUnderMemoryPressure(range.size());
+        vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize());
+        range.setPhysicalSize(range.size());
     }
     
     if (prev)
@@ -469,7 +459,7 @@
     return range;
 }
 
-void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size, AllocationKind allocationKind)
+void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size)
 {
     RELEASE_BASSERT(isActiveHeapKind(m_kind));
 
@@ -476,7 +466,7 @@
     BASSERT(isPowerOfTwo(alignment));
     
     if (m_debugHeap)
-        return m_debugHeap->memalignLarge(alignment, size, allocationKind);
+        return m_debugHeap->memalignLarge(alignment, size);
     
     m_scavenger->didStartGrowing();
     
@@ -495,7 +485,7 @@
         if (usingGigacage())
             return nullptr;
 
-        range = PerProcess<VMHeap>::get()->tryAllocateLargeChunk(alignment, size, allocationKind);
+        range = PerProcess<VMHeap>::get()->tryAllocateLargeChunk(alignment, size);
         if (!range)
             return nullptr;
         
@@ -504,12 +494,12 @@
         range = m_largeFree.remove(alignment, size);
     }
 
-    return splitAndAllocate(range, alignment, size, allocationKind).begin();
+    return splitAndAllocate(range, alignment, size).begin();
 }
 
-void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, AllocationKind allocationKind)
+void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
 {
-    void* result = tryAllocateLarge(lock, alignment, size, allocationKind);
+    void* result = tryAllocateLarge(lock, alignment, size);
     RELEASE_BASSERT(result);
     return result;
 }
@@ -530,18 +520,18 @@
 
     size_t size = m_largeAllocated.remove(object.begin());
     LargeRange range = LargeRange(object, size);
-    splitAndAllocate(range, alignment, newSize, AllocationKind::Physical);
+    splitAndAllocate(range, alignment, newSize);
 
     m_scavenger->schedule(size);
 }
 
-void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object, AllocationKind allocationKind)
+void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object)
 {
     if (m_debugHeap)
-        return m_debugHeap->freeLarge(object, allocationKind);
+        return m_debugHeap->freeLarge(object);
 
     size_t size = m_largeAllocated.remove(object);
-    m_largeFree.add(LargeRange(object, size, allocationKind == AllocationKind::Physical ? size : 0));
+    m_largeFree.add(LargeRange(object, size, size));
     m_scavenger->schedule(size);
 }
 

Modified: trunk/Source/bmalloc/bmalloc/Heap.h (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/Heap.h	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/Heap.h	2018-02-01 05:36:40 UTC (rev 227951)
@@ -26,7 +26,6 @@
 #ifndef Heap_h
 #define Heap_h
 
-#include "AllocationKind.h"
 #include "BumpRange.h"
 #include "Chunk.h"
 #include "HeapKind.h"
@@ -67,9 +66,9 @@
     void derefSmallLine(std::lock_guard<StaticMutex>&, Object, LineCache&);
     void deallocateLineCache(std::lock_guard<StaticMutex>&, LineCache&);
 
-    void* allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t, AllocationKind = AllocationKind::Physical);
-    void* tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t, AllocationKind = AllocationKind::Physical);
-    void deallocateLarge(std::lock_guard<StaticMutex>&, void*, AllocationKind = AllocationKind::Physical);
+    void* allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t);
+    void* tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t);
+    void deallocateLarge(std::lock_guard<StaticMutex>&, void*);
 
     bool isLarge(std::lock_guard<StaticMutex>&, void*);
     size_t largeSize(std::lock_guard<StaticMutex>&, void*);
@@ -110,7 +109,7 @@
     void mergeLargeLeft(EndTag*&, BeginTag*&, Range&, bool& inVMHeap);
     void mergeLargeRight(EndTag*&, BeginTag*&, Range&, bool& inVMHeap);
 
-    LargeRange splitAndAllocate(LargeRange&, size_t alignment, size_t, AllocationKind);
+    LargeRange splitAndAllocate(LargeRange&, size_t alignment, size_t);
 
     HeapKind m_kind;
     

Modified: trunk/Source/bmalloc/bmalloc/IsoPage.cpp (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/IsoPage.cpp	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/IsoPage.cpp	2018-02-01 05:36:40 UTC (rev 227951)
@@ -32,7 +32,7 @@
 
 void* IsoPageBase::allocatePageMemory()
 {
-    return PerProcess<VMHeap>::get()->tryAllocateLargeChunk(pageSize, pageSize, AllocationKind::Physical).begin();
+    return PerProcess<VMHeap>::get()->tryAllocateLargeChunk(pageSize, pageSize).begin();
 }
 
 } // namespace bmalloc

Modified: trunk/Source/bmalloc/bmalloc/VMAllocate.h (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/VMAllocate.h	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/VMAllocate.h	2018-02-01 05:36:40 UTC (rev 227951)
@@ -146,6 +146,15 @@
     mprotect(p, vmSize, PROT_NONE);
 }
 
+inline void vmZeroAndPurge(void* p, size_t vmSize)
+{
+    vmValidate(p, vmSize);
+    // MAP_ANON guarantees the memory is zeroed. This will also cause
+    // page faults on accesses to this range following this call.
+    void* result = mmap(p, vmSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | BMALLOC_NORESERVE, BMALLOC_VM_TAG, 0);
+    RELEASE_BASSERT(result == p);
+}
+
 // Allocates vmSize bytes at a specified power-of-two alignment.
 // Use this function to create maskable memory regions.
 

Modified: trunk/Source/bmalloc/bmalloc/VMHeap.cpp (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/VMHeap.cpp	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/VMHeap.cpp	2018-02-01 05:36:40 UTC (rev 227951)
@@ -33,7 +33,7 @@
 {
 }
 
-LargeRange VMHeap::tryAllocateLargeChunk(size_t alignment, size_t size, AllocationKind allocationKind)
+LargeRange VMHeap::tryAllocateLargeChunk(size_t alignment, size_t size)
 {
     // We allocate VM in aligned multiples to increase the chances that
     // the OS will provide contiguous ranges that we can merge.
@@ -51,9 +51,6 @@
     if (!memory)
         return LargeRange();
     
-    if (allocationKind == AllocationKind::Virtual)
-        vmDeallocatePhysicalPagesSloppy(memory, size);
-
     Chunk* chunk = static_cast<Chunk*>(memory);
     
 #if BOS(DARWIN)

Modified: trunk/Source/bmalloc/bmalloc/VMHeap.h (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/VMHeap.h	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/VMHeap.h	2018-02-01 05:36:40 UTC (rev 227951)
@@ -26,7 +26,6 @@
 #ifndef VMHeap_h
 #define VMHeap_h
 
-#include "AllocationKind.h"
 #include "Chunk.h"
 #include "FixedVector.h"
 #include "HeapKind.h"
@@ -49,7 +48,7 @@
 public:
     VMHeap(std::lock_guard<StaticMutex>&);
     
-    LargeRange tryAllocateLargeChunk(size_t alignment, size_t, AllocationKind);
+    LargeRange tryAllocateLargeChunk(size_t alignment, size_t);
 };
 
 } // namespace bmalloc

Modified: trunk/Source/bmalloc/bmalloc/bmalloc.cpp (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/bmalloc.cpp	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/bmalloc.cpp	2018-02-01 05:36:40 UTC (rev 227951)
@@ -39,12 +39,26 @@
     free(object, kind);
 }
 
-void* tryLargeMemalignVirtual(size_t alignment, size_t size, HeapKind kind)
+void* tryLargeZeroedMemalignVirtual(size_t alignment, size_t size, HeapKind kind)
 {
+    BASSERT(isPowerOfTwo(alignment));
+
+    size_t pageSize = vmPageSize();
+    alignment = roundUpToMultipleOf(pageSize, alignment);
+    size = roundUpToMultipleOf(pageSize, size);
+
     kind = mapToActiveHeapKind(kind);
     Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
-    std::lock_guard<StaticMutex> lock(Heap::mutex());
-    return heap.tryAllocateLarge(lock, alignment, size, AllocationKind::Virtual);
+
+    void* result;
+    {
+        std::lock_guard<StaticMutex> lock(Heap::mutex());
+        result = heap.tryAllocateLarge(lock, alignment, size);
+    }
+
+    if (result)
+        vmZeroAndPurge(result, size);
+    return result;
 }
 
 void freeLargeVirtual(void* object, HeapKind kind)
@@ -52,7 +66,7 @@
     kind = mapToActiveHeapKind(kind);
     Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
     std::lock_guard<StaticMutex> lock(Heap::mutex());
-    heap.deallocateLarge(lock, object, AllocationKind::Virtual);
+    heap.deallocateLarge(lock, object);
 }
 
 void scavenge()

Modified: trunk/Source/bmalloc/bmalloc/bmalloc.h (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc/bmalloc.h	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc/bmalloc.h	2018-02-01 05:36:40 UTC (rev 227951)
@@ -69,8 +69,11 @@
     return Cache::reallocate(kind, object, newSize);
 }
 
-// Returns null for failure
-BEXPORT void* tryLargeMemalignVirtual(size_t alignment, size_t size, HeapKind kind = HeapKind::Primary);
+// Returns null on failure.
+// This API gives you zeroed pages that are ready to be used. These pages
+// will page fault on first access. The returned memory initially consumes
+// only virtual address space, not `size` bytes of physical memory.
+BEXPORT void* tryLargeZeroedMemalignVirtual(size_t alignment, size_t size, HeapKind kind = HeapKind::Primary);
 
 inline void free(void* object, HeapKind kind = HeapKind::Primary)
 {

Modified: trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj (227950 => 227951)


--- trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj	2018-02-01 04:27:12 UTC (rev 227950)
+++ trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj	2018-02-01 05:36:40 UTC (rev 227951)
@@ -21,7 +21,6 @@
 /* End PBXAggregateTarget section */
 
 /* Begin PBXBuildFile section */
-		0F3DA0141F267AB800342C08 /* AllocationKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3DA0131F267AB800342C08 /* AllocationKind.h */; settings = {ATTRIBUTES = (Private, ); }; };
 		0F5167741FAD685C008236A8 /* bmalloc.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5167731FAD6852008236A8 /* bmalloc.cpp */; };
 		0F5549EF1FB54704007FF75A /* IsoPage.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5549EE1FB54701007FF75A /* IsoPage.cpp */; };
 		0F5BF1471F22A8B10029D91D /* HeapKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5BF1461F22A8B10029D91D /* HeapKind.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -173,7 +172,6 @@
 /* End PBXCopyFilesBuildPhase section */
 
 /* Begin PBXFileReference section */
-		0F3DA0131F267AB800342C08 /* AllocationKind.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = AllocationKind.h; path = bmalloc/AllocationKind.h; sourceTree = "<group>"; };
 		0F5167731FAD6852008236A8 /* bmalloc.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = bmalloc.cpp; path = bmalloc/bmalloc.cpp; sourceTree = "<group>"; };
 		0F5549EE1FB54701007FF75A /* IsoPage.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = IsoPage.cpp; path = bmalloc/IsoPage.cpp; sourceTree = "<group>"; };
 		0F5BF1461F22A8B10029D91D /* HeapKind.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = HeapKind.h; path = bmalloc/HeapKind.h; sourceTree = "<group>"; };
@@ -468,7 +466,6 @@
 		14D9DB4E17F2866E00EAAB79 /* heap */ = {
 			isa = PBXGroup;
 			children = (
-				0F3DA0131F267AB800342C08 /* AllocationKind.h */,
 				140FA00219CE429C00FFD3C8 /* BumpRange.h */,
 				147DC6E21CA5B70B00724E8D /* Chunk.h */,
 				142B44341E2839E7001DA6E9 /* DebugHeap.cpp */,
@@ -614,7 +611,6 @@
 				14DD789318F48D0F00950702 /* ObjectType.h in Headers */,
 				14DD78CB18F48D7500950702 /* PerProcess.h in Headers */,
 				0F7EB8261F9541B000F1ABCB /* IsoAllocatorInlines.h in Headers */,
-				0F3DA0141F267AB800342C08 /* AllocationKind.h in Headers */,
 				14DD78CC18F48D7500950702 /* PerThread.h in Headers */,
 				14DD78CD18F48D7500950702 /* Range.h in Headers */,
 				0F7EB8441F9541B000F1ABCB /* FreeList.h in Headers */,