Diff
Modified: trunk/Source/bmalloc/CMakeLists.txt (261666 => 261667)
--- trunk/Source/bmalloc/CMakeLists.txt 2020-05-14 00:45:45 UTC (rev 261666)
+++ trunk/Source/bmalloc/CMakeLists.txt 2020-05-14 01:09:19 UTC (rev 261667)
@@ -29,6 +29,7 @@
bmalloc/Logging.cpp
bmalloc/Mutex.cpp
bmalloc/ObjectType.cpp
+ bmalloc/ObjectTypeTable.cpp
bmalloc/PerProcess.cpp
bmalloc/Scavenger.cpp
bmalloc/bmalloc.cpp
@@ -110,6 +111,7 @@
bmalloc/Mutex.h
bmalloc/Object.h
bmalloc/ObjectType.h
+ bmalloc/ObjectTypeTable.h
bmalloc/Packed.h
bmalloc/PerHeapKind.h
bmalloc/PerProcess.h
Modified: trunk/Source/bmalloc/ChangeLog (261666 => 261667)
--- trunk/Source/bmalloc/ChangeLog 2020-05-14 00:45:45 UTC (rev 261666)
+++ trunk/Source/bmalloc/ChangeLog 2020-05-14 01:09:19 UTC (rev 261667)
@@ -1,3 +1,61 @@
+2020-05-13 Yusuke Suzuki <ysuz...@apple.com>
+
+ [bmalloc] Introduce lock-less ObjectType query
+ https://bugs.webkit.org/show_bug.cgi?id=211809
+
+ Reviewed by Mark Lam.
+
+ This patch introduces ObjectTypeTable, which allows lock-less ObjectType query for Chunk*.
+ It has bit-vector to store ObjectType per address. And each bit represents 1MB of VA region since
+ Chunk*'s size is at least 1MB and ObjectType is 1bit data. Every time we extend this bit-vector
+ to support larger VA region, we do not free the old bit-vector. Since we always allocate power-of-2
+ sized bit-vector, # of extension is limited and it does not waste much memory because Chunk's size
+ is large enough (1MB). Since each 4KB page on macOS can represent a bit-vector for 32GB VA region,
+ in practice, this extension almost never happens. I verified that 4KB page can handle memory
+ allocations in JetStream2 and Gmail.
+
+ * CMakeLists.txt:
+ * bmalloc.xcodeproj/project.pbxproj:
+ * bmalloc/Algorithm.h:
+ (bmalloc::roundUpToPowerOfTwo):
+ * bmalloc/Deallocator.cpp:
+ (bmalloc::Deallocator::deallocateSlowCase):
+ * bmalloc/Heap.cpp:
+ (bmalloc::Heap::freeableMemory):
+ (bmalloc::Heap::decommitLargeRange):
+ (bmalloc::Heap::scavenge):
+ (bmalloc::Heap::scavengeToHighWatermark):
+ (bmalloc::Heap::allocateSmallChunk):
+ (bmalloc::Heap::deallocateSmallChunk):
+ (bmalloc::Heap::deallocateSmallLine):
+ (bmalloc::Heap::splitAndAllocate):
+ (bmalloc::Heap::isLarge): Deleted.
+ * bmalloc/Heap.h:
+ (bmalloc::Heap::isLarge):
+ * bmalloc/ObjectType.cpp:
+ (bmalloc::objectType):
+ * bmalloc/ObjectTypeTable.cpp: Added.
+ (bmalloc::ObjectTypeTable::set):
+ * bmalloc/ObjectTypeTable.h: Added.
+ (bmalloc::ObjectTypeTable::convertToIndex):
+ (bmalloc::ObjectTypeTable::Bits::Bits):
+ (bmalloc::ObjectTypeTable::Bits::previous const):
+ (bmalloc::ObjectTypeTable::Bits::begin const):
+ (bmalloc::ObjectTypeTable::Bits::end const):
+ (bmalloc::ObjectTypeTable::Bits::count const):
+ (bmalloc::ObjectTypeTable::Bits::sizeInBytes const):
+ (bmalloc::ObjectTypeTable::Bits::words const):
+ (bmalloc::ObjectTypeTable::Bits::words):
+ (bmalloc::ObjectTypeTable::ObjectTypeTable):
+ (bmalloc::ObjectTypeTable::get):
+ (bmalloc::ObjectTypeTable::Bits::get):
+ (bmalloc::ObjectTypeTable::Bits::set):
+ (bmalloc::ObjectTypeTable::Bits::wordForIndex):
+ * bmalloc/Scavenger.cpp:
+ (bmalloc::Scavenger::scavenge):
+ (bmalloc::Scavenger::partialScavenge):
+ (bmalloc::Scavenger::freeableMemory):
+
2020-05-11 Basuke Suzuki <basuke.suz...@sony.com>
[bmalloc][WTF] Add computing memory size implementation for FreeBSD
Modified: trunk/Source/bmalloc/bmalloc/Algorithm.h (261666 => 261667)
--- trunk/Source/bmalloc/bmalloc/Algorithm.h 2020-05-14 00:45:45 UTC (rev 261666)
+++ trunk/Source/bmalloc/bmalloc/Algorithm.h 2020-05-14 01:09:19 UTC (rev 261667)
@@ -219,6 +219,19 @@
return ctzConstexpr(t);
}
+// From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+constexpr uint32_t roundUpToPowerOfTwo(uint32_t v)
+{
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v++;
+ return v;
+}
+
} // namespace bmalloc
#endif // Algorithm_h
Modified: trunk/Source/bmalloc/bmalloc/Deallocator.cpp (261666 => 261667)
--- trunk/Source/bmalloc/bmalloc/Deallocator.cpp 2020-05-14 00:45:45 UTC (rev 261666)
+++ trunk/Source/bmalloc/bmalloc/Deallocator.cpp 2020-05-14 01:09:19 UTC (rev 261667)
@@ -68,14 +68,16 @@
if (!object)
return;
- UniqueLockHolder lock(Heap::mutex());
- if (m_heap.isLarge(lock, object)) {
+ if (m_heap.isLarge(object)) {
+ UniqueLockHolder lock(Heap::mutex());
m_heap.deallocateLarge(lock, object);
return;
}
- if (m_objectLog.size() == m_objectLog.capacity())
+ if (m_objectLog.size() == m_objectLog.capacity()) {
+ UniqueLockHolder lock(Heap::mutex());
processObjectLog(lock);
+ }
m_objectLog.push(object);
}
Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (261666 => 261667)
--- trunk/Source/bmalloc/bmalloc/Heap.cpp 2020-05-14 00:45:45 UTC (rev 261666)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp 2020-05-14 01:09:19 UTC (rev 261667)
@@ -84,7 +84,7 @@
return Gigacage::size(gigacageKind(m_kind));
}
-size_t Heap::freeableMemory(const LockHolder&)
+size_t Heap::freeableMemory(UniqueLockHolder&)
{
return m_freeableMemory;
}
@@ -101,7 +101,7 @@
m_condition.notify_all();
}
-void Heap::decommitLargeRange(const LockHolder&, LargeRange& range, BulkDecommit& decommitter)
+void Heap::decommitLargeRange(UniqueLockHolder&, LargeRange& range, BulkDecommit& decommitter)
{
m_footprint -= range.totalPhysicalSize();
m_freeableMemory -= range.totalPhysicalSize();
@@ -117,9 +117,9 @@
}
#if BUSE(PARTIAL_SCAVENGE)
-void Heap::scavenge(const LockHolder& lock, BulkDecommit& decommitter)
+void Heap::scavenge(UniqueLockHolder& lock, BulkDecommit& decommitter)
#else
-void Heap::scavenge(const LockHolder& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
+void Heap::scavenge(UniqueLockHolder& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
#endif
{
for (auto& list : m_freePages) {
@@ -150,7 +150,7 @@
for (auto& list : m_chunkCache) {
while (!list.isEmpty())
- deallocateSmallChunk(list.pop(), &list - &m_chunkCache[0]);
+ deallocateSmallChunk(lock, list.pop(), &list - &m_chunkCache[0]);
}
for (LargeRange& range : m_largeFree) {
@@ -172,7 +172,7 @@
}
#if BUSE(PARTIAL_SCAVENGE)
-void Heap::scavengeToHighWatermark(const LockHolder& lock, BulkDecommit& decommitter)
+void Heap::scavengeToHighWatermark(UniqueLockHolder& lock, BulkDecommit& decommitter)
{
void* newHighWaterMark = nullptr;
for (LargeRange& range : m_largeFree) {
@@ -213,7 +213,7 @@
Chunk* chunk = new (memory) Chunk(pageSize);
- m_objectTypes.set(chunk, ObjectType::Small);
+ m_objectTypes.set(lock, chunk, ObjectType::Small);
size_t accountedInFreeable = 0;
forEachPage(chunk, pageSize, [&](SmallPage* page) {
@@ -244,9 +244,9 @@
m_freePages[pageClass].push(chunk);
}
-void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
+void Heap::deallocateSmallChunk(UniqueLockHolder& lock, Chunk* chunk, size_t pageClass)
{
- m_objectTypes.set(chunk, ObjectType::Large);
+ m_objectTypes.set(lock, chunk, ObjectType::Large);
size_t size = m_largeAllocated.remove(chunk);
size_t totalPhysicalSize = size;
@@ -357,7 +357,7 @@
m_freePages[pageClass].remove(chunk);
if (!m_chunkCache[pageClass].isEmpty())
- deallocateSmallChunk(m_chunkCache[pageClass].pop(), pageClass);
+ deallocateSmallChunk(lock, m_chunkCache[pageClass].pop(), pageClass);
m_chunkCache[pageClass].push(chunk);
}
@@ -495,7 +495,7 @@
}
}
-LargeRange Heap::splitAndAllocate(UniqueLockHolder&, LargeRange& range, size_t alignment, size_t size)
+LargeRange Heap::splitAndAllocate(UniqueLockHolder& lock, LargeRange& range, size_t alignment, size_t size)
{
RELEASE_BASSERT(isActiveHeapKind(m_kind));
@@ -537,7 +537,7 @@
m_largeFree.add(next);
}
- m_objectTypes.set(Chunk::get(range.begin()), ObjectType::Large);
+ m_objectTypes.set(lock, Chunk::get(range.begin()), ObjectType::Large);
m_largeAllocated.set(range.begin(), range.size());
return range;
@@ -621,11 +621,6 @@
return LargeRange(memory, size, 0, 0);
}
-bool Heap::isLarge(UniqueLockHolder&, void* object)
-{
- return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
-}
-
size_t Heap::largeSize(UniqueLockHolder&, void* object)
{
return m_largeAllocated.get(object);
Modified: trunk/Source/bmalloc/bmalloc/Heap.h (261666 => 261667)
--- trunk/Source/bmalloc/bmalloc/Heap.h 2020-05-14 00:45:45 UTC (rev 261666)
+++ trunk/Source/bmalloc/bmalloc/Heap.h 2020-05-14 01:09:19 UTC (rev 261667)
@@ -35,6 +35,7 @@
#include "Map.h"
#include "Mutex.h"
#include "Object.h"
+#include "ObjectTypeTable.h"
#include "PerHeapKind.h"
#include "PerProcess.h"
#include "PhysicalPageMap.h"
@@ -69,19 +70,19 @@
void* allocateLarge(UniqueLockHolder&, size_t alignment, size_t, FailureAction);
void deallocateLarge(UniqueLockHolder&, void*);
- bool isLarge(UniqueLockHolder&, void*);
+ bool isLarge(void*);
size_t largeSize(UniqueLockHolder&, void*);
void shrinkLarge(UniqueLockHolder&, const Range&, size_t);
#if BUSE(PARTIAL_SCAVENGE)
- void scavengeToHighWatermark(const LockHolder&, BulkDecommit&);
- void scavenge(const LockHolder&, BulkDecommit&);
+ void scavengeToHighWatermark(UniqueLockHolder&, BulkDecommit&);
+ void scavenge(UniqueLockHolder&, BulkDecommit&);
#else
- void scavenge(const LockHolder&, BulkDecommit&, size_t& deferredDecommits);
+ void scavenge(UniqueLockHolder&, BulkDecommit&, size_t& deferredDecommits);
#endif
- void scavenge(const LockHolder&, BulkDecommit&, size_t& freed, size_t goal);
+ void scavenge(UniqueLockHolder&, BulkDecommit&, size_t& freed, size_t goal);
- size_t freeableMemory(const LockHolder&);
+ size_t freeableMemory(UniqueLockHolder&);
size_t footprint();
void externalDecommit(void* ptr, size_t);
@@ -92,7 +93,7 @@
void markAllLargeAsEligibile(const LockHolder&);
private:
- void decommitLargeRange(const LockHolder&, LargeRange&, BulkDecommit&);
+ void decommitLargeRange(UniqueLockHolder&, LargeRange&, BulkDecommit&);
struct LargeObjectHash {
static unsigned hash(void* key)
@@ -117,7 +118,7 @@
void deallocateSmallLine(UniqueLockHolder&, Object, LineCache&);
void allocateSmallChunk(UniqueLockHolder&, size_t pageClass, FailureAction);
- void deallocateSmallChunk(Chunk*, size_t pageClass);
+ void deallocateSmallChunk(UniqueLockHolder&, Chunk*, size_t pageClass);
LargeRange tryAllocateLargeChunk(size_t alignment, size_t);
LargeRange splitAndAllocate(UniqueLockHolder&, LargeRange&, size_t alignment, size_t);
@@ -135,7 +136,7 @@
Map<void*, size_t, LargeObjectHash> m_largeAllocated;
LargeMap m_largeFree;
- Map<Chunk*, ObjectType, ChunkHash> m_objectTypes;
+ ObjectTypeTable m_objectTypes;
Scavenger* m_scavenger { nullptr };
@@ -168,6 +169,11 @@
deallocateSmallLine(lock, object, lineCache);
}
+inline bool Heap::isLarge(void* object)
+{
+ return m_objectTypes.get(Object(object).chunk()) == ObjectType::Large;
+}
+
} // namespace bmalloc
#endif // Heap_h
Modified: trunk/Source/bmalloc/bmalloc/ObjectType.cpp (261666 => 261667)
--- trunk/Source/bmalloc/bmalloc/ObjectType.cpp 2020-05-14 00:45:45 UTC (rev 261666)
+++ trunk/Source/bmalloc/bmalloc/ObjectType.cpp 2020-05-14 01:09:19 UTC (rev 261667)
@@ -38,8 +38,7 @@
if (!object)
return ObjectType::Small;
- UniqueLockHolder lock(Heap::mutex());
- if (heap.isLarge(lock, object))
+ if (heap.isLarge(object))
return ObjectType::Large;
}
Added: trunk/Source/bmalloc/bmalloc/ObjectTypeTable.cpp (0 => 261667)
--- trunk/Source/bmalloc/bmalloc/ObjectTypeTable.cpp (rev 0)
+++ trunk/Source/bmalloc/bmalloc/ObjectTypeTable.cpp 2020-05-14 01:09:19 UTC (rev 261667)
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2020 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ObjectTypeTable.h"
+
+#include "VMAllocate.h"
+
+namespace bmalloc {
+
+ObjectTypeTable::Bits sentinelBits { nullptr, 0, 0 };
+
+void ObjectTypeTable::set(UniqueLockHolder&, Chunk* chunk, ObjectType objectType)
+{
+ unsigned index = convertToIndex(chunk);
+ Bits* bits = m_bits;
+ if (!(bits->begin() <= index && index < bits->end())) {
+ unsigned newBegin = 0;
+ unsigned newEnd = 0;
+ if (bits == &sentinelBits) {
+ // This is initial allocation of ObjectTypeTable. In this case, it could be possible that for the first registration,
+ // some VAs are already allocated for a different purpose, and later they will be reused for bmalloc. In that case,
+ // soon, we will see a smaller index request than this initial one. We subtract a 128MB offset to the initial newBegin
+ // to cover such patterns without extending table too quickly.
+ // soon, we will see a smaller index request than this initial one. We subtract a 128MB offset from the initial newBegin
+ // to cover such patterns without extending the table too quickly.
+ newBegin = std::min<unsigned>(index, index - ObjectTypeTable::Bits::bitCountPerWord * 4);
+ newEnd = index + 1;
+ } else if (index < bits->begin()) {
+ BASSERT(bits->begin());
+ BASSERT(bits->end());
+ newBegin = std::min<unsigned>(index, bits->begin() - bits->count());
+ newEnd = bits->end();
+ } else {
+ BASSERT(bits->begin());
+ BASSERT(bits->end());
+ newBegin = bits->begin();
+ newEnd = std::max<unsigned>(index + 1, bits->end() + bits->count());
+ }
+ newBegin = static_cast<unsigned>(roundDownToMultipleOf<size_t>(ObjectTypeTable::Bits::bitCountPerWord, newBegin));
+ BASSERT(newEnd > newBegin);
+
+ unsigned count = newEnd - newBegin;
+ size_t size = vmSize(sizeof(Bits) + (roundUpToMultipleOf<size_t>(ObjectTypeTable::Bits::bitCountPerWord, count) / 8));
+ RELEASE_BASSERT(size <= 0x80000000U); // Too large bitvector, out-of-memory.
+ size = roundUpToPowerOfTwo(size);
+ newEnd = newBegin + ((size - sizeof(Bits)) / sizeof(ObjectTypeTable::Bits::WordType)) * ObjectTypeTable::Bits::bitCountPerWord;
+ BASSERT(newEnd > newBegin);
+ void* allocated = vmAllocate(size);
+ memset(allocated, 0, size);
+ auto* newBits = new (allocated) Bits(bits, newBegin, newEnd);
+
+ memcpy(newBits->wordForIndex(bits->begin()), bits->words(), bits->sizeInBytes());
+#if !defined(NDEBUG)
+ for (unsigned index = bits->begin(); index < bits->end(); ++index)
+ BASSERT(bits->get(index) == newBits->get(index));
+#endif
+ std::atomic_thread_fence(std::memory_order_seq_cst); // Ensure the new table is fully initialized before it becomes visible to other threads, since ObjectTypeTable::get does not take a lock.
+ m_bits = newBits;
+ bits = newBits;
+ }
+ bool value = !!static_cast<std::underlying_type_t<ObjectType>>(objectType);
+ BASSERT(static_cast<ObjectType>(value) == objectType);
+ bits->set(index, value);
+}
+
+} // namespace bmalloc
Added: trunk/Source/bmalloc/bmalloc/ObjectTypeTable.h (0 => 261667)
--- trunk/Source/bmalloc/bmalloc/ObjectTypeTable.h (rev 0)
+++ trunk/Source/bmalloc/bmalloc/ObjectTypeTable.h 2020-05-14 01:09:19 UTC (rev 261667)
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2020 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Mutex.h"
+#include "ObjectType.h"
+#include "Sizes.h"
+
+namespace bmalloc {
+
+class Chunk;
+
+// Querying ObjectType for Chunk without locking.
+class ObjectTypeTable {
+public:
+ ObjectTypeTable();
+
+ static constexpr unsigned shiftAmount = 20;
+ static_assert((1ULL << shiftAmount) == chunkSize);
+ static_assert((BOS_EFFECTIVE_ADDRESS_WIDTH - shiftAmount) <= 32);
+
+ class Bits;
+
+ ObjectType get(Chunk*);
+ void set(UniqueLockHolder&, Chunk*, ObjectType);
+
+private:
+ static unsigned convertToIndex(Chunk* chunk)
+ {
+ uintptr_t address = reinterpret_cast<uintptr_t>(chunk);
+ BASSERT(!(address & (~chunkMask)));
+ return static_cast<unsigned>(address >> shiftAmount);
+ }
+
+ Bits* m_bits;
+};
+
+class ObjectTypeTable::Bits {
+public:
+ using WordType = unsigned;
+ static constexpr unsigned bitCountPerWord = sizeof(WordType) * 8;
+ static constexpr WordType one = 1;
+ constexpr Bits(Bits* previous, unsigned begin, unsigned end)
+ : m_previous(previous)
+ , m_begin(begin)
+ , m_end(end)
+ {
+ }
+
+ bool get(unsigned index);
+ void set(unsigned index, bool);
+
+ Bits* previous() const { return m_previous; }
+ unsigned begin() const { return m_begin; }
+ unsigned end() const { return m_end; }
+ unsigned count() const { return m_end - m_begin; }
+ unsigned sizeInBytes() const { return count() / 8; }
+
+ const WordType* words() const { return const_cast<Bits*>(this)->words(); }
+ WordType* words() { return reinterpret_cast<WordType*>(reinterpret_cast<uintptr_t>(this) + sizeof(Bits)); }
+
+ WordType* wordForIndex(unsigned);
+
+private:
+ Bits* m_previous { nullptr }; // Keeping the previous Bits* just to suppress Leaks warnings.
+ unsigned m_begin { 0 };
+ unsigned m_end { 0 };
+};
+static_assert(!(sizeof(ObjectTypeTable::Bits) % sizeof(ObjectTypeTable::Bits::WordType)));
+
+extern BEXPORT ObjectTypeTable::Bits sentinelBits;
+
+inline ObjectTypeTable::ObjectTypeTable()
+ : m_bits(&sentinelBits)
+{
+}
+
+inline ObjectType ObjectTypeTable::get(Chunk* chunk)
+{
+ Bits* bits = m_bits;
+ unsigned index = convertToIndex(chunk);
+ BASSERT(bits);
+ if (bits->begin() <= index && index < bits->end())
+ return static_cast<ObjectType>(bits->get(index));
+ return { };
+}
+
+inline bool ObjectTypeTable::Bits::get(unsigned index)
+{
+ unsigned n = index - begin();
+ return words()[n / bitCountPerWord] & (one << (n % bitCountPerWord));
+}
+
+inline void ObjectTypeTable::Bits::set(unsigned index, bool value)
+{
+ unsigned n = index - begin();
+ if (value)
+ words()[n / bitCountPerWord] |= (one << (n % bitCountPerWord));
+ else
+ words()[n / bitCountPerWord] &= ~(one << (n % bitCountPerWord));
+}
+
+inline ObjectTypeTable::Bits::WordType* ObjectTypeTable::Bits::wordForIndex(unsigned index)
+{
+ unsigned n = index - begin();
+ return &words()[n / bitCountPerWord];
+}
+
+} // namespace bmalloc
Modified: trunk/Source/bmalloc/bmalloc/Scavenger.cpp (261666 => 261667)
--- trunk/Source/bmalloc/bmalloc/Scavenger.cpp 2020-05-14 00:45:45 UTC (rev 261666)
+++ trunk/Source/bmalloc/bmalloc/Scavenger.cpp 2020-05-14 01:09:19 UTC (rev 261667)
@@ -223,7 +223,7 @@
#if !BUSE(PARTIAL_SCAVENGE)
size_t deferredDecommits = 0;
#endif
- LockHolder lock(Heap::mutex());
+ UniqueLockHolder lock(Heap::mutex());
for (unsigned i = numHeaps; i--;) {
if (!isActiveHeapKind(static_cast<HeapKind>(i)))
continue;
@@ -297,7 +297,7 @@
BulkDecommit decommitter;
{
PrintTime printTime("\npartialScavenge under lock time");
- LockHolder lock(Heap::mutex());
+ UniqueLockHolder lock(Heap::mutex());
for (unsigned i = numHeaps; i--;) {
if (!isActiveHeapKind(static_cast<HeapKind>(i)))
continue;
@@ -355,7 +355,7 @@
{
size_t result = 0;
{
- LockHolder lock(Heap::mutex());
+ UniqueLockHolder lock(Heap::mutex());
for (unsigned i = numHeaps; i--;) {
if (!isActiveHeapKind(static_cast<HeapKind>(i)))
continue;
Modified: trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj (261666 => 261667)
--- trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj 2020-05-14 00:45:45 UTC (rev 261666)
+++ trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj 2020-05-14 01:09:19 UTC (rev 261667)
@@ -139,6 +139,8 @@
DE8B13B321CC5D9F00A63FCD /* BVMTags.h in Headers */ = {isa = PBXBuildFile; fileRef = DE8B13B221CC5D9F00A63FCD /* BVMTags.h */; settings = {ATTRIBUTES = (Private, ); }; };
E31E74802238CA5C005D084A /* StaticPerProcess.h in Headers */ = {isa = PBXBuildFile; fileRef = E31E747F2238CA5B005D084A /* StaticPerProcess.h */; settings = {ATTRIBUTES = (Private, ); }; };
E328D84D23CEB38900545B18 /* Packed.h in Headers */ = {isa = PBXBuildFile; fileRef = E328D84C23CEB38900545B18 /* Packed.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ E378A9DF246B68720029C2BB /* ObjectTypeTable.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E378A9DE246B686A0029C2BB /* ObjectTypeTable.cpp */; };
+ E378A9E0246B68750029C2BB /* ObjectTypeTable.h in Headers */ = {isa = PBXBuildFile; fileRef = E378A9DD246B686A0029C2BB /* ObjectTypeTable.h */; settings = {ATTRIBUTES = (Private, ); }; };
E3A413C9226061140037F470 /* IsoSharedPageInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = E3A413C8226061140037F470 /* IsoSharedPageInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
E3F24402225D2C0100A0E0C3 /* IsoSharedPage.h in Headers */ = {isa = PBXBuildFile; fileRef = E3F24401225D2C0100A0E0C3 /* IsoSharedPage.h */; settings = {ATTRIBUTES = (Private, ); }; };
E3F24404225D2C7600A0E0C3 /* IsoSharedPage.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E3F24403225D2C7600A0E0C3 /* IsoSharedPage.cpp */; };
@@ -293,6 +295,8 @@
DE8B13B221CC5D9F00A63FCD /* BVMTags.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = BVMTags.h; path = bmalloc/BVMTags.h; sourceTree = "<group>"; };
E31E747F2238CA5B005D084A /* StaticPerProcess.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = StaticPerProcess.h; path = bmalloc/StaticPerProcess.h; sourceTree = "<group>"; };
E328D84C23CEB38900545B18 /* Packed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Packed.h; path = bmalloc/Packed.h; sourceTree = "<group>"; };
+ E378A9DD246B686A0029C2BB /* ObjectTypeTable.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = ObjectTypeTable.h; path = bmalloc/ObjectTypeTable.h; sourceTree = "<group>"; };
+ E378A9DE246B686A0029C2BB /* ObjectTypeTable.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = ObjectTypeTable.cpp; path = bmalloc/ObjectTypeTable.cpp; sourceTree = "<group>"; };
E3A413C8226061140037F470 /* IsoSharedPageInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedPageInlines.h; path = bmalloc/IsoSharedPageInlines.h; sourceTree = "<group>"; };
E3F24401225D2C0100A0E0C3 /* IsoSharedPage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedPage.h; path = bmalloc/IsoSharedPage.h; sourceTree = "<group>"; };
E3F24403225D2C7600A0E0C3 /* IsoSharedPage.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = IsoSharedPage.cpp; path = bmalloc/IsoSharedPage.cpp; sourceTree = "<group>"; };
@@ -490,6 +494,8 @@
144BE11E1CA346520099C8C0 /* Object.h */,
14105E8318E14374003A106E /* ObjectType.cpp */,
1485656018A43DBA00ED6942 /* ObjectType.h */,
+ E378A9DE246B686A0029C2BB /* ObjectTypeTable.cpp */,
+ E378A9DD246B686A0029C2BB /* ObjectTypeTable.h */,
795AB3C6206E0D250074FE76 /* PhysicalPageMap.h */,
AD14AD27202529A600890E3B /* ProcessCheck.h */,
AD14AD28202529B000890E3B /* ProcessCheck.mm */,
@@ -645,6 +651,7 @@
143CB81D19022BC900B16A45 /* Mutex.h in Headers */,
144BE11F1CA346520099C8C0 /* Object.h in Headers */,
14DD789318F48D0F00950702 /* ObjectType.h in Headers */,
+ E378A9E0246B68750029C2BB /* ObjectTypeTable.h in Headers */,
E328D84D23CEB38900545B18 /* Packed.h in Headers */,
0F5BF1491F22A8D80029D91D /* PerHeapKind.h in Headers */,
14DD78CB18F48D7500950702 /* PerProcess.h in Headers */,
@@ -777,6 +784,7 @@
4426E2801C838EE0008EB042 /* Logging.cpp in Sources */,
143CB81C19022BC900B16A45 /* Mutex.cpp in Sources */,
14F271C818EA3990008C152F /* ObjectType.cpp in Sources */,
+ E378A9DF246B68720029C2BB /* ObjectTypeTable.cpp in Sources */,
0F26A7A5205483130090A141 /* PerProcess.cpp in Sources */,
AD14AD2A202529C700890E3B /* ProcessCheck.mm in Sources */,
0F5BF1521F22E1570029D91D /* Scavenger.cpp in Sources */,