Modified: trunk/Source/bmalloc/ChangeLog (241181 => 241182)
--- trunk/Source/bmalloc/ChangeLog 2019-02-08 02:01:39 UTC (rev 241181)
+++ trunk/Source/bmalloc/ChangeLog 2019-02-08 02:30:22 UTC (rev 241182)
@@ -1,3 +1,34 @@
+2019-02-07 Michael Saboff <msab...@apple.com>
+
+ bmalloc uses more memory on iOS compared to macOS due to physical page size differences
+ https://bugs.webkit.org/show_bug.cgi?id=192389
+
+ Reviewed by Geoffrey Garen.
+
+ Changed small line allocations to be in smallPageSize "virtual page" multiples instead of physical
+ page size increments for sizes less than the physical page size. This required changing the small
+ page commit / decommit code to work in full physical page increments. For page classes that are
+ physical page size and larger, there isn't any functional change.
+
+ When scavenging page classes smaller than the physical page size, we need to consider whether or
+ not the adjacent small pages on the same physical page are also free before decommitting that
+ containing physical page. When we need to commit more memory, we commit the whole physical page
+ and mark the adjacent virtual pages it contains as committed as well.
+
+ * bmalloc/Chunk.h:
+ (bmalloc::forEachPage):
+ * bmalloc/Heap.cpp:
+ (bmalloc::Heap::initializeLineMetadata):
+ (bmalloc::Heap::initializePageMetadata):
+ (bmalloc::Heap::scavenge):
+ (bmalloc::Heap::tryDecommitSmallPagesInPhysicalPage):
+ (bmalloc::Heap::commitSmallPagesInPhysicalPage):
+ (bmalloc::Heap::allocateSmallPage):
+ (bmalloc::Heap::allocateSmallBumpRangesByMetadata):
+ * bmalloc/Heap.h:
+ * bmalloc/SmallPage.h:
+ (bmalloc::SmallPage::refCount):
+
2019-01-18 Keith Miller <keith_mil...@apple.com>

 gigacage slide should randomize both start and end
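
To make the sizes in the new ChangeLog entry above concrete: on arm64 iOS hardware the physical page size is 16KB, while macOS uses 4KB pages. Assuming a 4KB smallPageSize (the value used throughout the sketches below; the patch's Sizes.h change is not shown in this excerpt), four "virtual" small pages share each iOS physical page. A minimal standalone sketch of the address arithmetic involved, with illustrative constants rather than bmalloc's definitions:

    #include <cstdint>
    #include <cstdio>

    // Illustrative constants, not bmalloc's definitions.
    constexpr uintptr_t smallPageSize = 4 * 1024;     // "virtual" small page
    constexpr uintptr_t physicalPageSize = 16 * 1024; // arm64 iOS physical page

    int main()
    {
        uintptr_t smallPageBegin = 0x9000; // some small page inside a chunk
        // The same rounding the patch performs with
        // roundDownToMultipleOf(m_vmPageSizePhysical, pageBegin).
        uintptr_t physicalPageBegin = smallPageBegin & ~(physicalPageSize - 1);
        printf("physical page at %#lx holds %lu small pages\n",
            static_cast<unsigned long>(physicalPageBegin),
            static_cast<unsigned long>(physicalPageSize / smallPageSize));
        // Prints: physical page at 0x8000 holds 4 small pages
        return 0;
    }
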
Modified: trunk/Source/bmalloc/bmalloc/Chunk.h (241181 => 241182)
--- trunk/Source/bmalloc/bmalloc/Chunk.h 2019-02-08 02:01:39 UTC (rev 241181)
+++ trunk/Source/bmalloc/bmalloc/Chunk.h 2019-02-08 02:30:22 UTC (rev 241182)
@@ -50,6 +50,7 @@
char* address(size_t offset);
SmallPage* page(size_t offset);
SmallLine* line(size_t offset);
+ size_t pageNumber(SmallPage*);
char* bytes() { return reinterpret_cast<char*>(this); }
SmallLine* lines() { return &m_lines[0]; }
@@ -77,7 +78,8 @@
{
// We align to at least the page size so we can service aligned allocations
// at equal and smaller powers of two, and also so we can vmDeallocatePhysicalPages().
- size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(pageSize, sizeof(Chunk));
+ size_t firstPageOffset = max(pageSize, vmPageSize());
+ size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(firstPageOffset, sizeof(Chunk));
Object begin(chunk, metadataSize);
Object end(chunk, chunkSize);
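
The max() above matters when a page class's pageSize is smaller than the VM page: the chunk metadata must still be rounded up to a whole VM page so that the first small page starts on a physical page boundary and whole physical pages can be decommitted later. A minimal sketch of that rounding, assuming 4KB page classes, 16KB VM pages, and a made-up metadata size (roundUpToMultipleOf below is a generic stand-in for bmalloc's roundUpToMultipleOfNonPowerOfTwo()):

    #include <algorithm>
    #include <cstddef>

    // Generic stand-in for bmalloc's helper; illustration only.
    constexpr size_t roundUpToMultipleOf(size_t divisor, size_t x)
    {
        return ((x + divisor - 1) / divisor) * divisor;
    }

    // Hypothetical numbers: pageSize = 4096, vmPageSize() = 16384, and
    // sizeof(Chunk) assumed to be 10000 bytes for the example.
    static_assert(roundUpToMultipleOf(std::max<size_t>(4096, 16384), 10000) == 16384,
        "first page offset lands on a physical page boundary");
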
Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (241181 => 241182)
--- trunk/Source/bmalloc/bmalloc/Heap.cpp 2019-02-08 02:01:39 UTC (rev 241181)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp 2019-02-08 02:30:22 UTC (rev 241182)
@@ -44,6 +44,8 @@
namespace bmalloc {
+static_assert(isPowerOfTwo(smallPageSize), "");
+
Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&)
: m_kind(kind)
, m_vmPageSizePhysical(vmPageSizePhysical())
@@ -93,7 +95,7 @@
void Heap::initializeLineMetadata()
{
size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
- size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
+ size_t smallLineCount = smallPageSize / smallLineSize;
m_smallLineMetadata.grow(sizeClassCount * smallLineCount);
for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
@@ -102,7 +104,7 @@
size_t object = 0;
size_t line = 0;
- while (object < m_vmPageSizePhysical) {
+ while (object < smallPageSize) {
line = object / smallLineSize;
size_t leftover = object % smallLineSize;
@@ -116,7 +118,7 @@
}
// Don't allow the last object in a page to escape the page.
- if (object > m_vmPageSizePhysical) {
+ if (object > smallPageSize) {
BASSERT(pageMetadata[line].objectCount);
--pageMetadata[line].objectCount;
}
@@ -128,16 +130,23 @@
auto computePageSize = [&](size_t sizeClass) {
size_t size = objectSize(sizeClass);
if (sizeClass < bmalloc::sizeClass(smallLineSize))
- return m_vmPageSizePhysical;
+ return smallPageSize;
- for (size_t pageSize = m_vmPageSizePhysical;
- pageSize < pageSizeMax;
- pageSize += m_vmPageSizePhysical) {
+ // We want power-of-2 page sizes below the physical page size and multiples of the physical page size above that.
+ size_t pageSize = smallPageSize;
+ for (; pageSize < m_vmPageSizePhysical; pageSize *= 2) {
RELEASE_BASSERT(pageSize <= chunkSize / 2);
size_t waste = pageSize % size;
if (waste <= pageSize / pageSizeWasteFactor)
return pageSize;
}
+
+ for (; pageSize < pageSizeMax; pageSize += m_vmPageSizePhysical) {
+ RELEASE_BASSERT(pageSize <= chunkSize / 2);
+ size_t waste = pageSize % size;
+ if (waste <= pageSize / pageSizeWasteFactor)
+ return pageSize;
+ }
return pageSizeMax;
};
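
A standalone sketch of the two-phase search above, with assumed constants rather than bmalloc's definitions. Below the physical page size, candidates double, so every page size still tiles a physical page exactly; above it, candidates grow by whole physical pages, so commit and decommit never split one:

    #include <cstddef>

    constexpr size_t smallPageSize = 4 * 1024;      // assumed
    constexpr size_t physicalPageSize = 16 * 1024;  // assumed (arm64 iOS)
    constexpr size_t pageSizeMax = 512 * 1024;      // assumed
    constexpr size_t pageSizeWasteFactor = 16;      // assumed

    size_t computePageSizeSketch(size_t objectSize)
    {
        size_t pageSize = smallPageSize;
        // Phase 1: power-of-two page sizes below the physical page size.
        for (; pageSize < physicalPageSize; pageSize *= 2) {
            if (pageSize % objectSize <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }
        // Phase 2: whole physical-page multiples up to pageSizeMax.
        for (; pageSize < pageSizeMax; pageSize += physicalPageSize) {
            if (pageSize % objectSize <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }
        return pageSizeMax;
    }

For example, computePageSizeSketch(320) returns 4096: the waste 4096 % 320 = 256 is within the budget 4096 / 16 = 256.
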
@@ -188,14 +197,17 @@
continue;
size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]);
- size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
- m_freeableMemory -= decommitSize;
- m_footprint -= decommitSize;
- decommitter.addEager(page->begin()->begin(), pageSize);
- page->setHasPhysicalPages(false);
-#if ENABLE_PHYSICAL_PAGE_MAP
- m_physicalPageMap.decommit(page->begin()->begin(), pageSize);
+ if (pageSize >= m_vmPageSizePhysical) {
+ size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
+ m_freeableMemory -= decommitSize;
+ m_footprint -= decommitSize;
+ decommitter.addEager(page->begin()->begin(), pageSize);
+ page->setHasPhysicalPages(false);
+#if ENABLE_PHYSICAL_PAGE_MAP
+ m_physicalPageMap.decommit(page->begin()->begin(), pageSize);
#endif
+ } else
+ tryDecommitSmallPagesInPhysicalPage(lock, decommitter, page, pageSize);
}
}
}
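
Note the dispatch this hunk sets up: page classes of at least one physical page keep the old eager path, where a free page can be decommitted unconditionally. Only the new sub-physical-page classes take tryDecommitSmallPagesInPhysicalPage(), since a free 4KB page may share its 16KB physical page with live neighbors.
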
@@ -267,6 +279,63 @@
m_freePages[pageClass].push(chunk);
}
+void Heap::tryDecommitSmallPagesInPhysicalPage(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, SmallPage* smallPage, size_t pageSize)
+{
+ Chunk* chunk = Chunk::get(smallPage);
+
+ char* pageBegin = smallPage->begin()->begin();
+ char* physicalPageBegin = roundDownToMultipleOf(m_vmPageSizePhysical, pageBegin);
+
+ // The first page in a physical page takes care of decommitting its physical neighbors.
+ if (pageBegin != physicalPageBegin)
+ return;
+
+ size_t beginPageOffset = chunk->offset(physicalPageBegin);
+ size_t endPageOffset = beginPageOffset + m_vmPageSizePhysical;
+
+ Object begin(chunk, beginPageOffset);
+ Object end(chunk, endPageOffset);
+
+ for (auto it = begin; it + pageSize <= end; it = it + pageSize) {
+ if (it.page()->refCount(lock))
+ return;
+ }
+
+ size_t decommitSize = m_vmPageSizePhysical;
+ m_freeableMemory -= decommitSize;
+ m_footprint -= decommitSize;
+
+ decommitter.addEager(physicalPageBegin, decommitSize);
+
+ for (auto it = begin; it + pageSize <= end; it = it + pageSize)
+ it.page()->setHasPhysicalPages(false);
+#if ENABLE_PHYSICAL_PAGE_MAP
+ m_physicalPageMap.decommit(physicalPageBegin, decommitSize);
+#endif
+}
+
+void Heap::commitSmallPagesInPhysicalPage(std::unique_lock<Mutex>&, SmallPage* page, size_t pageSize)
+{
+ Chunk* chunk = Chunk::get(page);
+
+ char* physicalPageBegin = roundDownToMultipleOf(m_vmPageSizePhysical, page->begin()->begin());
+
+ size_t beginPageOffset = chunk->offset(physicalPageBegin);
+ size_t endPageOffset = beginPageOffset + m_vmPageSizePhysical;
+
+ Object begin(chunk, beginPageOffset);
+ Object end(chunk, endPageOffset);
+
+ m_footprint += m_vmPageSizePhysical;
+ vmAllocatePhysicalPagesSloppy(physicalPageBegin, m_vmPageSizePhysical);
+
+ for (auto it = begin; it + pageSize <= end; it = it + pageSize)
+ it.page()->setHasPhysicalPages(true);
+#if ENABLE_PHYSICAL_PAGE_MAP
+ m_physicalPageMap.commit(physicalPageBegin, m_vmPageSizePhysical);
+#endif
+}
+
void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
{
m_objectTypes.set(chunk, ObjectType::Large);
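
The veto logic in tryDecommitSmallPagesInPhysicalPage() can be modeled in isolation. A toy sketch with illustrative types and counts (four 4KB small pages per 16KB physical page; not bmalloc's types):

    #include <array>

    struct ToySmallPage { unsigned refCount; };

    // Mirrors the scan above: only when every sibling small page is free
    // may the containing physical page be decommitted.
    bool canDecommitPhysicalPage(const std::array<ToySmallPage, 4>& smallPages)
    {
        for (auto& page : smallPages) {
            if (page.refCount)
                return false; // one live small page pins the whole physical page
        }
        return true;
    }

commitSmallPagesInPhysicalPage() is the mirror image: decommit only fires when every small page in the physical page is free and clears hasPhysicalPages on all of them, so when any one of those pages is next allocated, the whole physical page is recommitted and every sibling is flagged as resident again, keeping the state uniform within each physical page.
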
@@ -325,12 +394,15 @@
m_freeableMemory -= physicalSize;
else {
m_scavenger->scheduleIfUnderMemoryPressure(pageSize);
- m_footprint += physicalSize;
- vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize);
- page->setHasPhysicalPages(true);
-#if ENABLE_PHYSICAL_PAGE_MAP
- m_physicalPageMap.commit(page->begin()->begin(), pageSize);
+ if (pageSize >= m_vmPageSizePhysical) {
+ m_footprint += physicalSize;
+ vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize);
+ page->setHasPhysicalPages(true);
+#if ENABLE_PHYSICAL_PAGE_MAP
+ m_physicalPageMap.commit(page->begin()->begin(), pageSize);
#endif
+ } else
+ commitSmallPagesInPhysicalPage(lock, page, pageSize);
}
return page;
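
As in scavenge(), page sizes of at least one physical page keep the original commit path here, while smaller pages route through commitSmallPagesInPhysicalPage() so the footprint is charged one full physical page at a time.
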
@@ -390,7 +462,7 @@
SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
SmallLine* lines = page->begin();
BASSERT(page->hasFreeLines(lock));
- size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
+ size_t smallLineCount = smallPageSize / smallLineSize;
LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];
auto findSmallBumpRange = [&](size_t& lineNumber) {
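
With this change m_smallLineMetadata is laid out per 4KB virtual page rather than per physical page. Assuming bmalloc's usual 256-byte smallLineSize, smallLineCount is 4096 / 256 = 16 lines per size class on every device, where the old code gave 64 on a 16KB-physical-page iOS device and 16 on macOS.
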
Modified: trunk/Source/bmalloc/bmalloc/Heap.h (241181 => 241182)
--- trunk/Source/bmalloc/bmalloc/Heap.h 2019-02-08 02:01:39 UTC (rev 241181)
+++ trunk/Source/bmalloc/bmalloc/Heap.h 2019-02-08 02:30:22 UTC (rev 241182)
@@ -120,6 +120,9 @@
SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&);
void deallocateSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);
+ void tryDecommitSmallPagesInPhysicalPage(std::lock_guard<Mutex>&, BulkDecommit& decommitter, SmallPage*, size_t pageSize);
+ void commitSmallPagesInPhysicalPage(std::unique_lock<Mutex>&, SmallPage*, size_t pageSize);
+
void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass);
void deallocateSmallChunk(Chunk*, size_t pageClass);
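
The declarations make the lock discipline visible: tryDecommitSmallPagesInPhysicalPage() takes the std::lock_guard held during scavenge(), while commitSmallPagesInPhysicalPage() takes the std::unique_lock held on the allocation path in allocateSmallPage().
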