Title: [241305] trunk/Source/bmalloc
Revision: 241305
Author: commit-qu...@webkit.org
Date: 2019-02-12 11:20:51 -0800 (Tue, 12 Feb 2019)

Log Message

Unreviewed, rolling out r241182.
https://bugs.webkit.org/show_bug.cgi?id=194547

causes a 2-3% Speedometer2 regression. (Requested by
keith_miller on #webkit).

Reverted changeset:

"bmalloc uses more memory on iOS compared to macOS due to
physical page size differences"
https://bugs.webkit.org/show_bug.cgi?id=192389
https://trac.webkit.org/changeset/241182

Modified Paths

    trunk/Source/bmalloc/ChangeLog
    trunk/Source/bmalloc/bmalloc/Chunk.h
    trunk/Source/bmalloc/bmalloc/Heap.cpp
    trunk/Source/bmalloc/bmalloc/Heap.h
    trunk/Source/bmalloc/bmalloc/SmallPage.h

Diff

Modified: trunk/Source/bmalloc/ChangeLog (241304 => 241305)


--- trunk/Source/bmalloc/ChangeLog	2019-02-12 18:45:31 UTC (rev 241304)
+++ trunk/Source/bmalloc/ChangeLog	2019-02-12 19:20:51 UTC (rev 241305)
@@ -1,3 +1,18 @@
+2019-02-12  Commit Queue  <commit-qu...@webkit.org>
+
+        Unreviewed, rolling out r241182.
+        https://bugs.webkit.org/show_bug.cgi?id=194547
+
+        causes a 2-3% Speedometer2 regression. (Requested by
+        keith_miller on #webkit).
+
+        Reverted changeset:
+
+        "bmalloc uses more memory on iOS compared to macOS due to
+        physical page size differences"
+        https://bugs.webkit.org/show_bug.cgi?id=192389
+        https://trac.webkit.org/changeset/241182
+
 2019-02-07  Michael Saboff  <msab...@apple.com>
 
         bmalloc uses more memory on iOS compared to macOS due to physical page size differences

Modified: trunk/Source/bmalloc/bmalloc/Chunk.h (241304 => 241305)


--- trunk/Source/bmalloc/bmalloc/Chunk.h	2019-02-12 18:45:31 UTC (rev 241304)
+++ trunk/Source/bmalloc/bmalloc/Chunk.h	2019-02-12 19:20:51 UTC (rev 241305)
@@ -50,7 +50,6 @@
     char* address(size_t offset);
     SmallPage* page(size_t offset);
     SmallLine* line(size_t offset);
-    size_t pageNumber(SmallPage*);
 
     char* bytes() { return reinterpret_cast<char*>(this); }
     SmallLine* lines() { return &m_lines[0]; }
@@ -78,8 +77,7 @@
 {
     // We align to at least the page size so we can service aligned allocations
     // at equal and smaller powers of two, and also so we can vmDeallocatePhysicalPages().
-    size_t firstPageOffset = max(pageSize, vmPageSize());
-    size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(firstPageOffset, sizeof(Chunk));
+    size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(pageSize, sizeof(Chunk));
 
     Object begin(chunk, metadataSize);
     Object end(chunk, chunkSize);
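
For context, the revert computes the chunk's metadata size from pageSize alone; r241182 had first rounded the starting offset up to the VM page size. A minimal sketch of the rounding helper this code relies on, assuming behavior equivalent to bmalloc's roundUpToMultipleOfNonPowerOfTwo (its usage above confirms the divisor-first argument order):

    #include <cstddef>

    // Hedged sketch: round x up to the next multiple of divisor, where the
    // divisor need not be a power of two. bmalloc ships its own version of
    // this helper; this standalone one only illustrates the arithmetic.
    inline std::size_t roundUpToMultipleOfNonPowerOfTwo(std::size_t divisor, std::size_t x)
    {
        return divisor * ((x + divisor - 1) / divisor);
    }

With the revert, the first object in a chunk therefore begins at sizeof(Chunk) rounded up to a pageSize boundary, which is what the comment above means by aligning to at least the page size.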

Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (241304 => 241305)


--- trunk/Source/bmalloc/bmalloc/Heap.cpp	2019-02-12 18:45:31 UTC (rev 241304)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp	2019-02-12 19:20:51 UTC (rev 241305)
@@ -44,8 +44,6 @@
 
 namespace bmalloc {
 
-static_assert(isPowerOfTwo(smallPageSize), "");
-
 Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&)
     : m_kind(kind)
     , m_vmPageSizePhysical(vmPageSizePhysical())
@@ -95,7 +93,7 @@
 void Heap::initializeLineMetadata()
 {
     size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
-    size_t smallLineCount = smallPageSize / smallLineSize;
+    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
     m_smallLineMetadata.grow(sizeClassCount * smallLineCount);
 
     for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
@@ -104,7 +102,7 @@
 
         size_t object = 0;
         size_t line = 0;
-        while (object < smallPageSize) {
+        while (object < m_vmPageSizePhysical) {
             line = object / smallLineSize;
             size_t leftover = object % smallLineSize;
 
@@ -118,7 +116,7 @@
         }
 
         // Don't allow the last object in a page to escape the page.
-        if (object > smallPageSize) {
+        if (object > m_vmPageSizePhysical) {
             BASSERT(pageMetadata[line].objectCount);
             --pageMetadata[line].objectCount;
         }
@@ -130,23 +128,16 @@
     auto computePageSize = [&](size_t sizeClass) {
         size_t size = objectSize(sizeClass);
         if (sizeClass < bmalloc::sizeClass(smallLineSize))
-            return smallPageSize;
+            return m_vmPageSizePhysical;
 
-        // We want power of 2 pageSizes sizes below physical page size and multiples of physical pages size above that.
-        size_t pageSize = smallPageSize;
-        for (; pageSize < m_vmPageSizePhysical; pageSize *= 2) {
+        for (size_t pageSize = m_vmPageSizePhysical;
+            pageSize < pageSizeMax;
+            pageSize += m_vmPageSizePhysical) {
             RELEASE_BASSERT(pageSize <= chunkSize / 2);
             size_t waste = pageSize % size;
             if (waste <= pageSize / pageSizeWasteFactor)
                 return pageSize;
         }
-
-        for (; pageSize < pageSizeMax; pageSize += m_vmPageSizePhysical) {
-            RELEASE_BASSERT(pageSize <= chunkSize / 2);
-            size_t waste = pageSize % size;
-            if (waste <= pageSize / pageSizeWasteFactor)
-                return pageSize;
-        }
         
         return pageSizeMax;
     };
@@ -197,17 +188,14 @@
                     continue;
 
                 size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]);
-                if (pageSize >= m_vmPageSizePhysical) {
-                    size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
-                    m_freeableMemory -= decommitSize;
-                    m_footprint -= decommitSize;
-                    decommitter.addEager(page->begin()->begin(), pageSize);
-                    page->setHasPhysicalPages(false);
-#if ENABLE_PHYSICAL_PAGE_MAP
-                    m_physicalPageMap.decommit(page->begin()->begin(), pageSize);
+                size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
+                m_freeableMemory -= decommitSize;
+                m_footprint -= decommitSize;
+                decommitter.addEager(page->begin()->begin(), pageSize);
+                page->setHasPhysicalPages(false);
+#if ENABLE_PHYSICAL_PAGE_MAP 
+                m_physicalPageMap.decommit(page->begin()->begin(), pageSize);
 #endif
-                } else
-                    tryDecommitSmallPagesInPhysicalPage(lock, decommitter, page, pageSize);
             }
         }
     }
@@ -279,63 +267,6 @@
     m_freePages[pageClass].push(chunk);
 }
 
-void Heap::tryDecommitSmallPagesInPhysicalPage(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, SmallPage* smallPage, size_t pageSize)
-{
-    Chunk* chunk = Chunk::get(smallPage);
-
-    char* pageBegin = smallPage->begin()->begin();
-    char* physicalPageBegin = roundDownToMultipleOf(m_vmPageSizePhysical, pageBegin);
-
-    // The first page in a physical page takes care of decommitting its physical neighbors
-    if (pageBegin != physicalPageBegin)
-        return;
-
-    size_t beginPageOffset = chunk->offset(physicalPageBegin);
-    size_t endPageOffset = beginPageOffset + m_vmPageSizePhysical;
-
-    Object begin(chunk, beginPageOffset);
-    Object end(chunk, endPageOffset);
-
-    for (auto it = begin; it + pageSize <= end; it = it + pageSize) {
-        if (it.page()->refCount(lock))
-            return;
-    }
-
-    size_t decommitSize = m_vmPageSizePhysical;
-    m_freeableMemory -= decommitSize;
-    m_footprint -= decommitSize;
-
-    decommitter.addEager(physicalPageBegin, decommitSize);
-
-    for (auto it = begin; it + pageSize <= end; it = it + pageSize)
-        it.page()->setHasPhysicalPages(false);
-#if ENABLE_PHYSICAL_PAGE_MAP
-    m_physicalPageMap.decommit(smallPage, decommitSize);
-#endif
-}
-
-void Heap::commitSmallPagesInPhysicalPage(std::unique_lock<Mutex>&, SmallPage* page, size_t pageSize)
-{
-    Chunk* chunk = Chunk::get(page);
-
-    char* physicalPageBegin = roundDownToMultipleOf(m_vmPageSizePhysical, page->begin()->begin());
-
-    size_t beginPageOffset = chunk->offset(physicalPageBegin);
-    size_t endPageOffset = beginPageOffset + m_vmPageSizePhysical;
-
-    Object begin(chunk, beginPageOffset);
-    Object end(chunk, endPageOffset);
-
-    m_footprint += m_vmPageSizePhysical;
-    vmAllocatePhysicalPagesSloppy(physicalPageBegin, m_vmPageSizePhysical);
-
-    for (auto it = begin; it + pageSize <= end; it = it + pageSize)
-        it.page()->setHasPhysicalPages(true);
-#if ENABLE_PHYSICAL_PAGE_MAP
-    m_physicalPageMap.commit(begin.page(), m_vmPageSizePhysical);
-#endif
-}
-
 void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
 {
     m_objectTypes.set(chunk, ObjectType::Large);
@@ -394,15 +325,12 @@
             m_freeableMemory -= physicalSize;
         else {
             m_scavenger->scheduleIfUnderMemoryPressure(pageSize);
-            if (pageSize >= m_vmPageSizePhysical) {
-                m_footprint += physicalSize;
-                vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize);
-                page->setHasPhysicalPages(true);
-#if ENABLE_PHYSICAL_PAGE_MAP
-                m_physicalPageMap.commit(page->begin()->begin(), pageSize);
+            m_footprint += physicalSize;
+            vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize);
+            page->setHasPhysicalPages(true);
+#if ENABLE_PHYSICAL_PAGE_MAP 
+            m_physicalPageMap.commit(page->begin()->begin(), pageSize);
 #endif
-            } else
-                commitSmallPagesInPhysicalPage(lock, page, pageSize);
         }
 
         return page;
@@ -462,7 +390,7 @@
     SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
     SmallLine* lines = page->begin();
     BASSERT(page->hasFreeLines(lock));
-    size_t smallLineCount = smallPageSize / smallLineSize;
+    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
     LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];
     
     auto findSmallBumpRange = [&](size_t& lineNumber) {
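
The final hunk above ties per-page line bookkeeping back to the physical page size. A short sketch of the restored arithmetic, with smallLineSize = 256 assumed from bmalloc/Sizes.h:

    // Hedged sketch: after the revert, the number of small lines per page is
    // derived from the physical VM page size again rather than a fixed
    // smallPageSize constant. smallLineSize = 256 is an assumption.
    std::size_t smallLineCountSketch(std::size_t vmPageSizePhysical)
    {
        constexpr std::size_t smallLineSize = 256;
        return vmPageSizePhysical / smallLineSize; // 4 KiB page -> 16 lines, 16 KiB -> 64
    }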

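Likewise, the restored computePageSize() walks multiples of the physical page size rather than trying powers of two below it first. A self-contained sketch of that selection loop, with pageSizeWasteFactor = 8 assumed from bmalloc/Sizes.h:

    // Hedged sketch of the restored selection: step through multiples of the
    // physical page size and return the first candidate whose waste (unused
    // tail once packed with objects of this size) is at most 1/8 of the page.
    std::size_t computePageSizeSketch(std::size_t objectSize, std::size_t vmPageSizePhysical,
        std::size_t pageSizeMax, std::size_t pageSizeWasteFactor = 8)
    {
        for (std::size_t pageSize = vmPageSizePhysical; pageSize < pageSizeMax;
            pageSize += vmPageSizePhysical) {
            std::size_t waste = pageSize % objectSize;
            if (waste <= pageSize / pageSizeWasteFactor)
                return pageSize;
        }
        return pageSizeMax;
    }

For example, with 4 KiB physical pages and a 3072-byte size class, 4 KiB and 8 KiB pages both waste more than an eighth of the page, but a 12 KiB page holds exactly four objects with no waste, so 12 KiB is chosen.
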
Modified: trunk/Source/bmalloc/bmalloc/Heap.h (241304 => 241305)


--- trunk/Source/bmalloc/bmalloc/Heap.h	2019-02-12 18:45:31 UTC (rev 241304)
+++ trunk/Source/bmalloc/bmalloc/Heap.h	2019-02-12 19:20:51 UTC (rev 241305)
@@ -120,9 +120,6 @@
     SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&);
     void deallocateSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);
 
-    void tryDecommitSmallPagesInPhysicalPage(std::lock_guard<Mutex>&, BulkDecommit& decommitter, SmallPage*, size_t pageSize);
-    void commitSmallPagesInPhysicalPage(std::unique_lock<Mutex>&, SmallPage*, size_t pageSize);
-
     void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass);
     void deallocateSmallChunk(Chunk*, size_t pageClass);
 

Modified: trunk/Source/bmalloc/bmalloc/SmallPage.h (241304 => 241305)


--- trunk/Source/bmalloc/bmalloc/SmallPage.h	2019-02-12 18:45:31 UTC (rev 241304)
+++ trunk/Source/bmalloc/bmalloc/SmallPage.h	2019-02-12 19:20:51 UTC (rev 241305)
@@ -41,8 +41,7 @@
     void ref(std::unique_lock<Mutex>&);
     bool deref(std::unique_lock<Mutex>&);
     unsigned refCount(std::unique_lock<Mutex>&) { return m_refCount; }
-    unsigned refCount(std::lock_guard<Mutex>&) { return m_refCount; }
-
+    
     size_t sizeClass() { return m_sizeClass; }
     void setSizeClass(size_t sizeClass) { m_sizeClass = sizeClass; }
     